ID | Language | Repository Name | File Name | File Path in Repository | File Path for Unit Test | Code | Unit Test - (Ground Truth) | Code Url | Test Code Url | Commit Hash
---|---|---|---|---|---|---|---|---|---|---|
55be3496-37ca-4495-9404-30bbf91e9b67 | cpp | tensorflow/tensorflow | comparison_util | third_party/xla/xla/comparison_util.cc | third_party/xla/xla/comparison_util_test.cc | #include "xla/comparison_util.h"
#include <optional>
#include <string>
#include "absl/container/flat_hash_map.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "xla/primitive_util.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/logging.h"
namespace xla {
namespace {
bool IsValidComparison(xla::PrimitiveType type, Comparison::Order order) {
if (primitive_util::IsFloatingPointType(type) ||
primitive_util::IsComplexType(type)) {
return true;
}
if (primitive_util::IsIntegralType(type) || type == PRED) {
return order == Comparison::Order::kTotal;
}
LOG(FATAL) << "Unsupported type: " << PrimitiveType_Name(type);
}
PrimitiveType DefaultPrimitiveType(Comparison::Type type) {
switch (type) {
case Comparison::Type::kFloat:
case Comparison::Type::kFloatTotalOrder:
return PrimitiveType::F32;
case Comparison::Type::kSigned:
return PrimitiveType::S32;
case Comparison::Type::kUnsigned:
return PrimitiveType::U32;
}
}
Comparison::Order DefaultOrdering(Comparison::Type type) {
switch (type) {
case Comparison::Type::kFloat:
return Comparison::Order::kPartial;
case Comparison::Type::kFloatTotalOrder:
case Comparison::Type::kSigned:
case Comparison::Type::kUnsigned:
return Comparison::Order::kTotal;
}
}
Comparison::Order DefaultOrdering(PrimitiveType type) {
if (primitive_util::IsFloatingPointType(type) ||
primitive_util::IsComplexType(type)) {
return Comparison::Order::kPartial;
}
if (primitive_util::IsIntegralType(type) || type == PRED) {
return Comparison::Order::kTotal;
}
LOG(FATAL) << "Unsupported type: " << PrimitiveType_Name(type);
}
Comparison::Direction Converse(Comparison::Direction direction) {
switch (direction) {
case Comparison::Direction::kEq:
return Comparison::Direction::kEq;
case Comparison::Direction::kNe:
return Comparison::Direction::kNe;
case Comparison::Direction::kGe:
return Comparison::Direction::kLe;
case Comparison::Direction::kGt:
return Comparison::Direction::kLt;
case Comparison::Direction::kLe:
return Comparison::Direction::kGe;
case Comparison::Direction::kLt:
return Comparison::Direction::kGt;
}
}
Comparison::Direction Inverse(Comparison::Direction direction) {
switch (direction) {
case Comparison::Direction::kEq:
return Comparison::Direction::kNe;
case Comparison::Direction::kNe:
return Comparison::Direction::kEq;
case Comparison::Direction::kGe:
return Comparison::Direction::kLt;
case Comparison::Direction::kGt:
return Comparison::Direction::kLe;
case Comparison::Direction::kLe:
return Comparison::Direction::kGt;
case Comparison::Direction::kLt:
return Comparison::Direction::kGe;
}
}
}
std::string ComparisonDirectionToString(Comparison::Direction direction) {
switch (direction) {
case Comparison::Direction::kEq:
return "EQ";
case Comparison::Direction::kNe:
return "NE";
case Comparison::Direction::kGe:
return "GE";
case Comparison::Direction::kGt:
return "GT";
case Comparison::Direction::kLe:
return "LE";
case Comparison::Direction::kLt:
return "LT";
default:
LOG(FATAL) << "Attempted to print uninitialized comparison direction";
}
}
std::string ComparisonTypeToString(Comparison::Type type) {
switch (type) {
case Comparison::Type::kFloat:
return "FLOAT";
case Comparison::Type::kFloatTotalOrder:
return "TOTALORDER";
case Comparison::Type::kSigned:
return "SIGNED";
case Comparison::Type::kUnsigned:
return "UNSIGNED";
}
}
absl::string_view ComparisonPrimitiveTypeToString(PrimitiveType type) {
return PrimitiveType_Name(type);
}
absl::string_view ComparisonOrderToString(Comparison::Order order) {
switch (order) {
case Comparison::Order::kPartial:
return "PARTIALORDER";
case Comparison::Order::kTotal:
return "TOTALORDER";
}
}
absl::StatusOr<Comparison::Direction> StringToComparisonDirection(
absl::string_view direction) {
static auto* map =
new absl::flat_hash_map<std::string, Comparison::Direction>({
{"EQ", Comparison::Direction::kEq},
{"NE", Comparison::Direction::kNe},
{"GE", Comparison::Direction::kGe},
{"GT", Comparison::Direction::kGt},
{"LE", Comparison::Direction::kLe},
{"LT", Comparison::Direction::kLt},
});
auto it = map->find(direction);
if (it == map->end()) {
return InvalidArgument("Unknown comparison direction: %s", direction);
}
return it->second;
}
absl::StatusOr<Comparison::Order> StringToComparisonOrder(
absl::string_view order) {
static auto* map = new absl::flat_hash_map<std::string, Comparison::Order>({
{"TOTALORDER", Comparison::Order::kTotal},
{"PARTIALORDER", Comparison::Order::kPartial},
});
auto it = map->find(order);
if (it == map->end()) {
return InvalidArgument("Unknown comparison type: %s", order);
}
return it->second;
}
absl::StatusOr<Comparison::Type> StringToComparisonType(
absl::string_view comparison) {
static auto* map = new absl::flat_hash_map<std::string, Comparison::Type>({
{"FLOAT", Comparison::Type::kFloat},
{"TOTALORDER", Comparison::Type::kFloatTotalOrder},
{"SIGNED", Comparison::Type::kSigned},
{"UNSIGNED", Comparison::Type::kUnsigned},
});
auto it = map->find(comparison);
if (it == map->end()) {
return InvalidArgument("Unknown comparison type: %s", comparison);
}
return it->second;
}
Comparison::Type Comparison::DefaultComparisonType(PrimitiveType type) {
if (primitive_util::IsFloatingPointType(type) ||
primitive_util::IsComplexType(type)) {
return Type::kFloat;
}
if (primitive_util::IsSignedIntegralType(type)) {
return Type::kSigned;
}
if (primitive_util::IsUnsignedIntegralType(type) || type == PRED) {
return Type::kUnsigned;
}
LOG(FATAL) << "Unexpected: " << PrimitiveType_Name(type);
}
Comparison::Comparison(Direction dir, PrimitiveType type, Order order)
: dir_(dir),
primitive_type_(type),
order_(order),
type_(DefaultComparisonType(type)) {
CHECK(IsValidComparison(primitive_type_, order_));
}
Comparison::Comparison(Direction dir, PrimitiveType type)
: dir_(dir),
primitive_type_(type),
order_(DefaultOrdering(type)),
type_(DefaultComparisonType(type)) {
CHECK(IsValidComparison(primitive_type_, order_));
}
Comparison::Comparison(Direction dir, Type type)
: dir_(dir),
primitive_type_(DefaultPrimitiveType(type)),
order_(DefaultOrdering(type)),
type_(type) {
CHECK(IsValidComparison(primitive_type_, order_));
}
Comparison Comparison::Converse() const {
return Comparison(xla::Converse(dir_), primitive_type_, order_);
}
std::optional<Comparison> Comparison::Inverse() const {
if (IsPartialOrder()) {
return std::nullopt;
}
if (primitive_util::IsArrayType(primitive_type_)) {
return Comparison(xla::Inverse(dir_), primitive_type_, order_);
}
return std::nullopt;
}
bool Comparison::IsReflexive() const {
switch (dir_) {
case Direction::kEq:
case Direction::kGe:
case Direction::kLe:
return IsTotalOrder();
case Direction::kNe:
case Direction::kGt:
case Direction::kLt:
return false;
}
}
bool Comparison::IsAntireflexive() const {
switch (dir_) {
case Direction::kNe:
return IsTotalOrder();
case Direction::kGt:
case Direction::kLt:
return true;
case Direction::kEq:
case Direction::kGe:
case Direction::kLe:
return false;
}
}
std::string Comparison::ToString(std::string prefix1, std::string prefix2,
std::string prefix3) const {
return absl::StrCat(prefix1, ComparisonDirectionToString(dir_), prefix2,
ComparisonPrimitiveTypeToString(primitive_type_), prefix3,
ComparisonOrderToString(order_));
}
} | #include "xla/comparison_util.h"
#include <cstdint>
#include <limits>
#include "xla/test.h"
#include "xla/types.h"
#include "xla/xla_data.pb.h"
namespace xla {
namespace {
using ::testing::Eq;
TEST(Comparison, FloatsDefaultToPartialOrder) {
EXPECT_EQ(
Comparison(Comparison::Direction::kGe, PrimitiveType::BF16).GetOrder(),
Comparison::Order::kPartial);
EXPECT_EQ(
Comparison(Comparison::Direction::kGe, PrimitiveType::F32).GetOrder(),
Comparison::Order::kPartial);
EXPECT_EQ(
Comparison(Comparison::Direction::kGe, PrimitiveType::C64).GetOrder(),
Comparison::Order::kPartial);
}
TEST(Comparison, IntegersDefaultToTotalOrder) {
EXPECT_EQ(
Comparison(Comparison::Direction::kGe, PrimitiveType::S32).GetOrder(),
Comparison::Order::kTotal);
EXPECT_EQ(
Comparison(Comparison::Direction::kGe, PrimitiveType::U8).GetOrder(),
Comparison::Order::kTotal);
EXPECT_EQ(
Comparison(Comparison::Direction::kGe, PrimitiveType::PRED).GetOrder(),
Comparison::Order::kTotal);
}
TEST(Comparison, LegacyConstructorDefaultsToX32) {
EXPECT_EQ(Comparison(Comparison::Direction::kGe, Comparison::Type::kFloat)
.GetPrimitiveType(),
xla::PrimitiveType::F32);
EXPECT_EQ(
Comparison(Comparison::Direction::kGe, Comparison::Type::kFloatTotalOrder)
.GetPrimitiveType(),
xla::PrimitiveType::F32);
EXPECT_EQ(Comparison(Comparison::Direction::kGe, Comparison::Type::kSigned)
.GetPrimitiveType(),
xla::PrimitiveType::S32);
EXPECT_EQ(Comparison(Comparison::Direction::kGe, Comparison::Type::kUnsigned)
.GetPrimitiveType(),
xla::PrimitiveType::U32);
}
TEST(Comparison, PartialOrderReflexivity) {
EXPECT_FALSE(
Comparison(Comparison::Direction::kEq, PrimitiveType::F32).IsReflexive());
EXPECT_FALSE(
Comparison(Comparison::Direction::kLe, PrimitiveType::F32).IsReflexive());
EXPECT_FALSE(
Comparison(Comparison::Direction::kLt, PrimitiveType::S32).IsReflexive());
}
TEST(Comparison, TotalOrderReflexivity) {
EXPECT_TRUE(Comparison(Comparison::Direction::kLe, PrimitiveType::BF16,
Comparison::Order::kTotal)
.IsReflexive());
EXPECT_TRUE(Comparison(Comparison::Direction::kGe, PrimitiveType::F32,
Comparison::Order::kTotal)
.IsReflexive());
EXPECT_TRUE(
Comparison(Comparison::Direction::kEq, PrimitiveType::S32).IsReflexive());
EXPECT_FALSE(Comparison(Comparison::Direction::kNe, PrimitiveType::F32,
Comparison::Order::kTotal)
.IsReflexive());
EXPECT_FALSE(Comparison(Comparison::Direction::kLt, PrimitiveType::F64,
Comparison::Order::kTotal)
.IsReflexive());
}
TEST(Comparison, PartialOrderAntiReflexivity) {
EXPECT_TRUE(Comparison(Comparison::Direction::kGt, PrimitiveType::F32)
.IsAntireflexive());
EXPECT_TRUE(Comparison(Comparison::Direction::kLt, PrimitiveType::F32,
Comparison::Order::kTotal)
.IsAntireflexive());
EXPECT_FALSE(Comparison(Comparison::Direction::kEq, PrimitiveType::F32,
Comparison::Order::kTotal)
.IsAntireflexive());
}
TEST(Comparison, TotalOrderAntiReflexivity) {
EXPECT_TRUE(Comparison(Comparison::Direction::kNe, PrimitiveType::BF16,
Comparison::Order::kTotal)
.IsAntireflexive());
EXPECT_TRUE(Comparison(Comparison::Direction::kNe, PrimitiveType::S32)
.IsAntireflexive());
EXPECT_FALSE(Comparison(Comparison::Direction::kEq, PrimitiveType::F32,
Comparison::Order::kTotal)
.IsAntireflexive());
EXPECT_FALSE(Comparison(Comparison::Direction::kLe, PrimitiveType::F64,
Comparison::Order::kTotal)
.IsAntireflexive());
EXPECT_FALSE(Comparison(Comparison::Direction::kLe, PrimitiveType::S8)
.IsAntireflexive());
}
TEST(Comparison, Converse) {
EXPECT_THAT(
Comparison(Comparison::Direction::kLe, PrimitiveType::S8).Converse(),
Eq(Comparison(Comparison::Direction::kGe, PrimitiveType::S8)));
EXPECT_THAT(
Comparison(Comparison::Direction::kEq, PrimitiveType::U16).Converse(),
Eq(Comparison(Comparison::Direction::kEq, PrimitiveType::U16)));
EXPECT_THAT(
Comparison(Comparison::Direction::kGt, PrimitiveType::F32).Converse(),
Eq(Comparison(Comparison::Direction::kLt, PrimitiveType::F32)));
}
TEST(Comparison, PartialOrderFloatsShouldNotHaveInverse) {
EXPECT_FALSE(Comparison(Comparison::Direction::kGt, PrimitiveType::F32)
.Inverse()
.has_value());
}
TEST(Comparison, Inverse) {
EXPECT_THAT(
*Comparison(Comparison::Direction::kLe, PrimitiveType::S64).Inverse(),
Eq(Comparison(Comparison::Direction::kGt, PrimitiveType::S64)));
EXPECT_THAT(
*Comparison(Comparison::Direction::kEq, PrimitiveType::U16).Inverse(),
Eq(Comparison(Comparison::Direction::kNe, PrimitiveType::U16)));
EXPECT_THAT(*Comparison(Comparison::Direction::kGt, PrimitiveType::F32,
Comparison::Order::kTotal)
.Inverse(),
Eq(Comparison(Comparison::Direction::kLe, PrimitiveType::F32,
Comparison::Order::kTotal)));
}
TEST(Comparison, ToString) {
EXPECT_EQ(
Comparison(Comparison::Direction::kLt, PrimitiveType::F32).ToString(),
".LT.F32.PARTIALORDER");
EXPECT_EQ(
Comparison(Comparison::Direction::kEq, PrimitiveType::S8).ToString(),
".EQ.S8.TOTALORDER");
EXPECT_EQ(Comparison(Comparison::Direction::kGe, PrimitiveType::C128)
.ToString("_1_", "_2_", "_3_"),
"_1_GE_2_C128_3_PARTIALORDER");
}
TEST(Comparison, TotalOrderFloatComparison) {
EXPECT_TRUE(Comparison(Comparison::Direction::kEq, PrimitiveType::F32,
Comparison::Order::kTotal)
.Compare<float>(std::numeric_limits<float>::quiet_NaN(),
std::numeric_limits<float>::quiet_NaN()));
EXPECT_TRUE(Comparison(Comparison::Direction::kLt, PrimitiveType::F32,
Comparison::Order::kTotal)
.Compare<float>(1.0f, 2.0f));
EXPECT_FALSE(Comparison(Comparison::Direction::kLt, PrimitiveType::F32,
Comparison::Order::kTotal)
.Compare<float>(std::numeric_limits<float>::infinity(),
-std::numeric_limits<float>::infinity()));
EXPECT_TRUE(Comparison(Comparison::Direction::kLt, PrimitiveType::F32,
Comparison::Order::kTotal)
.Compare<float>(-0.0f, +0.0f));
EXPECT_TRUE(Comparison(Comparison::Direction::kNe, PrimitiveType::F32,
Comparison::Order::kTotal)
.Compare<float>(+0.0f, -0.0f));
EXPECT_FALSE(Comparison(Comparison::Direction::kGt, PrimitiveType::F32,
Comparison::Order::kTotal)
.Compare<float>(-0.1f, 0.1f));
}
TEST(Comparison, TotalOrderBfloat16Comparison) {
EXPECT_TRUE(Comparison(Comparison::Direction::kEq, PrimitiveType::BF16,
Comparison::Order::kTotal)
.Compare<xla::bfloat16>(
std::numeric_limits<xla::bfloat16>::quiet_NaN(),
std::numeric_limits<xla::bfloat16>::quiet_NaN()));
EXPECT_TRUE(
Comparison(Comparison::Direction::kLt, PrimitiveType::BF16,
Comparison::Order::kTotal)
.Compare<xla::bfloat16>(xla::bfloat16(1.0f), xla::bfloat16(2.0f)));
EXPECT_FALSE(Comparison(Comparison::Direction::kLt, PrimitiveType::BF16,
Comparison::Order::kTotal)
.Compare<xla::bfloat16>(
std::numeric_limits<xla::bfloat16>::infinity(),
-std::numeric_limits<xla::bfloat16>::infinity()));
EXPECT_TRUE(
Comparison(Comparison::Direction::kLt, PrimitiveType::BF16,
Comparison::Order::kTotal)
.Compare<xla::bfloat16>(xla::bfloat16(-0.0f), xla::bfloat16(+0.0f)));
EXPECT_TRUE(
Comparison(Comparison::Direction::kGt, PrimitiveType::BF16,
Comparison::Order::kTotal)
.Compare<xla::bfloat16>(xla::bfloat16(+0.0f), xla::bfloat16(-0.0f)));
EXPECT_FALSE(
Comparison(Comparison::Direction::kGt, PrimitiveType::BF16,
Comparison::Order::kTotal)
.Compare<xla::bfloat16>(xla::bfloat16(-0.1f), xla::bfloat16(0.1f)));
}
TEST(Comparison, Compare) {
EXPECT_TRUE(Comparison(Comparison::Direction::kLt, PrimitiveType::F32)
.Compare<float>(1.0f, 2.0f));
EXPECT_TRUE(
Comparison(Comparison::Direction::kGe, PrimitiveType::BF16)
.Compare<xla::bfloat16>(xla::bfloat16(2.0f), xla::bfloat16(1.0f)));
EXPECT_FALSE(Comparison(Comparison::Direction::kNe, PrimitiveType::S64)
.Compare<int64_t>(1'000'000, 1'000'000));
EXPECT_TRUE(Comparison(Comparison::Direction::kEq, PrimitiveType::U8)
.Compare<uint8_t>(63, 63));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/comparison_util.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/comparison_util_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
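The row above pairs XLA's comparison_util.cc with its unit test. For orientation, a minimal usage sketch follows (not part of the dataset row; the main() harness is illustrative, mirroring cases from comparison_util_test.cc and assuming the XLA headers at the commit above are on the include path):

#include <cassert>
#include <limits>

#include "xla/comparison_util.h"

int main() {
  using xla::Comparison;
  const float nan = std::numeric_limits<float>::quiet_NaN();

  // Floats default to a partial order: NaN is unordered, so EQ(NaN, NaN) is
  // false and kLt has no inverse (!(a < b) does not imply a >= b with NaNs).
  Comparison partial_eq(Comparison::Direction::kEq, xla::PrimitiveType::F32);
  assert(!partial_eq.Compare<float>(nan, nan));
  assert(!Comparison(Comparison::Direction::kLt, xla::PrimitiveType::F32)
              .Inverse()
              .has_value());

  // A total order treats NaN as equal to itself and orders -0.0 before +0.0.
  Comparison total_eq(Comparison::Direction::kEq, xla::PrimitiveType::F32,
                      Comparison::Order::kTotal);
  Comparison total_lt(Comparison::Direction::kLt, xla::PrimitiveType::F32,
                      Comparison::Order::kTotal);
  assert(total_eq.Compare<float>(nan, nan));
  assert(total_lt.Compare<float>(-0.0f, +0.0f));
  return 0;
}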
9b3032c4-cbe2-4b9a-85fc-77dc67256bd2 | cpp | google/cel-cpp | equality_functions | runtime/standard/equality_functions.cc | runtime/standard/equality_functions_test.cc | #include "runtime/standard/equality_functions.h"
#include <cstdint>
#include <functional>
#include <optional>
#include <type_traits>
#include <utility>
#include "absl/functional/function_ref.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "absl/time/time.h"
#include "absl/types/optional.h"
#include "base/builtins.h"
#include "base/function_adapter.h"
#include "base/kind.h"
#include "common/casting.h"
#include "common/value.h"
#include "common/value_manager.h"
#include "internal/number.h"
#include "internal/status_macros.h"
#include "runtime/function_registry.h"
#include "runtime/internal/errors.h"
#include "runtime/register_function_helper.h"
#include "runtime/runtime_options.h"
namespace cel {
namespace {
using ::cel::Cast;
using ::cel::InstanceOf;
using ::cel::builtin::kEqual;
using ::cel::builtin::kInequal;
using ::cel::internal::Number;
struct HomogenousEqualProvider {
static constexpr bool kIsHeterogeneous = false;
absl::StatusOr<absl::optional<bool>> operator()(ValueManager& value_factory,
const Value& lhs,
const Value& rhs) const;
};
struct HeterogeneousEqualProvider {
static constexpr bool kIsHeterogeneous = true;
absl::StatusOr<absl::optional<bool>> operator()(ValueManager& value_factory,
const Value& lhs,
const Value& rhs) const;
};
template <class Type>
absl::optional<bool> Inequal(Type lhs, Type rhs) {
return lhs != rhs;
}
template <>
absl::optional<bool> Inequal(const StringValue& lhs, const StringValue& rhs) {
return !lhs.Equals(rhs);
}
template <>
absl::optional<bool> Inequal(const BytesValue& lhs, const BytesValue& rhs) {
return !lhs.Equals(rhs);
}
template <>
absl::optional<bool> Inequal(const NullValue&, const NullValue&) {
return false;
}
template <>
absl::optional<bool> Inequal(const TypeValue& lhs, const TypeValue& rhs) {
return lhs.name() != rhs.name();
}
template <class Type>
absl::optional<bool> Equal(Type lhs, Type rhs) {
return lhs == rhs;
}
template <>
absl::optional<bool> Equal(const StringValue& lhs, const StringValue& rhs) {
return lhs.Equals(rhs);
}
template <>
absl::optional<bool> Equal(const BytesValue& lhs, const BytesValue& rhs) {
return lhs.Equals(rhs);
}
template <>
absl::optional<bool> Equal(const NullValue&, const NullValue&) {
return true;
}
template <>
absl::optional<bool> Equal(const TypeValue& lhs, const TypeValue& rhs) {
return lhs.name() == rhs.name();
}
template <typename EqualsProvider>
absl::StatusOr<absl::optional<bool>> ListEqual(ValueManager& factory,
const ListValue& lhs,
const ListValue& rhs) {
if (&lhs == &rhs) {
return true;
}
CEL_ASSIGN_OR_RETURN(auto lhs_size, lhs.Size());
CEL_ASSIGN_OR_RETURN(auto rhs_size, rhs.Size());
if (lhs_size != rhs_size) {
return false;
}
for (int i = 0; i < lhs_size; ++i) {
CEL_ASSIGN_OR_RETURN(auto lhs_i, lhs.Get(factory, i));
CEL_ASSIGN_OR_RETURN(auto rhs_i, rhs.Get(factory, i));
CEL_ASSIGN_OR_RETURN(absl::optional<bool> eq,
EqualsProvider()(factory, lhs_i, rhs_i));
if (!eq.has_value() || !*eq) {
return eq;
}
}
return true;
}
absl::StatusOr<absl::optional<bool>> OpaqueEqual(ValueManager& manager,
const OpaqueValue& lhs,
const OpaqueValue& rhs) {
Value result;
CEL_RETURN_IF_ERROR(lhs.Equal(manager, rhs, result));
if (auto bool_value = As<BoolValue>(result); bool_value) {
return bool_value->NativeValue();
}
return TypeConversionError(result.GetTypeName(), "bool").NativeValue();
}
absl::optional<Number> NumberFromValue(const Value& value) {
if (value.Is<IntValue>()) {
return Number::FromInt64(value.GetInt().NativeValue());
} else if (value.Is<UintValue>()) {
return Number::FromUint64(value.GetUint().NativeValue());
} else if (value.Is<DoubleValue>()) {
return Number::FromDouble(value.GetDouble().NativeValue());
}
return absl::nullopt;
}
absl::StatusOr<absl::optional<Value>> CheckAlternativeNumericType(
ValueManager& value_factory, const Value& key, const MapValue& rhs) {
absl::optional<Number> number = NumberFromValue(key);
if (!number.has_value()) {
return absl::nullopt;
}
if (!InstanceOf<IntValue>(key) && number->LosslessConvertibleToInt()) {
Value entry;
bool ok;
CEL_ASSIGN_OR_RETURN(
std::tie(entry, ok),
rhs.Find(value_factory, value_factory.CreateIntValue(number->AsInt())));
if (ok) {
return entry;
}
}
if (!InstanceOf<UintValue>(key) && number->LosslessConvertibleToUint()) {
Value entry;
bool ok;
CEL_ASSIGN_OR_RETURN(std::tie(entry, ok),
rhs.Find(value_factory, value_factory.CreateUintValue(
number->AsUint())));
if (ok) {
return entry;
}
}
return absl::nullopt;
}
template <typename EqualsProvider>
absl::StatusOr<absl::optional<bool>> MapEqual(ValueManager& value_factory,
const MapValue& lhs,
const MapValue& rhs) {
if (&lhs == &rhs) {
return true;
}
if (lhs.Size() != rhs.Size()) {
return false;
}
CEL_ASSIGN_OR_RETURN(auto iter, lhs.NewIterator(value_factory));
while (iter->HasNext()) {
CEL_ASSIGN_OR_RETURN(auto lhs_key, iter->Next(value_factory));
Value rhs_value;
bool rhs_ok;
CEL_ASSIGN_OR_RETURN(std::tie(rhs_value, rhs_ok),
rhs.Find(value_factory, lhs_key));
if (!rhs_ok && EqualsProvider::kIsHeterogeneous) {
CEL_ASSIGN_OR_RETURN(
auto maybe_rhs_value,
CheckAlternativeNumericType(value_factory, lhs_key, rhs));
rhs_ok = maybe_rhs_value.has_value();
if (rhs_ok) {
rhs_value = std::move(*maybe_rhs_value);
}
}
if (!rhs_ok) {
return false;
}
CEL_ASSIGN_OR_RETURN(auto lhs_value, lhs.Get(value_factory, lhs_key));
CEL_ASSIGN_OR_RETURN(absl::optional<bool> eq,
EqualsProvider()(value_factory, lhs_value, rhs_value));
if (!eq.has_value() || !*eq) {
return eq;
}
}
return true;
}
template <typename Type, typename Op>
std::function<Value(cel::ValueManager& factory, Type, Type)> WrapComparison(
Op op, absl::string_view name) {
return [op = std::move(op), name](cel::ValueManager& factory, Type lhs,
Type rhs) -> Value {
absl::optional<bool> result = op(lhs, rhs);
if (result.has_value()) {
return factory.CreateBoolValue(*result);
}
return factory.CreateErrorValue(
cel::runtime_internal::CreateNoMatchingOverloadError(name));
};
}
template <class Type>
absl::Status RegisterEqualityFunctionsForType(cel::FunctionRegistry& registry) {
using FunctionAdapter =
cel::RegisterHelper<BinaryFunctionAdapter<Value, Type, Type>>;
CEL_RETURN_IF_ERROR(FunctionAdapter::RegisterGlobalOverload(
kInequal, WrapComparison<Type>(&Inequal<Type>, kInequal), registry));
CEL_RETURN_IF_ERROR(FunctionAdapter::RegisterGlobalOverload(
kEqual, WrapComparison<Type>(&Equal<Type>, kEqual), registry));
return absl::OkStatus();
}
template <typename Type, typename Op>
auto ComplexEquality(Op&& op) {
return [op = std::forward<Op>(op)](cel::ValueManager& f, const Type& t1,
const Type& t2) -> absl::StatusOr<Value> {
CEL_ASSIGN_OR_RETURN(absl::optional<bool> result, op(f, t1, t2));
if (!result.has_value()) {
return f.CreateErrorValue(
cel::runtime_internal::CreateNoMatchingOverloadError(kEqual));
}
return f.CreateBoolValue(*result);
};
}
template <typename Type, typename Op>
auto ComplexInequality(Op&& op) {
return [op = std::forward<Op>(op)](cel::ValueManager& f, Type t1,
Type t2) -> absl::StatusOr<Value> {
CEL_ASSIGN_OR_RETURN(absl::optional<bool> result, op(f, t1, t2));
if (!result.has_value()) {
return f.CreateErrorValue(
cel::runtime_internal::CreateNoMatchingOverloadError(kInequal));
}
return f.CreateBoolValue(!*result);
};
}
template <class Type>
absl::Status RegisterComplexEqualityFunctionsForType(
absl::FunctionRef<absl::StatusOr<absl::optional<bool>>(ValueManager&, Type,
Type)>
op,
cel::FunctionRegistry& registry) {
using FunctionAdapter = cel::RegisterHelper<
BinaryFunctionAdapter<absl::StatusOr<Value>, Type, Type>>;
CEL_RETURN_IF_ERROR(FunctionAdapter::RegisterGlobalOverload(
kInequal, ComplexInequality<Type>(op), registry));
CEL_RETURN_IF_ERROR(FunctionAdapter::RegisterGlobalOverload(
kEqual, ComplexEquality<Type>(op), registry));
return absl::OkStatus();
}
absl::Status RegisterHomogenousEqualityFunctions(
cel::FunctionRegistry& registry) {
CEL_RETURN_IF_ERROR(RegisterEqualityFunctionsForType<bool>(registry));
CEL_RETURN_IF_ERROR(RegisterEqualityFunctionsForType<int64_t>(registry));
CEL_RETURN_IF_ERROR(RegisterEqualityFunctionsForType<uint64_t>(registry));
CEL_RETURN_IF_ERROR(RegisterEqualityFunctionsForType<double>(registry));
CEL_RETURN_IF_ERROR(
RegisterEqualityFunctionsForType<const cel::StringValue&>(registry));
CEL_RETURN_IF_ERROR(
RegisterEqualityFunctionsForType<const cel::BytesValue&>(registry));
CEL_RETURN_IF_ERROR(
RegisterEqualityFunctionsForType<absl::Duration>(registry));
CEL_RETURN_IF_ERROR(RegisterEqualityFunctionsForType<absl::Time>(registry));
CEL_RETURN_IF_ERROR(
RegisterEqualityFunctionsForType<const cel::NullValue&>(registry));
CEL_RETURN_IF_ERROR(
RegisterEqualityFunctionsForType<const cel::TypeValue&>(registry));
CEL_RETURN_IF_ERROR(
RegisterComplexEqualityFunctionsForType<const cel::ListValue&>(
&ListEqual<HomogenousEqualProvider>, registry));
CEL_RETURN_IF_ERROR(
RegisterComplexEqualityFunctionsForType<const cel::MapValue&>(
&MapEqual<HomogenousEqualProvider>, registry));
return absl::OkStatus();
}
absl::Status RegisterNullMessageEqualityFunctions(FunctionRegistry& registry) {
CEL_RETURN_IF_ERROR(
(cel::RegisterHelper<
BinaryFunctionAdapter<bool, const StructValue&, const NullValue&>>::
RegisterGlobalOverload(
kEqual,
[](ValueManager&, const StructValue&, const NullValue&) {
return false;
},
registry)));
CEL_RETURN_IF_ERROR(
(cel::RegisterHelper<
BinaryFunctionAdapter<bool, const NullValue&, const StructValue&>>::
RegisterGlobalOverload(
kEqual,
[](ValueManager&, const NullValue&, const StructValue&) {
return false;
},
registry)));
CEL_RETURN_IF_ERROR(
(cel::RegisterHelper<
BinaryFunctionAdapter<bool, const StructValue&, const NullValue&>>::
RegisterGlobalOverload(
kInequal,
[](ValueManager&, const StructValue&, const NullValue&) {
return true;
},
registry)));
return cel::RegisterHelper<
BinaryFunctionAdapter<bool, const NullValue&, const StructValue&>>::
RegisterGlobalOverload(
kInequal,
[](ValueManager&, const NullValue&, const StructValue&) {
return true;
},
registry);
}
template <typename EqualsProvider>
absl::StatusOr<absl::optional<bool>> HomogenousValueEqual(ValueManager& factory,
const Value& v1,
const Value& v2) {
if (v1->kind() != v2->kind()) {
return absl::nullopt;
}
static_assert(std::is_lvalue_reference_v<decltype(Cast<StringValue>(v1))>,
"unexpected value copy");
switch (v1->kind()) {
case ValueKind::kBool:
return Equal<bool>(Cast<BoolValue>(v1).NativeValue(),
Cast<BoolValue>(v2).NativeValue());
case ValueKind::kNull:
return Equal<const NullValue&>(Cast<NullValue>(v1), Cast<NullValue>(v2));
case ValueKind::kInt:
return Equal<int64_t>(Cast<IntValue>(v1).NativeValue(),
Cast<IntValue>(v2).NativeValue());
case ValueKind::kUint:
return Equal<uint64_t>(Cast<UintValue>(v1).NativeValue(),
Cast<UintValue>(v2).NativeValue());
case ValueKind::kDouble:
return Equal<double>(Cast<DoubleValue>(v1).NativeValue(),
Cast<DoubleValue>(v2).NativeValue());
case ValueKind::kDuration:
return Equal<absl::Duration>(Cast<DurationValue>(v1).NativeValue(),
Cast<DurationValue>(v2).NativeValue());
case ValueKind::kTimestamp:
return Equal<absl::Time>(Cast<TimestampValue>(v1).NativeValue(),
Cast<TimestampValue>(v2).NativeValue());
case ValueKind::kCelType:
return Equal<const TypeValue&>(Cast<TypeValue>(v1), Cast<TypeValue>(v2));
case ValueKind::kString:
return Equal<const StringValue&>(Cast<StringValue>(v1),
Cast<StringValue>(v2));
case ValueKind::kBytes:
return Equal<const cel::BytesValue&>(v1.GetBytes(), v2.GetBytes());
case ValueKind::kList:
return ListEqual<EqualsProvider>(factory, Cast<ListValue>(v1),
Cast<ListValue>(v2));
case ValueKind::kMap:
return MapEqual<EqualsProvider>(factory, Cast<MapValue>(v1),
Cast<MapValue>(v2));
case ValueKind::kOpaque:
return OpaqueEqual(factory, Cast<OpaqueValue>(v1), Cast<OpaqueValue>(v2));
default:
return absl::nullopt;
}
}
absl::StatusOr<Value> EqualOverloadImpl(ValueManager& factory, const Value& lhs,
const Value& rhs) {
CEL_ASSIGN_OR_RETURN(absl::optional<bool> result,
runtime_internal::ValueEqualImpl(factory, lhs, rhs));
if (result.has_value()) {
return factory.CreateBoolValue(*result);
}
return factory.CreateErrorValue(
cel::runtime_internal::CreateNoMatchingOverloadError(kEqual));
}
absl::StatusOr<Value> InequalOverloadImpl(ValueManager& factory,
const Value& lhs, const Value& rhs) {
CEL_ASSIGN_OR_RETURN(absl::optional<bool> result,
runtime_internal::ValueEqualImpl(factory, lhs, rhs));
if (result.has_value()) {
return factory.CreateBoolValue(!*result);
}
return factory.CreateErrorValue(
cel::runtime_internal::CreateNoMatchingOverloadError(kInequal));
}
absl::Status RegisterHeterogeneousEqualityFunctions(
cel::FunctionRegistry& registry) {
using Adapter = cel::RegisterHelper<
BinaryFunctionAdapter<absl::StatusOr<Value>, const Value&, const Value&>>;
CEL_RETURN_IF_ERROR(
Adapter::RegisterGlobalOverload(kEqual, &EqualOverloadImpl, registry));
CEL_RETURN_IF_ERROR(Adapter::RegisterGlobalOverload(
kInequal, &InequalOverloadImpl, registry));
return absl::OkStatus();
}
absl::StatusOr<absl::optional<bool>> HomogenousEqualProvider::operator()(
ValueManager& factory, const Value& lhs, const Value& rhs) const {
return HomogenousValueEqual<HomogenousEqualProvider>(factory, lhs, rhs);
}
absl::StatusOr<absl::optional<bool>> HeterogeneousEqualProvider::operator()(
ValueManager& factory, const Value& lhs, const Value& rhs) const {
return runtime_internal::ValueEqualImpl(factory, lhs, rhs);
}
}
namespace runtime_internal {
absl::StatusOr<absl::optional<bool>> ValueEqualImpl(ValueManager& value_factory,
const Value& v1,
const Value& v2) {
if (v1->kind() == v2->kind()) {
if (InstanceOf<StructValue>(v1) && InstanceOf<StructValue>(v2)) {
CEL_ASSIGN_OR_RETURN(Value result,
Cast<StructValue>(v1).Equal(value_factory, v2));
if (InstanceOf<BoolValue>(result)) {
return Cast<BoolValue>(result).NativeValue();
}
return false;
}
return HomogenousValueEqual<HeterogeneousEqualProvider>(value_factory, v1,
v2);
}
absl::optional<Number> lhs = NumberFromValue(v1);
absl::optional<Number> rhs = NumberFromValue(v2);
if (rhs.has_value() && lhs.has_value()) {
return *lhs == *rhs;
}
if (InstanceOf<ErrorValue>(v1) || InstanceOf<UnknownValue>(v1) ||
InstanceOf<ErrorValue>(v2) || InstanceOf<UnknownValue>(v2)) {
return absl::nullopt;
}
return false;
}
}
absl::Status RegisterEqualityFunctions(FunctionRegistry& registry,
const RuntimeOptions& options) {
if (options.enable_heterogeneous_equality) {
CEL_RETURN_IF_ERROR(RegisterHeterogeneousEqualityFunctions(registry));
} else {
CEL_RETURN_IF_ERROR(RegisterHomogenousEqualityFunctions(registry));
CEL_RETURN_IF_ERROR(RegisterNullMessageEqualityFunctions(registry));
}
return absl::OkStatus();
}
} | #include "runtime/standard/equality_functions.h"
#include <vector>
#include "base/builtins.h"
#include "base/function_descriptor.h"
#include "base/kind.h"
#include "internal/testing.h"
#include "runtime/function_registry.h"
#include "runtime/runtime_options.h"
namespace cel {
namespace {
using ::testing::UnorderedElementsAre;
MATCHER_P3(MatchesDescriptor, name, receiver, expected_kinds, "") {
const FunctionDescriptor& descriptor = *arg;
const std::vector<Kind>& types = expected_kinds;
return descriptor.name() == name && descriptor.receiver_style() == receiver &&
descriptor.types() == types;
}
TEST(RegisterEqualityFunctionsHomogeneous, RegistersEqualOperators) {
FunctionRegistry registry;
RuntimeOptions options;
options.enable_heterogeneous_equality = false;
ASSERT_OK(RegisterEqualityFunctions(registry, options));
auto overloads = registry.ListFunctions();
EXPECT_THAT(
overloads[builtin::kEqual],
UnorderedElementsAre(
MatchesDescriptor(builtin::kEqual, false,
std::vector<Kind>{Kind::kList, Kind::kList}),
MatchesDescriptor(builtin::kEqual, false,
std::vector<Kind>{Kind::kMap, Kind::kMap}),
MatchesDescriptor(builtin::kEqual, false,
std::vector<Kind>{Kind::kBool, Kind::kBool}),
MatchesDescriptor(builtin::kEqual, false,
std::vector<Kind>{Kind::kInt, Kind::kInt}),
MatchesDescriptor(builtin::kEqual, false,
std::vector<Kind>{Kind::kUint, Kind::kUint}),
MatchesDescriptor(builtin::kEqual, false,
std::vector<Kind>{Kind::kDouble, Kind::kDouble}),
MatchesDescriptor(
builtin::kEqual, false,
std::vector<Kind>{Kind::kDuration, Kind::kDuration}),
MatchesDescriptor(
builtin::kEqual, false,
std::vector<Kind>{Kind::kTimestamp, Kind::kTimestamp}),
MatchesDescriptor(builtin::kEqual, false,
std::vector<Kind>{Kind::kString, Kind::kString}),
MatchesDescriptor(builtin::kEqual, false,
std::vector<Kind>{Kind::kBytes, Kind::kBytes}),
MatchesDescriptor(builtin::kEqual, false,
std::vector<Kind>{Kind::kType, Kind::kType}),
MatchesDescriptor(builtin::kEqual, false,
std::vector<Kind>{Kind::kStruct, Kind::kNullType}),
MatchesDescriptor(builtin::kEqual, false,
std::vector<Kind>{Kind::kNullType, Kind::kStruct}),
MatchesDescriptor(
builtin::kEqual, false,
std::vector<Kind>{Kind::kNullType, Kind::kNullType})));
EXPECT_THAT(
overloads[builtin::kInequal],
UnorderedElementsAre(
MatchesDescriptor(builtin::kInequal, false,
std::vector<Kind>{Kind::kList, Kind::kList}),
MatchesDescriptor(builtin::kInequal, false,
std::vector<Kind>{Kind::kMap, Kind::kMap}),
MatchesDescriptor(builtin::kInequal, false,
std::vector<Kind>{Kind::kBool, Kind::kBool}),
MatchesDescriptor(builtin::kInequal, false,
std::vector<Kind>{Kind::kInt, Kind::kInt}),
MatchesDescriptor(builtin::kInequal, false,
std::vector<Kind>{Kind::kUint, Kind::kUint}),
MatchesDescriptor(builtin::kInequal, false,
std::vector<Kind>{Kind::kDouble, Kind::kDouble}),
MatchesDescriptor(
builtin::kInequal, false,
std::vector<Kind>{Kind::kDuration, Kind::kDuration}),
MatchesDescriptor(
builtin::kInequal, false,
std::vector<Kind>{Kind::kTimestamp, Kind::kTimestamp}),
MatchesDescriptor(builtin::kInequal, false,
std::vector<Kind>{Kind::kString, Kind::kString}),
MatchesDescriptor(builtin::kInequal, false,
std::vector<Kind>{Kind::kBytes, Kind::kBytes}),
MatchesDescriptor(builtin::kInequal, false,
std::vector<Kind>{Kind::kType, Kind::kType}),
MatchesDescriptor(builtin::kInequal, false,
std::vector<Kind>{Kind::kStruct, Kind::kNullType}),
MatchesDescriptor(builtin::kInequal, false,
std::vector<Kind>{Kind::kNullType, Kind::kStruct}),
MatchesDescriptor(
builtin::kInequal, false,
std::vector<Kind>{Kind::kNullType, Kind::kNullType})));
}
TEST(RegisterEqualityFunctionsHeterogeneous, RegistersEqualOperators) {
FunctionRegistry registry;
RuntimeOptions options;
options.enable_heterogeneous_equality = true;
ASSERT_OK(RegisterEqualityFunctions(registry, options));
auto overloads = registry.ListFunctions();
EXPECT_THAT(
overloads[builtin::kEqual],
UnorderedElementsAre(MatchesDescriptor(
builtin::kEqual, false, std::vector<Kind>{Kind::kAny, Kind::kAny})));
EXPECT_THAT(overloads[builtin::kInequal],
UnorderedElementsAre(MatchesDescriptor(
builtin::kInequal, false,
std::vector<Kind>{Kind::kAny, Kind::kAny})));
}
}
} | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/runtime/standard/equality_functions.cc | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/runtime/standard/equality_functions_test.cc | 4552db5798fb0853b131b783d8875794334fae7f |
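The row above pairs cel-cpp's equality_functions.cc with its unit test. For orientation, a minimal usage sketch follows (not part of the dataset row; the main() harness is illustrative, reusing only calls that appear in equality_functions_test.cc and assuming the cel-cpp headers at the commit above):

#include <iostream>

#include "base/builtins.h"
#include "runtime/function_registry.h"
#include "runtime/runtime_options.h"
#include "runtime/standard/equality_functions.h"

int main() {
  cel::FunctionRegistry registry;
  cel::RuntimeOptions options;
  // Heterogeneous equality registers one (Any, Any) overload per operator and
  // resolves cross-type numeric equality (e.g. int/uint/double) at runtime;
  // disabling it instead registers one overload per supported type pair.
  options.enable_heterogeneous_equality = true;
  if (!cel::RegisterEqualityFunctions(registry, options).ok()) return 1;

  auto overloads = registry.ListFunctions();
  std::cout << cel::builtin::kEqual << " overloads registered: "
            << overloads[cel::builtin::kEqual].size()
            << "\n";  // expect 1 with heterogeneous equality enabled
  return 0;
}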
2b7b518f-026f-4331-bdbc-8e050845acbb | cpp | google/tensorstore | json_binding | tensorstore/internal/json_binding/json_binding.h | tensorstore/internal/json_binding/json_binding_test.cc | #ifndef TENSORSTORE_INTERNAL_JSON_BINDING_JSON_H_
#define TENSORSTORE_INTERNAL_JSON_BINDING_JSON_H_
#include <functional>
#include <limits>
#include <map>
#include <string>
#include <string_view>
#include <tuple>
#include <type_traits>
#include <utility>
#include "absl/meta/type_traits.h"
#include "absl/status/status.h"
#include "absl/strings/str_join.h"
#include <nlohmann/json.hpp>
#include "tensorstore/internal/json/json.h"
#include "tensorstore/internal/json/same.h"
#include "tensorstore/internal/json/value_as.h"
#include "tensorstore/internal/json_binding/bindable.h"
#include "tensorstore/internal/type_traits.h"
#include "tensorstore/json_serialization_options_base.h"
#include "tensorstore/util/quote_string.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
namespace internal_json_binding {
namespace empty_binder {
constexpr inline auto EmptyBinder = [](auto is_loading, const auto& options,
auto* obj, auto* j) -> absl::Status {
return absl::OkStatus();
};
}
using empty_binder::EmptyBinder;
namespace loose_value_as_binder {
constexpr inline auto LooseValueAsBinder =
[](auto is_loading, const auto& options, auto* obj,
::nlohmann::json* j) -> absl::Status {
if constexpr (is_loading) {
return internal_json::JsonRequireValueAs(*j, obj, false);
} else {
*j = *obj;
return absl::OkStatus();
}
};
}
using loose_value_as_binder::LooseValueAsBinder;
namespace value_as_binder {
constexpr inline auto ValueAsBinder = [](auto is_loading, const auto& options,
auto* obj,
::nlohmann::json* j) -> absl::Status {
if constexpr (is_loading) {
return internal_json::JsonRequireValueAs(*j, obj, true);
} else {
*j = *obj;
return absl::OkStatus();
}
};
}
using value_as_binder::ValueAsBinder;
template <>
constexpr inline auto DefaultBinder<bool> = ValueAsBinder;
template <>
constexpr inline auto DefaultBinder<std::int64_t> = ValueAsBinder;
template <>
constexpr inline auto DefaultBinder<std::string> = ValueAsBinder;
template <>
constexpr inline auto DefaultBinder<uint64_t> = ValueAsBinder;
template <>
constexpr inline auto DefaultBinder<double> = ValueAsBinder;
template <>
constexpr inline auto DefaultBinder<std::nullptr_t> = ValueAsBinder;
namespace loose_float_binder {
constexpr inline auto LooseFloatBinder =
[](auto is_loading, const auto& options, auto* obj,
::nlohmann::json* j) -> absl::Status {
if constexpr (is_loading) {
double x;
auto status = internal_json::JsonRequireValueAs(*j, &x, false);
if (status.ok()) *obj = x;
return status;
} else {
*j = static_cast<double>(*obj);
return absl::OkStatus();
}
};
}
using loose_float_binder::LooseFloatBinder;
namespace float_binder {
constexpr inline auto FloatBinder = [](auto is_loading, const auto& options,
auto* obj,
::nlohmann::json* j) -> absl::Status {
if constexpr (is_loading) {
double x;
auto status = internal_json::JsonRequireValueAs(*j, &x, true);
if (status.ok()) *obj = x;
return status;
} else {
*j = static_cast<double>(*obj);
return absl::OkStatus();
}
};
}
using float_binder::FloatBinder;
template <typename T>
constexpr inline auto
DefaultBinder<T, std::enable_if_t<std::is_floating_point_v<T>>> =
FloatBinder;
template <typename T>
constexpr auto LooseInteger(T min = std::numeric_limits<T>::min(),
T max = std::numeric_limits<T>::max()) {
return [=](auto is_loading, const auto& options, auto* obj,
::nlohmann::json* j) -> absl::Status {
if constexpr (is_loading) {
return internal_json::JsonRequireInteger(*j, obj, false, min,
max);
} else {
*j = *obj;
return absl::OkStatus();
}
};
}
template <typename T>
constexpr auto Integer(T min = std::numeric_limits<T>::min(),
T max = std::numeric_limits<T>::max()) {
return [=](auto is_loading, const auto& options, auto* obj,
::nlohmann::json* j) -> absl::Status {
if constexpr (is_loading) {
return internal_json::JsonRequireInteger(*j, obj, true, min,
max);
} else {
*j = *obj;
return absl::OkStatus();
}
};
}
template <typename T>
constexpr inline auto
DefaultBinder<T, std::enable_if_t<std::numeric_limits<T>::is_integer>> =
Integer<T>();
namespace non_empty_string_binder {
constexpr inline auto NonEmptyStringBinder =
[](auto is_loading, const auto& options, auto* obj,
::nlohmann::json* j) -> absl::Status {
if constexpr (is_loading) {
return internal_json::JsonRequireValueAs(
*j, obj, [](const std::string& value) { return !value.empty(); },
true);
} else {
*j = *obj;
return absl::OkStatus();
}
};
}
using non_empty_string_binder::NonEmptyStringBinder;
namespace copy_binder {
constexpr inline auto CopyJsonBinder = [](auto is_loading, const auto& options,
auto* obj,
::nlohmann::json* j) -> absl::Status {
if constexpr (is_loading) {
*obj = std::move(*j);
} else {
*j = *obj;
}
return absl::OkStatus();
};
}
using copy_binder::CopyJsonBinder;
template <>
constexpr inline auto DefaultBinder<::nlohmann::json> = CopyJsonBinder;
namespace object_binder {
constexpr inline auto CopyJsonObjectBinder = [](auto is_loading,
const auto& options, auto* obj,
auto* j) -> absl::Status {
if constexpr (is_loading) {
if constexpr (std::is_same_v<decltype(j), ::nlohmann::json::object_t*>) {
*obj = std::move(*j);
} else {
if (auto* j_obj = j->template get_ptr<::nlohmann::json::object_t*>()) {
*obj = std::move(*j_obj);
} else {
return internal_json::ExpectedError(*j, "object");
}
}
} else {
*j = *obj;
}
return absl::OkStatus();
};
}
using object_binder::CopyJsonObjectBinder;
template <>
constexpr inline auto DefaultBinder<::nlohmann::json::object_t> =
CopyJsonObjectBinder;
template <typename GetValue>
constexpr auto Constant(GetValue get_value) {
return [=](auto is_loading, const auto& options, auto* obj,
auto* j) -> absl::Status {
if constexpr (is_loading) {
const auto& value = get_value();
if (!internal_json::JsonSame(*j, value)) {
return internal_json::ExpectedError(*j, ::nlohmann::json(value).dump());
}
} else {
*j = get_value();
}
return absl::OkStatus();
};
}
template <typename Validator, typename Binder = decltype(DefaultBinder<>)>
constexpr auto Validate(Validator validator, Binder binder = DefaultBinder<>) {
return [=](auto is_loading, const auto& options, auto* obj,
auto* j) -> absl::Status {
if constexpr (is_loading) {
TENSORSTORE_RETURN_IF_ERROR(binder(is_loading, options, obj, j));
return internal::InvokeForStatus(validator, options, obj);
} else {
return binder(is_loading, options, obj, j);
}
};
}
template <typename Initializer>
constexpr auto Initialize(Initializer initializer) {
return [=](auto is_loading, const auto& options, [[maybe_unused]] auto* obj,
auto*) -> absl::Status {
if constexpr (is_loading) {
return internal::InvokeForStatus(initializer, obj);
} else {
return absl::OkStatus();
}
};
}
template <auto Proj, typename Binder = decltype(DefaultBinder<>)>
constexpr auto Projection(Binder binder = DefaultBinder<>) {
return [binder = std::move(binder)](auto is_loading, const auto& options,
auto* obj, auto* j) -> absl::Status {
auto&& projected = std::invoke(Proj, *obj);
return binder(is_loading, options, &projected, j);
};
}
template <typename Proj, typename Binder = decltype(DefaultBinder<>)>
constexpr auto Projection(Proj projection, Binder binder = DefaultBinder<>) {
return [projection = std::move(projection), binder = std::move(binder)](
auto is_loading, const auto& options, auto* obj,
auto* j) -> absl::Status {
auto&& projected = std::invoke(projection, *obj);
return binder(is_loading, options, &projected, j);
};
}
template <typename T = void, typename Get, typename Set,
typename Binder = decltype(DefaultBinder<>)>
constexpr auto GetterSetter(Get get, Set set, Binder binder = DefaultBinder<>) {
return [get = std::move(get), set = std::move(set),
binder = std::move(binder)](auto is_loading, const auto& options,
auto* obj, auto* j) -> absl::Status {
if constexpr (is_loading) {
using Projected = std::conditional_t<
std::is_void_v<T>,
absl::remove_cvref_t<std::invoke_result_t<Get, decltype(*obj)>>, T>;
Projected projected;
TENSORSTORE_RETURN_IF_ERROR(binder(is_loading, options, &projected, j));
return internal::InvokeForStatus(set, *obj, std::move(projected));
} else {
auto&& projected = std::invoke(get, *obj);
return binder(is_loading, options, &projected, j);
}
};
}
template <typename LoadBinder = decltype(EmptyBinder),
typename SaveBinder = decltype(EmptyBinder)>
constexpr auto LoadSave(LoadBinder load_binder = EmptyBinder,
SaveBinder save_binder = EmptyBinder) {
return [=](auto is_loading, const auto& options, auto* obj,
auto* j) -> absl::Status {
if constexpr (is_loading) {
return load_binder(is_loading, options, obj, j);
} else {
return save_binder(is_loading, options, obj, j);
}
};
}
enum IncludeDefaultsPolicy {
kMaybeIncludeDefaults,
kNeverIncludeDefaults,
kAlwaysIncludeDefaults,
};
template <IncludeDefaultsPolicy Policy = kMaybeIncludeDefaults,
typename GetDefault, typename Binder = decltype(DefaultBinder<>)>
constexpr auto DefaultValue(GetDefault get_default,
Binder binder = DefaultBinder<>) {
return [=](auto is_loading, const auto& options, auto* obj,
::nlohmann::json* j) -> absl::Status {
using T = std::remove_const_t<std::remove_pointer_t<decltype(obj)>>;
if constexpr (is_loading) {
if (j->is_discarded()) {
return internal::InvokeForStatus(get_default, obj);
}
return binder(is_loading, options, obj, j);
} else {
TENSORSTORE_RETURN_IF_ERROR(binder(is_loading, options, obj, j));
if constexpr (Policy == kAlwaysIncludeDefaults) {
return absl::OkStatus();
}
if constexpr (Policy == kMaybeIncludeDefaults) {
IncludeDefaults include_defaults(options);
if (include_defaults.include_defaults()) {
return absl::OkStatus();
}
}
T default_obj;
::nlohmann::json default_j;
if (internal::InvokeForStatus(get_default, &default_obj).ok() &&
binder(is_loading, options, &default_obj, &default_j).ok() &&
internal_json::JsonSame(default_j, *j)) {
*j = ::nlohmann::json(::nlohmann::json::value_t::discarded);
}
return absl::OkStatus();
}
};
}
template <IncludeDefaultsPolicy DefaultsPolicy = kMaybeIncludeDefaults,
typename Binder = decltype(DefaultBinder<>)>
constexpr auto DefaultInitializedValue(Binder binder = DefaultBinder<>) {
return internal_json_binding::DefaultValue<DefaultsPolicy>(
[](auto* obj) { *obj = absl::remove_cvref_t<decltype(*obj)>{}; },
std::move(binder));
}
template <IncludeDefaultsPolicy Policy = kMaybeIncludeDefaults,
typename GetDefault, typename IsDefault,
typename Binder = decltype(DefaultBinder<>)>
constexpr auto DefaultPredicate(GetDefault get_default, IsDefault is_default,
Binder binder = DefaultBinder<>) {
return [=](auto is_loading, const auto& options, auto* obj,
::nlohmann::json* j) -> absl::Status {
if constexpr (is_loading) {
if (j->is_discarded()) {
return internal::InvokeForStatus(get_default, obj);
}
return binder(is_loading, options, obj, j);
} else {
bool include_defaults_value = Policy == kAlwaysIncludeDefaults;
if constexpr (Policy == kMaybeIncludeDefaults) {
IncludeDefaults include_defaults(options);
include_defaults_value = include_defaults.include_defaults();
}
if (!include_defaults_value && is_default(obj)) {
*j = ::nlohmann::json(::nlohmann::json::value_t::discarded);
return absl::OkStatus();
}
return binder(is_loading, options, obj, j);
}
};
}
template <IncludeDefaultsPolicy Policy = kMaybeIncludeDefaults,
typename IsDefault, typename Binder = decltype(DefaultBinder<>)>
constexpr auto DefaultInitializedPredicate(IsDefault is_default,
Binder binder = DefaultBinder<>) {
return internal_json_binding::DefaultPredicate<Policy>(
[](auto* obj) { *obj = absl::remove_cvref_t<decltype(*obj)>{}; },
std::move(is_default), std::move(binder));
}
template <typename T, typename TransformedValueBinder,
typename OriginalValueBinder = decltype(DefaultBinder<>)>
constexpr auto Compose(
TransformedValueBinder transformed_value_binder,
OriginalValueBinder original_value_binder = DefaultBinder<>) {
return [=](auto is_loading, const auto& options, auto* obj,
auto* j) -> absl::Status {
T value;
if constexpr (is_loading) {
TENSORSTORE_RETURN_IF_ERROR(
original_value_binder(is_loading, options, &value, j));
return transformed_value_binder(is_loading, options, obj, &value);
} else {
TENSORSTORE_RETURN_IF_ERROR(
transformed_value_binder(is_loading, options, obj, &value));
return original_value_binder(is_loading, options, &value, j);
}
};
}
template <typename GetBinder>
constexpr auto Dependent(GetBinder get_binder) {
return [=](auto is_loading, const auto& options, auto* obj,
auto*... j) -> absl::Status {
return get_binder(is_loading, options, obj, j...)(is_loading, options, obj,
j...);
};
}
namespace sequence_impl {
template <typename Loading, typename Options, typename Obj, typename J,
typename... Binder>
inline absl::Status invoke_reverse(Loading is_loading, Options& options,
Obj* obj, J* j, Binder... binder) {
absl::Status s;
std::true_type right_to_left;
right_to_left =
(((s.ok() ? (void)(s = binder(is_loading, options, obj, j)) : (void)0),
right_to_left) = ... = right_to_left);
return s;
}
template <typename Loading, typename Options, typename Obj, typename J,
typename... Binder>
inline absl::Status invoke_forward(Loading is_loading, Options& options,
Obj* obj, J* j, Binder... binder) {
absl::Status s;
[[maybe_unused]] bool ok =
(((s = binder(is_loading, options, obj, j)).ok()) && ...);
return s;
}
}
template <typename... Binder>
constexpr auto Sequence(Binder... binder) {
return [=](auto is_loading, const auto& options, auto* obj, auto* j) {
if constexpr (is_loading) {
return sequence_impl::invoke_forward(is_loading, options, obj, j,
binder...);
} else {
return sequence_impl::invoke_reverse(is_loading, options, obj, j,
binder...);
}
};
}
template <typename... MemberBinder>
constexpr auto Object(MemberBinder... member_binder) {
return [=](auto is_loading, const auto& options, auto* obj,
auto* j) -> absl::Status {
::nlohmann::json::object_t* j_obj;
if constexpr (is_loading) {
if constexpr (std::is_same_v<::nlohmann::json*, decltype(j)>) {
j_obj = j->template get_ptr<::nlohmann::json::object_t*>();
if (!j_obj) {
return internal_json::ExpectedError(*j, "object");
}
} else {
j_obj = j;
}
TENSORSTORE_RETURN_IF_ERROR(sequence_impl::invoke_forward(
is_loading, options, obj, j_obj, member_binder...));
if (!j_obj->empty()) {
return internal_json::JsonExtraMembersError(*j_obj);
}
return absl::OkStatus();
} else {
if constexpr (std::is_same_v<::nlohmann::json*, decltype(j)>) {
*j = ::nlohmann::json::object_t();
j_obj = j->template get_ptr<::nlohmann::json::object_t*>();
} else {
j_obj = j;
j_obj->clear();
}
return sequence_impl::invoke_reverse(is_loading, options, obj, j_obj,
member_binder...);
}
};
}
template <bool kDropDiscarded, typename MemberName, typename Binder>
struct MemberBinderImpl {
MemberName name;
Binder binder;
template <typename Options, typename Obj>
absl::Status operator()(std::true_type is_loading, const Options& options,
Obj* obj, ::nlohmann::json::object_t* j_obj) const {
::nlohmann::json j_member = internal_json::JsonExtractMember(j_obj, name);
if constexpr (kDropDiscarded) {
if (j_member.is_discarded()) return absl::OkStatus();
}
auto status = binder(is_loading, options, obj, &j_member);
return status.ok()
? status
: MaybeAnnotateStatus(
status, tensorstore::StrCat("Error parsing object member ",
QuoteString(name)));
}
template <typename Options, typename Obj>
absl::Status operator()(std::false_type is_loading, const Options& options,
Obj* obj, ::nlohmann::json::object_t* j_obj) const {
::nlohmann::json j_member(::nlohmann::json::value_t::discarded);
TENSORSTORE_RETURN_IF_ERROR(
binder(is_loading, options, obj, &j_member),
MaybeAnnotateStatus(
_, tensorstore::StrCat("Error converting object member ",
QuoteString(name))));
if (!j_member.is_discarded()) {
j_obj->emplace(name, std::move(j_member));
}
return absl::OkStatus();
}
};
template <typename MemberName, typename Binder = decltype(DefaultBinder<>)>
constexpr auto Member(MemberName name, Binder binder = DefaultBinder<>) {
return MemberBinderImpl<false, MemberName, Binder>{std::move(name),
std::move(binder)};
}
template <typename MemberName, typename Binder = decltype(DefaultBinder<>)>
constexpr auto OptionalMember(MemberName name,
Binder binder = DefaultBinder<>) {
return MemberBinderImpl<true, MemberName, Binder>{std::move(name),
std::move(binder)};
}
template <typename... MemberName>
constexpr auto AtMostOne(MemberName... names) {
return [=](auto is_loading, const auto& options, auto* obj,
::nlohmann::json::object_t* j) -> absl::Status {
if constexpr (is_loading) {
const auto has_member = [&](auto name) {
return j->find(name) == j->end() ? 0 : 1;
};
if ((has_member(names) + ...) > 1) {
return absl::InvalidArgumentError(tensorstore::StrCat(
"At most one of ",
absl::StrJoin({QuoteString(std::string_view(names))...}, ", "),
" members is allowed"));
}
}
return absl::OkStatus();
};
}
template <typename... MemberName>
constexpr auto AtLeastOne(MemberName... names) {
return [=](auto is_loading, const auto& options, auto* obj,
::nlohmann::json::object_t* j) -> absl::Status {
if constexpr (is_loading) {
const auto has_member = [&](auto name) {
return j->find(name) == j->end() ? 0 : 1;
};
if ((has_member(names) + ...) == 0) {
return absl::InvalidArgumentError(tensorstore::StrCat(
"At least one of ",
absl::StrJoin(
std::make_tuple(QuoteString(std::string_view(names))...), ", "),
" members must be specified"));
}
}
return absl::OkStatus();
};
}
namespace discard_extra_members_binder {
constexpr inline auto DiscardExtraMembers =
[](auto is_loading, const auto& options, auto* obj,
::nlohmann::json::object_t* j_obj) -> absl::Status {
if constexpr (is_loading) {
j_obj->clear();
}
return absl::OkStatus();
};
}
using discard_extra_members_binder::DiscardExtraMembers;
}
}
#endif | #include "tensorstore/internal/json_binding/json_binding.h"
#include <cstdint>
#include <optional>
#include <string>
#include <type_traits>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include <nlohmann/json.hpp>
#include "tensorstore/box.h"
#include "tensorstore/index.h"
#include "tensorstore/internal/json/json.h"
#include "tensorstore/internal/json_binding/bindable.h"
#include "tensorstore/internal/json_binding/gtest.h"
#include "tensorstore/internal/json_binding/std_array.h"
#include "tensorstore/internal/json_binding/std_optional.h"
#include "tensorstore/internal/json_gtest.h"
#include "tensorstore/json_serialization_options_base.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/status_testutil.h"
namespace {
namespace jb = tensorstore::internal_json_binding;
using ::nlohmann::json;
using ::tensorstore::MatchesStatus;
using ::tensorstore::internal::ParseJson;
using ::tensorstore::internal_json::JsonParseArray;
using ::tensorstore::internal_json::JsonValidateArrayLength;
TEST(JsonTest, SimpleParse) {
const char kArray[] = R"({ "foo": "bar" })";
auto x = ParseJson("");
EXPECT_TRUE(x.is_discarded());
auto y = ParseJson(kArray);
EXPECT_FALSE(y.is_discarded());
auto one = ParseJson("1");
EXPECT_FALSE(one.is_discarded());
}
TEST(JsonParseArrayTest, Basic) {
bool size_received = false;
std::vector<std::pair<::nlohmann::json, std::ptrdiff_t>> elements;
EXPECT_EQ(absl::OkStatus(),
JsonParseArray(
::nlohmann::json{1, 2, 3},
[&](std::ptrdiff_t s) {
EXPECT_EQ(3, s);
size_received = true;
return JsonValidateArrayLength(s, 3);
},
[&](const ::nlohmann::json& j, std::ptrdiff_t i) {
EXPECT_TRUE(size_received);
elements.emplace_back(j, i);
return absl::OkStatus();
}));
EXPECT_TRUE(size_received);
EXPECT_THAT(elements, ::testing::ElementsAre(::testing::Pair(1, 0),
::testing::Pair(2, 1),
::testing::Pair(3, 2)));
}
TEST(JsonParseArrayTest, NotArray) {
EXPECT_THAT(JsonParseArray(
::nlohmann::json(3),
[&](std::ptrdiff_t s) { return absl::OkStatus(); },
[&](const ::nlohmann::json& j, std::ptrdiff_t i) {
return absl::OkStatus();
}),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Expected array, but received: 3"));
}
TEST(JsonValidateArrayLength, Success) {
EXPECT_EQ(absl::OkStatus(), JsonValidateArrayLength(3, 3));
}
TEST(JsonValidateArrayLength, Failure) {
EXPECT_THAT(JsonValidateArrayLength(3, 4),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Array has length 3 but should have length 4"));
}
TEST(JsonParseArrayTest, SizeCallbackError) {
EXPECT_THAT(
JsonParseArray(
::nlohmann::json{1, 2, 3},
[&](std::ptrdiff_t s) { return absl::UnknownError("size_callback"); },
[&](const ::nlohmann::json& j, std::ptrdiff_t i) {
return absl::OkStatus();
}),
MatchesStatus(absl::StatusCode::kUnknown, "size_callback"));
}
TEST(JsonParseArrayTest, ElementCallbackError) {
EXPECT_THAT(JsonParseArray(
::nlohmann::json{1, 2, 3},
[&](std::ptrdiff_t s) { return absl::OkStatus(); },
[&](const ::nlohmann::json& j, std::ptrdiff_t i) {
if (i == 0) return absl::OkStatus();
return absl::UnknownError("element");
}),
MatchesStatus(absl::StatusCode::kUnknown,
"Error parsing value at position 1: element"));
}
TEST(JsonBindingTest, Example) {
struct Foo {
int x;
std::string y;
std::optional<int> z;
};
constexpr auto FooBinder = [] {
return jb::Object(
jb::Member("x", jb::Projection(&Foo::x)),
jb::Member("y", jb::Projection(&Foo::y, jb::DefaultValue([](auto* y) {
*y = "default";
}))),
jb::Member("z", jb::Projection(&Foo::z)));
};
EXPECT_EQ(::nlohmann::json({{"x", 3}}),
jb::ToJson(Foo{3, "default", std::nullopt}, FooBinder(),
tensorstore::IncludeDefaults{false}));
auto value =
jb::FromJson<Foo>({{"x", 3}, {"y", "value"}, {"z", 10}}, FooBinder())
.value();
EXPECT_EQ(3, value.x);
EXPECT_EQ("value", value.y);
EXPECT_EQ(10, value.z);
}
TEST(JsonBindingTest, SequenceOrder) {
auto binder = jb::Sequence(
[](auto is_loading, const auto& options, int* obj, auto* j) {
*obj = 1;
return absl::OkStatus();
},
[](auto is_loading, const auto& options, int* obj, auto* j) {
*obj = 3;
return absl::OkStatus();
});
int x = 0;
::nlohmann::json j({{"x", 3}});
EXPECT_TRUE(binder(std::true_type{}, jb::NoOptions{}, &x, &j).ok());
EXPECT_EQ(3, x);
EXPECT_TRUE(binder(std::false_type{}, jb::NoOptions{}, &x, &j).ok());
EXPECT_EQ(1, x);
}
TEST(JsonBindingTest, ValueAsBinder) {
tensorstore::TestJsonBinderRoundTrip<bool>(
{
{true, ::nlohmann::json(true)},
},
jb::ValueAsBinder);
tensorstore::TestJsonBinderRoundTrip<std::int64_t>(
{
{3, ::nlohmann::json(3)},
},
jb::ValueAsBinder);
tensorstore::TestJsonBinderRoundTrip<uint64_t>(
{
{4, ::nlohmann::json(4)},
},
jb::ValueAsBinder);
tensorstore::TestJsonBinderRoundTrip<double>(
{
{5, ::nlohmann::json(5)},
{5.0, ::nlohmann::json(5.0)},
},
jb::ValueAsBinder);
tensorstore::TestJsonBinderRoundTrip<std::string>(
{
{"a", ::nlohmann::json("a")},
{"", ::nlohmann::json("")},
},
jb::ValueAsBinder);
}
TEST(JsonBindingTest, LooseValueAsBinder) {
using testing::Eq;
tensorstore::TestJsonBinderFromJson<bool>(
{
{::nlohmann::json(true), Eq(true)},
{::nlohmann::json("true"), Eq(true)},
},
jb::LooseValueAsBinder);
tensorstore::TestJsonBinderFromJson<std::int64_t>(
{
{::nlohmann::json(3), Eq(3)},
{::nlohmann::json(3.0), Eq(3)},
{::nlohmann::json("3"), Eq(3)},
},
jb::LooseValueAsBinder);
tensorstore::TestJsonBinderFromJson<uint64_t>(
{
{::nlohmann::json(4), Eq(4)},
{::nlohmann::json(4.0), Eq(4)},
{::nlohmann::json("4"), Eq(4)},
},
jb::LooseValueAsBinder);
tensorstore::TestJsonBinderFromJson<double>(
{
{::nlohmann::json(5.0), Eq(5.0)},
{::nlohmann::json(5), Eq(5.0)},
{::nlohmann::json("5"), Eq(5.0)},
},
jb::LooseValueAsBinder);
tensorstore::TestJsonBinderRoundTrip<std::string>(
{
{"a", ::nlohmann::json("a")},
{"", ::nlohmann::json("")},
},
jb::LooseValueAsBinder);
}
TEST(JsonBindingTest, NonEmptyStringBinder) {
using testing::Eq;
tensorstore::TestJsonBinderRoundTrip<std::string>(
{
{"a", ::nlohmann::json("a")},
},
jb::NonEmptyStringBinder);
tensorstore::TestJsonBinderFromJson<std::string>(
{
{"", MatchesStatus(absl::StatusCode::kInvalidArgument,
"Validation of string failed, received: \"\"")},
},
jb::NonEmptyStringBinder);
}
TEST(JsonBindingTest, FloatBinders) {
using testing::Eq;
tensorstore::TestJsonBinderFromJson<float>(
{
{::nlohmann::json(5.0), Eq(5.0f)},
{::nlohmann::json(5), Eq(5.0f)},
},
jb::FloatBinder);
tensorstore::TestJsonBinderFromJson<double>(
{
{::nlohmann::json(5.0), Eq(5.0)},
{::nlohmann::json(5), Eq(5.0)},
},
jb::FloatBinder);
tensorstore::TestJsonBinderFromJson<float>(
{
{::nlohmann::json(5.0), Eq(5.0f)},
{::nlohmann::json(5), Eq(5.0f)},
{::nlohmann::json("5"), Eq(5.0f)},
},
jb::LooseFloatBinder);
tensorstore::TestJsonBinderFromJson<double>(
{
{::nlohmann::json(5.0), Eq(5.0)},
{::nlohmann::json(5), Eq(5.0)},
{::nlohmann::json("5"), Eq(5.0)},
},
jb::LooseFloatBinder);
}
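// When the value equals the default and IncludeDefaults{false} is set,
// saving yields a discarded JSON value (i.e. the member is omitted).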
TEST(JsonBindingTest, DefaultValueDiscarded) {
const auto binder =
jb::DefaultValue([](auto* obj) { *obj = 3; },
jb::DefaultValue([](auto* obj) { *obj = 3; }));
tensorstore::TestJsonBinderRoundTrip<int>(
{
{3, ::nlohmann::json(::nlohmann::json::value_t::discarded)},
{4, 4},
},
binder, tensorstore::IncludeDefaults{false});
tensorstore::TestJsonBinderRoundTrip<int>(
{
{3, 3},
{4, 4},
},
binder, tensorstore::IncludeDefaults{true});
}
TEST(JsonBindingTest, GetterSetter) {
struct Foo {
int x;
int get_x() const { return x; }
void set_x(int value) { this->x = value; }
};
const auto FooBinder =
jb::Object(jb::Member("x", jb::GetterSetter(&Foo::get_x, &Foo::set_x)));
EXPECT_EQ(::nlohmann::json({{"x", 3}}), jb::ToJson(Foo{3}, FooBinder));
auto value = jb::FromJson<Foo>({{"x", 3}}, FooBinder).value();
EXPECT_EQ(3, value.x);
}
TEST(JsonBindingTest, Constant) {
const auto binder = jb::Constant([] { return 3; });
EXPECT_THAT(jb::ToJson("ignored", binder),
::testing::Optional(::nlohmann::json(3)));
EXPECT_THAT(jb::FromJson<std::string>(::nlohmann::json(3), binder),
::testing::Optional(std::string{}));
EXPECT_THAT(jb::FromJson<std::string>(::nlohmann::json(4), binder),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Expected 3, but received: 4"));
}
TEST(JsonBindingTest, ObjectMember) {
tensorstore::TestJsonBinderRoundTrip<int>(
{
{3, ::nlohmann::json({{"x", 3}})},
},
jb::Object(jb::Member("x")));
}
TEST(JsonBindingTest, ObjectOptionalMember) {
struct Foo {
int x = 1;
};
const auto FooBinder =
jb::Object(jb::OptionalMember("x", jb::Projection(&Foo::x)),
jb::DiscardExtraMembers);
EXPECT_EQ(::nlohmann::json({{"x", 3}}), jb::ToJson(Foo{3}, FooBinder));
{
auto value = jb::FromJson<Foo>({{"x", 3}}, FooBinder).value();
EXPECT_EQ(3, value.x);
}
{
auto value = jb::FromJson<Foo>({{"y", 3}}, FooBinder).value();
EXPECT_EQ(1, value.x);
}
}
TEST(JsonBindingTest, StaticRankBox) {
using Value = tensorstore::Box<3>;
const auto binder = jb::Object(
jb::Member("origin", jb::Projection([](auto& x) { return x.origin(); })),
jb::Member("shape", jb::Projection([](auto& x) { return x.shape(); })));
tensorstore::TestJsonBinderRoundTrip<Value>(
{
{Value({1, 2, 3}, {4, 5, 6}),
{{"origin", {1, 2, 3}}, {"shape", {4, 5, 6}}}},
},
binder);
}
TEST(JsonBindingTest, DynamicRankBox) {
using Value = tensorstore::Box<>;
const auto binder = jb::Object(
jb::Member("rank", jb::GetterSetter(
[](auto& x) { return x.rank(); },
[](auto& x, tensorstore::DimensionIndex rank) {
x.set_rank(rank);
},
jb::Integer(0))),
jb::Member("origin", jb::Projection([](auto& x) { return x.origin(); })),
jb::Member("shape", jb::Projection([](auto& x) { return x.shape(); })));
tensorstore::TestJsonBinderRoundTrip<Value>(
{
{Value({1, 2, 3}, {4, 5, 6}),
{{"rank", 3}, {"origin", {1, 2, 3}}, {"shape", {4, 5, 6}}}},
},
binder);
}
TEST(JsonBindingTest, Null) {
tensorstore::TestJsonBinderRoundTrip<std::nullptr_t>({
{nullptr, nullptr},
});
tensorstore::TestJsonBinderFromJson<std::nullptr_t>({
{42, MatchesStatus(absl::StatusCode::kInvalidArgument,
"Expected null, but received: 42")},
});
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/json_binding/json_binding.h | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/json_binding/json_binding_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
54ed2f8c-feef-413a-849e-6f58a65a0f95 | cpp | google/arolla | expr | arolla/expr/expr.cc | arolla/expr/expr_test.cc | #include "arolla/expr/expr.h"
#include <algorithm>
#include <cstddef>
#include <initializer_list>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_join.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "arolla/expr/expr_attributes.h"
#include "arolla/expr/expr_debug_string.h"
#include "arolla/expr/expr_node.h"
#include "arolla/expr/expr_operator.h"
#include "arolla/expr/expr_operator_signature.h"
#include "arolla/expr/expr_visitor.h"
#include "arolla/expr/qtype_utils.h"
#include "arolla/expr/registered_expr_operator.h"
#include "arolla/util/status.h"
#include "arolla/util/status_macros_backport.h"
namespace arolla::expr {
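// Lowers `node` one level via its operator. The lowered node's attributes
// must be a superset of the original's; a mismatch indicates a broken
// InferAttributes() or GetOutputType() in the operator.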
absl::StatusOr<ExprNodePtr> ToLowerNode(const ExprNodePtr& node) {
const auto& op = node->op();
if (op == nullptr) {
return node;
}
ASSIGN_OR_RETURN(auto result, op->ToLowerLevel(node),
_ << "while processing node " << GetDebugSnippet(node));
if (!node->attr().IsSubsetOf(result->attr())) {
return absl::FailedPreconditionError(absl::StrFormat(
"expression %s attributes changed in ToLower from %s to "
"%s; this indicates incorrect InferAttributes() or GetOutputType() "
"of the operator %s",
GetDebugSnippet(node), absl::FormatStreamed(node->attr()),
absl::FormatStreamed(result->attr()), op->display_name()));
}
return result;
}
absl::StatusOr<ExprNodePtr> ToLowest(const ExprNodePtr& expr) {
return DeepTransform(expr, &ToLowerNode);
}
namespace {
struct ExprNodeFormatter {
void operator()(std::string* out, ExprNodePtr node) const {
absl::StrAppend(out, GetDebugSnippet(node));
}
};
bool AreExprAttributesTheSame(absl::Span<const ExprNodePtr> lexprs,
absl::Span<const ExprNodePtr> rexprs) {
if (lexprs.size() != rexprs.size()) {
return false;
}
for (size_t i = 0; i != lexprs.size(); ++i) {
if (!lexprs[i]->attr().IsIdenticalTo(rexprs[i]->attr())) {
return false;
}
}
return true;
}
}
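// Builds an operator node over `deps`, inferring the output attributes from
// the dependencies' attributes.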
absl::StatusOr<ExprNodePtr> MakeOpNode(ExprOperatorPtr op,
std::vector<ExprNodePtr> deps) {
ASSIGN_OR_RETURN(auto output_attr, op->InferAttributes(GetExprAttrs(deps)),
_ << "while calling " << op->display_name() << " with args {"
<< absl::StrJoin(deps, ", ", ExprNodeFormatter()) << "}");
return ExprNode::UnsafeMakeOperatorNode(std::move(op), std::move(deps),
std::move(output_attr));
}
absl::StatusOr<ExprNodePtr> BindOp(
ExprOperatorPtr op, absl::Span<const ExprNodePtr> args,
const absl::flat_hash_map<std::string, ExprNodePtr>& kwargs) {
ASSIGN_OR_RETURN(auto signature, op->GetSignature());
ASSIGN_OR_RETURN(
auto bound_args, BindArguments(signature, args, kwargs),
_ << "while binding operator '" << op->display_name() << "'");
return MakeOpNode(std::move(op), std::move(bound_args));
}
absl::StatusOr<ExprNodePtr> BindOp(
absl::string_view op_name, absl::Span<const ExprNodePtr> args,
const absl::flat_hash_map<std::string, ExprNodePtr>& kwargs) {
ASSIGN_OR_RETURN(auto op, LookupOperator(op_name));
return BindOp(std::move(op), args, kwargs);
}
absl::StatusOr<ExprNodePtr> WithNewOperator(const ExprNodePtr& node,
ExprOperatorPtr op) {
if (!node->is_op()) {
return absl::InvalidArgumentError(
"WithNewOperator works only with operator nodes");
}
return MakeOpNode(std::move(op), node->node_deps());
}
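// Rebuilds `node` over `deps`. Returns the original node when the
// dependencies are unchanged (by fingerprint), and skips attribute
// re-inference when the new dependencies carry identical attributes.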
absl::StatusOr<ExprNodePtr> WithNewDependencies(const ExprNodePtr& node,
std::vector<ExprNodePtr> deps) {
const auto& old_deps = node->node_deps();
if (absl::c_equal(old_deps, deps, [](const auto& lhs, const auto& rhs) {
return lhs->fingerprint() == rhs->fingerprint();
})) {
return node;
}
if (node->is_op()) {
if (AreExprAttributesTheSame(old_deps, deps)) {
return ExprNode::UnsafeMakeOperatorNode(ExprOperatorPtr(node->op()),
std::move(deps),
ExprAttributes(node->attr()));
} else {
return MakeOpNode(node->op(), std::move(deps));
}
}
if (!deps.empty()) {
return absl::InvalidArgumentError(
"only operator nodes can have dependencies");
}
return node;
}
namespace {
template <typename Strings>
std::vector<std::string> SortedStrings(const Strings& strings) {
std::vector<std::string> result;
result.reserve(strings.size());
for (const auto& str : strings) {
result.emplace_back(str);
}
std::sort(result.begin(), result.end());
return result;
}
}
std::vector<std::string> GetLeafKeys(const ExprNodePtr& expr) {
absl::flat_hash_set<absl::string_view> result;
for (const auto& node : VisitorOrder(expr)) {
if (node->is_leaf()) {
result.emplace(node->leaf_key());
}
}
return SortedStrings(result);
}
std::vector<std::string> GetPlaceholderKeys(const ExprNodePtr& expr) {
absl::flat_hash_set<absl::string_view> result;
for (const auto& node : VisitorOrder(expr)) {
if (node->is_placeholder()) {
result.emplace(node->placeholder_key());
}
}
return SortedStrings(result);
}
absl::StatusOr<ExprNodePtr> CallOp(
absl::StatusOr<ExprOperatorPtr> status_or_op,
std::initializer_list<absl::StatusOr<ExprNodePtr>> status_or_args,
std::initializer_list<std::pair<std::string, absl::StatusOr<ExprNodePtr>>>
status_or_kwargs) {
ASSIGN_OR_RETURN(auto op, std::move(status_or_op));
ASSIGN_OR_RETURN(std::vector<ExprNodePtr> args,
LiftStatusUp(absl::Span<const absl::StatusOr<ExprNodePtr>>(
status_or_args)));
ASSIGN_OR_RETURN((absl::flat_hash_map<std::string, ExprNodePtr> kwargs),
LiftStatusUp(status_or_kwargs));
return BindOp(op, args, kwargs);
}
absl::StatusOr<ExprNodePtr> CallOp(
absl::StatusOr<ExprOperatorPtr> status_or_op,
std::vector<absl::StatusOr<ExprNodePtr>> status_or_args,
absl::flat_hash_map<std::string, absl::StatusOr<ExprNodePtr>>
status_or_kwargs) {
ASSIGN_OR_RETURN(auto op, std::move(status_or_op));
ASSIGN_OR_RETURN(auto args,
LiftStatusUp(absl::Span<const absl::StatusOr<ExprNodePtr>>(
status_or_args)));
ASSIGN_OR_RETURN((absl::flat_hash_map<std::string, ExprNodePtr> kwargs),
LiftStatusUp(status_or_kwargs));
return BindOp(op, args, kwargs);
}
absl::StatusOr<ExprNodePtr> CallOp(
absl::string_view op_name,
std::initializer_list<absl::StatusOr<ExprNodePtr>> status_or_args,
std::initializer_list<std::pair<std::string, absl::StatusOr<ExprNodePtr>>>
status_or_kwargs) {
ASSIGN_OR_RETURN(auto args,
LiftStatusUp(absl::Span<const absl::StatusOr<ExprNodePtr>>(
status_or_args)));
ASSIGN_OR_RETURN((absl::flat_hash_map<std::string, ExprNodePtr> kwargs),
LiftStatusUp(status_or_kwargs));
return BindOp(op_name, args, kwargs);
}
absl::StatusOr<ExprNodePtr> CallOp(
absl::string_view op_name,
std::vector<absl::StatusOr<ExprNodePtr>> status_or_args,
absl::flat_hash_map<std::string, absl::StatusOr<ExprNodePtr>>
status_or_kwargs) {
ASSIGN_OR_RETURN(auto args,
LiftStatusUp(absl::Span<const absl::StatusOr<ExprNodePtr>>(
status_or_args)));
ASSIGN_OR_RETURN((absl::flat_hash_map<std::string, ExprNodePtr> kwargs),
LiftStatusUp(status_or_kwargs));
return BindOp(op_name, args, kwargs);
}
} | #include "arolla/expr/expr.h"
#include <cstdint>
#include <memory>
#include <utility>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/status/status_matchers.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "arolla/expr/annotation_utils.h"
#include "arolla/expr/expr_attributes.h"
#include "arolla/expr/expr_node.h"
#include "arolla/expr/expr_operator.h"
#include "arolla/expr/expr_operator_signature.h"
#include "arolla/expr/registered_expr_operator.h"
#include "arolla/expr/testing/test_operators.h"
#include "arolla/expr/testing/testing.h"
#include "arolla/qtype/base_types.h"
#include "arolla/qtype/qtype_traits.h"
#include "arolla/qtype/typed_value.h"
#include "arolla/util/bytes.h"
#include "arolla/util/unit.h"
namespace arolla::expr {
namespace {
using ::absl_testing::IsOkAndHolds;
using ::absl_testing::StatusIs;
using ::arolla::testing::EqualsExpr;
using ::arolla::testing::WithNameAnnotation;
using ::arolla::testing::WithQTypeAnnotation;
using ::testing::ElementsAre;
using ::testing::Eq;
using ::testing::Not;
TEST(ExprTest, CallOp) {
ASSERT_OK_AND_ASSIGN(auto op, LookupOperator("math.add"));
EXPECT_TRUE(IsRegisteredOperator(op));
ASSERT_OK_AND_ASSIGN(auto expr, CallOp(op, {Leaf("a"), Leaf("b")}));
EXPECT_TRUE(expr->is_op());
EXPECT_TRUE(IsRegisteredOperator(expr->op()));
ASSERT_OK_AND_ASSIGN(auto expected_expr, CallOp(op, {Leaf("a"), Leaf("b")}));
EXPECT_THAT(expr, EqualsExpr(expected_expr));
}
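// Exercises the signature "p0, p1=, *tail": one required parameter, one
// defaulted parameter, a variadic tail, and keyword binding of p1.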
TEST(ExprTest, AdvancedCallOp) {
auto x = Leaf("x");
auto y = Leaf("y");
auto z = Leaf("z");
auto w = Leaf("w");
auto def = Literal(kUnit);
absl::StatusOr<ExprNodePtr> x_or(x);
absl::StatusOr<ExprNodePtr> y_or(y);
absl::StatusOr<ExprNodePtr> z_or(z);
absl::StatusOr<ExprNodePtr> w_or(w);
ASSERT_OK_AND_ASSIGN(const auto sig,
ExprOperatorSignature::Make("p0, p1=, *tail", kUnit));
const auto op = std::make_shared<testing::DummyOp>(
"test.expr_test.advanced_callop.dummy_op", sig);
EXPECT_THAT(
CallOp(op, {}),
StatusIs(absl::StatusCode::kInvalidArgument));
{
ASSERT_OK_AND_ASSIGN(auto expected_expr, MakeOpNode(op, {x, def}));
EXPECT_THAT(CallOp(op, {x_or}), IsOkAndHolds(EqualsExpr(expected_expr)));
}
{
ASSERT_OK_AND_ASSIGN(auto expected_expr, MakeOpNode(op, {x, y}));
EXPECT_THAT(CallOp(op, {x_or, y_or}),
IsOkAndHolds(EqualsExpr(expected_expr)));
}
{
ASSERT_OK_AND_ASSIGN(auto expected_expr, MakeOpNode(op, {x, y, z}));
EXPECT_THAT(CallOp(op, {x_or, y_or, z_or}),
IsOkAndHolds(EqualsExpr(expected_expr)));
}
{
ASSERT_OK_AND_ASSIGN(auto expected_expr, MakeOpNode(op, {x, y, z, w}));
EXPECT_THAT(CallOp(op, {x_or, y_or, z_or, w_or}),
IsOkAndHolds(EqualsExpr(expected_expr)));
}
{
ASSERT_OK_AND_ASSIGN(auto expected_expr, MakeOpNode(op, {x, y}));
EXPECT_THAT(CallOp(op, {x_or}, {{"p1", y_or}}),
IsOkAndHolds(EqualsExpr(expected_expr)));
}
}
TEST(ExprTest, LiftStatus) {
auto x = Leaf("x");
auto y = Leaf("y");
ASSERT_OK_AND_ASSIGN(auto expected_expr, CallOp("math.add", {x, y}));
EXPECT_THAT(CallOp("math.add", {Leaf("x"), Leaf("y")}),
IsOkAndHolds(EqualsExpr(expected_expr)));
EXPECT_THAT(
CallOp("math.add", {Leaf("x"), absl::InvalidArgumentError("error")}),
StatusIs(absl::StatusCode::kInvalidArgument));
}
TEST(ExprTest, Literal) {
const Bytes bytes("a long string literal to ensure memory allocation");
const TypedValue qvalue = TypedValue::FromValue(bytes);
{
auto x = Literal(bytes);
ASSERT_OK_AND_ASSIGN(Bytes x_bytes, x->qvalue()->As<Bytes>());
EXPECT_THAT(x_bytes, Eq(bytes));
}
{
auto x = Literal<Bytes>(bytes);
ASSERT_OK_AND_ASSIGN(Bytes x_bytes, x->qvalue()->As<Bytes>());
EXPECT_THAT(x_bytes, Eq(bytes));
}
{
auto copy = bytes;
auto *data_raw_ptr = absl::string_view(copy).data();
auto x = Literal(std::move(copy));
EXPECT_EQ(absl::string_view(x->qvalue()->UnsafeAs<Bytes>()).data(),
data_raw_ptr);
}
{
auto copy = bytes;
auto *data_raw_ptr = absl::string_view(copy).data();
auto x = Literal<Bytes>(std::move(copy));
EXPECT_EQ(absl::string_view(x->qvalue()->UnsafeAs<Bytes>()).data(),
data_raw_ptr);
}
{
auto x = Literal(qvalue);
EXPECT_EQ(x->qvalue()->GetType(), qvalue.GetType());
EXPECT_EQ(x->qvalue()->GetRawPointer(), qvalue.GetRawPointer());
}
{
auto fn = [&]() { return qvalue; };
auto x = Literal(fn());
EXPECT_EQ(x->qvalue()->GetType(), qvalue.GetType());
EXPECT_EQ(x->qvalue()->GetRawPointer(), qvalue.GetRawPointer());
}
{
auto x = Literal(TypedValue(qvalue));
EXPECT_EQ(x->qvalue()->GetType(), qvalue.GetType());
EXPECT_EQ(x->qvalue()->GetRawPointer(), qvalue.GetRawPointer());
}
}
TEST(ExprTest, LiteralHash) {
auto x = Literal(1.0);
auto x1 = Literal(1.0);
auto y = Literal(2.0);
auto z = Literal(1);
EXPECT_THAT(x, EqualsExpr(x1));
EXPECT_THAT(x, Not(EqualsExpr(y)));
EXPECT_THAT(x, Not(EqualsExpr(z)));
}
TEST(ExprTest, WithNewOperator) {
ASSERT_OK_AND_ASSIGN(auto op1, LookupOperator("math.add"));
ASSERT_OK_AND_ASSIGN(auto op2, LookupOperator("math.multiply"));
ASSERT_OK_AND_ASSIGN(auto actual_value, CallOp(op1, {Leaf("x"), Leaf("y")}));
ASSERT_OK_AND_ASSIGN(actual_value, WithNewOperator(actual_value, op2));
ASSERT_OK_AND_ASSIGN(auto expected_value,
CallOp(op2, {Leaf("x"), Leaf("y")}));
EXPECT_THAT(actual_value, EqualsExpr(expected_value));
}
TEST(ExprTest, WithName) {
ASSERT_OK_AND_ASSIGN(auto named_literal,
WithNameAnnotation(Literal(1.0), "a"));
EXPECT_EQ(ReadNameAnnotation(named_literal), "a");
ASSERT_OK_AND_ASSIGN(auto named_leaf, WithNameAnnotation(Leaf("x"), "a"));
EXPECT_EQ(ReadNameAnnotation(named_leaf), "a");
EXPECT_EQ(named_leaf->node_deps()[0]->leaf_key(), "x");
ASSERT_OK_AND_ASSIGN(auto named_placeholder,
WithNameAnnotation(Placeholder("x"), "a"));
EXPECT_EQ(ReadNameAnnotation(named_placeholder), "a");
EXPECT_EQ(named_placeholder->node_deps()[0]->placeholder_key(), "x");
}
TEST(ExprTest, LeafHash) {
auto x = Leaf("x");
auto x1 = Leaf("x");
auto y = Leaf("y");
ASSERT_OK_AND_ASSIGN(auto float_x, WithQTypeAnnotation(x, GetQType<float>()));
ASSERT_OK_AND_ASSIGN(auto float_x1,
WithQTypeAnnotation(x1, GetQType<float>()));
ASSERT_OK_AND_ASSIGN(auto int_x, WithQTypeAnnotation(x, GetQType<int32_t>()));
EXPECT_THAT(x, EqualsExpr(x1));
EXPECT_THAT(float_x, EqualsExpr(float_x1));
EXPECT_THAT(x, Not(EqualsExpr(y)));
EXPECT_THAT(x, Not(EqualsExpr(float_x)));
EXPECT_THAT(int_x, Not(EqualsExpr(float_x)));
}
TEST(ExprTest, PlaceholderHash) {
auto x = Placeholder("x");
auto x1 = Placeholder("x");
auto y = Placeholder("y");
EXPECT_THAT(x, EqualsExpr(x1));
EXPECT_THAT(x, Not(EqualsExpr(y)));
}
TEST(ExprTest, GetLeafKeys) {
auto l_a = Leaf("a");
auto l_b = Leaf("b");
auto p_a = Placeholder("a");
auto p_b = Placeholder("b");
{
ASSERT_OK_AND_ASSIGN(const auto expr, CallOp("math.add", {p_a, p_b}));
EXPECT_THAT(GetLeafKeys(expr), ElementsAre());
}
{
ASSERT_OK_AND_ASSIGN(const auto expr, CallOp("math.add", {l_a, p_b}));
EXPECT_THAT(GetLeafKeys(expr), ElementsAre("a"));
}
{
ASSERT_OK_AND_ASSIGN(const auto expr, CallOp("math.add", {p_a, l_b}));
EXPECT_THAT(GetLeafKeys(expr), ElementsAre("b"));
}
{
ASSERT_OK_AND_ASSIGN(const auto expr, CallOp("math.add", {l_a, l_b}));
EXPECT_THAT(GetLeafKeys(expr), ElementsAre("a", "b"));
}
}
TEST(ExprTest, GetPlaceholderKeys) {
auto l_a = Leaf("a");
auto l_b = Leaf("b");
auto p_a = Placeholder("a");
auto p_b = Placeholder("b");
{
ASSERT_OK_AND_ASSIGN(const auto expr, CallOp("math.add", {p_a, p_b}));
EXPECT_THAT(GetPlaceholderKeys(expr), ElementsAre("a", "b"));
}
{
ASSERT_OK_AND_ASSIGN(const auto expr, CallOp("math.add", {l_a, p_b}));
EXPECT_THAT(GetPlaceholderKeys(expr), ElementsAre("b"));
}
{
ASSERT_OK_AND_ASSIGN(const auto expr, CallOp("math.add", {p_a, l_b}));
EXPECT_THAT(GetPlaceholderKeys(expr), ElementsAre("a"));
}
{
ASSERT_OK_AND_ASSIGN(const auto expr, CallOp("math.add", {l_a, l_b}));
EXPECT_THAT(GetPlaceholderKeys(expr), ElementsAre());
}
}
TEST(ExprTest, WithNewDependencies) {
auto l_a = Leaf("a");
auto p_b = Placeholder("b");
auto lit = Literal(3.14);
ASSERT_OK_AND_ASSIGN(const auto expr, CallOp("math.add", {l_a, p_b}));
EXPECT_THAT(WithNewDependencies(l_a, {}), IsOkAndHolds(EqualsExpr(l_a)));
EXPECT_THAT(WithNewDependencies(p_b, {}), IsOkAndHolds(EqualsExpr(p_b)));
EXPECT_THAT(WithNewDependencies(lit, {}), IsOkAndHolds(EqualsExpr(lit)));
ASSERT_OK_AND_ASSIGN(const auto actual_expr,
WithNewDependencies(expr, {p_b, l_a}));
ASSERT_OK_AND_ASSIGN(const auto expected_expr,
CallOp("math.add", {p_b, l_a}));
EXPECT_THAT(actual_expr, EqualsExpr(expected_expr));
}
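// Dependencies identical by fingerprint must return the very same node
// object; changed dependencies must produce a new one.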
TEST(ExprTest, WithNewDependenciesOptimizations) {
auto l_a = Leaf("a");
auto l_b = Leaf("b");
auto l_a2 = Leaf("a");
ASSERT_OK_AND_ASSIGN(const auto expr, CallOp("math.add", {l_a, l_a}));
ASSERT_OK_AND_ASSIGN(const auto expr2,
WithNewDependencies(expr, {l_a2, l_a2}));
EXPECT_EQ(expr.get(), expr2.get());
ASSERT_OK_AND_ASSIGN(const auto expr3, WithNewDependencies(expr, {l_b, l_a}));
EXPECT_NE(expr.get(), expr3.get());
}
TEST(ExprTest, WithNewDependenciesAttr) {
auto l_a = Leaf("a");
ASSERT_OK_AND_ASSIGN(
const auto l_a_int,
CallOp("annotation.qtype", {l_a, Literal(GetQType<int>())}));
ASSERT_OK_AND_ASSIGN(const auto expr, CallOp("math.add", {l_a, l_a}));
EXPECT_TRUE(expr->attr().IsIdenticalTo(ExprAttributes{}));
ASSERT_OK_AND_ASSIGN(const auto expr_int,
WithNewDependencies(expr, {l_a_int, l_a_int}));
EXPECT_TRUE(expr_int->attr().IsIdenticalTo(ExprAttributes(GetQType<int>())));
ASSERT_OK_AND_ASSIGN(const auto expr2,
WithNewDependencies(expr_int, {l_a_int, l_a}));
EXPECT_TRUE(expr2->attr().IsIdenticalTo(ExprAttributes{}));
}
TEST(ExprTest, RegisterOperatorAlias) {
CHECK_OK(RegisterOperatorAlias("alias_test.add3", "test.add3").status());
CHECK_OK(RegisterOperatorAlias("alias_test.power", "test.power").status());
{
ASSERT_OK_AND_ASSIGN(auto expr,
CallOp("alias_test.power", {Leaf("x"), Leaf("y")}));
EXPECT_THAT(ToLowerNode(expr), IsOkAndHolds(EqualsExpr(expr)));
}
{
ASSERT_OK_AND_ASSIGN(auto expr, CallOp("alias_test.add3",
{Leaf("x"), Leaf("y"), Leaf("z")}));
ASSERT_OK_AND_ASSIGN(
auto expected_expr,
CallOp("test.add3", {Leaf("x"), Leaf("y"), Leaf("z")}));
ASSERT_OK_AND_ASSIGN(expected_expr, ToLowerNode(expected_expr));
EXPECT_THAT(ToLowerNode(expr), IsOkAndHolds(EqualsExpr(expected_expr)));
}
{
ASSERT_OK_AND_ASSIGN(
auto expr,
CallOp("alias_test.add3", {Literal(5), Literal(6), Literal(7)}));
EXPECT_EQ(expr->qtype(), GetQType<int>());
}
{
ASSERT_OK_AND_ASSIGN(auto alias_op, LookupOperator("alias_test.add3"));
ASSERT_OK_AND_ASSIGN(auto op, LookupOperator("test.add3"));
ASSERT_OK_AND_ASSIGN(auto actual_docstring, alias_op->GetDoc());
ASSERT_OK_AND_ASSIGN(auto expected_docstring, op->GetDoc());
EXPECT_EQ(actual_docstring, expected_docstring);
ASSERT_OK_AND_ASSIGN(auto actual_signature, alias_op->GetSignature());
ASSERT_OK_AND_ASSIGN(auto expected_signature, op->GetSignature());
EXPECT_EQ(GetExprOperatorSignatureSpec(actual_signature),
GetExprOperatorSignatureSpec(expected_signature));
}
}
TEST(ExprTest, ToLowerNode) {
auto x = Leaf("x");
auto y = Leaf("y");
auto z = Leaf("z");
ASSERT_OK_AND_ASSIGN(auto expr, CallOp("test.add3", {x, y, z}));
ASSERT_OK_AND_ASSIGN(auto actual_expr, ToLowerNode(expr));
ASSERT_OK_AND_ASSIGN(auto xy, CallOp("math.add", {x, y}));
ASSERT_OK_AND_ASSIGN(auto expected_expr, CallOp("math.add", {xy, z}));
EXPECT_THAT(actual_expr, EqualsExpr(expected_expr));
}
TEST(ExprTest, ToLowest) {
auto a = Leaf("a");
auto b = Leaf("b");
auto c = Leaf("c");
auto d = Leaf("d");
ASSERT_OK_AND_ASSIGN(auto expr, CallOp("test.add4", {a, b, c, d}));
ASSERT_OK_AND_ASSIGN(auto actual_expr, ToLowest(expr));
ASSERT_OK_AND_ASSIGN(auto ab, CallOp("math.add", {a, b}));
ASSERT_OK_AND_ASSIGN(auto abc, CallOp("math.add", {ab, c}));
ASSERT_OK_AND_ASSIGN(auto abcd, CallOp("math.add", {abc, d}));
EXPECT_THAT(actual_expr, EqualsExpr(abcd));
}
}
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/expr/expr.cc | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/expr/expr_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
84951f39-a11a-4836-802f-02491812829a | cpp | tensorflow/tensorflow | rgb_to_grayscale | tensorflow/lite/experimental/ml_adjacent/algo/rgb_to_grayscale.cc | tensorflow/lite/experimental/ml_adjacent/algo/rgb_to_grayscale_test.cc | #include "tensorflow/lite/experimental/ml_adjacent/algo/rgb_to_grayscale.h"
#include "tensorflow/lite/experimental/ml_adjacent/lib.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
namespace ml_adj {
namespace rgb_to_grayscale {
namespace {
using ::ml_adj::algo::Algo;
using ::ml_adj::algo::InputPack;
using ::ml_adj::algo::OutputPack;
using ::ml_adj::data::DataRef;
using ::ml_adj::data::MutableDataRef;
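// Converts interleaved RGB pixels to a single grayscale channel using the
// standard ITU-R BT.601 luma weights (0.2989 R + 0.5870 G + 0.1140 B).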
inline void ConvertRgbToGrayscale(dim_t batches, dim_t height, dim_t width,
const float* input_data, float* output_data) {
const dim_t output_num_pixels = batches * width * height;
constexpr float kRgb2GrayscaleKernel[] = {0.2989f, 0.5870f, 0.1140f};
const float* src_ptr = input_data;
float* dst_ptr = output_data;
for (int i = 0; i < output_num_pixels; ++i) {
*dst_ptr = kRgb2GrayscaleKernel[0] * src_ptr[0] +
kRgb2GrayscaleKernel[1] * src_ptr[1] +
kRgb2GrayscaleKernel[2] * src_ptr[2];
src_ptr += 3;
dst_ptr++;
}
}
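// Algo entry point: expects one NHWC float input with exactly three
// channels and resizes the output to the matching single-channel shape.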
void ComputeRgbToGrayscale(const InputPack& inputs, const OutputPack& outputs) {
TFLITE_DCHECK(inputs.size() == 1);
TFLITE_DCHECK(outputs.size() == 1);
const DataRef* img = inputs[0];
const float* img_data = reinterpret_cast<const float*>(img->Data());
const dim_t img_num_batches = img->Dims()[0];
const dim_t img_height = img->Dims()[1];
const dim_t img_width = img->Dims()[2];
const dim_t channels = img->Dims()[3];
TFLITE_DCHECK(channels == 3);
MutableDataRef* output = outputs[0];
output->Resize({img_num_batches, img_height, img_width, 1});
float* output_data = reinterpret_cast<float*>(output->Data());
ConvertRgbToGrayscale(img_num_batches, img_height, img_width, img_data,
output_data);
}
}
const Algo* Impl_RgbToGrayscale() {
static const Algo rgb_to_grayscale = {&ComputeRgbToGrayscale, nullptr};
return &rgb_to_grayscale;
}
}
} | #include "tensorflow/lite/experimental/ml_adjacent/algo/rgb_to_grayscale.h"
#include <cstring>
#include <vector>
#include <gtest/gtest.h>
#include "tensorflow/lite/experimental/ml_adjacent/data/owning_vector_ref.h"
#include "tensorflow/lite/experimental/ml_adjacent/lib.h"
using ::ml_adj::algo::Algo;
using ::ml_adj::data::OwningVectorRef;
namespace ml_adj {
namespace rgb_to_grayscale {
namespace {
struct RgbToGrayscaleTestParams {
const std::vector<dim_t> img_dims;
const std::vector<float> img_data;
const std::vector<float> expected_data;
const std::vector<dim_t> expected_shape;
};
class RgbToGrayscaleTest
: public ::testing::TestWithParam<RgbToGrayscaleTestParams> {};
TEST_P(RgbToGrayscaleTest, FloatPixelType) {
const RgbToGrayscaleTestParams& params = GetParam();
OwningVectorRef img(etype_t::f32);
img.Resize(dims_t(params.img_dims));
ASSERT_EQ(img.Bytes(), params.img_data.size() * sizeof(float));
std::memcpy(img.Data(), params.img_data.data(), img.Bytes());
OwningVectorRef output(etype_t::f32);
const Algo* rgb_to_grayscale = Impl_RgbToGrayscale();
rgb_to_grayscale->process({&img}, {&output});
ASSERT_EQ(output.Bytes(), params.expected_data.size() * sizeof(float));
ASSERT_EQ(output.Dims(), params.expected_shape);
constexpr float kAbsError = 0.1f;
const float* out_data = reinterpret_cast<float*>(output.Data());
for (int i = 0; i < output.NumElements(); ++i) {
EXPECT_NEAR(out_data[i], params.expected_data[i], kAbsError)
<< "out_data[" << i << "] = " << out_data[i] << ", expected_data[" << i
<< "] = " << params.expected_data[i];
}
}
INSTANTIATE_TEST_SUITE_P(
RgbToGrayscaleTests, RgbToGrayscaleTest,
testing::ValuesIn({
RgbToGrayscaleTestParams{{1, 3, 2, 3},
{11, 111, 211,
12, 112, 212,
21, 121, 221,
22, 122, 222,
31, 131, 231,
32, 132, 232},
{92.5f, 93.5f, 102.5f,
103.5f, 112.5f, 113.5f},
{1, 3, 2, 1}},
RgbToGrayscaleTestParams{{2, 3, 2, 3},
{11, 111, 211,
12, 112, 212,
21, 121, 221,
22, 122, 222,
31, 131, 231,
32, 132, 232,
51, 311, 411,
52, 312, 412,
61, 321, 421,
62, 322, 422,
71, 331, 431,
72, 332, 432},
{92.5f, 93.5f, 102.5f,
103.5f, 112.5f, 113.5f,
244.7f, 245.7f, 254.7f,
255.7f, 264.7f, 265.7f},
{2, 3, 2, 1}},
}));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/ml_adjacent/algo/rgb_to_grayscale.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/ml_adjacent/algo/rgb_to_grayscale_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
38da609b-05a2-4452-af29-1e55ce366fdc | cpp | tensorflow/tensorflow | iterator_range | tensorflow/core/lib/gtl/iterator_range.h | third_party/xla/xla/tsl/lib/gtl/iterator_range_test.cc | #ifndef TENSORFLOW_CORE_LIB_GTL_ITERATOR_RANGE_H_
#define TENSORFLOW_CORE_LIB_GTL_ITERATOR_RANGE_H_
#include "xla/tsl/lib/gtl/iterator_range.h"
namespace tensorflow {
namespace gtl {
using ::tsl::gtl::iterator_range;
using ::tsl::gtl::make_range;
}
}
#endif | #include "xla/tsl/lib/gtl/iterator_range.h"
#include <vector>
#include "tsl/platform/macros.h"
#include "tsl/platform/test.h"
#include "tsl/platform/types.h"
namespace tsl {
namespace gtl {
namespace {
TEST(IteratorRange, WholeVector) {
std::vector<int> v = {2, 3, 5, 7, 11, 13};
iterator_range<std::vector<int>::iterator> range(v.begin(), v.end());
int index = 0;
for (int prime : range) {
ASSERT_LT(index, v.size());
EXPECT_EQ(v[index], prime);
++index;
}
EXPECT_EQ(v.size(), index);
}
TEST(IteratorRange, VectorMakeRange) {
std::vector<int> v = {2, 3, 5, 7, 11, 13};
auto range = make_range(v.begin(), v.end());
int index = 0;
for (int prime : range) {
ASSERT_LT(index, v.size());
EXPECT_EQ(v[index], prime);
++index;
}
EXPECT_EQ(v.size(), index);
}
TEST(IteratorRange, PartArray) {
int v[] = {2, 3, 5, 7, 11, 13};
iterator_range<int*> range(&v[1], &v[4]);
int index = 1;
for (int prime : range) {
ASSERT_LT(index, TF_ARRAYSIZE(v));
EXPECT_EQ(v[index], prime);
++index;
}
EXPECT_EQ(4, index);
}
TEST(IteratorRange, ArrayMakeRange) {
int v[] = {2, 3, 5, 7, 11, 13};
auto range = make_range(&v[1], &v[4]);
int index = 1;
for (int prime : range) {
ASSERT_LT(index, TF_ARRAYSIZE(v));
EXPECT_EQ(v[index], prime);
++index;
}
EXPECT_EQ(4, index);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/lib/gtl/iterator_range.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/lib/gtl/iterator_range_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
8755924a-ced3-48b1-a66e-1a04454a738d | cpp | tensorflow/tensorflow | serial_device_batch_scheduler | tensorflow/core/kernels/batching_util/serial_device_batch_scheduler.h | tensorflow/core/kernels/batching_util/serial_device_batch_scheduler_test.cc | #ifndef TENSORFLOW_CORE_KERNELS_BATCHING_UTIL_SERIAL_DEVICE_BATCH_SCHEDULER_H_
#define TENSORFLOW_CORE_KERNELS_BATCHING_UTIL_SERIAL_DEVICE_BATCH_SCHEDULER_H_
#include <algorithm>
#include <functional>
#include <memory>
#include <random>
#include <unordered_map>
#include <vector>
#include "tensorflow/core/kernels/batching_util/batch_scheduler.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/core/threadpool.h"
#include "tensorflow/core/platform/cpu_info.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/thread_annotations.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
namespace serving {
namespace internal {
template <typename TaskType>
class SDBSBatch;
template <typename TaskType>
class SDBSQueue;
}
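// A shared batch scheduler for devices that execute batches serially.
// Batches from all queues are drained by a pool of processing threads whose
// effective size (in_flight_batches_limit_) is re-tuned after every
// batches_to_average_over batches so that the number of batches pending on
// the device tracks Options::target_pending.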
template <typename TaskType>
class SerialDeviceBatchScheduler : public std::enable_shared_from_this<
SerialDeviceBatchScheduler<TaskType>> {
public:
~SerialDeviceBatchScheduler();
struct Options {
string thread_pool_name = {"batch_threads"};
int64_t num_batch_threads = port::NumSchedulableCPUs();
int64_t full_batch_scheduling_boost_micros = 0;
Env* env = Env::Default();
int64_t initial_in_flight_batches_limit = 3;
std::function<int64()> get_pending_on_serial_device;
double target_pending = 2;
int64_t batches_to_average_over = 1000;
};
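// Validates `options` and constructs the scheduler. Ownership is shared so
// that queues can keep the scheduler alive for their own lifetime.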
static Status Create(
const Options& options,
std::shared_ptr<SerialDeviceBatchScheduler<TaskType>>* scheduler);
struct QueueOptions {
int max_batch_size = 1000;
int max_enqueued_batches = 10;
};
using BatchProcessor = std::function<void(std::unique_ptr<Batch<TaskType>>)>;
Status AddQueue(const QueueOptions& options,
BatchProcessor process_batch_callback,
std::unique_ptr<BatchScheduler<TaskType>>* queue);
double in_flight_batches_limit() {
mutex_lock l(mu_);
return in_flight_batches_limit_;
}
double recent_low_traffic_ratio() {
mutex_lock l(mu_);
return recent_low_traffic_ratio_;
}
private:
friend class internal::SDBSQueue<TaskType>;
explicit SerialDeviceBatchScheduler(const Options& options);
void ProcessBatches();
void AddBatch(const internal::SDBSBatch<TaskType>* batch);
void RemoveQueue(const internal::SDBSQueue<TaskType>* queue);
Env* env() const { return options_.env; }
const Options options_;
std::vector<const internal::SDBSBatch<TaskType>*> batches_ TF_GUARDED_BY(mu_);
std::unordered_map<const internal::SDBSQueue<TaskType>*, BatchProcessor>
queues_and_callbacks_ TF_GUARDED_BY(mu_);
std::unique_ptr<thread::ThreadPool> batch_thread_pool_;
int64_t in_flight_batches_limit_ TF_GUARDED_BY(mu_);
int64_t processing_threads_ TF_GUARDED_BY(mu_) = 0;
int64_t batch_count_ TF_GUARDED_BY(mu_) = 0;
int64_t no_batch_count_ TF_GUARDED_BY(mu_) = 0;
int64_t pending_sum_ = 0;
int64_t batch_latency_sum_ = 0;
int64_t batch_period_micros_ = 0;
double recent_low_traffic_ratio_ = 0;
mutex mu_;
SerialDeviceBatchScheduler(const SerialDeviceBatchScheduler&) = delete;
void operator=(const SerialDeviceBatchScheduler&) = delete;
};
namespace internal {
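// Client-facing queue. Tasks accumulate in an open SDBSBatch, which is
// registered with the shared scheduler as soon as it is created and closed
// once the next task no longer fits.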
template <typename TaskType>
class SDBSQueue : public BatchScheduler<TaskType> {
public:
using QueueOptions =
typename SerialDeviceBatchScheduler<TaskType>::QueueOptions;
SDBSQueue(std::shared_ptr<SerialDeviceBatchScheduler<TaskType>> scheduler,
const QueueOptions& options);
~SDBSQueue() override;
Status Schedule(std::unique_ptr<TaskType>* task) override;
size_t NumEnqueuedTasks() const override;
size_t SchedulingCapacity() const override;
void ReleaseBatch(const SDBSBatch<TaskType>* batch);
size_t max_task_size() const override { return options_.max_batch_size; }
private:
std::shared_ptr<SerialDeviceBatchScheduler<TaskType>> scheduler_;
const QueueOptions options_;
SDBSBatch<TaskType>* current_batch_ TF_GUARDED_BY(mu_) = nullptr;
int64_t num_enqueued_batches_ TF_GUARDED_BY(mu_) = 0;
int64_t num_enqueued_tasks_ TF_GUARDED_BY(mu_) = 0;
mutable mutex mu_;
SDBSQueue(const SDBSQueue&) = delete;
void operator=(const SDBSQueue&) = delete;
};
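// A batch that records its owning queue and creation time, which the
// scheduler uses to prioritize among batches.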
template <typename TaskType>
class SDBSBatch : public Batch<TaskType> {
public:
SDBSBatch(SDBSQueue<TaskType>* queue, int64_t creation_time_micros)
: queue_(queue), creation_time_micros_(creation_time_micros) {}
~SDBSBatch() override {}
SDBSQueue<TaskType>* queue() const { return queue_; }
int64_t creation_time_micros() const { return creation_time_micros_; }
private:
SDBSQueue<TaskType>* queue_;
const int64_t creation_time_micros_;
SDBSBatch(const SDBSBatch&) = delete;
void operator=(const SDBSBatch&) = delete;
};
}
template <typename TaskType>
Status SerialDeviceBatchScheduler<TaskType>::Create(
const Options& options,
std::shared_ptr<SerialDeviceBatchScheduler<TaskType>>* scheduler) {
if (options.num_batch_threads < 1) {
return errors::InvalidArgument("num_batch_threads must be positive; was ",
options.num_batch_threads);
}
if (options.initial_in_flight_batches_limit < 1) {
return errors::InvalidArgument(
"initial_in_flight_batches_limit must be positive; was ",
options.initial_in_flight_batches_limit);
}
if (options.initial_in_flight_batches_limit > options.num_batch_threads) {
return errors::InvalidArgument(
"initial_in_flight_batches_limit (",
options.initial_in_flight_batches_limit,
") should not be larger than num_batch_threads (",
options.num_batch_threads, ")");
}
if (options.full_batch_scheduling_boost_micros < 0) {
return errors::InvalidArgument(
"full_batch_scheduling_boost_micros can't be negative; was ",
options.full_batch_scheduling_boost_micros);
}
if (options.batches_to_average_over < 1) {
return errors::InvalidArgument(
"batches_to_average_over should be "
"greater than or equal to 1; was ",
options.batches_to_average_over);
}
if (options.target_pending <= 0) {
return errors::InvalidArgument(
"target_pending should be larger than zero; was ",
options.target_pending);
}
if (!options.get_pending_on_serial_device) {
return errors::InvalidArgument(
"get_pending_on_serial_device must be "
"specified");
}
scheduler->reset(new SerialDeviceBatchScheduler<TaskType>(options));
return absl::OkStatus();
}
template <typename TaskType>
SerialDeviceBatchScheduler<TaskType>::SerialDeviceBatchScheduler(
const Options& options)
: options_(options),
in_flight_batches_limit_(options.initial_in_flight_batches_limit),
processing_threads_(options.initial_in_flight_batches_limit) {
batch_thread_pool_.reset(new thread::ThreadPool(
env(), options.thread_pool_name, options.num_batch_threads));
for (int i = 0; i < processing_threads_; i++) {
batch_thread_pool_->Schedule(
std::bind(&SerialDeviceBatchScheduler<TaskType>::ProcessBatches, this));
}
}
template <typename TaskType>
SerialDeviceBatchScheduler<TaskType>::~SerialDeviceBatchScheduler() {
{
mutex_lock l(mu_);
processing_threads_ = 0;
}
batch_thread_pool_.reset();
}
template <typename TaskType>
Status SerialDeviceBatchScheduler<TaskType>::AddQueue(
const QueueOptions& options, BatchProcessor process_batch_callback,
std::unique_ptr<BatchScheduler<TaskType>>* queue) {
if (options.max_batch_size <= 0) {
return errors::InvalidArgument("max_batch_size must be positive; was ",
options.max_batch_size);
}
if (options.max_enqueued_batches <= 0) {
return errors::InvalidArgument(
"max_enqueued_batches must be positive; was ",
options.max_enqueued_batches);
}
internal::SDBSQueue<TaskType>* SDBS_queue_raw;
queue->reset(SDBS_queue_raw = new internal::SDBSQueue<TaskType>(
this->shared_from_this(), options));
mutex_lock l(mu_);
queues_and_callbacks_[SDBS_queue_raw] = process_batch_callback;
return absl::OkStatus();
}
template <typename TaskType>
void SerialDeviceBatchScheduler<TaskType>::AddBatch(
const internal::SDBSBatch<TaskType>* batch) {
mutex_lock l(mu_);
batches_.push_back(batch);
}
template <typename TaskType>
void SerialDeviceBatchScheduler<TaskType>::RemoveQueue(
const internal::SDBSQueue<TaskType>* queue) {
mutex_lock l(mu_);
queues_and_callbacks_.erase(queue);
}
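// Worker loop. Repeatedly picks the best batch -- oldest creation time,
// discounted by full_batch_scheduling_boost_micros in proportion to batch
// fullness -- runs the owning queue's callback on it, and after every
// batches_to_average_over batches re-tunes in_flight_batches_limit_ toward
// target_pending (spawning extra worker threads when the limit grows).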
template <typename TaskType>
void SerialDeviceBatchScheduler<TaskType>::ProcessBatches() {
const int64_t kIdleThreadSleepTimeMicros = 1000;
const double kMaxNoBatchRatio = .1;
const double kLowTrafficMovingAverageFactor = .1;
for (;;) {
mu_.lock();
if (processing_threads_ < 1 ||
processing_threads_ > in_flight_batches_limit_) {
processing_threads_--;
mu_.unlock();
break;
}
if (batches_.empty()) {
no_batch_count_++;
int64_t sleep_time = batch_period_micros_ ? batch_period_micros_
: kIdleThreadSleepTimeMicros;
mu_.unlock();
env()->SleepForMicroseconds(sleep_time);
continue;
}
auto best_it = batches_.begin();
double best_score =
(*best_it)->creation_time_micros() -
options_.full_batch_scheduling_boost_micros * (*best_it)->size() /
static_cast<double>((*best_it)->queue()->max_task_size());
for (auto it = batches_.begin() + 1; it != batches_.end(); it++) {
const double score =
(*it)->creation_time_micros() -
options_.full_batch_scheduling_boost_micros * (*it)->size() /
static_cast<double>((*it)->queue()->max_task_size());
if (score < best_score) {
best_score = score;
best_it = it;
}
}
const internal::SDBSBatch<TaskType>* batch = *best_it;
batches_.erase(best_it);
batch->queue()->ReleaseBatch(batch);
auto callback = queues_and_callbacks_[batch->queue()];
mu_.unlock();
int64_t start_time = env()->NowMicros();
callback(std::unique_ptr<Batch<TaskType>>(
const_cast<internal::SDBSBatch<TaskType>*>(batch)));
int64_t end_time = env()->NowMicros();
mu_.lock();
batch_count_++;
batch_latency_sum_ += end_time - start_time;
pending_sum_ += options_.get_pending_on_serial_device();
if (batch_count_ == options_.batches_to_average_over) {
recent_low_traffic_ratio_ *= (1 - kLowTrafficMovingAverageFactor);
if (no_batch_count_ < kMaxNoBatchRatio * batch_count_) {
double avg_pending = pending_sum_ / static_cast<double>(batch_count_);
batch_period_micros_ =
batch_latency_sum_ / batch_count_ / in_flight_batches_limit_;
in_flight_batches_limit_ +=
std::round(options_.target_pending - avg_pending);
in_flight_batches_limit_ =
std::max(in_flight_batches_limit_, int64_t{1});
in_flight_batches_limit_ =
std::min(in_flight_batches_limit_, options_.num_batch_threads);
if (processing_threads_ > 0 &&
processing_threads_ < in_flight_batches_limit_) {
int extra_threads = in_flight_batches_limit_ - processing_threads_;
for (int i = 0; i < extra_threads; i++) {
batch_thread_pool_->Schedule(std::bind(
&SerialDeviceBatchScheduler<TaskType>::ProcessBatches, this));
}
processing_threads_ = in_flight_batches_limit_;
}
} else {
recent_low_traffic_ratio_ += kLowTrafficMovingAverageFactor;
}
batch_count_ = 0;
no_batch_count_ = 0;
pending_sum_ = 0;
batch_latency_sum_ = 0;
}
mu_.unlock();
}
}
namespace internal {
template <typename TaskType>
SDBSQueue<TaskType>::SDBSQueue(
std::shared_ptr<SerialDeviceBatchScheduler<TaskType>> scheduler,
const QueueOptions& options)
: scheduler_(scheduler), options_(options) {}
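// Blocks until every enqueued batch has been processed, then deregisters
// this queue from the scheduler.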
template <typename TaskType>
SDBSQueue<TaskType>::~SDBSQueue() {
const int kSleepMicros = 1000;
for (;;) {
{
mutex_lock l(mu_);
if (num_enqueued_batches_ == 0) {
break;
}
}
scheduler_->env()->SleepForMicroseconds(kSleepMicros);
}
scheduler_->RemoveQueue(this);
}
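// Appends the task to the current batch, closing it and opening a fresh one
// when the task would overflow max_batch_size; fails with Unavailable once
// max_enqueued_batches is reached.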
template <typename TaskType>
Status SDBSQueue<TaskType>::Schedule(std::unique_ptr<TaskType>* task) {
SDBSBatch<TaskType>* new_batch = nullptr;
size_t size = (*task)->size();
if (size > options_.max_batch_size) {
return errors::InvalidArgument("Task size ", size,
" is larger than maximum batch size ",
options_.max_batch_size);
}
{
mutex_lock l(mu_);
if (current_batch_ &&
current_batch_->size() + size > options_.max_batch_size) {
if (num_enqueued_batches_ >= options_.max_enqueued_batches) {
return errors::Unavailable("The batch scheduling queue is full");
}
current_batch_->Close();
current_batch_ = nullptr;
}
if (!current_batch_) {
num_enqueued_batches_++;
current_batch_ = new_batch =
new SDBSBatch<TaskType>(this, scheduler_->env()->NowMicros());
}
current_batch_->AddTask(std::move(*task));
num_enqueued_tasks_++;
}
if (new_batch != nullptr) scheduler_->AddBatch(new_batch);
return absl::OkStatus();
}
template <typename TaskType>
void SDBSQueue<TaskType>::ReleaseBatch(const SDBSBatch<TaskType>* batch) {
mutex_lock l(mu_);
num_enqueued_batches_--;
num_enqueued_tasks_ -= batch->num_tasks();
if (batch == current_batch_) {
current_batch_->Close();
current_batch_ = nullptr;
}
}
template <typename TaskType>
size_t SDBSQueue<TaskType>::NumEnqueuedTasks() const {
mutex_lock l(mu_);
return num_enqueued_tasks_;
}
template <typename TaskType>
size_t SDBSQueue<TaskType>::SchedulingCapacity() const {
mutex_lock l(mu_);
const int current_batch_capacity =
current_batch_ ? options_.max_batch_size - current_batch_->size() : 0;
const int spare_batches =
options_.max_enqueued_batches - num_enqueued_batches_;
return spare_batches * options_.max_batch_size + current_batch_capacity;
}
}  // namespace internal
}  // namespace serving
}  // namespace tensorflow
#endif | #include "tensorflow/core/kernels/batching_util/serial_device_batch_scheduler.h"
#include "tensorflow/core/kernels/batching_util/fake_clock_env.h"
#include "tensorflow/core/lib/core/notification.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace serving {
namespace anonymous {
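// A task whose only property is its size; used to drive the scheduler.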
class FakeTask : public BatchTask {
public:
explicit FakeTask(size_t size) : size_(size) {}
~FakeTask() override = default;
size_t size() const override { return size_; }
private:
const size_t size_;
FakeTask(const FakeTask&) = delete;
void operator=(const FakeTask&) = delete;
};
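// Creates a FakeTask of `task_size` and schedules it. On success the
// scheduler takes ownership of the task.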
Status ScheduleTask(size_t task_size, BatchScheduler<FakeTask>* scheduler) {
std::unique_ptr<FakeTask> task(new FakeTask(task_size));
Status status = scheduler->Schedule(&task);
CHECK_EQ(status.ok(), task == nullptr);
return status;
}
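// Spawns a thread that, once `start` is notified, repeatedly advances the
// fake clock until `stop` is notified, letting time-dependent scheduler
// logic make progress in tests.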
std::unique_ptr<Thread> CreateFakeClockAdvancerThread(
test_util::FakeClockEnv* env, Notification* start, Notification* stop) {
return std::unique_ptr<Thread>(Env::Default()->StartThread(
{}, "FakeClockAdvancerThread", [env, start, stop] {
start->WaitForNotification();
while (!stop->HasBeenNotified()) {
env->AdvanceByMicroseconds(10);
Env::Default()->SleepForMicroseconds(10);
}
}));
}
TEST(SerialDeviceBatchSchedulerTest, BadOptions) {
using Scheduler = SerialDeviceBatchScheduler<FakeTask>;
std::shared_ptr<Scheduler> scheduler;
Scheduler::Options default_options;
default_options.get_pending_on_serial_device = []() { return 0; };
Scheduler::Options options = default_options;
options.num_batch_threads = 0;
EXPECT_FALSE(Scheduler::Create(options, &scheduler).ok());
options = default_options;
options.initial_in_flight_batches_limit = 0;
EXPECT_FALSE(Scheduler::Create(options, &scheduler).ok());
options = default_options;
options.num_batch_threads = 5;
options.initial_in_flight_batches_limit = 8;
EXPECT_FALSE(Scheduler::Create(options, &scheduler).ok());
options = default_options;
options.batches_to_average_over = -5;
EXPECT_FALSE(Scheduler::Create(options, &scheduler).ok());
options = default_options;
options.target_pending = 0;
EXPECT_FALSE(Scheduler::Create(options, &scheduler).ok());
options = Scheduler::Options();
EXPECT_FALSE(Scheduler::Create(options, &scheduler).ok());
}
TEST(SerialDeviceBatchSchedulerTest, InFlightBatchesLimit) {
SerialDeviceBatchScheduler<FakeTask>::Options options;
options.num_batch_threads = 3;
options.initial_in_flight_batches_limit = 2;
options.batches_to_average_over = 1000;
options.get_pending_on_serial_device = []() { return 0; };
mutex mu;
int processed_batches = 0;
Notification finish_processing;
auto queue_callback = [&mu, &processed_batches, &finish_processing](
std::unique_ptr<Batch<FakeTask>> batch) {
ASSERT_TRUE(batch->IsClosed());
EXPECT_GT(batch->num_tasks(), 0);
mu.lock();
int batch_num = ++processed_batches;
mu.unlock();
if (batch_num == 2) {
Env::Default()->SleepForMicroseconds(1000);
finish_processing.Notify();
}
if (batch_num == 3) {
ASSERT_TRUE(finish_processing.HasBeenNotified());
}
finish_processing.WaitForNotification();
};
std::shared_ptr<SerialDeviceBatchScheduler<FakeTask>> scheduler;
TF_ASSERT_OK(
SerialDeviceBatchScheduler<FakeTask>::Create(options, &scheduler));
std::unique_ptr<BatchScheduler<FakeTask>> queue1;
std::unique_ptr<BatchScheduler<FakeTask>> queue2;
std::unique_ptr<BatchScheduler<FakeTask>> queue3;
TF_ASSERT_OK(scheduler->AddQueue({}, queue_callback, &queue1));
TF_ASSERT_OK(scheduler->AddQueue({}, queue_callback, &queue2));
TF_ASSERT_OK(scheduler->AddQueue({}, queue_callback, &queue3));
TF_ASSERT_OK(ScheduleTask(100, queue1.get()));
TF_ASSERT_OK(ScheduleTask(100, queue2.get()));
TF_ASSERT_OK(ScheduleTask(100, queue3.get()));
}
TEST(SerialDeviceBatchSchedulerTest, PendingOnSerialDevice) {
mutex mu;
int pending;
SerialDeviceBatchScheduler<FakeTask>::Options options;
options.num_batch_threads = 3;
options.initial_in_flight_batches_limit = 1;
options.batches_to_average_over = 1;
options.target_pending = 3;
options.get_pending_on_serial_device = [&mu, &pending]() {
mutex_lock l(mu);
return pending;
};
std::shared_ptr<SerialDeviceBatchScheduler<FakeTask>> scheduler;
TF_ASSERT_OK(
SerialDeviceBatchScheduler<FakeTask>::Create(options, &scheduler));
int processed_batches = 0;
Notification start_processing;
auto queue_callback = [&mu, &processed_batches, &start_processing, &pending,
&scheduler](std::unique_ptr<Batch<FakeTask>> batch) {
int batch_num;
{
mutex_lock l(mu);
batch_num = ++processed_batches;
}
switch (batch_num) {
case 1:
start_processing.WaitForNotification();
{
mutex_lock l(mu);
pending = 3;
}
break;
case 2:
CHECK_EQ(scheduler->in_flight_batches_limit(), 1);
{
mutex_lock l(mu);
pending = 1;
}
break;
case 3:
CHECK_EQ(scheduler->in_flight_batches_limit(), 3);
{
mutex_lock l(mu);
pending = 3;
}
break;
default:
break;
}
};
std::unique_ptr<BatchScheduler<FakeTask>> queue;
TF_ASSERT_OK(scheduler->AddQueue({}, queue_callback, &queue));
for (int i = 0; i < 3; i++) {
TF_ASSERT_OK(ScheduleTask(800, queue.get()));
}
start_processing.Notify();
}
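// Priority score = creation time - boost * batch fullness. With a 10us
// boost, the full 1000-task batch (3 - 10 = -7) beats the 10%-full batch
// (0 - 1 = -1), which beats the 80%-full batch (8 - 8 = 0).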
TEST(SerialDeviceBatchSchedulerTest, FullBatchSchedulingBoostMicros) {
test_util::FakeClockEnv env(Env::Default());
Notification start_teardown, stop_teardown;
std::unique_ptr<Thread> teardown_thread =
CreateFakeClockAdvancerThread(&env, &start_teardown, &stop_teardown);
{
SerialDeviceBatchScheduler<FakeTask>::Options options;
options.env = &env;
options.initial_in_flight_batches_limit = 1;
options.batches_to_average_over = 1000;
options.full_batch_scheduling_boost_micros = 10;
options.get_pending_on_serial_device = []() { return 0; };
mutex mu;
int processed_batches = 0;
auto queue_callback =
[&mu, &processed_batches](std::unique_ptr<Batch<FakeTask>> batch) {
ASSERT_TRUE(batch->IsClosed());
mutex_lock l(mu);
processed_batches++;
switch (processed_batches) {
case 1:
EXPECT_EQ(1000, batch->size());
break;
case 2:
EXPECT_EQ(100, batch->size());
break;
case 3:
EXPECT_EQ(80, batch->size());
break;
default:
EXPECT_TRUE(false) << "Should only have 3 batches";
}
};
std::shared_ptr<SerialDeviceBatchScheduler<FakeTask>> scheduler;
TF_ASSERT_OK(
SerialDeviceBatchScheduler<FakeTask>::Create(options, &scheduler));
Env::Default()->SleepForMicroseconds(1000);
SerialDeviceBatchScheduler<FakeTask>::QueueOptions queue_options;
std::unique_ptr<BatchScheduler<FakeTask>> queue1;
std::unique_ptr<BatchScheduler<FakeTask>> queue2;
std::unique_ptr<BatchScheduler<FakeTask>> queue3;
queue_options.max_batch_size = 1000;
TF_ASSERT_OK(scheduler->AddQueue(queue_options, queue_callback, &queue1));
queue_options.max_batch_size = 1000;
TF_ASSERT_OK(scheduler->AddQueue(queue_options, queue_callback, &queue2));
queue_options.max_batch_size = 100;
TF_ASSERT_OK(scheduler->AddQueue(queue_options, queue_callback, &queue3));
TF_ASSERT_OK(ScheduleTask(100, queue1.get()));
env.AdvanceByMicroseconds(3);
TF_ASSERT_OK(ScheduleTask(1000, queue2.get()));
env.AdvanceByMicroseconds(5);
TF_ASSERT_OK(ScheduleTask(80, queue3.get()));
env.AdvanceByMicroseconds(1000);
start_teardown.Notify();
}
stop_teardown.Notify();
}
TEST(SerialDeviceBatchSchedulerTest, DeleteQueue) {
SerialDeviceBatchScheduler<FakeTask>::Options options;
options.initial_in_flight_batches_limit = 1;
options.batches_to_average_over = 1000;
options.get_pending_on_serial_device = []() { return 0; };
mutex mu;
int processed_batches = 0;
Notification finish_processing;
auto queue_callback = [&mu, &processed_batches, &finish_processing](
std::unique_ptr<Batch<FakeTask>> batch) {
ASSERT_TRUE(batch->IsClosed());
EXPECT_GT(batch->num_tasks(), 0);
finish_processing.WaitForNotification();
mu.lock();
processed_batches++;
mu.unlock();
};
std::shared_ptr<SerialDeviceBatchScheduler<FakeTask>> scheduler;
TF_ASSERT_OK(
SerialDeviceBatchScheduler<FakeTask>::Create(options, &scheduler));
std::unique_ptr<BatchScheduler<FakeTask>> queue;
TF_ASSERT_OK(scheduler->AddQueue({}, queue_callback, &queue));
for (int i = 0; i < 2; i++) {
TF_ASSERT_OK(ScheduleTask(800, queue.get()));
}
std::unique_ptr<Thread> queue_deleter(Env::Default()->StartThread(
{}, "QueueDeleterThread",
[&queue, &mu, &processed_batches, scheduler]() mutable {
queue.reset();
{
mutex_lock l(mu);
EXPECT_GT(processed_batches, 0);
}
scheduler.reset();
mutex_lock l(mu);
EXPECT_EQ(processed_batches, 2);
}));
scheduler.reset();
Env::Default()->SleepForMicroseconds(1000);
finish_processing.Notify();
}
TEST(SerialDeviceBatchSchedulerTest, DeleteScheduler) {
SerialDeviceBatchScheduler<FakeTask>::Options options;
options.initial_in_flight_batches_limit = 1;
options.batches_to_average_over = 1000;
options.get_pending_on_serial_device = []() { return 0; };
mutex mu;
int processed_batches = 0;
Notification start_processing;
Notification finish_processing;
auto queue_callback =
[&mu, &processed_batches, &start_processing,
&finish_processing](std::unique_ptr<Batch<FakeTask>> batch) {
ASSERT_TRUE(batch->IsClosed());
EXPECT_GT(batch->num_tasks(), 0);
start_processing.WaitForNotification();
mutex_lock l(mu);
processed_batches++;
if (processed_batches == 2) {
finish_processing.Notify();
}
};
std::shared_ptr<SerialDeviceBatchScheduler<FakeTask>> scheduler;
TF_ASSERT_OK(
SerialDeviceBatchScheduler<FakeTask>::Create(options, &scheduler));
std::unique_ptr<BatchScheduler<FakeTask>> queue;
TF_ASSERT_OK(scheduler->AddQueue({}, queue_callback, &queue));
for (int i = 0; i < 2; i++) {
TF_ASSERT_OK(ScheduleTask(800, queue.get()));
}
scheduler.reset();
start_processing.Notify();
finish_processing.WaitForNotification();
}
TEST(SerialDeviceBatchSchedulerTest, QueueCapacityInfo) {
SerialDeviceBatchScheduler<FakeTask>::Options options;
options.initial_in_flight_batches_limit = 1;
options.batches_to_average_over = 1000;
options.full_batch_scheduling_boost_micros = 1000;
options.get_pending_on_serial_device = []() { return 0; };
mutex mu;
int processed_batches = 0;
Notification finish_processing;
auto queue_callback = [&mu, &processed_batches, &finish_processing](
std::unique_ptr<Batch<FakeTask>> batch) {
ASSERT_TRUE(batch->IsClosed());
EXPECT_GT(batch->num_tasks(), 0);
mu.lock();
int batch_num = ++processed_batches;
mu.unlock();
if (batch_num == 1) {
finish_processing.WaitForNotification();
}
};
std::shared_ptr<SerialDeviceBatchScheduler<FakeTask>> scheduler;
TF_ASSERT_OK(
SerialDeviceBatchScheduler<FakeTask>::Create(options, &scheduler));
std::unique_ptr<BatchScheduler<FakeTask>> queue1;
std::unique_ptr<BatchScheduler<FakeTask>> queue2;
TF_ASSERT_OK(scheduler->AddQueue({}, queue_callback, &queue1));
TF_ASSERT_OK(scheduler->AddQueue({}, queue_callback, &queue2));
TF_ASSERT_OK(ScheduleTask(800, queue1.get()));
TF_ASSERT_OK(ScheduleTask(100, queue2.get()));
EXPECT_EQ(queue2->NumEnqueuedTasks(), 1);
EXPECT_EQ(queue2->SchedulingCapacity(), 9 * 1000 + 900);
TF_ASSERT_OK(ScheduleTask(100, queue2.get()));
TF_ASSERT_OK(ScheduleTask(200, queue2.get()));
EXPECT_EQ(queue2->NumEnqueuedTasks(), 3);
EXPECT_EQ(queue2->SchedulingCapacity(), 9 * 1000 + 600);
TF_ASSERT_OK(ScheduleTask(700, queue2.get()));
EXPECT_EQ(queue2->NumEnqueuedTasks(), 4);
EXPECT_EQ(queue2->SchedulingCapacity(), 8 * 1000 + 300);
finish_processing.Notify();
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/batching_util/serial_device_batch_scheduler.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/batching_util/serial_device_batch_scheduler_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
ff8b0f41-e158-42d4-9735-cdca12d747b8 | cpp | abseil/abseil-cpp | string_view | absl/strings/string_view.cc | absl/strings/string_view_test.cc | #include "absl/strings/string_view.h"
#ifndef ABSL_USES_STD_STRING_VIEW
#include <algorithm>
#include <climits>
#include <cstring>
#include <ostream>
#include "absl/base/nullability.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace {
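// memmem() equivalent: returns a pointer to the first occurrence of the
// needle in the haystack, or nullptr. Candidates are located with memchr on
// the needle's first byte and confirmed with memcmp.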
absl::Nullable<const char*> memmatch(absl::Nullable<const char*> phaystack,
size_t haylen,
absl::Nullable<const char*> pneedle,
size_t neelen) {
if (0 == neelen) {
return phaystack;
}
if (haylen < neelen) return nullptr;
const char* match;
const char* hayend = phaystack + haylen - neelen + 1;
while (
(match = static_cast<const char*>(memchr(
phaystack, pneedle[0], static_cast<size_t>(hayend - phaystack))))) {
if (memcmp(match, pneedle, neelen) == 0)
return match;
else
phaystack = match + 1;
}
return nullptr;
}
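// Writes `pad` copies of the stream's fill character, in chunks of at most
// sizeof(fill_buf) bytes.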
void WritePadding(std::ostream& o, size_t pad) {
char fill_buf[32];
memset(fill_buf, o.fill(), sizeof(fill_buf));
while (pad) {
size_t n = std::min(pad, sizeof(fill_buf));
o.write(fill_buf, static_cast<std::streamsize>(n));
pad -= n;
}
}
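// A 256-entry membership table over the bytes of `wanted`, letting the
// find_{first,last}_(not_)of scans run in O(haystack + set) rather than
// O(haystack * set).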
class LookupTable {
public:
explicit LookupTable(string_view wanted) {
for (char c : wanted) {
table_[Index(c)] = true;
}
}
bool operator[](char c) const { return table_[Index(c)]; }
private:
static unsigned char Index(char c) { return static_cast<unsigned char>(c); }
bool table_[UCHAR_MAX + 1] = {};
};
}
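// Stream insertion that honors the stream's width, fill, and left/right
// adjustment flags, then resets the width to 0, matching std::string's
// behavior.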
std::ostream& operator<<(std::ostream& o, string_view piece) {
std::ostream::sentry sentry(o);
if (sentry) {
size_t lpad = 0;
size_t rpad = 0;
if (static_cast<size_t>(o.width()) > piece.size()) {
size_t pad = static_cast<size_t>(o.width()) - piece.size();
if ((o.flags() & o.adjustfield) == o.left) {
rpad = pad;
} else {
lpad = pad;
}
}
if (lpad) WritePadding(o, lpad);
o.write(piece.data(), static_cast<std::streamsize>(piece.size()));
if (rpad) WritePadding(o, rpad);
o.width(0);
}
return o;
}
string_view::size_type string_view::find(string_view s,
size_type pos) const noexcept {
if (empty() || pos > length_) {
if (empty() && pos == 0 && s.empty()) return 0;
return npos;
}
const char* result = memmatch(ptr_ + pos, length_ - pos, s.ptr_, s.length_);
return result ? static_cast<size_type>(result - ptr_) : npos;
}
string_view::size_type string_view::find(char c, size_type pos) const noexcept {
if (empty() || pos >= length_) {
return npos;
}
const char* result =
static_cast<const char*>(memchr(ptr_ + pos, c, length_ - pos));
return result != nullptr ? static_cast<size_type>(result - ptr_) : npos;
}
string_view::size_type string_view::rfind(string_view s,
size_type pos) const noexcept {
if (length_ < s.length_) return npos;
if (s.empty()) return std::min(length_, pos);
const char* last = ptr_ + std::min(length_ - s.length_, pos) + s.length_;
const char* result = std::find_end(ptr_, last, s.ptr_, s.ptr_ + s.length_);
return result != last ? static_cast<size_type>(result - ptr_) : npos;
}
string_view::size_type string_view::rfind(char c,
size_type pos) const noexcept {
if (empty()) return npos;
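  // Scan backwards; size_type is unsigned, so break explicitly at 0 instead
  // of testing i >= 0, which would wrap around.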
for (size_type i = std::min(pos, length_ - 1);; --i) {
if (ptr_[i] == c) {
return i;
}
if (i == 0) break;
}
return npos;
}
string_view::size_type string_view::find_first_of(
string_view s, size_type pos) const noexcept {
if (empty() || s.empty()) {
return npos;
}
if (s.length_ == 1) return find_first_of(s.ptr_[0], pos);
LookupTable tbl(s);
for (size_type i = pos; i < length_; ++i) {
if (tbl[ptr_[i]]) {
return i;
}
}
return npos;
}
string_view::size_type string_view::find_first_not_of(
string_view s, size_type pos) const noexcept {
if (empty()) return npos;
if (s.length_ == 1) return find_first_not_of(s.ptr_[0], pos);
LookupTable tbl(s);
for (size_type i = pos; i < length_; ++i) {
if (!tbl[ptr_[i]]) {
return i;
}
}
return npos;
}
string_view::size_type string_view::find_first_not_of(
char c, size_type pos) const noexcept {
if (empty()) return npos;
for (; pos < length_; ++pos) {
if (ptr_[pos] != c) {
return pos;
}
}
return npos;
}
string_view::size_type string_view::find_last_of(string_view s,
size_type pos) const noexcept {
if (empty() || s.empty()) return npos;
if (s.length_ == 1) return find_last_of(s.ptr_[0], pos);
LookupTable tbl(s);
for (size_type i = std::min(pos, length_ - 1);; --i) {
if (tbl[ptr_[i]]) {
return i;
}
if (i == 0) break;
}
return npos;
}
string_view::size_type string_view::find_last_not_of(
string_view s, size_type pos) const noexcept {
if (empty()) return npos;
size_type i = std::min(pos, length_ - 1);
if (s.empty()) return i;
if (s.length_ == 1) return find_last_not_of(s.ptr_[0], pos);
LookupTable tbl(s);
for (;; --i) {
if (!tbl[ptr_[i]]) {
return i;
}
if (i == 0) break;
}
return npos;
}
string_view::size_type string_view::find_last_not_of(
char c, size_type pos) const noexcept {
if (empty()) return npos;
size_type i = std::min(pos, length_ - 1);
for (;; --i) {
if (ptr_[i] != c) {
return i;
}
if (i == 0) break;
}
return npos;
}
#ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL
constexpr string_view::size_type string_view::npos;
constexpr string_view::size_type string_view::kMaxSize;
#endif
ABSL_NAMESPACE_END
}
#else
#ifdef __APPLE__
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace strings_internal {
extern const char kAvoidEmptyStringViewLibraryWarning;
const char kAvoidEmptyStringViewLibraryWarning = 0;
}
ABSL_NAMESPACE_END
}
#endif
#endif | #include "absl/strings/string_view.h"
#include <stdlib.h>
#include <cstddef>
#include <cstdlib>
#include <cstring>
#include <iomanip>
#include <ios>
#include <iterator>
#include <limits>
#include <map>
#include <memory>
#include <sstream>
#include <string>
#include <type_traits>
#include <utility>
#include "gtest/gtest.h"
#include "absl/base/config.h"
#include "absl/meta/type_traits.h"
#if defined(ABSL_HAVE_STD_STRING_VIEW) || defined(__ANDROID__)
#define ABSL_EXPECT_DEATH_IF_SUPPORTED(statement, regex) \
EXPECT_DEATH_IF_SUPPORTED(statement, ".*")
#else
#define ABSL_EXPECT_DEATH_IF_SUPPORTED(statement, regex) \
EXPECT_DEATH_IF_SUPPORTED(statement, regex)
#endif
namespace {
static_assert(!absl::type_traits_internal::IsOwner<absl::string_view>::value &&
absl::type_traits_internal::IsView<absl::string_view>::value,
"string_view is a view, not an owner");
static_assert(absl::type_traits_internal::IsLifetimeBoundAssignment<
absl::string_view, std::string>::value,
"lifetimebound assignment not detected");
template <typename T>
struct Mallocator {
typedef T value_type;
typedef size_t size_type;
typedef ptrdiff_t difference_type;
typedef T* pointer;
typedef const T* const_pointer;
typedef T& reference;
typedef const T& const_reference;
size_type max_size() const {
return size_t(std::numeric_limits<size_type>::max()) / sizeof(value_type);
}
template <typename U>
struct rebind {
typedef Mallocator<U> other;
};
Mallocator() = default;
template <class U>
Mallocator(const Mallocator<U>&) {}
T* allocate(size_t n) { return static_cast<T*>(std::malloc(n * sizeof(T))); }
void deallocate(T* p, size_t) { std::free(p); }
};
template <typename T, typename U>
bool operator==(const Mallocator<T>&, const Mallocator<U>&) {
return true;
}
template <typename T, typename U>
bool operator!=(const Mallocator<T>&, const Mallocator<U>&) {
return false;
}
TEST(StringViewTest, Ctor) {
{
absl::string_view s10;
EXPECT_TRUE(s10.data() == nullptr);
EXPECT_EQ(0u, s10.length());
}
{
const char* hello = "hello";
absl::string_view s20(hello);
EXPECT_TRUE(s20.data() == hello);
EXPECT_EQ(5u, s20.length());
absl::string_view s21(hello, 4);
EXPECT_TRUE(s21.data() == hello);
EXPECT_EQ(4u, s21.length());
absl::string_view s22(hello, 6);
EXPECT_TRUE(s22.data() == hello);
EXPECT_EQ(6u, s22.length());
}
{
std::string hola = "hola";
absl::string_view s30(hola);
EXPECT_TRUE(s30.data() == hola.data());
EXPECT_EQ(4u, s30.length());
hola.push_back('\0');
hola.append("h2");
hola.push_back('\0');
absl::string_view s31(hola);
EXPECT_TRUE(s31.data() == hola.data());
EXPECT_EQ(8u, s31.length());
}
{
using mstring =
std::basic_string<char, std::char_traits<char>, Mallocator<char>>;
mstring str1("BUNGIE-JUMPING!");
const mstring str2("SLEEPING!");
absl::string_view s1(str1);
s1.remove_prefix(strlen("BUNGIE-JUM"));
absl::string_view s2(str2);
s2.remove_prefix(strlen("SLEE"));
EXPECT_EQ(s1, s2);
EXPECT_EQ(s1, "PING!");
}
}
TEST(StringViewTest, Swap) {
absl::string_view a("a");
absl::string_view b("bbb");
EXPECT_TRUE(noexcept(a.swap(b)));
a.swap(b);
EXPECT_EQ(a, "bbb");
EXPECT_EQ(b, "a");
a.swap(b);
EXPECT_EQ(a, "a");
EXPECT_EQ(b, "bbb");
}
TEST(StringViewTest, STLComparator) {
std::string s1("foo");
std::string s2("bar");
std::string s3("baz");
absl::string_view p1(s1);
absl::string_view p2(s2);
absl::string_view p3(s3);
typedef std::map<absl::string_view, int> TestMap;
TestMap map;
map.insert(std::make_pair(p1, 0));
map.insert(std::make_pair(p2, 1));
map.insert(std::make_pair(p3, 2));
EXPECT_EQ(map.size(), 3u);
TestMap::const_iterator iter = map.begin();
EXPECT_EQ(iter->second, 1);
++iter;
EXPECT_EQ(iter->second, 2);
++iter;
EXPECT_EQ(iter->second, 0);
++iter;
EXPECT_TRUE(iter == map.end());
TestMap::iterator new_iter = map.find("zot");
EXPECT_TRUE(new_iter == map.end());
new_iter = map.find("bar");
EXPECT_TRUE(new_iter != map.end());
map.erase(new_iter);
EXPECT_EQ(map.size(), 2u);
iter = map.begin();
EXPECT_EQ(iter->second, 2);
++iter;
EXPECT_EQ(iter->second, 0);
++iter;
EXPECT_TRUE(iter == map.end());
}
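// Verifies both the comparison operator and compare() for the same operands.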
#define COMPARE(result, op, x, y) \
EXPECT_EQ(result, absl::string_view((x)) op absl::string_view((y))); \
EXPECT_EQ(result, absl::string_view((x)).compare(absl::string_view((y))) op 0)
TEST(StringViewTest, ComparisonOperators) {
COMPARE(true, ==, "", "");
COMPARE(true, ==, "", absl::string_view());
COMPARE(true, ==, absl::string_view(), "");
COMPARE(true, ==, "a", "a");
COMPARE(true, ==, "aa", "aa");
COMPARE(false, ==, "a", "");
COMPARE(false, ==, "", "a");
COMPARE(false, ==, "a", "b");
COMPARE(false, ==, "a", "aa");
COMPARE(false, ==, "aa", "a");
COMPARE(false, !=, "", "");
COMPARE(false, !=, "a", "a");
COMPARE(false, !=, "aa", "aa");
COMPARE(true, !=, "a", "");
COMPARE(true, !=, "", "a");
COMPARE(true, !=, "a", "b");
COMPARE(true, !=, "a", "aa");
COMPARE(true, !=, "aa", "a");
COMPARE(true, <, "a", "b");
COMPARE(true, <, "a", "aa");
COMPARE(true, <, "aa", "b");
COMPARE(true, <, "aa", "bb");
COMPARE(false, <, "a", "a");
COMPARE(false, <, "b", "a");
COMPARE(false, <, "aa", "a");
COMPARE(false, <, "b", "aa");
COMPARE(false, <, "bb", "aa");
COMPARE(true, <=, "a", "a");
COMPARE(true, <=, "a", "b");
COMPARE(true, <=, "a", "aa");
COMPARE(true, <=, "aa", "b");
COMPARE(true, <=, "aa", "bb");
COMPARE(false, <=, "b", "a");
COMPARE(false, <=, "aa", "a");
COMPARE(false, <=, "b", "aa");
COMPARE(false, <=, "bb", "aa");
COMPARE(false, >=, "a", "b");
COMPARE(false, >=, "a", "aa");
COMPARE(false, >=, "aa", "b");
COMPARE(false, >=, "aa", "bb");
COMPARE(true, >=, "a", "a");
COMPARE(true, >=, "b", "a");
COMPARE(true, >=, "aa", "a");
COMPARE(true, >=, "b", "aa");
COMPARE(true, >=, "bb", "aa");
COMPARE(false, >, "a", "a");
COMPARE(false, >, "a", "b");
COMPARE(false, >, "a", "aa");
COMPARE(false, >, "aa", "b");
COMPARE(false, >, "aa", "bb");
COMPARE(true, >, "b", "a");
COMPARE(true, >, "aa", "a");
COMPARE(true, >, "b", "aa");
COMPARE(true, >, "bb", "aa");
}
TEST(StringViewTest, ComparisonOperatorsByCharacterPosition) {
std::string x;
for (size_t i = 0; i < 256; i++) {
x += 'a';
std::string y = x;
COMPARE(true, ==, x, y);
for (size_t j = 0; j < i; j++) {
std::string z = x;
z[j] = 'b';
COMPARE(false, ==, x, z);
COMPARE(true, <, x, z);
COMPARE(true, >, z, x);
if (j + 1 < i) {
z[j + 1] = 'A';
COMPARE(false, ==, x, z);
COMPARE(true, <, x, z);
COMPARE(true, >, z, x);
z[j + 1] = 'z';
COMPARE(false, ==, x, z);
COMPARE(true, <, x, z);
COMPARE(true, >, z, x);
}
}
}
}
#undef COMPARE
template <typename T>
struct is_type {
template <typename U>
static bool same(U) {
return false;
}
static bool same(T) { return true; }
};
TEST(StringViewTest, NposMatchesStdStringView) {
EXPECT_EQ(absl::string_view::npos, std::string::npos);
EXPECT_TRUE(is_type<size_t>::same(absl::string_view::npos));
EXPECT_FALSE(is_type<size_t>::same(""));
char test[absl::string_view::npos & 1] = {0};
EXPECT_EQ(0, test[0]);
}
TEST(StringViewTest, STL1) {
const absl::string_view a("abcdefghijklmnopqrstuvwxyz");
const absl::string_view b("abc");
const absl::string_view c("xyz");
const absl::string_view d("foobar");
const absl::string_view e;
std::string temp("123");
temp += '\0';
temp += "456";
const absl::string_view f(temp);
EXPECT_EQ(a[6], 'g');
EXPECT_EQ(b[0], 'a');
EXPECT_EQ(c[2], 'z');
EXPECT_EQ(f[3], '\0');
EXPECT_EQ(f[5], '5');
EXPECT_EQ(*d.data(), 'f');
EXPECT_EQ(d.data()[5], 'r');
EXPECT_TRUE(e.data() == nullptr);
EXPECT_EQ(*a.begin(), 'a');
EXPECT_EQ(*(b.begin() + 2), 'c');
EXPECT_EQ(*(c.end() - 1), 'z');
EXPECT_EQ(*a.rbegin(), 'z');
EXPECT_EQ(*(b.rbegin() + 2), 'a');
EXPECT_EQ(*(c.rend() - 1), 'x');
EXPECT_TRUE(a.rbegin() + 26 == a.rend());
EXPECT_EQ(a.size(), 26u);
EXPECT_EQ(b.size(), 3u);
EXPECT_EQ(c.size(), 3u);
EXPECT_EQ(d.size(), 6u);
EXPECT_EQ(e.size(), 0u);
EXPECT_EQ(f.size(), 7u);
EXPECT_TRUE(!d.empty());
EXPECT_TRUE(d.begin() != d.end());
EXPECT_TRUE(d.begin() + 6 == d.end());
EXPECT_TRUE(e.empty());
EXPECT_TRUE(e.begin() == e.end());
char buf[4] = { '%', '%', '%', '%' };
EXPECT_EQ(a.copy(buf, 4), 4u);
EXPECT_EQ(buf[0], a[0]);
EXPECT_EQ(buf[1], a[1]);
EXPECT_EQ(buf[2], a[2]);
EXPECT_EQ(buf[3], a[3]);
EXPECT_EQ(a.copy(buf, 3, 7), 3u);
EXPECT_EQ(buf[0], a[7]);
EXPECT_EQ(buf[1], a[8]);
EXPECT_EQ(buf[2], a[9]);
EXPECT_EQ(buf[3], a[3]);
EXPECT_EQ(c.copy(buf, 99), 3u);
EXPECT_EQ(buf[0], c[0]);
EXPECT_EQ(buf[1], c[1]);
EXPECT_EQ(buf[2], c[2]);
EXPECT_EQ(buf[3], a[3]);
#ifdef ABSL_HAVE_EXCEPTIONS
EXPECT_THROW(a.copy(buf, 1, 27), std::out_of_range);
#else
ABSL_EXPECT_DEATH_IF_SUPPORTED(a.copy(buf, 1, 27), "absl::string_view::copy");
#endif
}
TEST(StringViewTest, STL2) {
const absl::string_view a("abcdefghijklmnopqrstuvwxyz");
const absl::string_view b("abc");
const absl::string_view c("xyz");
absl::string_view d("foobar");
const absl::string_view e;
const absl::string_view f(
"123"
"\0"
"456",
7);
d = absl::string_view();
EXPECT_EQ(d.size(), 0u);
EXPECT_TRUE(d.empty());
EXPECT_TRUE(d.data() == nullptr);
EXPECT_TRUE(d.begin() == d.end());
EXPECT_EQ(a.find(b), 0u);
EXPECT_EQ(a.find(b, 1), absl::string_view::npos);
EXPECT_EQ(a.find(c), 23u);
EXPECT_EQ(a.find(c, 9), 23u);
EXPECT_EQ(a.find(c, absl::string_view::npos), absl::string_view::npos);
EXPECT_EQ(b.find(c), absl::string_view::npos);
EXPECT_EQ(b.find(c, absl::string_view::npos), absl::string_view::npos);
EXPECT_EQ(a.find(d), 0u);
EXPECT_EQ(a.find(e), 0u);
EXPECT_EQ(a.find(d, 12), 12u);
EXPECT_EQ(a.find(e, 17), 17u);
absl::string_view g("xx not found bb");
EXPECT_EQ(a.find(g), absl::string_view::npos);
EXPECT_EQ(d.find(b), absl::string_view::npos);
EXPECT_EQ(e.find(b), absl::string_view::npos);
EXPECT_EQ(d.find(b, 4), absl::string_view::npos);
EXPECT_EQ(e.find(b, 7), absl::string_view::npos);
size_t empty_search_pos = std::string().find(std::string());
EXPECT_EQ(d.find(d), empty_search_pos);
EXPECT_EQ(d.find(e), empty_search_pos);
EXPECT_EQ(e.find(d), empty_search_pos);
EXPECT_EQ(e.find(e), empty_search_pos);
EXPECT_EQ(d.find(d, 4), std::string().find(std::string(), 4));
EXPECT_EQ(d.find(e, 4), std::string().find(std::string(), 4));
EXPECT_EQ(e.find(d, 4), std::string().find(std::string(), 4));
EXPECT_EQ(e.find(e, 4), std::string().find(std::string(), 4));
EXPECT_EQ(a.find('a'), 0u);
EXPECT_EQ(a.find('c'), 2u);
EXPECT_EQ(a.find('z'), 25u);
EXPECT_EQ(a.find('$'), absl::string_view::npos);
EXPECT_EQ(a.find('\0'), absl::string_view::npos);
EXPECT_EQ(f.find('\0'), 3u);
EXPECT_EQ(f.find('3'), 2u);
EXPECT_EQ(f.find('5'), 5u);
EXPECT_EQ(g.find('o'), 4u);
EXPECT_EQ(g.find('o', 4), 4u);
EXPECT_EQ(g.find('o', 5), 8u);
EXPECT_EQ(a.find('b', 5), absl::string_view::npos);
EXPECT_EQ(d.find('\0'), absl::string_view::npos);
EXPECT_EQ(e.find('\0'), absl::string_view::npos);
EXPECT_EQ(d.find('\0', 4), absl::string_view::npos);
EXPECT_EQ(e.find('\0', 7), absl::string_view::npos);
EXPECT_EQ(d.find('x'), absl::string_view::npos);
EXPECT_EQ(e.find('x'), absl::string_view::npos);
EXPECT_EQ(d.find('x', 4), absl::string_view::npos);
EXPECT_EQ(e.find('x', 7), absl::string_view::npos);
EXPECT_EQ(a.find(b.data(), 1, 0), 1u);
EXPECT_EQ(a.find(c.data(), 9, 0), 9u);
EXPECT_EQ(a.find(c.data(), absl::string_view::npos, 0),
absl::string_view::npos);
EXPECT_EQ(b.find(c.data(), absl::string_view::npos, 0),
absl::string_view::npos);
EXPECT_EQ(d.find(b.data(), 4, 0), absl::string_view::npos);
EXPECT_EQ(e.find(b.data(), 7, 0), absl::string_view::npos);
EXPECT_EQ(a.find(b.data(), 1), absl::string_view::npos);
EXPECT_EQ(a.find(c.data(), 9), 23u);
EXPECT_EQ(a.find(c.data(), absl::string_view::npos), absl::string_view::npos);
EXPECT_EQ(b.find(c.data(), absl::string_view::npos), absl::string_view::npos);
EXPECT_EQ(d.find(b.data(), 4), absl::string_view::npos);
EXPECT_EQ(e.find(b.data(), 7), absl::string_view::npos);
EXPECT_EQ(a.rfind(b), 0u);
EXPECT_EQ(a.rfind(b, 1), 0u);
EXPECT_EQ(a.rfind(c), 23u);
EXPECT_EQ(a.rfind(c, 22), absl::string_view::npos);
EXPECT_EQ(a.rfind(c, 1), absl::string_view::npos);
EXPECT_EQ(a.rfind(c, 0), absl::string_view::npos);
EXPECT_EQ(b.rfind(c), absl::string_view::npos);
EXPECT_EQ(b.rfind(c, 0), absl::string_view::npos);
EXPECT_EQ(a.rfind(d), std::string(a).rfind(std::string()));
EXPECT_EQ(a.rfind(e), std::string(a).rfind(std::string()));
EXPECT_EQ(a.rfind(d, 12), 12u);
EXPECT_EQ(a.rfind(e, 17), 17u);
EXPECT_EQ(a.rfind(g), absl::string_view::npos);
EXPECT_EQ(d.rfind(b), absl::string_view::npos);
EXPECT_EQ(e.rfind(b), absl::string_view::npos);
EXPECT_EQ(d.rfind(b, 4), absl::string_view::npos);
EXPECT_EQ(e.rfind(b, 7), absl::string_view::npos);
EXPECT_EQ(d.rfind(d, 4), std::string().rfind(std::string()));
EXPECT_EQ(e.rfind(d, 7), std::string().rfind(std::string()));
EXPECT_EQ(d.rfind(e, 4), std::string().rfind(std::string()));
EXPECT_EQ(e.rfind(e, 7), std::string().rfind(std::string()));
EXPECT_EQ(d.rfind(d), std::string().rfind(std::string()));
EXPECT_EQ(e.rfind(d), std::string().rfind(std::string()));
EXPECT_EQ(d.rfind(e), std::string().rfind(std::string()));
EXPECT_EQ(e.rfind(e), std::string().rfind(std::string()));
EXPECT_EQ(g.rfind('o'), 8u);
EXPECT_EQ(g.rfind('q'), absl::string_view::npos);
EXPECT_EQ(g.rfind('o', 8), 8u);
EXPECT_EQ(g.rfind('o', 7), 4u);
EXPECT_EQ(g.rfind('o', 3), absl::string_view::npos);
EXPECT_EQ(f.rfind('\0'), 3u);
EXPECT_EQ(f.rfind('\0', 12), 3u);
EXPECT_EQ(f.rfind('3'), 2u);
EXPECT_EQ(f.rfind('5'), 5u);
EXPECT_EQ(d.rfind('o'), absl::string_view::npos);
EXPECT_EQ(e.rfind('o'), absl::string_view::npos);
EXPECT_EQ(d.rfind('o', 4), absl::string_view::npos);
EXPECT_EQ(e.rfind('o', 7), absl::string_view::npos);
EXPECT_EQ(a.rfind(b.data(), 1, 0), 1u);
EXPECT_EQ(a.rfind(c.data(), 22, 0), 22u);
EXPECT_EQ(a.rfind(c.data(), 1, 0), 1u);
EXPECT_EQ(a.rfind(c.data(), 0, 0), 0u);
EXPECT_EQ(b.rfind(c.data(), 0, 0), 0u);
EXPECT_EQ(d.rfind(b.data(), 4, 0), 0u);
EXPECT_EQ(e.rfind(b.data(), 7, 0), 0u);
}
TEST(StringViewTest, STL2FindFirst) {
const absl::string_view a("abcdefghijklmnopqrstuvwxyz");
const absl::string_view b("abc");
const absl::string_view c("xyz");
absl::string_view d("foobar");
const absl::string_view e;
const absl::string_view f(
"123"
"\0"
"456",
7);
absl::string_view g("xx not found bb");
d = absl::string_view();
EXPECT_EQ(a.find_first_of(b), 0u);
EXPECT_EQ(a.find_first_of(b, 0), 0u);
EXPECT_EQ(a.find_first_of(b, 1), 1u);
EXPECT_EQ(a.find_first_of(b, 2), 2u);
EXPECT_EQ(a.find_first_of(b, 3), absl::string_view::npos);
EXPECT_EQ(a.find_first_of(c), 23u);
EXPECT_EQ(a.find_first_of(c, 23), 23u);
EXPECT_EQ(a.find_first_of(c, 24), 24u);
EXPECT_EQ(a.find_first_of(c, 25), 25u);
EXPECT_EQ(a.find_first_of(c, 26), absl::string_view::npos);
EXPECT_EQ(g.find_first_of(b), 13u);
EXPECT_EQ(g.find_first_of(c), 0u);
EXPECT_EQ(a.find_first_of(f), absl::string_view::npos);
EXPECT_EQ(f.find_first_of(a), absl::string_view::npos);
EXPECT_EQ(a.find_first_of(d), absl::string_view::npos);
EXPECT_EQ(a.find_first_of(e), absl::string_view::npos);
EXPECT_EQ(d.find_first_of(b), absl::string_view::npos);
EXPECT_EQ(e.find_first_of(b), absl::string_view::npos);
EXPECT_EQ(d.find_first_of(d), absl::string_view::npos);
EXPECT_EQ(e.find_first_of(d), absl::string_view::npos);
EXPECT_EQ(d.find_first_of(e), absl::string_view::npos);
EXPECT_EQ(e.find_first_of(e), absl::string_view::npos);
EXPECT_EQ(a.find_first_not_of(b), 3u);
EXPECT_EQ(a.find_first_not_of(c), 0u);
EXPECT_EQ(b.find_first_not_of(a), absl::string_view::npos);
EXPECT_EQ(c.find_first_not_of(a), absl::string_view::npos);
EXPECT_EQ(f.find_first_not_of(a), 0u);
EXPECT_EQ(a.find_first_not_of(f), 0u);
EXPECT_EQ(a.find_first_not_of(d), 0u);
EXPECT_EQ(a.find_first_not_of(e), 0u);
EXPECT_EQ(a.find_first_not_of(d, 1), 1u);
EXPECT_EQ(a.find_first_not_of(e, 1), 1u);
EXPECT_EQ(a.find_first_not_of(d, a.size() - 1), a.size() - 1);
EXPECT_EQ(a.find_first_not_of(e, a.size() - 1), a.size() - 1);
EXPECT_EQ(a.find_first_not_of(d, a.size()), absl::string_view::npos);
EXPECT_EQ(a.find_first_not_of(e, a.size()), absl::string_view::npos);
EXPECT_EQ(a.find_first_not_of(d, absl::string_view::npos),
absl::string_view::npos);
EXPECT_EQ(a.find_first_not_of(e, absl::string_view::npos),
absl::string_view::npos);
EXPECT_EQ(d.find_first_not_of(a), absl::string_view::npos);
EXPECT_EQ(e.find_first_not_of(a), absl::string_view::npos);
EXPECT_EQ(d.find_first_not_of(d), absl::string_view::npos);
EXPECT_EQ(e.find_first_not_of(d), absl::string_view::npos);
EXPECT_EQ(d.find_first_not_of(e), absl::string_view::npos);
EXPECT_EQ(e.find_first_not_of(e), absl::string_view::npos);
absl::string_view h("====");
EXPECT_EQ(h.find_first_not_of('='), absl::string_view::npos);
EXPECT_EQ(h.find_first_not_of('=', 3), absl::string_view::npos);
EXPECT_EQ(h.find_first_not_of('\0'), 0u);
EXPECT_EQ(g.find_first_not_of('x'), 2u);
EXPECT_EQ(f.find_first_not_of('\0'), 0u);
EXPECT_EQ(f.find_first_not_of('\0', 3), 4u);
EXPECT_EQ(f.find_first_not_of('\0', 2), 2u);
EXPECT_EQ(d.find_first_not_of('x'), absl::string_view::npos);
EXPECT_EQ(e.find_first_not_of('x'), absl::string_view::npos);
EXPECT_EQ(d.find_first_not_of('\0'), absl::string_view::npos);
EXPECT_EQ(e.find_first_not_of('\0'), absl::string_view::npos);
}
TEST(StringViewTest, STL2FindLast) {
const absl::string_view a("abcdefghijklmnopqrstuvwxyz");
const absl::string_view b("abc");
const absl::string_view c("xyz");
absl::string_view d("foobar");
const absl::string_view e;
const absl::string_view f(
"123"
"\0"
"456",
7);
absl::string_view g("xx not found bb");
absl::string_view h("====");
absl::string_view i("56");
d = absl::string_view();
EXPECT_EQ(h.find_last_of(a), absl::string_view::npos);
EXPECT_EQ(g.find_last_of(a), g.size() - 1);
EXPECT_EQ(a.find_last_of(b), 2u);
EXPECT_EQ(a.find_last_of(c), a.size() - 1);
EXPECT_EQ(f.find_last_of(i), 6u);
EXPECT_EQ(a.find_last_of('a'), 0u);
EXPECT_EQ(a.find_last_of('b'), 1u);
EXPECT_EQ(a.find_last_of('z'), 25u);
EXPECT_EQ(a.find_last_of('a', 5), 0u);
EXPECT_EQ(a.find_last_of('b', 5), 1u);
EXPECT_EQ(a.find_last_of('b', 0), absl::string_view::npos);
EXPECT_EQ(a.find_last_of('z', 25), 25u);
EXPECT_EQ(a.find_last_of('z', 24), absl::string_view::npos);
EXPECT_EQ(f.find_last_of(i, 5), 5u);
EXPECT_EQ(f.find_last_of(i, 6), 6u);
EXPECT_EQ(f.find_last_of(a, 4), absl::string_view::npos);
EXPECT_EQ(f.find_last_of(d), absl::string_view::npos);
EXPECT_EQ(f.find_last_of(e), absl::string_view::npos);
EXPECT_EQ(f.find_last_of(d, 4), absl::string_view::npos);
EXPECT_EQ(f.find_last_of(e, 4), absl::string_view::npos);
EXPECT_EQ(d.find_last_of(d), absl::string_view::npos);
EXPECT_EQ(d.find_last_of(e), absl::string_view::npos);
EXPECT_EQ(e.find_last_of(d), absl::string_view::npos);
EXPECT_EQ(e.find_last_of(e), absl::string_view::npos);
EXPECT_EQ(d.find_last_of(f), absl::string_view::npos);
EXPECT_EQ(e.find_last_of(f), absl::string_view::npos);
EXPECT_EQ(d.find_last_of(d, 4), absl::string_view::npos);
EXPECT_EQ(d.find_last_of(e, 4), absl::string_view::npos);
EXPECT_EQ(e.find_last_of(d, 4), absl::string_view::npos);
EXPECT_EQ(e.find_last_of(e, 4), absl::string_view::npos);
EXPECT_EQ(d.find_last_of(f, 4), absl::string_view::npos);
EXPECT_EQ(e.find_last_of(f, 4), absl::string_view::npos);
EXPECT_EQ(a.find_last_not_of(b), a.size() - 1);
EXPECT_EQ(a.find_last_not_of(c), 22u);
EXPECT_EQ(b.find_last_not_of(a), absl::string_view::npos);
EXPECT_EQ(b.find_last_not_of(b), absl::string_view::npos);
EXPECT_EQ(f.find_last_not_of(i), 4u);
EXPECT_EQ(a.find_last_not_of(c, 24), 22u);
EXPECT_EQ(a.find_last_not_of(b, 3), 3u);
EXPECT_EQ(a.find_last_not_of(b, 2), absl::string_view::npos);
EXPECT_EQ(f.find_last_not_of(d), f.size() - 1);
EXPECT_EQ(f.find_last_not_of(e), f.size() - 1);
EXPECT_EQ(f.find_last_not_of(d, 4), 4u);
EXPECT_EQ(f.find_last_not_of(e, 4), 4u);
EXPECT_EQ(d.find_last_not_of(d), absl::string_view::npos);
EXPECT_EQ(d.find_last_not_of(e), absl::string_view::npos);
EXPECT_EQ(e.find_last_not_of(d), absl::string_view::npos);
EXPECT_EQ(e.find_last_not_of(e), absl::string_view::npos);
EXPECT_EQ(d.find_last_not_of(f), absl::string_view::npos);
EXPECT_EQ(e.find_last_not_of(f), absl::string_view::npos);
EXPECT_EQ(d.find_last_not_of(d, 4), absl::string_view::npos);
EXPECT_EQ(d.find_last_not_of(e, 4), absl::string_view::npos);
EXPECT_EQ(e.find_last_not_of(d, 4), absl::string_view::npos);
EXPECT_EQ(e.find_last_not_of(e, 4), absl::string_view::npos);
EXPECT_EQ(d.find_last_not_of(f, 4), absl::string_view::npos);
EXPECT_EQ(e.find_last_not_of(f, 4), absl::string_view::npos);
EXPECT_EQ(h.find_last_not_of('x'), h.size() - 1);
EXPECT_EQ(h.find_last_not_of('='), absl::string_view::npos);
EXPECT_EQ(b.find_last_not_of('c'), 1u);
EXPECT_EQ(h.find_last_not_of('x', 2), 2u);
EXPECT_EQ(h.find_last_not_of('=', 2), absl::string_view::npos);
EXPECT_EQ(b.find_last_not_of('b', 1), 0u);
EXPECT_EQ(d.find_last_not_of('x'), absl::string_view::npos);
EXPECT_EQ(e.find_last_not_of('x'), absl::string_view::npos);
EXPECT_EQ(d.find_last_not_of('\0'), absl::string_view::npos);
EXPECT_EQ(e.find_last_not_of('\0'), absl::string_view::npos);
}
TEST(StringViewTest, STL2Substr) {
const absl::string_view a("abcdefghijklmnopqrstuvwxyz");
const absl::string_view b("abc");
const absl::string_view c("xyz");
absl::string_view d("foobar");
const absl::string_view e;
d = absl::string_view();
EXPECT_EQ(a.substr(0, 3), b);
EXPECT_EQ(a.substr(23), c);
EXPECT_EQ(a.substr(23, 3), c);
EXPECT_EQ(a.substr(23, 99), c);
EXPECT_EQ(a.substr(0), a);
EXPECT_EQ(a.substr(), a);
EXPECT_EQ(a.substr(3, 2), "de");
EXPECT_EQ(d.substr(0, 99), e);
EXPECT_EQ(a.substr(0, absl::string_view::npos), a);
EXPECT_EQ(a.substr(23, absl::string_view::npos), c);
#ifdef ABSL_HAVE_EXCEPTIONS
EXPECT_THROW((void)a.substr(99, 2), std::out_of_range);
#else
ABSL_EXPECT_DEATH_IF_SUPPORTED((void)a.substr(99, 2),
"absl::string_view::substr");
#endif
}
TEST(StringViewTest, TruncSubstr) {
const absl::string_view hi("hi");
EXPECT_EQ("", absl::ClippedSubstr(hi, 0, 0));
EXPECT_EQ("h", absl::ClippedSubstr(hi, 0, 1));
EXPECT_EQ("hi", absl::ClippedSubstr(hi, 0));
EXPECT_EQ("i", absl::ClippedSubstr(hi, 1));
EXPECT_EQ("", absl::ClippedSubstr(hi, 2));
EXPECT_EQ("", absl::ClippedSubstr(hi, 3));
EXPECT_EQ("", absl::ClippedSubstr(hi, 3, 2));
}
TEST(StringViewTest, UTF8) {
std::string utf8 = "\u00E1";
std::string utf8_twice = utf8 + " " + utf8;
size_t utf8_len = strlen(utf8.data());
EXPECT_EQ(utf8_len, absl::string_view(utf8_twice).find_first_of(" "));
EXPECT_EQ(utf8_len, absl::string_view(utf8_twice).find_first_of(" \t"));
}
TEST(StringViewTest, FindConformance) {
struct {
std::string haystack;
std::string needle;
} specs[] = {
{"", ""},
{"", "a"},
{"a", ""},
{"a", "a"},
{"a", "b"},
{"aa", ""},
{"aa", "a"},
{"aa", "b"},
{"ab", "a"},
{"ab", "b"},
{"abcd", ""},
{"abcd", "a"},
{"abcd", "d"},
{"abcd", "ab"},
{"abcd", "bc"},
{"abcd", "cd"},
{"abcd", "abcd"},
};
for (const auto& s : specs) {
SCOPED_TRACE(s.haystack);
SCOPED_TRACE(s.needle);
std::string st = s.haystack;
absl::string_view sp = s.haystack;
for (size_t i = 0; i <= sp.size(); ++i) {
size_t pos = (i == sp.size()) ? absl::string_view::npos : i;
SCOPED_TRACE(pos);
EXPECT_EQ(sp.find(s.needle, pos),
st.find(s.needle, pos));
EXPECT_EQ(sp.rfind(s.needle, pos),
st.rfind(s.needle, pos));
EXPECT_EQ(sp.find_first_of(s.needle, pos),
st.find_first_of(s.needle, pos));
EXPECT_EQ(sp.find_first_not_of(s.needle, pos),
st.find_first_not_of(s.needle, pos));
EXPECT_EQ(sp.find_last_of(s.needle, pos),
st.find_last_of(s.needle, pos));
EXPECT_EQ(sp.find_last_not_of(s.needle, pos),
st.find_last_not_of(s.needle, pos));
}
}
}
TEST(StringViewTest, Remove) {
absl::string_view a("foobar");
std::string s1("123");
s1 += '\0';
s1 += "456";
absl::string_view e;
std::string s2;
absl::string_view c(a);
c.remove_prefix(3);
EXPECT_EQ(c, "bar");
c = a;
c.remove_prefix(0);
EXPECT_EQ(c, a);
c.remove_prefix(c.size());
EXPECT_EQ(c, e);
c = a;
c.remove_suffix(3);
EXPECT_EQ(c, "foo");
c = a;
c.remove_suffix(0);
EXPECT_EQ(c, a);
c.remove_suffix(c.size());
EXPECT_EQ(c, e);
}
TEST(StringViewTest, Set) {
absl::string_view a("foobar");
absl::string_view empty;
absl::string_view b;
b = absl::string_view("foobar", 6);
EXPECT_EQ(b, a);
b = absl::string_view("foobar", 0);
EXPECT_EQ(b, empty);
b = absl::string_view("foobar", 7);
EXPECT_NE(b, a);
b = absl::string_view("foobar");
EXPECT_EQ(b, a);
}
TEST(StringViewTest, FrontBack) {
static const char arr[] = "abcd";
const absl::string_view csp(arr, 4);
EXPECT_EQ(&arr[0], &csp.front());
EXPECT_EQ(&arr[3], &csp.back());
}
TEST(StringViewTest, FrontBackSingleChar) {
static const char c = 'a';
const absl::string_view csp(&c, 1);
EXPECT_EQ(&c, &csp.front());
EXPECT_EQ(&c, &csp.back());
}
TEST(StringViewTest, FrontBackEmpty) {
#ifndef ABSL_USES_STD_STRING_VIEW
#if !defined(NDEBUG) || ABSL_OPTION_HARDENED
absl::string_view sv;
ABSL_EXPECT_DEATH_IF_SUPPORTED(sv.front(), "");
ABSL_EXPECT_DEATH_IF_SUPPORTED(sv.back(), "");
#endif
#endif
}
#if !defined(ABSL_USES_STD_STRING_VIEW) || \
(!(defined(_GLIBCXX_RELEASE) && _GLIBCXX_RELEASE >= 9) && \
!defined(_LIBCPP_VERSION) && !defined(_MSC_VER))
#define ABSL_HAVE_STRING_VIEW_FROM_NULLPTR 1
#endif
TEST(StringViewTest, NULLInput) {
absl::string_view s;
EXPECT_EQ(s.data(), nullptr);
EXPECT_EQ(s.size(), 0u);
#ifdef ABSL_HAVE_STRING_VIEW_FROM_NULLPTR
char* null_str = nullptr;
s = absl::string_view(null_str);
EXPECT_EQ(s.data(), nullptr);
EXPECT_EQ(s.size(), 0u);
EXPECT_EQ("", std::string(s));
#endif
}
TEST(StringViewTest, Comparisons2) {
absl::string_view abc("abcdefghijklmnopqrstuvwxyz");
EXPECT_EQ(abc, absl::string_view("abcdefghijklmnopqrstuvwxyz"));
EXPECT_EQ(abc.compare(absl::string_view("abcdefghijklmnopqrstuvwxyz")), 0);
EXPECT_LT(abc, absl::string_view("abcdefghijklmnopqrstuvwxzz"));
EXPECT_LT(abc.compare(absl::string_view("abcdefghijklmnopqrstuvwxzz")), 0);
EXPECT_GT(abc, absl::string_view("abcdefghijklmnopqrstuvwxyy"));
EXPECT_GT(abc.compare(absl::string_view("abcdefghijklmnopqrstuvwxyy")), 0);
absl::string_view digits("0123456789");
auto npos = absl::string_view::npos;
EXPECT_EQ(digits.compare(3, npos, absl::string_view("3456789")), 0);
EXPECT_EQ(digits.compare(3, 4, absl::string_view("3456")), 0);
EXPECT_EQ(digits.compare(10, 0, absl::string_view()), 0);
EXPECT_EQ(digits.compare(3, 4, absl::string_view("0123456789"), 3, 4),
0);
EXPECT_LT(digits.compare(3, 4, absl::string_view("0123456789"), 3, 5),
0);
EXPECT_LT(digits.compare(0, npos, absl::string_view("0123456789"), 3, 5),
0);
EXPECT_EQ(digits.compare(3, 4, "3456"), 0);
EXPECT_EQ(digits.compare(3, npos, "3456789"), 0);
EXPECT_EQ(digits.compare(10, 0, ""), 0);
EXPECT_EQ(digits.compare(3, 4, "0123456789", 3, 4), 0);
EXPECT_LT(digits.compare(3, 4, "0123456789", 3, 5), 0);
EXPECT_LT(digits.compare(0, npos, "0123456789", 3, 5), 0);
}
TEST(StringViewTest, At) {
absl::string_view abc = "abc";
EXPECT_EQ(abc.at(0), 'a');
EXPECT_EQ(abc.at(1), 'b');
EXPECT_EQ(abc.at(2), 'c');
#ifdef ABSL_HAVE_EXCEPTIONS
EXPECT_THROW((void)abc.at(3), std::out_of_range);
#else
ABSL_EXPECT_DEATH_IF_SUPPORTED((void)abc.at(3), "absl::string_view::at");
#endif
}
#if ABSL_INTERNAL_CPLUSPLUS_LANG >= 202002L
TEST(StringViewTest, StartsWith) {
const absl::string_view a("foobar");
const absl::string_view b("123\0abc", 7);
const absl::string_view e;
EXPECT_TRUE(a.starts_with(a));
EXPECT_TRUE(a.starts_with("foo"));
EXPECT_TRUE(a.starts_with('f'));
EXPECT_TRUE(a.starts_with(e));
EXPECT_TRUE(b.starts_with(b));
EXPECT_TRUE(b.starts_with('1'));
EXPECT_TRUE(b.starts_with(e));
EXPECT_TRUE(e.starts_with(""));
EXPECT_FALSE(a.starts_with(b));
EXPECT_FALSE(b.starts_with(a));
EXPECT_FALSE(e.starts_with(a));
EXPECT_FALSE(a.starts_with('r'));
EXPECT_FALSE(a.starts_with('\0'));
EXPECT_FALSE(e.starts_with('r'));
EXPECT_FALSE(e.starts_with('\0'));
constexpr absl::string_view kFooBar("foobar");
constexpr absl::string_view kFoo("foo");
constexpr absl::string_view kBar("bar");
constexpr bool k1 = kFooBar.starts_with(kFoo);
EXPECT_TRUE(k1);
constexpr bool k2 = kFooBar.starts_with(kBar);
EXPECT_FALSE(k2);
constexpr bool k3 = kFooBar.starts_with('f');
EXPECT_TRUE(k3);
constexpr bool k4 = kFooBar.starts_with("fo");
EXPECT_TRUE(k4);
}
TEST(StringViewTest, EndsWith) {
const absl::string_view a("foobar");
const absl::string_view b("123\0abc", 7);
const absl::string_view e;
EXPECT_TRUE(a.ends_with(a));
EXPECT_TRUE(a.ends_with('r'));
EXPECT_TRUE(a.ends_with("bar"));
EXPECT_TRUE(a.ends_with(e));
EXPECT_TRUE(b.ends_with(b));
EXPECT_TRUE(b.ends_with('c'));
EXPECT_TRUE(b.ends_with(e));
EXPECT_TRUE(e.ends_with(""));
EXPECT_FALSE(a.ends_with(b));
EXPECT_FALSE(b.ends_with(a));
EXPECT_FALSE(e.ends_with(a));
EXPECT_FALSE(a.ends_with('f'));
EXPECT_FALSE(a.ends_with('\0'));
EXPECT_FALSE(e.ends_with('r'));
EXPECT_FALSE(e.ends_with('\0'));
constexpr absl::string_view kFooBar("foobar");
constexpr absl::string_view kFoo("foo");
constexpr absl::string_view kBar("bar");
constexpr bool k1 = kFooBar.ends_with(kFoo);
EXPECT_FALSE(k1);
constexpr bool k2 = kFooBar.ends_with(kBar);
EXPECT_TRUE(k2);
constexpr bool k3 = kFooBar.ends_with('r');
EXPECT_TRUE(k3);
constexpr bool k4 = kFooBar.ends_with("ar");
EXPECT_TRUE(k4);
}
#endif
struct MyCharAlloc : std::allocator<char> {};
TEST(StringViewTest, ExplicitConversionOperator) {
absl::string_view sp = "hi";
EXPECT_EQ(sp, std::string(sp));
}
TEST(StringViewTest, NullSafeStringView) {
{
absl::string_view s = absl::NullSafeStringView(nullptr);
EXPECT_EQ(nullptr, s.data());
EXPECT_EQ(0u, s.size());
EXPECT_EQ(absl::string_view(), s);
}
{
static const char kHi[] = "hi";
absl::string_view s = absl::NullSafeStringView(kHi);
EXPECT_EQ(kHi, s.data());
EXPECT_EQ(strlen(kHi), s.size());
EXPECT_EQ(absl::string_view("hi"), s);
}
}
TEST(StringViewTest, ConstexprNullSafeStringView) {
{
constexpr absl::string_view s = absl::NullSafeStringView(nullptr);
EXPECT_EQ(nullptr, s.data());
EXPECT_EQ(0u, s.size());
EXPECT_EQ(absl::string_view(), s);
}
{
static constexpr char kHi[] = "hi";
absl::string_view s = absl::NullSafeStringView(kHi);
EXPECT_EQ(kHi, s.data());
EXPECT_EQ(strlen(kHi), s.size());
EXPECT_EQ(absl::string_view("hi"), s);
}
{
constexpr absl::string_view s = absl::NullSafeStringView("hello");
EXPECT_EQ(s.size(), 5u);
EXPECT_EQ("hello", s);
}
}
TEST(StringViewTest, ConstexprCompiles) {
constexpr absl::string_view sp;
#if defined(__clang__)
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wnonnull"
#endif
#ifdef ABSL_HAVE_STRING_VIEW_FROM_NULLPTR
constexpr absl::string_view cstr(nullptr);
#endif
#if defined(__clang__)
#pragma clang diagnostic pop
#endif
constexpr absl::string_view cstr_len("cstr", 4);
#if defined(ABSL_USES_STD_STRING_VIEW)
#if !defined(__GLIBCXX__)
#define ABSL_HAVE_CONSTEXPR_STRING_VIEW_FROM_CSTR 1
#endif
#else
#if ABSL_HAVE_BUILTIN(__builtin_strlen) || \
(defined(__GNUC__) && !defined(__clang__))
#define ABSL_HAVE_CONSTEXPR_STRING_VIEW_FROM_CSTR 1
#elif defined(__GNUC__)
#error GCC/clang should have constexpr string_view.
#endif
#if defined(_MSC_VER) && _MSC_VER >= 1910
#define ABSL_HAVE_CONSTEXPR_STRING_VIEW_FROM_CSTR 1
#endif
#endif
#ifdef ABSL_HAVE_CONSTEXPR_STRING_VIEW_FROM_CSTR
constexpr absl::string_view cstr_strlen("foo");
EXPECT_EQ(cstr_strlen.length(), 3u);
constexpr absl::string_view cstr_strlen2 = "bar";
EXPECT_EQ(cstr_strlen2, "bar");
#if ABSL_HAVE_BUILTIN(__builtin_memcmp) || \
(defined(__GNUC__) && !defined(__clang__))
#define ABSL_HAVE_CONSTEXPR_STRING_VIEW_COMPARISON 1
#endif
#ifdef ABSL_HAVE_CONSTEXPR_STRING_VIEW_COMPARISON
constexpr absl::string_view foo = "foo";
constexpr absl::string_view bar = "bar";
constexpr bool foo_eq_bar = foo == bar;
constexpr bool foo_ne_bar = foo != bar;
constexpr bool foo_lt_bar = foo < bar;
constexpr bool foo_le_bar = foo <= bar;
constexpr bool foo_gt_bar = foo > bar;
constexpr bool foo_ge_bar = foo >= bar;
constexpr int foo_compare_bar = foo.compare(bar);
EXPECT_FALSE(foo_eq_bar);
EXPECT_TRUE(foo_ne_bar);
EXPECT_FALSE(foo_lt_bar);
EXPECT_FALSE(foo_le_bar);
EXPECT_TRUE(foo_gt_bar);
EXPECT_TRUE(foo_ge_bar);
EXPECT_GT(foo_compare_bar, 0);
#endif
#endif
#if !defined(__clang__) || 3 < __clang_major__ || \
(3 == __clang_major__ && 4 < __clang_minor__)
constexpr absl::string_view::iterator const_begin_empty = sp.begin();
constexpr absl::string_view::iterator const_end_empty = sp.end();
EXPECT_EQ(const_begin_empty, const_end_empty);
#ifdef ABSL_HAVE_STRING_VIEW_FROM_NULLPTR
constexpr absl::string_view::iterator const_begin_nullptr = cstr.begin();
constexpr absl::string_view::iterator const_end_nullptr = cstr.end();
EXPECT_EQ(const_begin_nullptr, const_end_nullptr);
#endif
#endif
constexpr absl::string_view::iterator const_begin = cstr_len.begin();
constexpr absl::string_view::iterator const_end = cstr_len.end();
constexpr absl::string_view::size_type const_size = cstr_len.size();
constexpr absl::string_view::size_type const_length = cstr_len.length();
static_assert(const_begin + const_size == const_end,
"pointer arithmetic check");
static_assert(const_begin + const_length == const_end,
"pointer arithmetic check");
#ifndef _MSC_VER
EXPECT_EQ(const_begin + const_size, const_end);
EXPECT_EQ(const_begin + const_length, const_end);
#endif
constexpr bool isempty = sp.empty();
EXPECT_TRUE(isempty);
constexpr const char c = cstr_len[2];
EXPECT_EQ(c, 't');
constexpr const char cfront = cstr_len.front();
constexpr const char cback = cstr_len.back();
EXPECT_EQ(cfront, 'c');
EXPECT_EQ(cback, 'r');
constexpr const char* np = sp.data();
constexpr const char* cstr_ptr = cstr_len.data();
EXPECT_EQ(np, nullptr);
EXPECT_NE(cstr_ptr, nullptr);
constexpr size_t sp_npos = sp.npos;
EXPECT_EQ(sp_npos, static_cast<size_t>(-1));
}
constexpr char ConstexprMethodsHelper() {
#if defined(__cplusplus) && __cplusplus >= 201402L
absl::string_view str("123", 3);
str.remove_prefix(1);
str.remove_suffix(1);
absl::string_view bar;
str.swap(bar);
return bar.front();
#else
return '2';
#endif
}
TEST(StringViewTest, ConstexprMethods) {
static_assert(ConstexprMethodsHelper() == '2', "");
constexpr absl::string_view foobar("foobar", 6);
constexpr absl::string_view foo = foobar.substr(0, 3);
constexpr absl::string_view bar = foobar.substr(3);
EXPECT_EQ(foo, "foo");
EXPECT_EQ(bar, "bar");
}
TEST(StringViewTest, Noexcept) {
EXPECT_TRUE((std::is_nothrow_constructible<absl::string_view,
const std::string&>::value));
EXPECT_TRUE(std::is_nothrow_constructible<absl::string_view>::value);
constexpr absl::string_view sp;
EXPECT_TRUE(noexcept(sp.begin()));
EXPECT_TRUE(noexcept(sp.end()));
EXPECT_TRUE(noexcept(sp.cbegin()));
EXPECT_TRUE(noexcept(sp.cend()));
EXPECT_TRUE(noexcept(sp.rbegin()));
EXPECT_TRUE(noexcept(sp.rend()));
EXPECT_TRUE(noexcept(sp.crbegin()));
EXPECT_TRUE(noexcept(sp.crend()));
EXPECT_TRUE(noexcept(sp.size()));
EXPECT_TRUE(noexcept(sp.length()));
EXPECT_TRUE(noexcept(sp.empty()));
EXPECT_TRUE(noexcept(sp.data()));
EXPECT_TRUE(noexcept(sp.compare(sp)));
EXPECT_TRUE(noexcept(sp.find(sp)));
EXPECT_TRUE(noexcept(sp.find('f')));
EXPECT_TRUE(noexcept(sp.rfind(sp)));
EXPECT_TRUE(noexcept(sp.rfind('f')));
EXPECT_TRUE(noexcept(sp.find_first_of(sp)));
EXPECT_TRUE(noexcept(sp.find_first_of('f')));
EXPECT_TRUE(noexcept(sp.find_last_of(sp)));
EXPECT_TRUE(noexcept(sp.find_last_of('f')));
EXPECT_TRUE(noexcept(sp.find_first_not_of(sp)));
EXPECT_TRUE(noexcept(sp.find_first_not_of('f')));
EXPECT_TRUE(noexcept(sp.find_last_not_of(sp)));
EXPECT_TRUE(noexcept(sp.find_last_not_of('f')));
}
TEST(StringViewTest, BoundsCheck) {
#ifndef ABSL_USES_STD_STRING_VIEW
#if !defined(NDEBUG) || ABSL_OPTION_HARDENED
absl::string_view h = "hello";
ABSL_EXPECT_DEATH_IF_SUPPORTED(h[5], "");
ABSL_EXPECT_DEATH_IF_SUPPORTED(h[static_cast<size_t>(-1)], "");
#endif
#endif
}
TEST(ComparisonOpsTest, StringCompareNotAmbiguous) {
EXPECT_EQ("hello", std::string("hello"));
EXPECT_LT("hello", std::string("world"));
}
TEST(ComparisonOpsTest, HeterogeneousStringViewEquals) {
EXPECT_EQ(absl::string_view("hello"), std::string("hello"));
EXPECT_EQ("hello", absl::string_view("hello"));
}
TEST(FindOneCharTest, EdgeCases) {
absl::string_view a("xxyyyxx");
a.remove_prefix(1);
a.remove_suffix(1);
EXPECT_EQ(0u, a.find('x'));
EXPECT_EQ(0u, a.find('x', 0));
EXPECT_EQ(4u, a.find('x', 1));
EXPECT_EQ(4u, a.find('x', 4));
EXPECT_EQ(absl::string_view::npos, a.find('x', 5));
EXPECT_EQ(4u, a.rfind('x'));
EXPECT_EQ(4u, a.rfind('x', 5));
EXPECT_EQ(4u, a.rfind('x', 4));
EXPECT_EQ(0u, a.rfind('x', 3));
EXPECT_EQ(0u, a.rfind('x', 0));
a.remove_prefix(1);
a.remove_suffix(1);
EXPECT_EQ(absl::string_view::npos, a.find('x'));
EXPECT_EQ(absl::string_view::npos, a.rfind('x'));
}
#ifndef ABSL_HAVE_THREAD_SANITIZER
TEST(HugeStringView, TwoPointTwoGB) {
if (sizeof(size_t) <= 4)
return;
const size_t size = size_t{2200} * 1000 * 1000;
std::string s(size, 'a');
absl::string_view sp(s);
EXPECT_EQ(size, sp.length());
sp.remove_prefix(1);
EXPECT_EQ(size - 1, sp.length());
sp.remove_suffix(2);
EXPECT_EQ(size - 1 - 2, sp.length());
}
#endif
#if !defined(NDEBUG) && !defined(ABSL_USES_STD_STRING_VIEW)
TEST(NonNegativeLenTest, NonNegativeLen) {
ABSL_EXPECT_DEATH_IF_SUPPORTED(
absl::string_view("xyz", static_cast<size_t>(-1)), "len <= kMaxSize");
}
TEST(LenExceedsMaxSizeTest, LenExceedsMaxSize) {
auto max_size = absl::string_view().max_size();
absl::string_view ok_view("", max_size);
ABSL_EXPECT_DEATH_IF_SUPPORTED(absl::string_view("", max_size + 1),
"len <= kMaxSize");
}
#endif
class StringViewStreamTest : public ::testing::Test {
public:
template <typename T>
std::string Pad(const T& s, int width, char fill = 0) {
std::ostringstream oss;
if (fill != 0) {
oss << std::setfill(fill);
}
if (width < 0) {
width = -width;
oss << std::right;
}
oss << std::setw(width) << s;
return oss.str();
}
};
TEST_F(StringViewStreamTest, Padding) {
std::string s("hello");
absl::string_view sp(s);
for (int w = -64; w < 64; ++w) {
SCOPED_TRACE(w);
EXPECT_EQ(Pad(s, w), Pad(sp, w));
}
for (int w = -64; w < 64; ++w) {
SCOPED_TRACE(w);
EXPECT_EQ(Pad(s, w, '#'), Pad(sp, w, '#'));
}
}
TEST_F(StringViewStreamTest, ResetsWidth) {
std::string s = "hi";
absl::string_view sp = s;
{
std::ostringstream oss;
oss << "[" << std::setfill('#') << std::setw(5) << s << "]";
ASSERT_EQ("[###hi]", oss.str());
}
{
std::ostringstream oss;
oss << "[" << std::setfill('#') << std::setw(5) << sp << "]";
EXPECT_EQ("[###hi]", oss.str());
}
}
} | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/strings/string_view.cc | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/strings/string_view_test.cc | 03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4 |
d2483bbb-3d3a-4291-b99e-8aba17f70f7e | cpp | google/cel-cpp | field_backed_list_impl | eval/public/containers/field_backed_list_impl.h | eval/public/containers/field_backed_list_impl_test.cc | #ifndef THIRD_PARTY_CEL_CPP_EVAL_PUBLIC_CONTAINERS_FIELD_BACKED_LIST_IMPL_H_
#define THIRD_PARTY_CEL_CPP_EVAL_PUBLIC_CONTAINERS_FIELD_BACKED_LIST_IMPL_H_
#include "eval/public/cel_value.h"
#include "eval/public/containers/internal_field_backed_list_impl.h"
#include "eval/public/structs/cel_proto_wrapper.h"
namespace google {
namespace api {
namespace expr {
namespace runtime {
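// CelList over a repeated message field: a thin adapter that binds the
// internal implementation to CelProtoWrapper's message-wrapping factory.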
class FieldBackedListImpl : public internal::FieldBackedListImpl {
public:
FieldBackedListImpl(const google::protobuf::Message* message,
const google::protobuf::FieldDescriptor* descriptor,
google::protobuf::Arena* arena)
: internal::FieldBackedListImpl(
message, descriptor, &CelProtoWrapper::InternalWrapMessage, arena) {
}
};
}
}
}
}
#endif | #include "eval/public/containers/field_backed_list_impl.h"
#include <memory>
#include <string>
#include "eval/testutil/test_message.pb.h"
#include "internal/testing.h"
#include "testutil/util.h"
namespace google {
namespace api {
namespace expr {
namespace runtime {
namespace {
using ::testing::DoubleEq;
using ::testing::Eq;
using testutil::EqualsProto;
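// Builds a FieldBackedListImpl over the named repeated field of `message`.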
std::unique_ptr<CelList> CreateList(const TestMessage* message,
const std::string& field,
google::protobuf::Arena* arena) {
const google::protobuf::FieldDescriptor* field_desc =
message->GetDescriptor()->FindFieldByName(field);
return std::make_unique<FieldBackedListImpl>(message, field_desc, arena);
}
TEST(FieldBackedListImplTest, BoolDatatypeTest) {
TestMessage message;
message.add_bool_list(true);
message.add_bool_list(false);
google::protobuf::Arena arena;
auto cel_list = CreateList(&message, "bool_list", &arena);
ASSERT_EQ(cel_list->size(), 2);
EXPECT_EQ((*cel_list)[0].BoolOrDie(), true);
EXPECT_EQ((*cel_list)[1].BoolOrDie(), false);
}
TEST(FieldBackedListImplTest, TestLength0) {
TestMessage message;
google::protobuf::Arena arena;
auto cel_list = CreateList(&message, "int32_list", &arena);
ASSERT_EQ(cel_list->size(), 0);
}
TEST(FieldBackedListImplTest, TestLength1) {
TestMessage message;
message.add_int32_list(1);
google::protobuf::Arena arena;
auto cel_list = CreateList(&message, "int32_list", &arena);
ASSERT_EQ(cel_list->size(), 1);
EXPECT_EQ((*cel_list)[0].Int64OrDie(), 1);
}
TEST(FieldBackedListImplTest, TestLength100000) {
TestMessage message;
const int kLen = 100000;
for (int i = 0; i < kLen; i++) {
message.add_int32_list(i);
}
google::protobuf::Arena arena;
auto cel_list = CreateList(&message, "int32_list", &arena);
ASSERT_EQ(cel_list->size(), kLen);
for (int i = 0; i < kLen; i++) {
EXPECT_EQ((*cel_list)[i].Int64OrDie(), i);
}
}
TEST(FieldBackedListImplTest, Int32DatatypeTest) {
TestMessage message;
message.add_int32_list(1);
message.add_int32_list(2);
google::protobuf::Arena arena;
auto cel_list = CreateList(&message, "int32_list", &arena);
ASSERT_EQ(cel_list->size(), 2);
EXPECT_EQ((*cel_list)[0].Int64OrDie(), 1);
EXPECT_EQ((*cel_list)[1].Int64OrDie(), 2);
}
TEST(FieldBackedListImplTest, Int64DatatypeTest) {
TestMessage message;
message.add_int64_list(1);
message.add_int64_list(2);
google::protobuf::Arena arena;
auto cel_list = CreateList(&message, "int64_list", &arena);
ASSERT_EQ(cel_list->size(), 2);
EXPECT_EQ((*cel_list)[0].Int64OrDie(), 1);
EXPECT_EQ((*cel_list)[1].Int64OrDie(), 2);
}
TEST(FieldBackedListImplTest, Uint32DatatypeTest) {
TestMessage message;
message.add_uint32_list(1);
message.add_uint32_list(2);
google::protobuf::Arena arena;
auto cel_list = CreateList(&message, "uint32_list", &arena);
ASSERT_EQ(cel_list->size(), 2);
EXPECT_EQ((*cel_list)[0].Uint64OrDie(), 1);
EXPECT_EQ((*cel_list)[1].Uint64OrDie(), 2);
}
TEST(FieldBackedListImplTest, Uint64DatatypeTest) {
TestMessage message;
message.add_uint64_list(1);
message.add_uint64_list(2);
google::protobuf::Arena arena;
auto cel_list = CreateList(&message, "uint64_list", &arena);
ASSERT_EQ(cel_list->size(), 2);
EXPECT_EQ((*cel_list)[0].Uint64OrDie(), 1);
EXPECT_EQ((*cel_list)[1].Uint64OrDie(), 2);
}
TEST(FieldBackedListImplTest, FloatDatatypeTest) {
TestMessage message;
message.add_float_list(1);
message.add_float_list(2);
google::protobuf::Arena arena;
auto cel_list = CreateList(&message, "float_list", &arena);
ASSERT_EQ(cel_list->size(), 2);
EXPECT_THAT((*cel_list)[0].DoubleOrDie(), DoubleEq(1));
EXPECT_THAT((*cel_list)[1].DoubleOrDie(), DoubleEq(2));
}
TEST(FieldBackedListImplTest, DoubleDatatypeTest) {
TestMessage message;
message.add_double_list(1);
message.add_double_list(2);
google::protobuf::Arena arena;
auto cel_list = CreateList(&message, "double_list", &arena);
ASSERT_EQ(cel_list->size(), 2);
EXPECT_THAT((*cel_list)[0].DoubleOrDie(), DoubleEq(1));
EXPECT_THAT((*cel_list)[1].DoubleOrDie(), DoubleEq(2));
}
TEST(FieldBackedListImplTest, StringDatatypeTest) {
TestMessage message;
message.add_string_list("1");
message.add_string_list("2");
google::protobuf::Arena arena;
auto cel_list = CreateList(&message, "string_list", &arena);
ASSERT_EQ(cel_list->size(), 2);
EXPECT_EQ((*cel_list)[0].StringOrDie().value(), "1");
EXPECT_EQ((*cel_list)[1].StringOrDie().value(), "2");
}
TEST(FieldBackedListImplTest, BytesDatatypeTest) {
TestMessage message;
message.add_bytes_list("1");
message.add_bytes_list("2");
google::protobuf::Arena arena;
auto cel_list = CreateList(&message, "bytes_list", &arena);
ASSERT_EQ(cel_list->size(), 2);
EXPECT_EQ((*cel_list)[0].BytesOrDie().value(), "1");
EXPECT_EQ((*cel_list)[1].BytesOrDie().value(), "2");
}
TEST(FieldBackedListImplTest, MessageDatatypeTest) {
TestMessage message;
TestMessage* msg1 = message.add_message_list();
TestMessage* msg2 = message.add_message_list();
msg1->set_string_value("1");
msg2->set_string_value("2");
google::protobuf::Arena arena;
auto cel_list = CreateList(&message, "message_list", &arena);
ASSERT_EQ(cel_list->size(), 2);
EXPECT_THAT(*msg1, EqualsProto(*((*cel_list)[0].MessageOrDie())));
EXPECT_THAT(*msg2, EqualsProto(*((*cel_list)[1].MessageOrDie())));
}
TEST(FieldBackedListImplTest, EnumDatatypeTest) {
TestMessage message;
message.add_enum_list(TestMessage::TEST_ENUM_1);
message.add_enum_list(TestMessage::TEST_ENUM_2);
google::protobuf::Arena arena;
auto cel_list = CreateList(&message, "enum_list", &arena);
ASSERT_EQ(cel_list->size(), 2);
EXPECT_THAT((*cel_list)[0].Int64OrDie(), Eq(TestMessage::TEST_ENUM_1));
EXPECT_THAT((*cel_list)[1].Int64OrDie(), Eq(TestMessage::TEST_ENUM_2));
}
}
}
}
}
} | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/eval/public/containers/field_backed_list_impl.h | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/eval/public/containers/field_backed_list_impl_test.cc | 4552db5798fb0853b131b783d8875794334fae7f |
f1b1e34e-ee3d-41fc-b951-2ce70275d5d5 | cpp | google/libaddressinput | format_element | cpp/src/format_element.cc | cpp/test/format_element_test.cc | #include "format_element.h"
#include <libaddressinput/address_field.h>
#include <cassert>
#include <ostream>
#include <string>
namespace i18n {
namespace addressinput {
FormatElement::FormatElement(AddressField field) : field_(field), literal_() {}
FormatElement::FormatElement(const std::string& literal)
: field_(COUNTRY), literal_(literal) {
assert(!literal.empty());
}
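// A default-constructed element represents a newline: a "\n" literal with the
// (unused) field slot defaulted to COUNTRY.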
FormatElement::FormatElement() : field_(COUNTRY), literal_("\n") {}
bool FormatElement::operator==(const FormatElement& other) const {
return field_ == other.field_ && literal_ == other.literal_;
}
}
}
std::ostream& operator<<(std::ostream& o,
const i18n::addressinput::FormatElement& element) {
if (element.IsField()) {
o << "Field: " << element.GetField();
} else if (element.IsNewline()) {
o << "Newline";
} else {
o << "Literal: " << element.GetLiteral();
}
return o;
} | #include "format_element.h"
#include <libaddressinput/address_field.h>
#include <sstream>
#include <gtest/gtest.h>
namespace {
using i18n::addressinput::FormatElement;
using i18n::addressinput::SORTING_CODE;
TEST(FormatElementTest, StreamFunctionNewline) {
std::ostringstream oss;
oss << FormatElement();
EXPECT_EQ("Newline", oss.str());
}
TEST(FormatElementTest, StreamFunctionLiteral) {
std::ostringstream oss;
oss << FormatElement("Text");
EXPECT_EQ("Literal: Text", oss.str());
}
TEST(FormatElementTest, StreamFunctionField) {
std::ostringstream oss;
oss << FormatElement(SORTING_CODE);
EXPECT_EQ("Field: SORTING_CODE", oss.str());
}
TEST(FormatElementTest, IsNewline) {
EXPECT_TRUE(FormatElement().IsNewline());
EXPECT_FALSE(FormatElement(" ").IsNewline());
EXPECT_FALSE(FormatElement(SORTING_CODE).IsNewline());
}
TEST(FormatElementTest, IsField) {
EXPECT_FALSE(FormatElement().IsField());
EXPECT_FALSE(FormatElement(" ").IsField());
EXPECT_TRUE(FormatElement(SORTING_CODE).IsField());
}
} | https://github.com/google/libaddressinput/blob/2610f7b1043d6784ada41392fc9392d1ea09ea07/cpp/src/format_element.cc | https://github.com/google/libaddressinput/blob/2610f7b1043d6784ada41392fc9392d1ea09ea07/cpp/test/format_element_test.cc | 2610f7b1043d6784ada41392fc9392d1ea09ea07 |
d5168485-2112-465c-ac4b-17cbb673ffaa | cpp | google/cel-cpp | internal_field_backed_map_impl | eval/public/containers/internal_field_backed_map_impl.cc | eval/public/containers/internal_field_backed_map_impl_test.cc | #include "eval/public/containers/internal_field_backed_map_impl.h"
#include <limits>
#include <memory>
#include <string>
#include <utility>
#include "google/protobuf/descriptor.h"
#include "google/protobuf/map_field.h"
#include "google/protobuf/message.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "eval/public/cel_value.h"
#include "eval/public/structs/field_access_impl.h"
#include "eval/public/structs/protobuf_value_factory.h"
#include "extensions/protobuf/internal/map_reflection.h"
namespace google::api::expr::runtime::internal {
namespace {
using google::protobuf::Descriptor;
using google::protobuf::FieldDescriptor;
using google::protobuf::MapValueConstRef;
using google::protobuf::Message;
constexpr int kKeyTag = 1;
constexpr int kValueTag = 2;
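// Protobuf map entries are messages whose field 1 is the key and field 2 the
// value. KeyList exposes the keys of such a repeated entry field as a CelList.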
class KeyList : public CelList {
public:
KeyList(const google::protobuf::Message* message,
const google::protobuf::FieldDescriptor* descriptor,
const ProtobufValueFactory& factory, google::protobuf::Arena* arena)
: message_(message),
descriptor_(descriptor),
reflection_(message_->GetReflection()),
factory_(factory),
arena_(arena) {}
int size() const override {
return reflection_->FieldSize(*message_, descriptor_);
}
CelValue operator[](int index) const override {
const Message* entry =
&reflection_->GetRepeatedMessage(*message_, descriptor_, index);
if (entry == nullptr) {
return CelValue::CreateNull();
}
const Descriptor* entry_descriptor = entry->GetDescriptor();
const FieldDescriptor* key_desc =
entry_descriptor->FindFieldByNumber(kKeyTag);
absl::StatusOr<CelValue> key_value = CreateValueFromSingleField(
entry, key_desc, ProtoWrapperTypeOptions::kUnsetProtoDefault, factory_,
arena_);
if (!key_value.ok()) {
return CreateErrorValue(arena_, key_value.status());
}
return *key_value;
}
private:
const google::protobuf::Message* message_;
const google::protobuf::FieldDescriptor* descriptor_;
const google::protobuf::Reflection* reflection_;
const ProtobufValueFactory& factory_;
google::protobuf::Arena* arena_;
};
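// Returns true if the CelValue's runtime kind is usable as a key for a map
// whose proto key field has the given C++ type.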
bool MatchesMapKeyType(const FieldDescriptor* key_desc, const CelValue& key) {
switch (key_desc->cpp_type()) {
case google::protobuf::FieldDescriptor::CPPTYPE_BOOL:
return key.IsBool();
case google::protobuf::FieldDescriptor::CPPTYPE_INT32:
case google::protobuf::FieldDescriptor::CPPTYPE_INT64:
return key.IsInt64();
case google::protobuf::FieldDescriptor::CPPTYPE_UINT32:
case google::protobuf::FieldDescriptor::CPPTYPE_UINT64:
return key.IsUint64();
case google::protobuf::FieldDescriptor::CPPTYPE_STRING:
return key.IsString();
default:
return false;
}
}
absl::Status InvalidMapKeyType(absl::string_view key_type) {
return absl::InvalidArgumentError(
absl::StrCat("Invalid map key type: '", key_type, "'"));
}
}
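// Caches the entry type's key/value field descriptors and builds the KeyList
// view once at construction.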
FieldBackedMapImpl::FieldBackedMapImpl(
const google::protobuf::Message* message, const google::protobuf::FieldDescriptor* descriptor,
ProtobufValueFactory factory, google::protobuf::Arena* arena)
: message_(message),
descriptor_(descriptor),
key_desc_(descriptor_->message_type()->FindFieldByNumber(kKeyTag)),
value_desc_(descriptor_->message_type()->FindFieldByNumber(kValueTag)),
reflection_(message_->GetReflection()),
factory_(std::move(factory)),
arena_(arena),
key_list_(
std::make_unique<KeyList>(message, descriptor, factory_, arena)) {}
int FieldBackedMapImpl::size() const {
return reflection_->FieldSize(*message_, descriptor_);
}
absl::StatusOr<const CelList*> FieldBackedMapImpl::ListKeys() const {
return key_list_.get();
}
absl::StatusOr<bool> FieldBackedMapImpl::Has(const CelValue& key) const {
MapValueConstRef value_ref;
return LookupMapValue(key, &value_ref);
}
absl::optional<CelValue> FieldBackedMapImpl::operator[](CelValue key) const {
MapValueConstRef value_ref;
auto lookup_result = LookupMapValue(key, &value_ref);
if (!lookup_result.ok()) {
return CreateErrorValue(arena_, lookup_result.status());
}
if (!*lookup_result) {
return absl::nullopt;
}
absl::StatusOr<CelValue> result = CreateValueFromMapValue(
message_, value_desc_, &value_ref, factory_, arena_);
if (!result.ok()) {
return CreateErrorValue(arena_, result.status());
}
return *result;
}
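// Converts the CelValue key into a protobuf MapKey (range-checking the
// narrower int32/uint32 key types) and resolves it through map reflection.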
absl::StatusOr<bool> FieldBackedMapImpl::LookupMapValue(
const CelValue& key, MapValueConstRef* value_ref) const {
if (!MatchesMapKeyType(key_desc_, key)) {
return InvalidMapKeyType(key_desc_->cpp_type_name());
}
std::string map_key_string;
google::protobuf::MapKey proto_key;
switch (key_desc_->cpp_type()) {
case google::protobuf::FieldDescriptor::CPPTYPE_BOOL: {
bool key_value;
key.GetValue(&key_value);
proto_key.SetBoolValue(key_value);
} break;
case google::protobuf::FieldDescriptor::CPPTYPE_INT32: {
int64_t key_value;
key.GetValue(&key_value);
if (key_value > std::numeric_limits<int32_t>::max() ||
key_value < std::numeric_limits<int32_t>::lowest()) {
return absl::OutOfRangeError("integer overflow");
}
proto_key.SetInt32Value(key_value);
} break;
case google::protobuf::FieldDescriptor::CPPTYPE_INT64: {
int64_t key_value;
key.GetValue(&key_value);
proto_key.SetInt64Value(key_value);
} break;
case google::protobuf::FieldDescriptor::CPPTYPE_STRING: {
CelValue::StringHolder key_value;
key.GetValue(&key_value);
map_key_string.assign(key_value.value().data(), key_value.value().size());
proto_key.SetStringValue(map_key_string);
} break;
case google::protobuf::FieldDescriptor::CPPTYPE_UINT32: {
uint64_t key_value;
key.GetValue(&key_value);
if (key_value > std::numeric_limits<uint32_t>::max()) {
return absl::OutOfRangeError("unsigned integer overlow");
}
proto_key.SetUInt32Value(key_value);
} break;
case google::protobuf::FieldDescriptor::CPPTYPE_UINT64: {
uint64_t key_value;
key.GetValue(&key_value);
proto_key.SetUInt64Value(key_value);
} break;
default:
return InvalidMapKeyType(key_desc_->cpp_type_name());
}
return cel::extensions::protobuf_internal::LookupMapValue(
*reflection_, *message_, *descriptor_, proto_key, value_ref);
}
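// Minimal usage sketch (added commentary; variable names are hypothetical):
//
//   google::protobuf::Arena arena;
//   FieldBackedMapImpl cel_map(&message, field_desc, factory, &arena);
//   MapValueConstRef value_ref;
//   absl::StatusOr<bool> found =
//       cel_map.LookupMapValue(CelValue::CreateInt64(1), &value_ref);
//   // *found is true iff the key is present; a non-OK status indicates a
//   // mistyped or out-of-range key rather than a missing entry.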
absl::StatusOr<bool> FieldBackedMapImpl::LegacyHasMapValue(
const CelValue& key) const {
auto lookup_result = LegacyLookupMapValue(key);
if (!lookup_result.has_value()) {
return false;
}
auto result = *lookup_result;
if (result.IsError()) {
return *(result.ErrorOrDie());
}
return true;
}
absl::optional<CelValue> FieldBackedMapImpl::LegacyLookupMapValue(
const CelValue& key) const {
if (!MatchesMapKeyType(key_desc_, key)) {
return CreateErrorValue(arena_,
InvalidMapKeyType(key_desc_->cpp_type_name()));
}
int map_size = size();
for (int i = 0; i < map_size; i++) {
const Message* entry =
&reflection_->GetRepeatedMessage(*message_, descriptor_, i);
if (entry == nullptr) continue;
absl::StatusOr<CelValue> key_value = CreateValueFromSingleField(
entry, key_desc_, ProtoWrapperTypeOptions::kUnsetProtoDefault, factory_,
arena_);
if (!key_value.ok()) {
return CreateErrorValue(arena_, key_value.status());
}
bool match = false;
switch (key_desc_->cpp_type()) {
case google::protobuf::FieldDescriptor::CPPTYPE_BOOL:
match = key.BoolOrDie() == key_value->BoolOrDie();
break;
case google::protobuf::FieldDescriptor::CPPTYPE_INT32:
case google::protobuf::FieldDescriptor::CPPTYPE_INT64:
match = key.Int64OrDie() == key_value->Int64OrDie();
break;
case google::protobuf::FieldDescriptor::CPPTYPE_UINT32:
case google::protobuf::FieldDescriptor::CPPTYPE_UINT64:
match = key.Uint64OrDie() == key_value->Uint64OrDie();
break;
case google::protobuf::FieldDescriptor::CPPTYPE_STRING:
match = key.StringOrDie() == key_value->StringOrDie();
break;
default:
break;
}
if (match) {
absl::StatusOr<CelValue> value_cel_value = CreateValueFromSingleField(
entry, value_desc_, ProtoWrapperTypeOptions::kUnsetProtoDefault,
factory_, arena_);
if (!value_cel_value.ok()) {
return CreateErrorValue(arena_, value_cel_value.status());
}
return *value_cel_value;
}
}
return {};
}
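// Added note: unlike LookupMapValue above, which delegates to protobuf map
// reflection, this legacy path linearly scans the repeated entry field and
// compares keys one by one, so each lookup is O(map size). Out-of-range
// integer keys simply fail to match a 32-bit key field instead of producing
// an OutOfRangeError.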
} | #include "eval/public/containers/internal_field_backed_map_impl.h"
#include <array>
#include <limits>
#include <memory>
#include <string>
#include <vector>
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "eval/public/structs/cel_proto_wrapper.h"
#include "eval/testutil/test_message.pb.h"
#include "internal/testing.h"
namespace google::api::expr::runtime::internal {
namespace {
using ::absl_testing::StatusIs;
using ::testing::Eq;
using ::testing::HasSubstr;
using ::testing::UnorderedPointwise;
class FieldBackedMapTestImpl : public FieldBackedMapImpl {
public:
FieldBackedMapTestImpl(const google::protobuf::Message* message,
const google::protobuf::FieldDescriptor* descriptor,
google::protobuf::Arena* arena)
: FieldBackedMapImpl(message, descriptor,
&CelProtoWrapper::InternalWrapMessage, arena) {}
using FieldBackedMapImpl::LegacyHasMapValue;
using FieldBackedMapImpl::LegacyLookupMapValue;
};
std::unique_ptr<FieldBackedMapTestImpl> CreateMap(const TestMessage* message,
const std::string& field,
google::protobuf::Arena* arena) {
const google::protobuf::FieldDescriptor* field_desc =
message->GetDescriptor()->FindFieldByName(field);
return std::make_unique<FieldBackedMapTestImpl>(message, field_desc, arena);
}
TEST(FieldBackedMapImplTest, BadKeyTypeTest) {
TestMessage message;
google::protobuf::Arena arena;
constexpr std::array<absl::string_view, 6> map_types = {
"int64_int32_map", "uint64_int32_map", "string_int32_map",
"bool_int32_map", "int32_int32_map", "uint32_uint32_map",
};
for (auto map_type : map_types) {
auto cel_map = CreateMap(&message, std::string(map_type), &arena);
auto result = cel_map->Has(CelValue::CreateNull());
EXPECT_FALSE(result.ok());
EXPECT_THAT(result.status().code(), Eq(absl::StatusCode::kInvalidArgument));
result = cel_map->LegacyHasMapValue(CelValue::CreateNull());
EXPECT_FALSE(result.ok());
EXPECT_THAT(result.status().code(), Eq(absl::StatusCode::kInvalidArgument));
auto lookup = (*cel_map)[CelValue::CreateNull()];
EXPECT_TRUE(lookup.has_value());
EXPECT_TRUE(lookup->IsError());
EXPECT_THAT(lookup->ErrorOrDie()->code(),
Eq(absl::StatusCode::kInvalidArgument));
lookup = cel_map->LegacyLookupMapValue(CelValue::CreateNull());
EXPECT_TRUE(lookup.has_value());
EXPECT_TRUE(lookup->IsError());
EXPECT_THAT(lookup->ErrorOrDie()->code(),
Eq(absl::StatusCode::kInvalidArgument));
}
}
TEST(FieldBackedMapImplTest, Int32KeyTest) {
TestMessage message;
auto field_map = message.mutable_int32_int32_map();
(*field_map)[0] = 1;
(*field_map)[1] = 2;
google::protobuf::Arena arena;
auto cel_map = CreateMap(&message, "int32_int32_map", &arena);
EXPECT_EQ((*cel_map)[CelValue::CreateInt64(0)]->Int64OrDie(), 1);
EXPECT_EQ((*cel_map)[CelValue::CreateInt64(1)]->Int64OrDie(), 2);
EXPECT_TRUE(cel_map->Has(CelValue::CreateInt64(1)).value_or(false));
EXPECT_TRUE(
cel_map->LegacyHasMapValue(CelValue::CreateInt64(1)).value_or(false));
EXPECT_FALSE((*cel_map)[CelValue::CreateInt64(3)].has_value());
EXPECT_FALSE(cel_map->Has(CelValue::CreateInt64(3)).value_or(true));
EXPECT_FALSE(
cel_map->LegacyHasMapValue(CelValue::CreateInt64(3)).value_or(true));
}
TEST(FieldBackedMapImplTest, Int32KeyOutOfRangeTest) {
TestMessage message;
google::protobuf::Arena arena;
auto cel_map = CreateMap(&message, "int32_int32_map", &arena);
auto result = cel_map->Has(
CelValue::CreateInt64(std::numeric_limits<int32_t>::max() + 1L));
EXPECT_THAT(result.status(),
StatusIs(absl::StatusCode::kOutOfRange, HasSubstr("overflow")));
result = cel_map->Has(
CelValue::CreateInt64(std::numeric_limits<int32_t>::lowest() - 1L));
EXPECT_FALSE(result.ok());
EXPECT_THAT(result.status().code(), Eq(absl::StatusCode::kOutOfRange));
}
TEST(FieldBackedMapImplTest, Int64KeyTest) {
TestMessage message;
auto field_map = message.mutable_int64_int32_map();
(*field_map)[0] = 1;
(*field_map)[1] = 2;
google::protobuf::Arena arena;
auto cel_map = CreateMap(&message, "int64_int32_map", &arena);
EXPECT_EQ((*cel_map)[CelValue::CreateInt64(0)]->Int64OrDie(), 1);
EXPECT_EQ((*cel_map)[CelValue::CreateInt64(1)]->Int64OrDie(), 2);
EXPECT_TRUE(cel_map->Has(CelValue::CreateInt64(1)).value_or(false));
EXPECT_EQ(
cel_map->LegacyLookupMapValue(CelValue::CreateInt64(1))->Int64OrDie(), 2);
EXPECT_TRUE(
cel_map->LegacyHasMapValue(CelValue::CreateInt64(1)).value_or(false));
EXPECT_EQ((*cel_map)[CelValue::CreateInt64(3)].has_value(), false);
}
TEST(FieldBackedMapImplTest, BoolKeyTest) {
TestMessage message;
auto field_map = message.mutable_bool_int32_map();
(*field_map)[false] = 1;
google::protobuf::Arena arena;
auto cel_map = CreateMap(&message, "bool_int32_map", &arena);
EXPECT_EQ((*cel_map)[CelValue::CreateBool(false)]->Int64OrDie(), 1);
EXPECT_TRUE(cel_map->Has(CelValue::CreateBool(false)).value_or(false));
EXPECT_TRUE(
cel_map->LegacyHasMapValue(CelValue::CreateBool(false)).value_or(false));
EXPECT_EQ((*cel_map)[CelValue::CreateBool(true)].has_value(), false);
(*field_map)[true] = 2;
EXPECT_EQ((*cel_map)[CelValue::CreateBool(true)]->Int64OrDie(), 2);
}
TEST(FieldBackedMapImplTest, Uint32KeyTest) {
TestMessage message;
auto field_map = message.mutable_uint32_uint32_map();
(*field_map)[0] = 1u;
(*field_map)[1] = 2u;
google::protobuf::Arena arena;
auto cel_map = CreateMap(&message, "uint32_uint32_map", &arena);
EXPECT_EQ((*cel_map)[CelValue::CreateUint64(0)]->Uint64OrDie(), 1UL);
EXPECT_EQ((*cel_map)[CelValue::CreateUint64(1)]->Uint64OrDie(), 2UL);
EXPECT_TRUE(cel_map->Has(CelValue::CreateUint64(1)).value_or(false));
EXPECT_TRUE(
cel_map->LegacyHasMapValue(CelValue::CreateUint64(1)).value_or(false));
EXPECT_EQ((*cel_map)[CelValue::CreateUint64(3)].has_value(), false);
EXPECT_EQ(cel_map->Has(CelValue::CreateUint64(3)).value_or(true), false);
}
TEST(FieldBackedMapImplTest, Uint32KeyOutOfRangeTest) {
TestMessage message;
google::protobuf::Arena arena;
auto cel_map = CreateMap(&message, "uint32_uint32_map", &arena);
auto result = cel_map->Has(
CelValue::CreateUint64(std::numeric_limits<uint32_t>::max() + 1UL));
EXPECT_FALSE(result.ok());
EXPECT_THAT(result.status().code(), Eq(absl::StatusCode::kOutOfRange));
}
TEST(FieldBackedMapImplTest, Uint64KeyTest) {
TestMessage message;
auto field_map = message.mutable_uint64_int32_map();
(*field_map)[0] = 1;
(*field_map)[1] = 2;
google::protobuf::Arena arena;
auto cel_map = CreateMap(&message, "uint64_int32_map", &arena);
EXPECT_EQ((*cel_map)[CelValue::CreateUint64(0)]->Int64OrDie(), 1);
EXPECT_EQ((*cel_map)[CelValue::CreateUint64(1)]->Int64OrDie(), 2);
EXPECT_TRUE(cel_map->Has(CelValue::CreateUint64(1)).value_or(false));
EXPECT_TRUE(
cel_map->LegacyHasMapValue(CelValue::CreateUint64(1)).value_or(false));
EXPECT_EQ((*cel_map)[CelValue::CreateUint64(3)].has_value(), false);
}
TEST(FieldBackedMapImplTest, StringKeyTest) {
TestMessage message;
auto field_map = message.mutable_string_int32_map();
(*field_map)["test0"] = 1;
(*field_map)["test1"] = 2;
google::protobuf::Arena arena;
auto cel_map = CreateMap(&message, "string_int32_map", &arena);
std::string test0 = "test0";
std::string test1 = "test1";
std::string test_notfound = "test_notfound";
EXPECT_EQ((*cel_map)[CelValue::CreateString(&test0)]->Int64OrDie(), 1);
EXPECT_EQ((*cel_map)[CelValue::CreateString(&test1)]->Int64OrDie(), 2);
EXPECT_TRUE(cel_map->Has(CelValue::CreateString(&test1)).value_or(false));
EXPECT_TRUE(cel_map->LegacyHasMapValue(CelValue::CreateString(&test1))
.value_or(false));
EXPECT_EQ((*cel_map)[CelValue::CreateString(&test_notfound)].has_value(),
false);
}
TEST(FieldBackedMapImplTest, EmptySizeTest) {
TestMessage message;
google::protobuf::Arena arena;
auto cel_map = CreateMap(&message, "string_int32_map", &arena);
EXPECT_EQ(cel_map->size(), 0);
}
TEST(FieldBackedMapImplTest, RepeatedAddTest) {
TestMessage message;
auto field_map = message.mutable_string_int32_map();
(*field_map)["test0"] = 1;
(*field_map)["test1"] = 2;
(*field_map)["test0"] = 3;
google::protobuf::Arena arena;
auto cel_map = CreateMap(&message, "string_int32_map", &arena);
EXPECT_EQ(cel_map->size(), 2);
}
TEST(FieldBackedMapImplTest, KeyListTest) {
TestMessage message;
auto field_map = message.mutable_string_int32_map();
std::vector<std::string> keys;
std::vector<std::string> keys1;
for (int i = 0; i < 100; i++) {
keys.push_back(absl::StrCat("test", i));
(*field_map)[keys.back()] = i;
}
google::protobuf::Arena arena;
auto cel_map = CreateMap(&message, "string_int32_map", &arena);
const CelList* key_list = cel_map->ListKeys().value();
EXPECT_EQ(key_list->size(), 100);
for (int i = 0; i < key_list->size(); i++) {
keys1.push_back(std::string((*key_list)[i].StringOrDie().value()));
}
EXPECT_THAT(keys, UnorderedPointwise(Eq(), keys1));
}
}
} | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/eval/public/containers/internal_field_backed_map_impl.cc | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/eval/public/containers/internal_field_backed_map_impl_test.cc | 4552db5798fb0853b131b783d8875794334fae7f |
b2c41507-2889-4121-a7a6-29ab6b433e18 | cpp | google/arolla | lifting | arolla/qexpr/lifting.h | arolla/qexpr/lifting_test.cc | #ifndef AROLLA_QEXPR_LIFTING_H_
#define AROLLA_QEXPR_LIFTING_H_
#include <cstdint>
#include <tuple>
#include <type_traits>
#include "absl/base/attributes.h"
#include "arolla/util/meta.h"
namespace arolla {
template <class T>
struct DoNotLiftTag {
using type = T;
};
template <class T>
using DecayDoNotLiftTag = meta::strip_template_t<DoNotLiftTag, T>;
template <template <class> class Lifted, class T>
using LiftedType = std::conditional_t<meta::is_wrapped_with_v<DoNotLiftTag, T>,
DecayDoNotLiftTag<T>, Lifted<T>>;
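// For example (added commentary):
//   LiftedType<OptionalValue, int>               -> OptionalValue<int>
//   LiftedType<OptionalValue, DoNotLiftTag<int>> -> int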
namespace lifting_internal {
template <class ArgTypeList>
struct CallOnLiftedArgsImpl;
template <>
struct CallOnLiftedArgsImpl<meta::type_list<>> {
template <class Fn, class... Ts>
ABSL_ATTRIBUTE_ALWAYS_INLINE auto operator()(Fn&& fn, Ts&&... args) const {
return std::forward<Fn>(fn)(std::forward<Ts>(args)...);
}
};
template <class LeftArg, class... LeftArgs>
struct CallOnLiftedArgsImpl<meta::type_list<LeftArg, LeftArgs...>> {
template <class Fn, class T, class... Ts>
ABSL_ATTRIBUTE_ALWAYS_INLINE auto operator()(Fn&& fn, T&& arg,
Ts&&... args) const {
if constexpr (meta::is_wrapped_with_v<DoNotLiftTag, LeftArg>) {
return CallOnLiftedArgsImpl<meta::type_list<LeftArgs...>>{}(
std::forward<Fn>(fn), std::forward<Ts>(args)...);
} else {
return CallOnLiftedArgsImpl<meta::type_list<LeftArgs...>>{}(
std::forward<Fn>(fn), std::forward<Ts>(args)...,
std::forward<T>(arg));
}
}
};
template <uint64_t kDontLiftMask, class ScalarArgsList, class LiftedArgsList,
class MergedArgsList>
struct CallShuffledArgsFn;
template <class... LiftedArgs, class... MergedArgs>
struct CallShuffledArgsFn<0, meta::type_list<>, meta::type_list<LiftedArgs...>,
meta::type_list<MergedArgs...>> {
  template <class StrictFn>
  ABSL_ATTRIBUTE_ALWAYS_INLINE auto operator()(
      const StrictFn& fn, const LiftedArgs&... lifted_args,
const MergedArgs&... merged_args) const {
return fn(merged_args..., lifted_args...);
}
};
template <uint64_t kDontLiftMask, class... ScalarArgs, class... MergedArgs>
struct CallShuffledArgsFn<kDontLiftMask, meta::type_list<ScalarArgs...>,
meta::type_list<>, meta::type_list<MergedArgs...>> {
static_assert(kDontLiftMask == (1ull << sizeof...(ScalarArgs)) - 1);
  template <class StrictFn>
  ABSL_ATTRIBUTE_ALWAYS_INLINE auto operator()(
      const StrictFn& fn, const ScalarArgs&... scalar_args,
const MergedArgs&... merged_args) const {
return fn(merged_args..., scalar_args...);
}
};
template <class... MergedArgs>
struct CallShuffledArgsFn<0, meta::type_list<>, meta::type_list<>,
meta::type_list<MergedArgs...>> {
  template <class StrictFn>
  ABSL_ATTRIBUTE_ALWAYS_INLINE auto operator()(
      const StrictFn& fn, const MergedArgs&... merged_args) const {
return fn(merged_args...);
}
};
template <uint64_t kDontLiftMask, class ScalarArg, class... ScalarArgs,
class LiftedArg, class... LiftedArgs, class... MergedArgs>
struct CallShuffledArgsFn<
kDontLiftMask, meta::type_list<ScalarArg, ScalarArgs...>,
meta::type_list<LiftedArg, LiftedArgs...>, meta::type_list<MergedArgs...>> {
  template <class StrictFn>
  ABSL_ATTRIBUTE_ALWAYS_INLINE auto operator()(
      const StrictFn& fn, const ScalarArg& scalar_arg,
const ScalarArgs&... scalar_args, const LiftedArg& lifted_arg,
const LiftedArgs&... lifted_args, MergedArgs... merged_args) const {
if constexpr (kDontLiftMask % 2 == 1) {
return CallShuffledArgsFn<kDontLiftMask / 2,
meta::type_list<ScalarArgs...>,
meta::type_list<LiftedArg, LiftedArgs...>,
meta::type_list<MergedArgs..., ScalarArg>>()(
fn, scalar_args..., lifted_arg, lifted_args..., merged_args...,
scalar_arg);
} else {
return CallShuffledArgsFn<kDontLiftMask / 2,
meta::type_list<ScalarArg, ScalarArgs...>,
meta::type_list<LiftedArgs...>,
meta::type_list<MergedArgs..., LiftedArg>>()(
fn, scalar_arg, scalar_args..., lifted_args..., merged_args...,
lifted_arg);
}
}
};
template <template <typename> class LiftedViewType, class ArgsToProcessList,
class LiftedArgList, uint64_t kDontLiftMask>
struct CaptureDontLift;
template <template <typename> class LiftedViewType, uint64_t kDontLiftMask,
class LeftArg, class... LeftArgs, class... LiftedArgs>
struct CaptureDontLift<LiftedViewType, meta::type_list<LeftArg, LeftArgs...>,
meta::type_list<LiftedArgs...>, kDontLiftMask> {
template <class Fn, class T, class... Ts>
ABSL_ATTRIBUTE_ALWAYS_INLINE auto operator()(const Fn& fn, const T& arg,
const Ts&... args) const {
if constexpr (meta::is_wrapped_with_v<DoNotLiftTag, LeftArg>) {
constexpr uint64_t total_arg_count =
1 + sizeof...(Ts) + sizeof...(LiftedArgs);
constexpr uint64_t arg_id = total_arg_count - (sizeof...(LeftArgs) + 1);
return CaptureDontLift<LiftedViewType, meta::type_list<LeftArgs...>,
meta::type_list<LiftedArgs...>,
kDontLiftMask + (1ull << arg_id)>{}(fn, args...,
arg);
} else {
return CaptureDontLift<LiftedViewType, meta::type_list<LeftArgs...>,
meta::type_list<LiftedArgs..., LeftArg>,
kDontLiftMask>{}(fn, args...);
}
}
};
template <template <typename> class LiftedViewType, uint64_t kDontLiftMask,
class... LiftedArgs>
struct CaptureDontLift<LiftedViewType, meta::type_list<>,
meta::type_list<LiftedArgs...>, kDontLiftMask> {
template <class Fn, class... Ts>
ABSL_ATTRIBUTE_ALWAYS_INLINE auto operator()(const Fn& fn,
const Ts&... args) const {
return [fn, &args...](LiftedViewType<LiftedArgs>... view_args)
ABSL_ATTRIBUTE_ALWAYS_INLINE {
return CallShuffledArgsFn<
kDontLiftMask, meta::type_list<Ts...>,
meta::type_list<LiftedViewType<LiftedArgs>...>,
meta::type_list<>>()(fn, args..., view_args...);
};
}
};
template <class ArgList>
struct LiftableArgs;
template <>
struct LiftableArgs<meta::type_list<>> {
using type = meta::type_list<>;
};
template <class T, class... Ts>
struct LiftableArgs<meta::type_list<T, Ts...>> {
using type =
meta::concat_t<meta::type_list<T>,
typename LiftableArgs<meta::type_list<Ts...>>::type>;
};
template <class T, class... Ts>
struct LiftableArgs<meta::type_list<DoNotLiftTag<T>, Ts...>> {
using type = typename LiftableArgs<meta::type_list<Ts...>>::type;
};
}
template <class... Args>
class LiftingTools {
static_assert(sizeof...(Args) <= 64, "Arg count limit is 64");
public:
using LiftableArgs =
typename lifting_internal::LiftableArgs<meta::type_list<Args...>>::type;
static constexpr bool kAllLiftable =
std::tuple_size_v<typename LiftableArgs::tuple> == sizeof...(Args);
template <template <typename> class LiftedViewType, class Fn, class... Ts>
static auto CreateFnWithDontLiftCaptured(const Fn& fn, const Ts&... args) {
static_assert(sizeof...(Args) == sizeof...(Ts));
if constexpr (kAllLiftable) {
return [fn](LiftedViewType<Args>... largs) { return fn(largs...); };
} else {
return lifting_internal::CaptureDontLift<
LiftedViewType, meta::type_list<Args...>, meta::type_list<>, 0>{}(
fn, args...);
}
}
template <class Fn, class... Ts>
ABSL_ATTRIBUTE_ALWAYS_INLINE static auto CallOnLiftedArgs(Fn&& fn,
Ts&&... args) {
static_assert(sizeof...(Args) == sizeof...(Ts));
if constexpr (kAllLiftable) {
return std::forward<Fn>(fn)(std::forward<Ts>(args)...);
} else {
return lifting_internal::CallOnLiftedArgsImpl<meta::type_list<Args...>>{}(
std::forward<Fn>(fn), std::forward<Ts>(args)...);
}
}
};
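// Usage sketch (added commentary; MyView and `five` are hypothetical, see
// the tests that follow for the real analogues). Arguments marked with
// DoNotLiftTag are captured as-is, while the remaining positions are
// forwarded through the lifted view type:
//
//   using Tools = LiftingTools<DoNotLiftTag<int>, float>;
//   int five = 5;
//   auto fn = Tools::CreateFnWithDontLiftCaptured<MyView>(
//       [](int x, MyView<float> y) { return x + y.value; }, five, nullptr);
//   // fn now only expects the liftable argument:
//   Tools::CallOnLiftedArgs(fn, nullptr, MyView<float>{2.0f});  // 7.0f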
}
#endif | #include "arolla/qexpr/lifting.h"
#include <cstddef>
#include <memory>
#include <string>
#include <type_traits>
#include "gtest/gtest.h"
#include "arolla/memory/optional_value.h"
#include "arolla/util/meta.h"
namespace arolla {
namespace {
TEST(Lifting, DoNotLiftTag) {
static_assert(std::is_same_v<int, DoNotLiftTag<int>::type>);
static_assert(std::is_same_v<OptionalValue<int>,
DoNotLiftTag<OptionalValue<int>>::type>);
}
TEST(LiftingTools, LiftableArgs) {
static_assert(
std::is_same_v<LiftingTools<>::LiftableArgs, meta::type_list<>>);
static_assert(
std::is_same_v<LiftingTools<int>::LiftableArgs, meta::type_list<int>>);
static_assert(std::is_same_v<LiftingTools<DoNotLiftTag<int>>::LiftableArgs,
meta::type_list<>>);
static_assert(
std::is_same_v<LiftingTools<int, DoNotLiftTag<float>>::LiftableArgs,
meta::type_list<int>>);
static_assert(
std::is_same_v<LiftingTools<DoNotLiftTag<float>, int>::LiftableArgs,
meta::type_list<int>>);
static_assert(
std::is_same_v<
LiftingTools<std::string, DoNotLiftTag<float>, int>::LiftableArgs,
meta::type_list<std::string, int>>);
static_assert(
std::is_same_v<LiftingTools<std::string, DoNotLiftTag<float>, int,
DoNotLiftTag<char>, DoNotLiftTag<std::string>,
double>::LiftableArgs,
meta::type_list<std::string, int, double>>);
}
template <class T>
struct MyView {
using type = T;
T value;
};
TEST(LiftingTools, CreateFnWithDontLiftCaptured) {
{
using Tools = LiftingTools<int>;
auto fn = Tools::CreateFnWithDontLiftCaptured<MyView>(
[](MyView<int> x) { return x.value; }, nullptr);
EXPECT_EQ(fn(MyView<int>{5}), 5);
EXPECT_EQ(Tools::CallOnLiftedArgs(fn, MyView<int>{5}), 5);
static_assert(std::is_same_v<meta::function_traits<decltype(fn)>::arg_types,
meta::type_list<MyView<int>>>);
static_assert(
std::is_same_v<meta::function_traits<decltype(fn)>::return_type, int>);
}
const int& kFive = 5;
{
using Tools = LiftingTools<DoNotLiftTag<int>>;
auto fn = Tools::CreateFnWithDontLiftCaptured<MyView>(
[](int x) { return x; }, kFive);
EXPECT_EQ(fn(), 5);
EXPECT_EQ(Tools::CallOnLiftedArgs(fn, nullptr), 5);
static_assert(std::is_same_v<meta::function_traits<decltype(fn)>::arg_types,
meta::type_list<>>);
static_assert(
std::is_same_v<meta::function_traits<decltype(fn)>::return_type, int>);
}
{
using Tools =
LiftingTools<DoNotLiftTag<int>, float, DoNotLiftTag<std::string>>;
auto lambda = [](int x, MyView<float> y, std::string z) {
if (x != 5 || y.value != 2.0f || z != "a") {
return 0;
}
return 1;
};
auto fn = Tools::CreateFnWithDontLiftCaptured<MyView>(lambda, kFive,
nullptr, "a");
EXPECT_EQ(fn(MyView<float>{2.0f}), 1);
EXPECT_EQ(
Tools::CallOnLiftedArgs(fn, nullptr, MyView<float>{2.0f}, nullptr), 1);
static_assert(std::is_same_v<meta::function_traits<decltype(fn)>::arg_types,
meta::type_list<MyView<float>>>);
static_assert(
std::is_same_v<meta::function_traits<decltype(fn)>::return_type, int>);
}
{
using Tools =
LiftingTools<char, DoNotLiftTag<int>, float, DoNotLiftTag<std::string>>;
auto lambda = [](MyView<char> q, int x, MyView<float> y, std::string z) {
if (q.value != 'Q' || x != 5 || y.value != 2.0f || z != "a") {
return 0;
}
return 1;
};
std::string kA = "a";
auto fn = Tools::CreateFnWithDontLiftCaptured<MyView>(lambda, nullptr,
kFive, nullptr, kA);
EXPECT_EQ(fn(MyView<char>{'Q'}, MyView<float>{2.0f}), 1);
EXPECT_EQ(Tools::CallOnLiftedArgs(fn, MyView<char>{'Q'}, nullptr,
MyView<float>{2.0f}, nullptr),
1);
static_assert(std::is_same_v<meta::function_traits<decltype(fn)>::arg_types,
meta::type_list<MyView<char>, MyView<float>>>);
static_assert(
std::is_same_v<meta::function_traits<decltype(fn)>::return_type, int>);
}
}
TEST(LiftingTools, CallOnLiftedArgsWithADifferentFunction) {
using Tools =
LiftingTools<char, DoNotLiftTag<int>, float, DoNotLiftTag<std::string>>;
auto fn = [](float x, std::string z) {
if (x != 1.0f || z != "z") {
return 0;
}
return 1;
};
EXPECT_EQ(Tools::CallOnLiftedArgs(fn, 1.0f, nullptr, "z", nullptr), 1);
}
TEST(LiftingTools, CaptureNonCopiable) {
using Tools = LiftingTools<DoNotLiftTag<std::unique_ptr<int>>>;
const auto ptr = std::make_unique<int>(5);
auto fn = Tools::CreateFnWithDontLiftCaptured<MyView>(
[](const std::unique_ptr<int>& x) { return x == nullptr ? -1 : *x; },
ptr);
EXPECT_EQ(fn(), 5);
EXPECT_EQ(Tools::CallOnLiftedArgs(fn, MyView<int>{-7}), 5);
static_assert(std::is_same_v<meta::function_traits<decltype(fn)>::arg_types,
meta::type_list<>>);
static_assert(
std::is_same_v<meta::function_traits<decltype(fn)>::return_type, int>);
}
template <class T>
using ConstRef = const T&;
TEST(LiftingTools, CallNonCopiable) {
using Tools = LiftingTools<std::unique_ptr<int>>;
auto fn = Tools::CreateFnWithDontLiftCaptured<ConstRef>(
[](const std::unique_ptr<int>& x) { return x == nullptr ? -1 : *x; },
MyView<int>{-13});
EXPECT_EQ(fn(std::make_unique<int>(5)), 5);
EXPECT_EQ(Tools::CallOnLiftedArgs(fn, std::make_unique<int>(5)), 5);
static_assert(std::is_same_v<meta::function_traits<decltype(fn)>::arg_types,
meta::type_list<const std::unique_ptr<int>&>>);
static_assert(
std::is_same_v<meta::function_traits<decltype(fn)>::return_type, int>);
}
TEST(LiftingTools, CreateFnWithDontLiftCaptured64Args) {
using Tools = LiftingTools<
DoNotLiftTag<int>, int, int, int, DoNotLiftTag<int>, int, int, int,
DoNotLiftTag<int>, DoNotLiftTag<int>, int, int, int, int, int, int,
DoNotLiftTag<int>, int, int, int, DoNotLiftTag<int>, int, int, int,
int, DoNotLiftTag<int>, int, int, int, int, DoNotLiftTag<int>, int,
int, int, int, DoNotLiftTag<int>, int, DoNotLiftTag<int>, int, int,
int, int, int, DoNotLiftTag<int>, DoNotLiftTag<int>, int, int, int,
int, DoNotLiftTag<int>, DoNotLiftTag<int>, int, int, int, int, int,
int, DoNotLiftTag<int>, int, int, int, int, int, DoNotLiftTag<int>
>;
const int x = -1;
#define TEST_ARGS_8 x, x, x, x, x, x, x, x
#define TEST_ARGS_32 TEST_ARGS_8, TEST_ARGS_8, TEST_ARGS_8, TEST_ARGS_8
#define TEST_ARGS_64 TEST_ARGS_32, TEST_ARGS_32
auto fn = Tools::CreateFnWithDontLiftCaptured<ConstRef>(
[](auto... args) { return sizeof...(args); }, TEST_ARGS_64);
EXPECT_EQ(Tools::CallOnLiftedArgs(fn, TEST_ARGS_64), 64);
static_assert(
std::is_same_v<meta::function_traits<decltype(fn)>::return_type, size_t>);
#undef TEST_ARGS_8
#undef TEST_ARGS_32
#undef TEST_ARGS_64
}
}
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/qexpr/lifting.h | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/qexpr/lifting_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
13692b9c-e863-4fa0-8eed-e55cea52b26f | cpp | google/arolla | bytes | arolla/util/bytes.cc | arolla/util/bytes_test.cc | #include "arolla/util/bytes.h"
#include <cstddef>
#include <string>
#include "absl/strings/escaping.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "arolla/util/repr.h"
namespace arolla {
ReprToken ReprTraits<Bytes>::operator()(const Bytes& value) const {
constexpr size_t kBytesAbbrevLimit = 120;
ReprToken result;
absl::string_view bytes = value;
if (bytes.size() <= kBytesAbbrevLimit) {
result.str = absl::StrCat("b'", absl::CHexEscape(bytes), "'");
} else {
result.str =
absl::StrCat("b'", absl::CHexEscape(bytes.substr(0, kBytesAbbrevLimit)),
"... (", bytes.size(), " bytes total)'");
}
return result;
}
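// Example outputs (added commentary, mirrored by the unit test below):
//   Repr(Bytes("G'\"\t\xff"))           -> b'G\'\"\t\xff'
//   Repr(Bytes(std::string(1024, 'x'))) -> b'xx...x... (1024 bytes total)'
// where the escaped payload is truncated to the first 120 bytes.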
} | #include "arolla/util/bytes.h"
#include <string>
#include <type_traits>
#include <utility>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/strings/string_view.h"
#include "arolla/util/repr.h"
#include "arolla/util/testing/repr_token_eq.h"
namespace arolla {
namespace {
using ::arolla::testing::ReprTokenEq;
using ::testing::Eq;
using ::testing::MatchesRegex;
TEST(BytesTest, Constructor) {
EXPECT_THAT(Bytes("Hello"), Eq("Hello"));
std::string hello = "Hello";
EXPECT_THAT(Bytes(hello), Eq("Hello"));
absl::string_view hello_view = hello;
EXPECT_THAT(Bytes(hello_view), Eq("Hello"));
}
TEST(BytesTest, CopyAndMoveConstructors) {
static_assert(std::is_nothrow_move_constructible<Bytes>::value);
Bytes src("Google");
Bytes copied(src);
EXPECT_THAT(copied, Eq(src));
Bytes moved(std::move(src));
EXPECT_THAT(moved, Eq(copied));
}
TEST(BytesTest, CopyAndMoveAssignment) {
static_assert(std::is_nothrow_move_assignable<Bytes>::value);
Bytes src("Google");
Bytes copied = src;
EXPECT_THAT(copied, Eq(src));
Bytes moved = std::move(src);
EXPECT_THAT(moved, Eq(copied));
}
TEST(BytesTest, AssignmentFromString) {
std::string google = "Google";
{
Bytes val("x");
val = "Google";
EXPECT_THAT(val, Eq(google));
}
{
Bytes val("x");
val = google;
EXPECT_THAT(val, Eq(google));
}
{
absl::string_view google_view = google;
Bytes val("x");
val = google_view;
EXPECT_THAT(val, Eq("Google"));
}
{
Bytes val("x");
val = std::move(google);
EXPECT_THAT(val, Eq("Google"));
}
}
TEST(BytesTest, Repr) {
EXPECT_THAT(GenReprToken(Bytes("G'\"\t\xff")),
ReprTokenEq(R"(b'G\'\"\t\xff')"));
EXPECT_THAT(Repr(Bytes(std::string(1024, 'x'))),
MatchesRegex(R"(b'x{120}[.]{3} \(1024 bytes total\)')"));
}
}
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/util/bytes.cc | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/util/bytes_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
6f7e1c3b-f30a-4c73-bbc3-a36012a5b579 | cpp | tensorflow/tensorflow | gcs_dns_cache | third_party/xla/third_party/tsl/tsl/platform/cloud/gcs_dns_cache.cc | third_party/xla/third_party/tsl/tsl/platform/cloud/gcs_dns_cache_test.cc | #include "tsl/platform/cloud/gcs_dns_cache.h"
#include <cstring>
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/retrying_utils.h"
#include "tsl/platform/status.h"
#ifndef _WIN32
#include <arpa/inet.h>
#include <netdb.h>
#include <netinet/in.h>
#include <sys/socket.h>
#else
#include <Windows.h>
#include <winsock2.h>
#include <ws2tcpip.h>
#endif
#include <sys/types.h>
namespace tsl {
namespace {
const std::vector<string>& kCachedDomainNames =
*new std::vector<string>{"www.googleapis.com", "storage.googleapis.com"};
inline void print_getaddrinfo_error(const string& name,
absl::Status return_status) {
LOG(ERROR) << "Error resolving " << name << ": " << return_status;
}
template <typename T>
const T& SelectRandomItemUniform(std::default_random_engine* random,
const std::vector<T>& items) {
CHECK_GT(items.size(), 0);
std::uniform_int_distribution<size_t> distribution(0u, items.size() - 1u);
size_t choice_index = distribution(*random);
return items[choice_index];
}
}
GcsDnsCache::GcsDnsCache(Env* env, int64_t refresh_rate_secs)
: env_(env), refresh_rate_secs_(refresh_rate_secs) {}
void GcsDnsCache::AnnotateRequest(HttpRequest* request) {
mutex_lock l(mu_);
if (!started_) {
VLOG(1) << "Starting GCS DNS cache.";
DCHECK(!worker_) << "Worker thread already exists!";
addresses_ = ResolveNames(kCachedDomainNames);
worker_.reset(env_->StartThread({}, "gcs_dns_worker",
[this]() { return WorkerThread(); }));
started_ = true;
}
CHECK_EQ(kCachedDomainNames.size(), addresses_.size());
for (size_t i = 0; i < kCachedDomainNames.size(); ++i) {
const string& name = kCachedDomainNames[i];
const std::vector<string>& addresses = addresses_[i];
if (!addresses.empty()) {
const string& chosen_address =
SelectRandomItemUniform(&random_, addresses);
request->AddResolveOverride(name, 443, chosen_address);
VLOG(1) << "Annotated DNS mapping: " << name << " --> " << chosen_address;
} else {
LOG(WARNING) << "No IP addresses available for " << name;
}
}
}
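// Added note: the overrides installed above behave like curl's --resolve
// pinning. With hypothetical cached addresses, an annotated request would
// resolve, e.g.:
//   www.googleapis.com:443     -> 192.0.2.10
//   storage.googleapis.com:443 -> 192.0.2.11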
std::vector<string> GcsDnsCache::ResolveName(const string& name) {
VLOG(1) << "Resolving DNS name: " << name;
addrinfo hints;
memset(&hints, 0, sizeof(hints));
hints.ai_family = AF_INET;
hints.ai_socktype = SOCK_STREAM;
addrinfo* result = nullptr;
  RetryConfig retryConfig(
      /*init_delay_time_us=*/5000,
      /*max_delay_time_us=*/50 * 1000 * 5000,
      /*max_retries=*/5);
const absl::Status getaddrinfo_status = RetryingUtils::CallWithRetries(
[&name, &hints, &result]() {
int return_code = getaddrinfo(name.c_str(), nullptr, &hints, &result);
absl::Status return_status;
switch (return_code) {
case 0:
return_status = absl::OkStatus();
break;
#ifndef _WIN32
case EAI_ADDRFAMILY:
case EAI_SERVICE:
case EAI_SOCKTYPE:
case EAI_NONAME:
return_status = absl::FailedPreconditionError(
absl::StrCat("System in invalid state for getaddrinfo call: ",
gai_strerror(return_code)));
break;
case EAI_AGAIN:
case EAI_NODATA:
return_status = absl::UnavailableError(absl::StrCat(
"Resolving ", name, " is temporarily unavailable"));
break;
case EAI_BADFLAGS:
case EAI_FAMILY:
return_status = absl::InvalidArgumentError(absl::StrCat(
"Bad arguments for getaddrinfo: ", gai_strerror(return_code)));
break;
case EAI_FAIL:
return_status = absl::NotFoundError(
absl::StrCat("Permanent failure resolving ", name, ": ",
gai_strerror(return_code)));
break;
case EAI_MEMORY:
return_status = absl::ResourceExhaustedError("Out of memory");
break;
case EAI_SYSTEM:
default:
return_status = absl::UnknownError(strerror(return_code));
#else
case WSATYPE_NOT_FOUND:
case WSAESOCKTNOSUPPORT:
case WSAHOST_NOT_FOUND:
return_status = absl::FailedPreconditionError(
absl::StrCat("System in invalid state for getaddrinfo call: ",
gai_strerror(return_code)));
break;
case WSATRY_AGAIN:
return_status = absl::UnavailableError(absl::StrCat(
"Resolving ", name, " is temporarily unavailable"));
break;
case WSAEINVAL:
case WSAEAFNOSUPPORT:
return_status = absl::InvalidArgumentError(absl::StrCat(
"Bad arguments for getaddrinfo: ", gai_strerror(return_code)));
break;
case WSANO_RECOVERY:
return_status = absl::NotFoundError(
absl::StrCat("Permanent failure resolving ", name, ": ",
gai_strerror(return_code)));
break;
case WSA_NOT_ENOUGH_MEMORY:
return_status = absl::ResourceExhaustedError("Out of memory");
break;
default:
return_status = absl::UnknownError(strerror(return_code));
#endif
}
return absl::Status(return_status);
},
retryConfig);
std::vector<string> output;
if (getaddrinfo_status.ok()) {
for (const addrinfo* i = result; i != nullptr; i = i->ai_next) {
if (i->ai_family != AF_INET || i->ai_addr->sa_family != AF_INET) {
LOG(WARNING) << "Non-IPv4 address returned. ai_family: " << i->ai_family
<< ". sa_family: " << i->ai_addr->sa_family << ".";
continue;
}
char buf[INET_ADDRSTRLEN];
void* address_ptr =
&(reinterpret_cast<sockaddr_in*>(i->ai_addr)->sin_addr);
const char* formatted = nullptr;
if ((formatted = inet_ntop(i->ai_addr->sa_family, address_ptr, buf,
INET_ADDRSTRLEN)) == nullptr) {
LOG(ERROR) << "Error converting response to IP address for " << name
<< ": " << strerror(errno);
} else {
output.emplace_back(buf);
VLOG(1) << "... address: " << buf;
}
}
} else {
print_getaddrinfo_error(name, getaddrinfo_status);
}
if (result != nullptr) {
freeaddrinfo(result);
}
return output;
}
std::vector<std::vector<string>> GcsDnsCache::ResolveNames(
const std::vector<string>& names) {
std::vector<std::vector<string>> all_addresses;
all_addresses.reserve(names.size());
for (const string& name : names) {
all_addresses.push_back(ResolveName(name));
}
return all_addresses;
}
void GcsDnsCache::WorkerThread() {
while (true) {
{
mutex_lock l(mu_);
if (cancelled_) return;
cond_var_.wait_for(l, std::chrono::seconds(refresh_rate_secs_));
if (cancelled_) return;
}
auto new_addresses = ResolveNames(kCachedDomainNames);
{
mutex_lock l(mu_);
addresses_.swap(new_addresses);
}
}
}
} | #include "tsl/platform/cloud/gcs_dns_cache.h"
#include "tsl/platform/str_util.h"
#include "tsl/platform/test.h"
namespace tsl {
class TestHttpRequest : public HttpRequest {
public:
void SetUri(const string& uri) override {}
void SetRange(uint64 start, uint64 end) override {}
void AddHeader(const string& name, const string& value) override {}
void AddResolveOverride(const string& hostname, int64_t port,
const string& ip_addr) override {
EXPECT_EQ(port, 443) << "Unexpected port set for hostname: " << hostname;
auto itr = resolve_overrides_.find(hostname);
EXPECT_EQ(itr, resolve_overrides_.end())
<< "Hostname " << hostname << "already in map: " << itr->second;
resolve_overrides_.insert(
std::map<string, string>::value_type(hostname, ip_addr));
}
void AddAuthBearerHeader(const string& auth_token) override {}
void SetRequestStats(HttpRequest::RequestStats* stats) override {}
void SetDeleteRequest() override {}
absl::Status SetPutFromFile(const string& body_filepath,
size_t offset) override {
return absl::OkStatus();
}
void SetPutEmptyBody() override {}
void SetPostFromBuffer(const char* buffer, size_t size) override {}
void SetPostEmptyBody() override {}
void SetResultBuffer(std::vector<char>* out_buffer) override {}
void SetResultBufferDirect(char* buffer, size_t size) override {}
size_t GetResultBufferDirectBytesTransferred() override { return 0; }
string GetResponseHeader(const string& name) const override { return ""; }
uint64 GetResponseCode() const override { return 0; }
absl::Status Send() override { return absl::OkStatus(); }
string EscapeString(const string& str) override { return ""; }
void SetTimeouts(uint32 connection, uint32 inactivity,
uint32 total) override {}
std::map<string, string> resolve_overrides_;
};
class GcsDnsCacheTest : public ::testing::Test {
protected:
void ResolveNameTest() {
auto response = GcsDnsCache::ResolveName("www.googleapis.com");
EXPECT_LT(1, response.size()) << absl::StrJoin(response, ", ");
}
void AnnotateRequestTest() {
GcsDnsCache d;
{
mutex_lock l(d.mu_);
d.started_ = true;
d.addresses_ = {{"192.168.1.1"}, {"172.134.1.1"}};
}
TestHttpRequest req;
d.AnnotateRequest(&req);
EXPECT_EQ("192.168.1.1", req.resolve_overrides_["www.googleapis.com"]);
EXPECT_EQ("172.134.1.1", req.resolve_overrides_["storage.googleapis.com"]);
}
void SuccessfulCleanupTest() {
GcsDnsCache d;
TestHttpRequest req;
d.AnnotateRequest(&req);
}
};
TEST_F(GcsDnsCacheTest, AnnotateRequest) { AnnotateRequestTest(); }
TEST_F(GcsDnsCacheTest, SuccessfulCleanup) { SuccessfulCleanupTest(); }
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/third_party/tsl/tsl/platform/cloud/gcs_dns_cache.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/third_party/tsl/tsl/platform/cloud/gcs_dns_cache_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
a3080d71-6a4e-44ac-b11e-7ca949bc2bb5 | cpp | tensorflow/tensorflow | example_parser_configuration | tensorflow/core/example/example_parser_configuration.cc | tensorflow/core/example/example_parser_configuration_test.cc | #include "tensorflow/core/example/example_parser_configuration.h"
#include <vector>
#include "tensorflow/core/example/feature.pb.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/numeric_op.h"
#include "tensorflow/core/framework/register_types.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/protobuf.h"
namespace tensorflow {
Status FindNodeIndexByName(const tensorflow::GraphDef& graph,
const string& node_name, int* node_idx) {
for (int i = 0; i < graph.node_size(); ++i) {
const auto& node = graph.node(i);
if (node.name() == node_name) {
*node_idx = i;
return absl::OkStatus();
}
}
return errors::InvalidArgument(node_name, " not found in GraphDef");
}
Status ExtractExampleParserConfiguration(
const tensorflow::GraphDef& graph, const string& node_name,
tensorflow::Session* session,
std::vector<FixedLenFeature>* fixed_len_features,
std::vector<VarLenFeature>* var_len_features) {
int node_idx;
TF_RETURN_IF_ERROR(FindNodeIndexByName(graph, node_name, &node_idx));
const auto& node = graph.node(node_idx);
if (node.op() != "ParseExample") {
return errors::InvalidArgument(node_name, " node is not a ParseExample op");
}
auto& attr_map = node.attr();
auto num_sparse = attr_map.at("Nsparse").i();
auto num_dense = attr_map.at("Ndense").i();
fixed_len_features->resize(num_dense);
var_len_features->resize(num_sparse);
auto tdense = attr_map.at("Tdense");
auto dense_shapes = attr_map.at("dense_shapes");
auto sparse_types = attr_map.at("sparse_types");
if (tdense.list().type_size() != num_dense) {
return errors::InvalidArgument("Node attr Tdense has ",
tdense.list().type_size(),
" elements != Ndense attr: ", num_dense);
}
if (dense_shapes.list().shape_size() != num_dense) {
return errors::InvalidArgument("Node attr dense_shapes has ",
dense_shapes.list().shape_size(),
" elements != Ndense attr: ", num_dense);
}
if (sparse_types.list().type_size() != num_sparse) {
return errors::InvalidArgument("Node attr sparse_types has ",
sparse_types.list().type_size(),
" elements != NSparse attr: ", num_sparse);
}
for (int i = 0; i < tdense.list().type_size(); ++i) {
(*fixed_len_features)[i].dtype = tdense.list().type(i);
(*fixed_len_features)[i].shape = TensorShape(dense_shapes.list().shape(i));
}
for (int i = 0; i < sparse_types.list().type_size(); ++i) {
(*var_len_features)[i].dtype = sparse_types.list().type(i);
}
std::vector<string> fetch_names(node.input_size() - 1);
for (int i = 1; i < node.input_size(); ++i) {
fetch_names[i - 1] = node.input(i);
}
std::vector<Tensor> op_input_tensors;
TF_RETURN_IF_ERROR(session->Run({},
fetch_names, {},
&op_input_tensors));
int sparse_keys_start = 1;
int dense_keys_start = sparse_keys_start + num_sparse;
int dense_defaults_start = dense_keys_start + num_dense;
for (int i = 0; i < num_sparse; ++i) {
int input_idx = sparse_keys_start + i;
(*var_len_features)[i].key =
op_input_tensors[input_idx].scalar<tstring>()();
}
for (int i = 0; i < num_dense; ++i) {
FixedLenFeature& config = (*fixed_len_features)[i];
int dense_keys_offset = dense_keys_start + i;
config.key = op_input_tensors[dense_keys_offset].scalar<tstring>()();
int defaults_offset = dense_defaults_start + i;
config.default_value = op_input_tensors[defaults_offset];
}
int sparse_indices_output_start = 0;
int sparse_values_output_start = sparse_indices_output_start + num_sparse;
int sparse_shapes_output_start = sparse_values_output_start + num_sparse;
int dense_values_output_start = sparse_shapes_output_start + num_sparse;
string node_output_prefix = strings::StrCat(node_name, ":");
for (int i = 0; i < num_sparse; ++i) {
VarLenFeature& config = (*var_len_features)[i];
int indices_offset = sparse_indices_output_start + i;
config.indices_output_tensor_name =
strings::StrCat(node_output_prefix, indices_offset);
int values_offset = sparse_values_output_start + i;
config.values_output_tensor_name =
strings::StrCat(node_output_prefix, values_offset);
int shapes_offset = sparse_shapes_output_start + i;
config.shapes_output_tensor_name =
strings::StrCat(node_output_prefix, shapes_offset);
}
for (int i = 0; i < num_dense; ++i) {
int output_idx = dense_values_output_start + i;
(*fixed_len_features)[i].values_output_tensor_name =
strings::StrCat(node_output_prefix, output_idx);
}
return absl::OkStatus();
}
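// Added note on the output layout computed above: for a ParseExample node
// with Nsparse sparse and Ndense dense features, the output slots are
//   [0, Nsparse)                         sparse indices
//   [Nsparse, 2 * Nsparse)               sparse values
//   [2 * Nsparse, 3 * Nsparse)           sparse shapes
//   [3 * Nsparse, 3 * Nsparse + Ndense)  dense values
// so with Nsparse = 2 and Ndense = 3, dense values are outputs ":6", ":7"
// and ":8", matching the expectations in the test below.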
Status ExampleParserConfigurationProtoToFeatureVectors(
const ExampleParserConfiguration& config_proto,
std::vector<FixedLenFeature>* fixed_len_features,
std::vector<VarLenFeature>* var_len_features) {
const auto& feature_map = config_proto.feature_map();
for (auto it = feature_map.cbegin(); it != feature_map.cend(); ++it) {
string key = it->first;
const auto& config = it->second;
if (config.has_fixed_len_feature()) {
const auto& fixed_config = config.fixed_len_feature();
FixedLenFeature f;
f.key = key;
f.dtype = fixed_config.dtype();
f.shape = TensorShape(fixed_config.shape());
Tensor default_value(f.dtype, f.shape);
if (!default_value.FromProto(fixed_config.default_value())) {
return errors::InvalidArgument(
"Invalid default_value in config proto ",
fixed_config.default_value().DebugString());
}
f.default_value = default_value;
f.values_output_tensor_name = fixed_config.values_output_tensor_name();
fixed_len_features->push_back(f);
} else {
const auto& var_len_config = config.var_len_feature();
VarLenFeature v;
v.key = key;
v.dtype = var_len_config.dtype();
v.values_output_tensor_name = var_len_config.values_output_tensor_name();
v.indices_output_tensor_name =
var_len_config.indices_output_tensor_name();
v.shapes_output_tensor_name = var_len_config.shapes_output_tensor_name();
var_len_features->push_back(v);
}
}
return absl::OkStatus();
}
} | #include "tensorflow/core/example/example_parser_configuration.h"
#include <memory>
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/io/path.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/public/session_options.h"
#include "tensorflow/core/util/example_proto_helper.h"
namespace tensorflow {
namespace {
void ReadFileToStringOrDie(Env* env, const string& filename, string* output) {
TF_CHECK_OK(ReadFileToString(env, filename, output));
}
std::unique_ptr<Session> CreateSession() {
SessionOptions options;
(*options.config.mutable_device_count())["CPU"] = 2;
return std::unique_ptr<Session>(NewSession(options));
}
class ExtractExampleParserConfigurationTest : public ::testing::Test {
protected:
void SetUp() override {
string proto_string;
string filename =
io::JoinPath(testing::TensorFlowSrcRoot(),
"core/example/testdata/parse_example_graph_def.pbtxt");
ReadFileToStringOrDie(Env::Default(), filename, &proto_string);
protobuf::TextFormat::ParseFromString(proto_string, &graph_def_);
session_ = CreateSession();
TF_CHECK_OK(session_->Create(graph_def_));
}
NodeDef* parse_example_node() {
for (auto& node : *graph_def_.mutable_node()) {
if (node.name() == "ParseExample/ParseExample") {
return &node;
}
}
return nullptr;
}
GraphDef graph_def_;
std::unique_ptr<Session> session_;
};
TEST_F(ExtractExampleParserConfigurationTest, OpNotFound) {
std::vector<FixedLenFeature> dense_vec;
std::vector<VarLenFeature> sparse_vec;
Status status = ExtractExampleParserConfiguration(
graph_def_, "BlarseExample/ParseExample", session_.get(), &dense_vec,
&sparse_vec);
EXPECT_EQ(status.code(), error::INVALID_ARGUMENT);
}
TEST_F(ExtractExampleParserConfigurationTest, InconsistentAttrNsparse) {
std::vector<FixedLenFeature> dense_vec;
std::vector<VarLenFeature> sparse_vec;
NodeDef* node = parse_example_node();
auto mutable_attr = node->mutable_attr();
(*mutable_attr)["Nsparse"].set_i(3);
Status status = ExtractExampleParserConfiguration(
graph_def_, "ParseExample/ParseExample", session_.get(), &dense_vec,
&sparse_vec);
EXPECT_EQ(status.code(), error::INVALID_ARGUMENT);
}
TEST_F(ExtractExampleParserConfigurationTest, InconsistentAttrNdense) {
std::vector<FixedLenFeature> dense_vec;
std::vector<VarLenFeature> sparse_vec;
NodeDef* node = parse_example_node();
auto mutable_attr = node->mutable_attr();
(*mutable_attr)["Ndense"].set_i(2);
Status status = ExtractExampleParserConfiguration(
graph_def_, "ParseExample/ParseExample", session_.get(), &dense_vec,
&sparse_vec);
EXPECT_EQ(status.code(), error::INVALID_ARGUMENT);
}
TEST_F(ExtractExampleParserConfigurationTest, Basic) {
std::vector<FixedLenFeature> dense_vec;
std::vector<VarLenFeature> sparse_vec;
Status status = ExtractExampleParserConfiguration(
graph_def_, "ParseExample/ParseExample", session_.get(), &dense_vec,
&sparse_vec);
EXPECT_EQ(absl::OkStatus(), status);
EXPECT_EQ(2, sparse_vec.size());
EXPECT_EQ(3, dense_vec.size());
EXPECT_EQ("sf0", sparse_vec[0].key);
EXPECT_EQ(DT_STRING, sparse_vec[0].dtype);
EXPECT_EQ("ParseExample/ParseExample:0",
sparse_vec[0].indices_output_tensor_name);
EXPECT_EQ("ParseExample/ParseExample:2",
sparse_vec[0].values_output_tensor_name);
EXPECT_EQ("ParseExample/ParseExample:4",
sparse_vec[0].shapes_output_tensor_name);
EXPECT_EQ("sf1", sparse_vec[1].key);
EXPECT_EQ(DT_STRING, sparse_vec[1].dtype);
EXPECT_EQ("ParseExample/ParseExample:1",
sparse_vec[1].indices_output_tensor_name);
EXPECT_EQ("ParseExample/ParseExample:3",
sparse_vec[1].values_output_tensor_name);
EXPECT_EQ("ParseExample/ParseExample:5",
sparse_vec[1].shapes_output_tensor_name);
EXPECT_EQ("x", dense_vec[0].key);
EXPECT_EQ(DT_FLOAT, dense_vec[0].dtype);
EXPECT_EQ("ParseExample/ParseExample:6",
dense_vec[0].values_output_tensor_name);
EXPECT_EQ("y", dense_vec[1].key);
EXPECT_EQ(DT_FLOAT, dense_vec[1].dtype);
EXPECT_EQ("ParseExample/ParseExample:7",
dense_vec[1].values_output_tensor_name);
EXPECT_EQ("z", dense_vec[2].key);
EXPECT_EQ(DT_FLOAT, dense_vec[2].dtype);
EXPECT_EQ("ParseExample/ParseExample:8",
dense_vec[2].values_output_tensor_name);
}
static const char kExampleParseConfigurationProto[] = R"( feature_map {
key: "x"
value {
fixed_len_feature {
dtype: DT_FLOAT
shape {
dim {
size: 1
}
}
default_value {
dtype: DT_FLOAT
tensor_shape {
dim {
size: 1
}
}
float_val: 33.0
}
values_output_tensor_name: "ParseExample/ParseExample:3"
}
}
}
feature_map {
key: "y"
value {
var_len_feature {
dtype: DT_STRING
values_output_tensor_name: "ParseExample/ParseExample:1"
indices_output_tensor_name: "ParseExample/ParseExample:0"
shapes_output_tensor_name: "ParseExample/ParseExample:2"
}
}
}
)";
class ExampleParserConfigurationProtoToFeatureVectorsTest
: public ::testing::Test {
protected:
void SetUp() override {
CHECK(protobuf::TextFormat::ParseFromString(kExampleParseConfigurationProto,
&config_proto_));
}
ExampleParserConfiguration config_proto_;
};
TEST_F(ExampleParserConfigurationProtoToFeatureVectorsTest, Basic) {
std::vector<FixedLenFeature> fixed_len_features;
std::vector<VarLenFeature> var_len_features;
TF_ASSERT_OK(ExampleParserConfigurationProtoToFeatureVectors(
config_proto_, &fixed_len_features, &var_len_features));
ASSERT_EQ(1, fixed_len_features.size());
ASSERT_EQ(1, var_len_features.size());
const FixedLenFeature& f = fixed_len_features[0];
ASSERT_EQ(DT_FLOAT, f.dtype);
ASSERT_EQ("x", f.key);
ASSERT_EQ("ParseExample/ParseExample:3", f.values_output_tensor_name);
TensorShape expected_shape({1});
ASSERT_EQ(expected_shape.dims(), f.shape.dims());
ASSERT_EQ(1, f.shape.dim_size(0));
Tensor expected_default(DT_FLOAT, TensorShape({1}));
test::FillIota<float>(&expected_default, 33.0);
test::ExpectTensorEqual<float>(expected_default, f.default_value);
const VarLenFeature& v = var_len_features[0];
ASSERT_EQ(DT_STRING, v.dtype);
ASSERT_EQ("ParseExample/ParseExample:0", v.indices_output_tensor_name);
ASSERT_EQ("ParseExample/ParseExample:1", v.values_output_tensor_name);
ASSERT_EQ("ParseExample/ParseExample:2", v.shapes_output_tensor_name);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/example/example_parser_configuration.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/example/example_parser_configuration_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
8a516fea-af24-401e-b978-de5e7f147671 | cpp | tensorflow/tensorflow | tensor_dataset_op | tensorflow/core/kernels/data/tensor_dataset_op.cc | tensorflow/core/kernels/data/tensor_dataset_op_test.cc | #include "tensorflow/core/kernels/data/tensor_dataset_op.h"
#include <string>
#include <utility>
#include "absl/status/status.h"
#include "tensorflow/core/data/dataset_utils.h"
#include "tensorflow/core/data/global_shuffle_utils.h"
#include "tensorflow/core/data/name_utils.h"
#include "tensorflow/core/data/split_utils.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/partial_tensor_shape.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/graph/graph.h"
#include "tsl/platform/mutex.h"
#include "tsl/platform/thread_annotations.h"
namespace tensorflow {
namespace data {
constexpr const char* const TensorDatasetOp::kDatasetType;
constexpr const char* const TensorDatasetOp::kComponents;
constexpr const char* const TensorDatasetOp::kToutput_types;
constexpr const char* const TensorDatasetOp::kOutputShapes;
constexpr char kFromTensor[] = "FromTensor";
constexpr char kProduced[] = "produced";
class TensorDatasetOp::Dataset : public DatasetBase {
public:
Dataset(OpKernelContext* ctx, std::vector<Tensor> tensors)
: DatasetBase(DatasetContext(ctx)), tensors_(std::move(tensors)) {
dtypes_.reserve(tensors_.size());
shapes_.reserve(tensors_.size());
for (const Tensor& t : tensors_) {
dtypes_.push_back(t.dtype());
shapes_.emplace_back(t.shape().dim_sizes());
}
}
std::unique_ptr<IteratorBase> MakeIteratorInternal(
const string& prefix) const override {
return std::make_unique<Iterator>(Iterator::Params{
this, name_utils::IteratorPrefix(kFromTensor, prefix)});
}
Status MakeSplitProviders(std::vector<std::unique_ptr<SplitProvider>>*
split_providers) const override {
split_providers->push_back(std::make_unique<IndexSplitProvider>(1));
return absl::OkStatus();
}
const DataTypeVector& output_dtypes() const override { return dtypes_; }
const std::vector<PartialTensorShape>& output_shapes() const override {
return shapes_;
}
string DebugString() const override {
return name_utils::DatasetDebugString(kDatasetType);
}
int64_t CardinalityInternal(CardinalityOptions options) const override {
return 1LL;
}
Status InputDatasets(std::vector<const DatasetBase*>* inputs) const override {
return absl::OkStatus();
}
Status CheckExternalState() const override { return absl::OkStatus(); }
Status Get(OpKernelContext* ctx, int64 index,
std::vector<Tensor>* out_tensors) const override {
return Get(AnyContext(ctx), index, out_tensors);
}
Status Get(AnyContext ctx, int64 index,
std::vector<Tensor>* out_tensors) const override {
TF_RETURN_IF_ERROR(CheckRandomAccessCompatible(index));
*out_tensors = tensors_;
return absl::OkStatus();
}
absl::Status RandomIndexingCompatible() const override {
return absl::OkStatus();
}
protected:
Status AsGraphDefInternal(SerializationContext* ctx,
DatasetGraphDefBuilder* b,
Node** output) const override {
std::vector<Node*> components;
components.reserve(tensors_.size());
for (const Tensor& t : tensors_) {
Node* node;
if (!ctx->is_graph_rewrite()) {
TF_RETURN_IF_ERROR(b->AddDatasetOrTensor(ctx, t, &node));
} else {
TF_RETURN_IF_ERROR(b->AddPlaceholder(t, &node));
DCHECK_NE(ctx->input_list(), nullptr);
ctx->input_list()->emplace_back(node->name(), t);
}
components.emplace_back(node);
}
AttrValue dtypes;
b->BuildAttrValue(dtypes_, &dtypes);
TF_RETURN_IF_ERROR(b->AddDataset(this, {}, {{0, components}},
{{kToutput_types, dtypes}}, output));
return absl::OkStatus();
}
private:
class Iterator : public DatasetIterator<Dataset> {
public:
explicit Iterator(const Params& params)
: DatasetIterator<Dataset>(params),
produced_(false),
global_shuffle_iterator_(dataset()) {}
bool SymbolicCheckpointCompatible() const override { return true; }
Status Initialize(IteratorContext* ctx) override {
if (!ctx->split_providers().empty()) {
TF_ASSIGN_OR_RETURN(split_provider_,
GetSingleSplitProvider(ctx, dataset()));
}
return absl::OkStatus();
}
Status GetNextInternal(IteratorContext* ctx,
std::vector<Tensor>* out_tensors,
bool* end_of_sequence) override {
if (ctx->index_mapper() != nullptr) {
return global_shuffle_iterator_.GetNext(ctx, out_tensors,
end_of_sequence);
}
mutex_lock l(mu_);
if (split_provider_) {
bool end_of_splits;
Tensor split;
TF_RETURN_IF_ERROR(split_provider_->GetNext(&split, &end_of_splits));
if (end_of_splits) {
produced_ = true;
}
}
if (!produced_) {
*out_tensors = dataset()->tensors_;
produced_ = true;
*end_of_sequence = false;
return absl::OkStatus();
} else {
*end_of_sequence = true;
return absl::OkStatus();
}
}
protected:
std::shared_ptr<model::Node> CreateNode(
IteratorContext* ctx, model::Node::Args args) const override {
return model::MakeSourceNode(std::move(args));
}
Status SaveInternal(SerializationContext* ctx,
IteratorStateWriter* writer) override {
mutex_lock l(mu_);
TF_RETURN_IF_ERROR(writer->WriteScalar(prefix(), kProduced,
static_cast<int64_t>(produced_)));
TF_RETURN_IF_ERROR(global_shuffle_iterator_.Save(prefix(), ctx, writer));
return absl::OkStatus();
}
Status RestoreInternal(IteratorContext* ctx,
IteratorStateReader* reader) override {
if (ctx->restored_element_count().has_value()) {
return global_shuffle_iterator_.Restore(prefix(), ctx, reader);
}
mutex_lock l(mu_);
int64_t produced;
TF_RETURN_IF_ERROR(reader->ReadScalar(prefix(), kProduced, &produced));
produced_ = static_cast<bool>(produced);
return absl::OkStatus();
}
private:
mutex mu_;
std::shared_ptr<SplitProvider> split_provider_;
bool produced_ TF_GUARDED_BY(mu_);
GlobalShuffleIterator global_shuffle_iterator_;
};
const std::vector<Tensor> tensors_;
DataTypeVector dtypes_;
std::vector<PartialTensorShape> shapes_;
};
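// Added note: the dataset emits its captured components exactly once;
// CardinalityInternal returns 1 and the iterator's `produced_` flag flips
// after the first successful GetNext. Sketch of the element sequence:
//
//   std::vector<Tensor> out;
//   bool end = false;
//   iter->GetNext(ctx, &out, &end);  // out == tensors_, end == false
//   iter->GetNext(ctx, &out, &end);  // end == true, out untouched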
TensorDatasetOp::TensorDatasetOp(OpKernelConstruction* ctx)
: DatasetOpKernel(ctx) {
OP_REQUIRES_OK(ctx, ctx->GetAttr(kToutput_types, &output_types_));
OP_REQUIRES_OK(ctx, ctx->GetAttr(kOutputShapes, &output_shapes_));
}
void TensorDatasetOp::MakeDataset(OpKernelContext* ctx, DatasetBase** output) {
OpInputList inputs;
OP_REQUIRES_OK(ctx, ctx->input_list(kComponents, &inputs));
std::vector<Tensor> components(inputs.begin(), inputs.end());
*output = new Dataset(ctx, std::move(components));
OP_REQUIRES_OK(ctx,
VerifyTypesMatch((*output)->output_dtypes(), output_types_));
OP_REQUIRES_OK(
ctx, VerifyShapesCompatible((*output)->output_shapes(), output_shapes_));
}
namespace {
REGISTER_KERNEL_BUILDER(Name("TensorDataset").Device(DEVICE_CPU),
TensorDatasetOp);
}
}
} | #include "tensorflow/core/kernels/data/tensor_dataset_op.h"
#include <string>
#include <utility>
#include "tensorflow/core/data/dataset_test_base.h"
#include "tensorflow/core/data/dataset_utils.h"
#include "tensorflow/core/data/serialization_utils.h"
namespace tensorflow {
namespace data {
namespace {
constexpr char kNodeName[] = "tensor_dataset";
class TensorDatasetParams : public DatasetParams {
public:
TensorDatasetParams(std::vector<Tensor> components, string node_name)
: DatasetParams(TensorDtypes(components), TensorShapes(components),
std::move(node_name)),
components_(std::move(components)) {}
std::vector<Tensor> GetInputTensors() const override { return components_; }
Status GetInputNames(std::vector<string>* input_names) const override {
input_names->reserve(components_.size());
for (int i = 0; i < components_.size(); ++i) {
input_names->emplace_back(
absl::StrCat(TensorDatasetOp::kComponents, "_", i));
}
return absl::OkStatus();
}
Status GetAttributes(AttributeVector* attr_vector) const override {
*attr_vector = {{"Toutput_types", output_dtypes_},
{"output_shapes", output_shapes_},
{"metadata", ""}};
return absl::OkStatus();
}
string dataset_type() const override { return TensorDatasetOp::kDatasetType; }
private:
DataTypeVector TensorDtypes(const std::vector<Tensor>& input_components) {
DataTypeVector dtypes;
for (const auto& component : input_components) {
dtypes.emplace_back(component.dtype());
}
return dtypes;
}
std::vector<PartialTensorShape> TensorShapes(
const std::vector<Tensor>& input_components) {
std::vector<PartialTensorShape> shapes;
for (const auto& component : input_components) {
shapes.emplace_back(component.shape());
}
return shapes;
}
public:
std::vector<Tensor> components_;
};
class TensorDatasetOpTest : public DatasetOpsTestBase {};
std::vector<Tensor> PlainTensors() {
return {CreateTensor<int64_t>(TensorShape({}), {1}),
CreateTensor<int64_t>(TensorShape({1, 3}), {1, 2, 3}),
CreateTensor<double>(TensorShape({}), {37.0}),
CreateTensor<tstring>(TensorShape({1, 2}), {"a", "b"})};
}
TensorDatasetParams PlainTensorDatasetParams() {
return {PlainTensors(),
kNodeName};
}
TensorDatasetParams NestedTensorDatasetParams() {
return {
{CreateTensor<Variant>(TensorShape({}),
{CreateTensor<double>(TensorShape({2, 2}),
{1.0, 2.0, 3.0, 4.0})}),
CreateTensor<Variant>(
TensorShape({}),
{CreateTensor<tstring>(TensorShape({1, 2}), {"a", "b"})}),
CreateTensor<int64_t>(TensorShape({1, 3}), {1, 2, 3})},
kNodeName};
}
std::vector<GetNextTestCase<TensorDatasetParams>> GetNextTestCases() {
return {{PlainTensorDatasetParams(),
PlainTensors()},
{NestedTensorDatasetParams(),
{CreateTensor<Variant>(TensorShape({}),
{CreateTensor<double>(TensorShape({2, 2}),
{1.0, 2.0, 3.0, 4.0})}),
CreateTensor<Variant>(
TensorShape({}),
{CreateTensor<tstring>(TensorShape({1, 2}), {"a", "b"})}),
CreateTensor<int64_t>(TensorShape({1, 3}), {1, 2, 3})}}};
}
class ParameterizedGetNextTest : public TensorDatasetOpTest,
public ::testing::WithParamInterface<
GetNextTestCase<TensorDatasetParams>> {};
TEST_P(ParameterizedGetNextTest, GetNext) {
auto test_case = GetParam();
TF_ASSERT_OK(Initialize(test_case.dataset_params));
bool end_of_sequence = false;
std::vector<Tensor> out_tensors;
TF_EXPECT_OK(
iterator_->GetNext(iterator_ctx_.get(), &out_tensors, &end_of_sequence));
ASSERT_FALSE(end_of_sequence);
EXPECT_EQ(out_tensors.size(), test_case.expected_outputs.size());
for (int i = 0; i < out_tensors.size(); ++i) {
if (out_tensors[i].dtype() == DT_VARIANT) {
const Tensor* output = out_tensors[i].scalar<Variant>()().get<Tensor>();
const Tensor* expected_output =
test_case.expected_outputs[i].scalar<Variant>()().get<Tensor>();
TF_EXPECT_OK(ExpectEqual(*output, *expected_output));
} else {
TF_EXPECT_OK(ExpectEqual(out_tensors[i], test_case.expected_outputs[i]));
}
}
TF_EXPECT_OK(
iterator_->GetNext(iterator_ctx_.get(), &out_tensors, &end_of_sequence));
EXPECT_TRUE(end_of_sequence);
EXPECT_TRUE(out_tensors.empty());
}
INSTANTIATE_TEST_CASE_P(TensorDatasetOpTest, ParameterizedGetNextTest,
::testing::ValuesIn(GetNextTestCases()));
TEST_F(TensorDatasetOpTest, DatasetTypeString) {
auto dataset_params = PlainTensorDatasetParams();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetTypeString(
name_utils::OpName(TensorDatasetOp::kDatasetType)));
}
TEST_F(TensorDatasetOpTest, DatasetNodeName) {
auto dataset_params = PlainTensorDatasetParams();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetNodeName(dataset_params.node_name()));
}
TEST_F(TensorDatasetOpTest, DatasetOutputDtypes) {
auto dataset_params = PlainTensorDatasetParams();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetOutputDtypes(dataset_params.output_dtypes()));
}
TEST_F(TensorDatasetOpTest, DatasetOutputShapes) {
auto dataset_params = PlainTensorDatasetParams();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetOutputShapes(dataset_params.output_shapes()));
}
TEST_F(TensorDatasetOpTest, Cardinality) {
auto dataset_params = PlainTensorDatasetParams();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetCardinality(1));
}
TEST_F(TensorDatasetOpTest, IteratorOutputDtypes) {
auto dataset_params = PlainTensorDatasetParams();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckIteratorOutputDtypes(dataset_params.output_dtypes()));
}
TEST_F(TensorDatasetOpTest, IteratorOutputShapes) {
auto dataset_params = PlainTensorDatasetParams();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckIteratorOutputShapes(dataset_params.output_shapes()));
}
TEST_F(TensorDatasetOpTest, IteratorOutputPrefix) {
auto dataset_params = PlainTensorDatasetParams();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckIteratorPrefix(name_utils::IteratorPrefix(
"FromTensor", dataset_params.iterator_prefix())));
}
std::vector<IteratorSaveAndRestoreTestCase<TensorDatasetParams>>
IteratorSaveAndRestoreTestCases() {
return {{PlainTensorDatasetParams(),
{0, 1, 2},
PlainTensors()},
{NestedTensorDatasetParams(),
{0, 1, 2},
{CreateTensor<Variant>(TensorShape({}),
{CreateTensor<double>(TensorShape({2, 2}),
{1.0, 2.0, 3.0, 4.0})}),
CreateTensor<Variant>(
TensorShape({}),
{CreateTensor<tstring>(TensorShape({1, 2}), {"a", "b"})}),
CreateTensor<int64_t>(TensorShape({1, 3}), {1, 2, 3})}}};
}
class ParameterizedIteratorSaveAndRestoreTest
: public TensorDatasetOpTest,
public ::testing::WithParamInterface<
IteratorSaveAndRestoreTestCase<TensorDatasetParams>> {};
TEST_P(ParameterizedIteratorSaveAndRestoreTest, SaveAndRestore) {
auto test_case = GetParam();
TF_ASSERT_OK(Initialize(test_case.dataset_params));
std::unique_ptr<SerializationContext> serialization_ctx;
TF_ASSERT_OK(CreateSerializationContext(&serialization_ctx));
bool end_of_sequence = false;
std::vector<Tensor> out_tensors;
int cur_iteration = 0;
const std::vector<int>& breakpoints = test_case.breakpoints;
int cardinality = 1;
for (int breakpoint : breakpoints) {
VariantTensorDataWriter writer;
TF_EXPECT_OK(iterator_->Save(serialization_ctx.get(), &writer));
std::vector<const VariantTensorData*> data;
writer.GetData(&data);
VariantTensorDataReader reader(data);
TF_EXPECT_OK(RestoreIterator(iterator_ctx_.get(), &reader,
test_case.dataset_params.iterator_prefix(),
*dataset_, &iterator_));
while (cur_iteration <= breakpoint) {
std::vector<Tensor> next;
TF_EXPECT_OK(
iterator_->GetNext(iterator_ctx_.get(), &next, &end_of_sequence));
out_tensors.insert(out_tensors.end(), next.begin(), next.end());
cur_iteration++;
}
if (breakpoint >= cardinality) {
EXPECT_TRUE(end_of_sequence);
} else {
EXPECT_FALSE(end_of_sequence);
}
}
EXPECT_EQ(out_tensors.size(), test_case.expected_outputs.size());
for (int i = 0; i < out_tensors.size(); ++i) {
if (out_tensors[i].dtype() == DT_VARIANT) {
const Tensor* output = out_tensors[i].scalar<Variant>()().get<Tensor>();
const Tensor* expected_output =
test_case.expected_outputs[i].scalar<Variant>()().get<Tensor>();
TF_EXPECT_OK(ExpectEqual(*output, *expected_output));
} else {
TF_EXPECT_OK(ExpectEqual(out_tensors[i], test_case.expected_outputs[i]));
}
}
}
INSTANTIATE_TEST_CASE_P(TensorDatasetOpTest,
ParameterizedIteratorSaveAndRestoreTest,
::testing::ValuesIn(IteratorSaveAndRestoreTestCases()));
TEST_F(TensorDatasetOpTest, Splitting) {
auto params = PlainTensorDatasetParams();
TF_ASSERT_OK(InitializeRuntime(params));
TF_EXPECT_OK(CheckSplitProviderFullIteration(
params, PlainTensors()));
TF_EXPECT_OK(CheckSplitProviderShardedIteration(
params, 3, 2,
CreateTensors<int64_t>(TensorShape({}), {})));
TF_EXPECT_OK(CheckSplitProviderShardedIteration(
params, 3, 0,
PlainTensors()));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/data/tensor_dataset_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/data/tensor_dataset_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
aad3ff49-0c47-43f8-9f0d-aa4acb4eee14 | cpp | google/arolla | restricted_operator | arolla/expr/operators/restricted_operator.cc | arolla/expr/operators/restricted_operator_test.cc | #include "arolla/expr/operators/restricted_operator.h"
#include <memory>
#include <utility>
#include "absl/status/statusor.h"
#include "absl/types/span.h"
#include "arolla/expr/expr.h"
#include "arolla/expr/expr_attributes.h"
#include "arolla/expr/expr_node.h"
#include "arolla/expr/expr_operator.h"
#include "arolla/expr/expr_operator_signature.h"
#include "arolla/expr/operators/type_meta_eval_strategies.h"
#include "arolla/expr/qtype_utils.h"
#include "arolla/util/fingerprint.h"
#include "arolla/util/status_macros_backport.h"
namespace arolla::expr_operators {
namespace {
using ::arolla::expr::ExprAttributes;
using ::arolla::expr::ExprNodePtr;
using ::arolla::expr::ExprOperator;
using ::arolla::expr::ExprOperatorPtr;
using ::arolla::expr::ExprOperatorSignature;
using ::arolla::expr::GetAttrQTypes;
using ::arolla::expr::HasAllAttrQTypes;
using ::arolla::expr::WithNewOperator;
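// ExprOperator wrapper that forwards to wrapped_op_ but first checks the
// argument QTypes against restriction_ during attribute inference; lowering
// replaces the wrapper with wrapped_op_ once the node's qtype is known.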
class RestrictedOp final : public ExprOperator {
public:
RestrictedOp(ExprOperatorPtr wrapped_op, type_meta::Strategy restriction)
: ExprOperator(wrapped_op->display_name(),
FingerprintHasher("::arolla::expr_operators::RestrictedOp")
.Combine(wrapped_op)
.Finish()),
wrapped_op_(std::move(wrapped_op)),
restriction_(std::move(restriction)) {}
absl::StatusOr<ExprOperatorSignature> GetSignature() const final {
return wrapped_op_->GetSignature();
}
absl::StatusOr<ExprNodePtr> ToLowerLevel(
const ExprNodePtr& node) const final {
if (!node->qtype()) {
return node;
}
ASSIGN_OR_RETURN(auto unwrapped_node, WithNewOperator(node, wrapped_op_));
return wrapped_op_->ToLowerLevel(unwrapped_node);
}
absl::StatusOr<ExprAttributes> InferAttributes(
absl::Span<const ExprAttributes> inputs) const final {
if (!HasAllAttrQTypes(inputs)) {
return ExprAttributes{};
}
RETURN_IF_ERROR(restriction_(GetAttrQTypes(inputs)).status())
<< "in restriction for " << display_name() << " operator";
return wrapped_op_->InferAttributes(inputs);
}
private:
ExprOperatorPtr wrapped_op_;
type_meta::Strategy restriction_;
};
}
ExprOperatorPtr RestrictOperator(ExprOperatorPtr wrapped_op,
type_meta::Strategy restriction) {
return std::make_shared<RestrictedOp>(std::move(wrapped_op),
std::move(restriction));
}
absl::StatusOr<ExprOperatorPtr> RestrictOperator(
absl::StatusOr<ExprOperatorPtr> wrapped_op,
absl::StatusOr<type_meta::Strategy> restriction) {
RETURN_IF_ERROR(wrapped_op.status());
RETURN_IF_ERROR(restriction.status());
return RestrictOperator(*wrapped_op, *restriction);
}
} | #include "arolla/expr/operators/restricted_operator.h"
#include <cstdint>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/status/status.h"
#include "absl/status/status_matchers.h"
#include "arolla/expr/eval/invoke.h"
#include "arolla/expr/expr.h"
#include "arolla/expr/operators/type_meta_eval_strategies.h"
#include "arolla/expr/overloaded_expr_operator.h"
#include "arolla/expr/registered_expr_operator.h"
#include "arolla/expr/testing/testing.h"
#include "arolla/qtype/qtype_traits.h"
#include "arolla/qtype/testing/qtype.h"
namespace arolla::expr_operators {
namespace {
using ::absl_testing::IsOkAndHolds;
using ::absl_testing::StatusIs;
using ::arolla::expr::CallOp;
using ::arolla::expr::Literal;
using ::arolla::expr_operators::type_meta::Floating;
using ::arolla::expr_operators::type_meta::Integral;
using ::arolla::testing::InvokeExprOperator;
using ::arolla::testing::TypedValueWith;
using ::testing::Eq;
using ::testing::HasSubstr;
TEST(RestrictedOperatorTest, RestrictSimpleOperator) {
ASSERT_OK_AND_ASSIGN(
auto add_ints_op,
RestrictOperator(expr::LookupOperator("math.add"), Integral));
ASSERT_OK_AND_ASSIGN(
auto add_ints,
CallOp(add_ints_op, {Literal<int64_t>(50), Literal<int64_t>(7)}));
EXPECT_THAT(add_ints->qtype(), Eq(GetQType<int64_t>()));
EXPECT_THAT(expr::Invoke(add_ints, {}),
IsOkAndHolds(TypedValueWith<int64_t>(57)));
EXPECT_THAT(
CallOp(add_ints_op, {Literal<float>(50), Literal<float>(7)}),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr(
"expected all arguments to be integral, but got FLOAT32 for "
"0-th argument; in restriction for math.add operator")));
}
TEST(RestrictedOperatorTest, WorksWithOverloadedOperator) {
ASSERT_OK_AND_ASSIGN(
auto add_or_mul,
expr::MakeOverloadedOperator(
"test.add_or_mul",
RestrictOperator(expr::LookupOperator("math.add"), Integral),
RestrictOperator(expr::LookupOperator("math.multiply"), Floating)));
EXPECT_THAT(InvokeExprOperator<int32_t>(add_or_mul, 50, 7), IsOkAndHolds(57));
EXPECT_THAT(InvokeExprOperator<float>(add_or_mul, 3.f, 19.f),
IsOkAndHolds(57.f));
}
}
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/expr/operators/restricted_operator.cc | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/expr/operators/restricted_operator_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
5e3dedee-df81-4a6a-9c4f-321c9ec535fd | cpp | tensorflow/tensorflow | abs | tensorflow/lite/experimental/shlo/ops/abs.cc | tensorflow/lite/delegates/xnnpack/abs_test.cc | #include "tensorflow/lite/experimental/shlo/ops/abs.h"
#include "absl/status/status.h"
#include "tensorflow/lite/experimental/shlo/dispatch.h"
#include "tensorflow/lite/experimental/shlo/ops/unary_elementwise.h"
#include "tensorflow/lite/experimental/shlo/ops/util.h"
#include "tensorflow/lite/experimental/shlo/tensor.h"
namespace shlo_ref {
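// Element-wise absolute value functor; returns -val for values below zero and
// the value itself otherwise.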
struct Abs {
template <class T>
T operator()(const T& val) {
return val < static_cast<T>(0) ? static_cast<T>(-val) : val;
}
};
AbsOp Create(typename AbsOp::Attributes) { return AbsOp{}; }
absl::Status Prepare(AbsOp& op, const Tensor& input, Tensor& output) {
SHLO_REF_RETURN_ON_ERROR(Propagate(input.shape(), output.shape()));
SHLO_REF_RETURN_ON_ERROR(CheckSupportedTypes(CheckCtx("abs"), input,
IsSignedIntTensor, IsFloatTensor,
IsQuantizedPerTensorTensor));
SHLO_REF_RETURN_ON_ERROR(
CheckSameBaselineType(CheckCtx("abs"), input, output));
return absl::OkStatus();
}
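// Dispatches the Abs functor over per-tensor quantized, signed integer, and
// floating point inputs; the DISPATCH_* macros return from the function on a
// match, so falling through to the final return means the tensor type is
// unsupported.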
absl::Status Evaluate(AbsOp& op, const Tensor& input, Tensor& output) {
Abs abs;
if (input.IsPerTensorQuantized()) {
DISPATCH_QUANTIZED(
detail::DequantizeOpQuantizePerTensor,
input.quantized_per_tensor_element_type().StorageType(),
input.quantized_per_tensor_element_type().ExpressedType(), abs, input,
output)
} else if (IsSignedIntTensor(input) || IsFloatTensor(input)) {
DISPATCH_INT_FLOAT(detail::EvaluateNoQuantization,
input.tensor_element_type(), abs, input, output);
}
return absl::FailedPreconditionError("Unsupported tensor type.");
}
} | #include <cstdint>
#include <functional>
#include <memory>
#include <random>
#include <gtest/gtest.h>
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/delegates/xnnpack/unary_elementwise_tester.h"
#include "tensorflow/lite/delegates/xnnpack/xnnpack_delegate.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace xnnpack {
TEST(Abs, 4D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
UnaryElementwiseTester()
.Shape({batch, height, width, channels})
.Test(BuiltinOperator_ABS, xnnpack_delegate.get());
}
TEST(Abs, 3D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
UnaryElementwiseTester()
.Shape({batch, width, channels})
.Test(BuiltinOperator_ABS, xnnpack_delegate.get());
}
TEST(Abs, 2D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto channels = shape_rng();
UnaryElementwiseTester()
.Shape({batch, channels})
.Test(BuiltinOperator_ABS, xnnpack_delegate.get());
}
TEST(Abs, 1D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
UnaryElementwiseTester().Shape({batch}).Test(BuiltinOperator_ABS,
xnnpack_delegate.get());
}
TEST(Abs, MultiThreading) {
TfLiteXNNPackDelegateOptions delegate_options =
TfLiteXNNPackDelegateOptionsDefault();
delegate_options.num_threads = 2;
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(&delegate_options),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
UnaryElementwiseTester()
.Shape({batch, height, width, channels})
.Test(BuiltinOperator_ABS, xnnpack_delegate.get());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/shlo/ops/abs.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/xnnpack/abs_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
5df7aab9-5757-4f4b-8261-a448a8554408 | cpp | tensorflow/tensorflow | indexing_map | third_party/xla/xla/service/gpu/model/indexing_map.cc | third_party/xla/xla/service/gpu/model/indexing_map_test.cc | #include "xla/service/gpu/model/indexing_map.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <limits>
#include <numeric>
#include <optional>
#include <ostream>
#include <sstream>
#include <string>
#include <string_view>
#include <tuple>
#include <utility>
#include <vector>
#include "absl/base/optimization.h"
#include "absl/log/check.h"
#include "absl/numeric/int128.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/types/span.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "mlir/IR/AffineExpr.h"
#include "mlir/IR/AffineMap.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/Support/LLVM.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "tsl/platform/logging.h"
namespace xla {
namespace gpu {
namespace {
using llvm::ArrayRef;
using llvm::SmallBitVector;
using llvm::SmallVector;
using mlir::AffineBinaryOpExpr;
using mlir::AffineConstantExpr;
using mlir::AffineDimExpr;
using mlir::AffineExpr;
using mlir::AffineExprKind;
using mlir::AffineMap;
using mlir::AffineSymbolExpr;
using mlir::getAffineConstantExpr;
using mlir::getAffineDimExpr;
using mlir::MLIRContext;
AffineExpr GetLhs(AffineExpr e) {
return mlir::cast<AffineBinaryOpExpr>(e).getLHS();
}
AffineExpr GetRhs(AffineExpr e) {
return mlir::cast<AffineBinaryOpExpr>(e).getRHS();
}
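// Applies fn to each summand of a (possibly nested) sum and rebuilds the sum
// from the results, reusing the original subtree when nothing changed.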
template <typename Fn>
AffineExpr MapSummands(AffineExpr expr, const Fn& fn) {
if (expr.getKind() == AffineExprKind::Add) {
auto add = mlir::cast<AffineBinaryOpExpr>(expr);
auto lhs = MapSummands(add.getLHS(), fn);
auto rhs = MapSummands(add.getRHS(), fn);
if (lhs == add.getLHS() && rhs == add.getRHS()) {
return add;
}
return lhs + rhs;
}
return fn(expr);
}
template <typename Fn>
void VisitSummands(mlir::AffineExpr expr, const Fn& visit) {
if (expr.getKind() == AffineExprKind::Add) {
VisitSummands(GetLhs(expr), visit);
VisitSummands(GetRhs(expr), visit);
} else {
visit(expr);
}
}
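// Rewrites affine expressions into simpler equivalent forms, using the
// variable ranges provided by a RangeEvaluator to justify each rewrite.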
class AffineExprSimplifier {
public:
explicit AffineExprSimplifier(RangeEvaluator* range_evaluator)
: range_evaluator_(range_evaluator),
zero_(getAffineConstantExpr(0, range_evaluator_->GetMLIRContext())) {}
mlir::AffineMap Simplify(mlir::AffineMap affine_map);
mlir::AffineExpr Simplify(mlir::AffineExpr expr);
bool SimplifyConstraintExprs(IndexingMap& map);
bool SimplifyConstraintRanges(IndexingMap& map);
private:
std::optional<int64_t> GetConstantRhs(mlir::AffineExpr expr,
AffineExprKind kind);
std::pair<mlir::AffineExpr, int64_t> ExtractMultiplier(
mlir::AffineExpr expr) {
if (auto mul = GetConstantRhs(expr, AffineExprKind::Mul)) {
return {GetLhs(expr), *mul};
}
return {expr, 1};
}
mlir::AffineExpr RewriteMod(mlir::AffineBinaryOpExpr mod);
mlir::AffineExpr RewriteFloorDiv(mlir::AffineBinaryOpExpr div);
AffineExpr SimplifyModDiv(AffineExpr dividend, int64_t divisor);
AffineExpr SimplifyDivDiv(AffineExpr dividend, int64_t divisor);
AffineExpr SimplifySumDiv(AffineExpr dividend, int64_t divisor);
mlir::AffineExpr RewriteMul(mlir::AffineBinaryOpExpr mul);
mlir::AffineExpr RewriteSum(mlir::AffineBinaryOpExpr sum);
mlir::AffineExpr SimplifyOnce(mlir::AffineExpr expr);
mlir::AffineExpr SimplifyWithMlir(mlir::AffineExpr expr, int num_dims,
int num_symbols);
bool SimplifyConstraintRangeOnce(AffineExpr* expr, Interval* range);
bool SimplifyConstraintRange(AffineExpr* expr, Interval* range);
bool SimplifyAddConstraint(AffineExpr* add, Interval* range);
std::tuple<AffineExpr /*multiplied*/, int64_t /*multiplier_gcd*/,
AffineExpr /*not_multiplied*/>
SplitSumByGcd(AffineExpr sum);
RangeEvaluator* range_evaluator_;
AffineExpr zero_;
};
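// Simplifies lhs % m for a constant modulus m: drops the mod entirely when
// the shifted LHS range fits in [0, m), removes summands whose multipliers
// are divisible by m, folds constants, and reduces the modulus by common
// multiplier factors where the remainder part cannot change the result.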
AffineExpr AffineExprSimplifier::RewriteMod(AffineBinaryOpExpr mod) {
auto rhs = range_evaluator_->ComputeExpressionRange(mod.getRHS());
if (!rhs.IsPoint()) {
return mod;
}
int64_t m = rhs.lower;
if (m == 0) {
return zero_;
}
auto lhs_simplified = SimplifyOnce(mod.getLHS());
auto lhs = range_evaluator_->ComputeExpressionRange(lhs_simplified);
int64_t offset = llvm::divideFloorSigned(lhs.lower, m) * -m;
if (lhs.upper + offset < m) {
return lhs_simplified + offset;
}
if (auto mul = GetConstantRhs(lhs_simplified, AffineExprKind::Mul);
mul && *mul > 0 && (m % *mul == 0)) {
return (GetLhs(lhs_simplified) % (m / *mul)) * *mul;
}
int64_t extracted_constant = 0;
auto new_lhs = MapSummands(lhs_simplified, [&](AffineExpr expr) {
if (auto cst = mlir::dyn_cast<AffineConstantExpr>(expr)) {
extracted_constant += cst.getValue();
return zero_;
}
if (auto multiplier = GetConstantRhs(expr, AffineExprKind::Mul);
multiplier && (*multiplier % m == 0)) {
return zero_;
}
return expr;
});
if (extracted_constant % m != 0) {
new_lhs = new_lhs + (extracted_constant % m);
}
auto [multiplied, multiplier_gcd, not_multiplied] = SplitSumByGcd(new_lhs);
if (multiplier_gcd != 1 && m % multiplier_gcd == 0) {
auto not_multiplied_range =
range_evaluator_->ComputeExpressionRange(not_multiplied);
if (not_multiplied_range == Interval{0, 0}) {
int64_t multiplier_mod_gcd = std::gcd(multiplier_gcd, m);
if (multiplier_mod_gcd == multiplier_gcd) {
new_lhs = multiplied;
} else if (multiplier_mod_gcd > 1) {
new_lhs = MapSummands(
multiplied, [&, multiplier_gcd = multiplier_gcd](AffineExpr expr) {
return expr * (multiplier_gcd / multiplier_mod_gcd);
});
}
return (new_lhs % (m / multiplier_mod_gcd)) * multiplier_mod_gcd;
} else if (Interval{0, multiplier_gcd - 1}.Contains(not_multiplied_range)) {
new_lhs = multiplied * multiplier_gcd;
return new_lhs % mod.getRHS() + not_multiplied;
}
}
return new_lhs == mod.getLHS() ? mod : (new_lhs % m);
}
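// Rewrites (a % m) floordiv d to (a floordiv d) % (m / d) when d divides m;
// returns null when the pattern does not apply.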
AffineExpr AffineExprSimplifier::SimplifyModDiv(AffineExpr dividend,
int64_t divisor) {
if (auto mod = GetConstantRhs(dividend, AffineExprKind::Mod);
mod && (*mod % divisor == 0)) {
return GetLhs(dividend).floorDiv(divisor) % (*mod / divisor);
}
return nullptr;
}
AffineExpr AffineExprSimplifier::SimplifyDivDiv(AffineExpr dividend,
int64_t divisor) {
if (auto inner_divisor = GetConstantRhs(dividend, AffineExprKind::FloorDiv)) {
return GetLhs(dividend).floorDiv(divisor * *inner_divisor);
}
return nullptr;
}
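// Simplifies a floordiv of a sum: extracts summands whose multipliers are
// divisible by the divisor, cancels the GCD shared by the divisor and the
// remaining multipliers, and folds a single nested floordiv into the divisor.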
AffineExpr AffineExprSimplifier::SimplifySumDiv(AffineExpr dividend,
int64_t divisor) {
AffineExpr extracted = zero_;
auto new_dividend = MapSummands(dividend, [&](AffineExpr expr) {
if (auto multiplier = GetConstantRhs(expr, AffineExprKind::Mul)) {
if (*multiplier % divisor == 0) {
int64_t factor = *multiplier / divisor;
extracted = extracted + GetLhs(expr) * factor;
return zero_;
}
}
return expr;
});
auto [multiplied, multiplier_gcd, not_multiplied] =
SplitSumByGcd(new_dividend);
int64_t multiplier_divisor_gcd = std::gcd(divisor, multiplier_gcd);
auto no_multiplier_range =
range_evaluator_->ComputeExpressionRange(not_multiplied);
if (multiplier_divisor_gcd != 1 &&
Interval{0, multiplier_divisor_gcd - 1}.Contains(no_multiplier_range)) {
new_dividend = multiplied * (multiplier_gcd / multiplier_divisor_gcd);
divisor /= multiplier_divisor_gcd;
} else if (no_multiplier_range.IsPoint() && no_multiplier_range.lower != 0) {
multiplier_divisor_gcd =
std::gcd(no_multiplier_range.lower, multiplier_divisor_gcd);
if (multiplier_divisor_gcd != 1) {
new_dividend = multiplied * (multiplier_gcd / multiplier_divisor_gcd) +
(no_multiplier_range.lower / multiplier_divisor_gcd);
divisor /= multiplier_divisor_gcd;
}
}
std::optional<int64_t> inner_divisor = std::nullopt;
int num_inner_divisors = 0;
VisitSummands(new_dividend, [&](AffineExpr summand) {
if (auto divisor = GetConstantRhs(summand, AffineExprKind::FloorDiv)) {
inner_divisor = divisor;
++num_inner_divisors;
}
});
if (num_inner_divisors == 1) {
new_dividend = MapSummands(new_dividend, [&](AffineExpr summand) {
if (auto inner_divisor =
GetConstantRhs(summand, AffineExprKind::FloorDiv)) {
return GetLhs(summand);
}
return summand * *inner_divisor;
});
divisor *= *inner_divisor;
}
if (new_dividend != dividend) {
return new_dividend.floorDiv(divisor) + extracted;
}
return nullptr;
}
AffineExpr AffineExprSimplifier::RewriteFloorDiv(AffineBinaryOpExpr div) {
auto rhs_range = range_evaluator_->ComputeExpressionRange(div.getRHS());
auto lhs_simplified = SimplifyOnce(div.getLHS());
if (!rhs_range.IsPoint()) {
return lhs_simplified.floorDiv(SimplifyOnce(div.getRHS()));
}
int64_t d = rhs_range.lower;
if (d > 1) {
if (auto result = SimplifyModDiv(lhs_simplified, d)) {
return result;
}
if (auto result = SimplifyDivDiv(lhs_simplified, d)) {
return result;
}
if (auto result = SimplifySumDiv(lhs_simplified, d)) {
return result;
}
}
return lhs_simplified != div.getLHS() ? lhs_simplified.floorDiv(d) : div;
}
mlir::AffineExpr AffineExprSimplifier::RewriteMul(
mlir::AffineBinaryOpExpr mul) {
auto rhs_range = range_evaluator_->ComputeExpressionRange(mul.getRHS());
if (!rhs_range.IsPoint()) {
return mul;
}
int64_t multiplier = rhs_range.lower;
auto lhs = SimplifyOnce(mul.getLHS());
if (lhs.getKind() == AffineExprKind::Add) {
return MapSummands(
lhs, [&](AffineExpr summand) { return summand * rhs_range.lower; });
}
if (multiplier == 1) {
return lhs;
}
if (lhs == mul.getLHS()) {
return mul;
}
return lhs * multiplier;
}
std::optional<int64_t> AffineExprSimplifier::GetConstantRhs(
AffineExpr expr, AffineExprKind kind) {
if (expr.getKind() != kind) {
return std::nullopt;
}
auto bound = range_evaluator_->ComputeExpressionRange(
mlir::cast<AffineBinaryOpExpr>(expr).getRHS());
if (!bound.IsPoint()) {
return std::nullopt;
}
return bound.lower;
}
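// Total order on affine expressions, used to emit canonicalized sums in a
// deterministic order; constants sort last, then by expression kind, then
// recursively by operands or by position/value.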
int CompareExprs(AffineExpr a, AffineExpr b) {
if ((b.getKind() == AffineExprKind::Constant) !=
(a.getKind() == AffineExprKind::Constant)) {
return a.getKind() == AffineExprKind::Constant ? 1 : -1;
}
if (a.getKind() < b.getKind()) {
return -1;
}
if (a.getKind() > b.getKind()) {
return 1;
}
assert(a.getKind() == b.getKind());
int64_t a_value = 0;
int64_t b_value = 0;
switch (a.getKind()) {
case AffineExprKind::Add:
case AffineExprKind::FloorDiv:
case AffineExprKind::CeilDiv:
case AffineExprKind::Mul:
case AffineExprKind::Mod: {
auto lhs = CompareExprs(GetLhs(a), GetLhs(b));
if (lhs != 0) {
return lhs;
}
return CompareExprs(GetRhs(a), GetRhs(b));
}
case AffineExprKind::Constant: {
a_value = mlir::cast<AffineConstantExpr>(a).getValue();
b_value = mlir::cast<AffineConstantExpr>(b).getValue();
break;
}
case AffineExprKind::SymbolId: {
a_value = mlir::cast<AffineSymbolExpr>(a).getPosition();
b_value = mlir::cast<AffineSymbolExpr>(b).getPosition();
break;
}
case AffineExprKind::DimId: {
a_value = mlir::cast<AffineDimExpr>(a).getPosition();
b_value = mlir::cast<AffineDimExpr>(b).getPosition();
break;
}
}
return a_value < b_value ? -1 : (a_value > b_value ? 1 : 0);
}
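// Canonicalizes a sum: groups equal summands, cancels matching
// (x floordiv c) * c and x % c pairs via x = c * (x floordiv c) + x % c, and
// re-emits the summands in a deterministic order.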
mlir::AffineExpr AffineExprSimplifier::RewriteSum(
mlir::AffineBinaryOpExpr sum) {
SmallVector<std::pair<AffineExpr, int64_t /*multiplier*/>> mods;
SmallVector<std::pair<AffineExpr, int64_t /*multiplier*/>> divs;
llvm::SmallDenseMap<AffineExpr, int64_t /*multiplier*/> summands;
VisitSummands(sum, [&](AffineExpr expr) {
AffineExpr simplified = SimplifyOnce(expr);
auto [lhs, multiplier] = ExtractMultiplier(simplified);
if (lhs.getKind() == AffineExprKind::Mod) {
mods.push_back({lhs, multiplier});
} else if (lhs.getKind() == AffineExprKind::FloorDiv) {
divs.push_back({lhs, multiplier});
} else {
summands[lhs] += multiplier;
}
});
if (mods.size() * divs.size() >= 100) {
std::string s;
llvm::raw_string_ostream ss(s);
ss << sum;
LOG(WARNING) << "Unexpectedly large number of mods and divs in " << s
<< ". Please open an issue on GitHub at "
<< "https:
}
if (!divs.empty()) {
for (int mod_i = 0; mod_i < mods.size(); ++mod_i) {
auto [mod, mod_mul] = mods[mod_i];
auto mod_c = GetConstantRhs(mod, AffineExprKind::Mod);
if (!mod_c) continue;
AffineExpr simplified_mod = Simplify(GetLhs(mod).floorDiv(*mod_c));
for (int div_i = 0; div_i < divs.size(); ++div_i) {
auto [div, div_mul] = divs[div_i];
if (simplified_mod != div) continue;
if ((div_mul % mod_mul) || (div_mul / mod_mul) != mod_c) continue;
summands[GetLhs(mod)] += mod_mul;
divs[div_i].first = nullptr;
mods[mod_i].first = nullptr;
break;
}
}
for (int div_i = 0; div_i < divs.size(); ++div_i) {
auto [div, div_mul] = divs[div_i];
if (!div || div_mul > 0) continue;
auto div_c = GetConstantRhs(div, AffineExprKind::FloorDiv);
if (!div_c || *div_c < 0 || (div_mul % *div_c)) continue;
int64_t b = div_mul / *div_c;
auto x = GetLhs(div);
VisitSummands(x, [&](AffineExpr summand) { summands[summand] += b; });
mods.push_back({x % *div_c, -b});
divs[div_i].first = nullptr;
}
}
for (auto [expr, mul] : mods) {
if (expr) {
summands[expr] += mul;
}
}
for (auto [expr, mul] : divs) {
if (expr) {
summands[expr] += mul;
}
}
SmallVector<AffineExpr, 4> expanded_summands;
for (auto [expr, mul] : summands) {
expanded_summands.push_back(expr * mul);
}
llvm::sort(expanded_summands,
[](AffineExpr a, AffineExpr b) { return CompareExprs(a, b) < 0; });
AffineExpr result = zero_;
for (auto expr : expanded_summands) {
result = result + expr;
}
return result;
}
AffineExpr AffineExprSimplifier::SimplifyOnce(AffineExpr expr) {
if (expr.getKind() == AffineExprKind::Constant) {
return expr;
}
auto bounds = range_evaluator_->ComputeExpressionRange(expr);
if (bounds.IsPoint()) {
return getAffineConstantExpr(bounds.lower,
range_evaluator_->GetMLIRContext());
}
switch (expr.getKind()) {
case AffineExprKind::Mul:
return RewriteMul(mlir::cast<AffineBinaryOpExpr>(expr));
case AffineExprKind::Add:
return RewriteSum(mlir::cast<AffineBinaryOpExpr>(expr));
case AffineExprKind::Mod:
return RewriteMod(mlir::cast<AffineBinaryOpExpr>(expr));
case AffineExprKind::FloorDiv:
return RewriteFloorDiv(mlir::cast<AffineBinaryOpExpr>(expr));
default:
return expr;
}
}
AffineExpr AffineExprSimplifier::Simplify(AffineExpr expr) {
while (true) {
auto simplified = SimplifyOnce(expr);
if (simplified == expr) {
return expr;
}
expr = simplified;
}
}
AffineMap AffineExprSimplifier::Simplify(AffineMap affine_map) {
SmallVector<AffineExpr, 4> results;
results.reserve(affine_map.getNumResults());
for (AffineExpr expr : affine_map.getResults()) {
results.push_back(Simplify(expr));
}
return AffineMap::get(affine_map.getNumDims(), affine_map.getNumSymbols(),
results, affine_map.getContext());
}
bool AffineExprSimplifier::SimplifyAddConstraint(AffineExpr* add,
Interval* range) {
if (add->getKind() != AffineExprKind::Add) {
return false;
}
auto rhs_range = range_evaluator_->ComputeExpressionRange(GetRhs(*add));
if (rhs_range.IsPoint()) {
*add = GetLhs(*add);
range->lower -= rhs_range.lower;
range->upper -= rhs_range.lower;
return true;
}
if (range->lower != 0) {
return false;
}
auto [multiplied, multiplier_gcd, not_multiplied] = SplitSumByGcd(*add);
if (multiplier_gcd == 1) {
return false;
}
Interval difference_range =
Interval{range->upper, range->upper} -
range_evaluator_->ComputeExpressionRange(not_multiplied);
if (!difference_range.FloorDiv(multiplier_gcd).IsPoint()) {
return false;
}
*add = multiplied * multiplier_gcd;
return true;
}
bool AffineExprSimplifier::SimplifyConstraintRangeOnce(AffineExpr* expr,
Interval* range) {
switch (expr->getKind()) {
case AffineExprKind::DimId:
case AffineExprKind::SymbolId:
case AffineExprKind::Constant: {
return false;
}
case AffineExprKind::Add:
return SimplifyAddConstraint(expr, range);
default: {
auto binary_op = mlir::cast<AffineBinaryOpExpr>(*expr);
CHECK(binary_op);
auto lhs = binary_op.getLHS();
auto rhs_range = range_evaluator_->ComputeExpressionRange(GetRhs(*expr));
if (!rhs_range.IsPoint()) {
return false;
}
int64_t rhs_cst = rhs_range.lower;
switch (expr->getKind()) {
case AffineExprKind::Mul: {
int64_t factor = rhs_cst;
if (factor < 0) {
factor *= -1;
range->lower *= -1;
range->upper *= -1;
std::swap(range->lower, range->upper);
}
range->lower = llvm::divideCeilSigned(range->lower, factor);
range->upper = llvm::divideFloorSigned(range->upper, factor);
*expr = lhs;
return true;
}
case AffineExprKind::FloorDiv: {
int64_t divisor = rhs_cst;
if (divisor < 0) {
divisor *= -1;
range->lower *= -1;
range->upper *= -1;
std::swap(range->lower, range->upper);
}
range->lower *= divisor;
range->upper = (range->upper + 1) * divisor - 1;
*expr = lhs;
return true;
}
default: {
return false;
}
}
}
}
}
bool AffineExprSimplifier::SimplifyConstraintRange(AffineExpr* expr,
Interval* range) {
bool is_simplified = false;
while (SimplifyConstraintRangeOnce(expr, range)) {
is_simplified = true;
}
return is_simplified;
}
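// AffineMap composition concatenates symbols as [second map's symbols,
// first map's symbols]; this builds the substitution that regroups them into
// the composed map's canonical [range_vars..., rt_vars...] order.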
SmallVector<AffineExpr, 4> GetComposedSymbolsPermutationToCorrectOrder(
const IndexingMap& first, const IndexingMap& second) {
if (second.GetRTVarsCount() == 0) {
return {};
}
SmallVector<AffineExpr, 4> symbol_replacements;
MLIRContext* mlir_context = first.GetMLIRContext();
for (int id = 0; id < second.GetRangeVarsCount(); ++id) {
symbol_replacements.push_back(getAffineSymbolExpr(id, mlir_context));
}
int64_t first_range_vars_count = first.GetRangeVarsCount();
int64_t second_range_vars_count = second.GetRangeVarsCount();
int64_t first_rt_vars_count = first.GetRTVarsCount();
int64_t second_rt_vars_count = second.GetRTVarsCount();
int64_t rt_vars_second_start =
first_range_vars_count + second_range_vars_count;
for (int64_t id = 0; id < second_rt_vars_count; ++id) {
symbol_replacements.push_back(
getAffineSymbolExpr(rt_vars_second_start++, mlir_context));
}
int64_t range_vars_first_start = second_range_vars_count;
for (int64_t id = 0; id < first_range_vars_count; ++id) {
symbol_replacements.push_back(
getAffineSymbolExpr(range_vars_first_start++, mlir_context));
}
int64_t rt_vars_first_start =
first_range_vars_count + second_range_vars_count + second_rt_vars_count;
for (int64_t id = 0; id < first_rt_vars_count; ++id) {
symbol_replacements.push_back(
getAffineSymbolExpr(rt_vars_first_start++, mlir_context));
}
return symbol_replacements;
}
SmallVector<AffineExpr, 4> MapSymbolsToComposedSymbolsList(
const IndexingMap& map, const IndexingMap& composed) {
SmallVector<AffineExpr, 4> symbol_replacements;
MLIRContext* mlir_context = map.GetMLIRContext();
int64_t range_vars_start =
composed.GetRangeVarsCount() - map.GetRangeVarsCount();
for (int64_t id = 0; id < map.GetRangeVarsCount(); ++id) {
symbol_replacements.push_back(
getAffineSymbolExpr(range_vars_start++, mlir_context));
}
int64_t rt_vars_start = composed.GetSymbolCount() - map.GetRTVarsCount();
for (int64_t id = 0; id < map.GetRTVarsCount(); ++id) {
symbol_replacements.push_back(
getAffineSymbolExpr(rt_vars_start++, mlir_context));
}
return symbol_replacements;
}
}
static constexpr std::string_view kVarKindDefault = "default";
static constexpr std::string_view kVarKindThreadX = "th_x";
static constexpr std::string_view kVarKindThreadY = "th_y";
static constexpr std::string_view kVarKindThreadZ = "th_z";
static constexpr std::string_view kVarKindBlockX = "bl_x";
static constexpr std::string_view kVarKindBlockY = "bl_y";
static constexpr std::string_view kVarKindBlockZ = "bl_z";
static constexpr std::string_view kVarKindWarp = "warp";
static constexpr std::string_view kVarKindWarpThread = "th_w";
std::string_view ToVariableName(VariableKind var_kind) {
switch (var_kind) {
case VariableKind::kDefault:
return kVarKindDefault;
case VariableKind::kThreadX:
return kVarKindThreadX;
case VariableKind::kThreadY:
return kVarKindThreadY;
case VariableKind::kThreadZ:
return kVarKindThreadZ;
case VariableKind::kBlockX:
return kVarKindBlockX;
case VariableKind::kBlockY:
return kVarKindBlockY;
case VariableKind::kBlockZ:
return kVarKindBlockZ;
case VariableKind::kWarp:
return kVarKindWarp;
case VariableKind::kWarpThread:
return kVarKindWarpThread;
}
llvm_unreachable("Unknown VariableType");
}
VariableKind ToVariableType(std::string_view var_name) {
if (var_name == kVarKindThreadX) return VariableKind::kThreadX;
if (var_name == kVarKindThreadY) return VariableKind::kThreadY;
if (var_name == kVarKindThreadZ) return VariableKind::kThreadZ;
if (var_name == kVarKindBlockX) return VariableKind::kBlockX;
if (var_name == kVarKindBlockY) return VariableKind::kBlockY;
if (var_name == kVarKindBlockZ) return VariableKind::kBlockZ;
if (var_name == kVarKindWarp) return VariableKind::kWarp;
if (var_name == kVarKindWarpThread) return VariableKind::kWarpThread;
return VariableKind::kDefault;
}
std::ostream& operator<<(std::ostream& out, VariableKind var_type) {
out << ToVariableName(var_type);
return out;
}
std::ostream& operator<<(std::ostream& out, const Interval& interval) {
out << absl::StrFormat("[%d, %d]", interval.lower, interval.upper);
return out;
}
std::string Interval::ToString() const {
std::stringstream ss;
ss << *this;
return ss.str();
}
inline llvm::raw_ostream& operator<<(llvm::raw_ostream& os,
const Interval& interval) {
os << absl::StrFormat("[%d, %d]", interval.lower, interval.upper);
return os;
}
int64_t Interval::GetLoopTripCount() const {
if (!IsFeasible()) {
return 0;
}
DCHECK((static_cast<absl::int128>(upper) - lower + 1) <=
std::numeric_limits<int64_t>::max());
return upper - lower + 1;
}
Interval::ComparisonResult Interval::Gt(const Interval& b) const {
if (!IsFeasible() || !b.IsFeasible()) {
return {std::nullopt};
}
if (lower > b.upper) {
return {true};
}
if (upper <= b.lower) {
return {false};
}
return {std::nullopt};
}
Interval::ComparisonResult Interval::Eq(const Interval& b) const {
Interval intersection = Intersect(b);
if (!intersection.IsFeasible()) return {false};
if (intersection.IsPoint() && IsPoint() && b.IsPoint()) {
return {true};
}
return {std::nullopt};
}
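// Saturating addition: when a bound overflows int64_t, it is clamped to the
// corresponding limit instead of wrapping.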
Interval Interval::operator+(const Interval& rhs) const {
int64_t out_lower;
int64_t out_upper;
constexpr int64_t kMin = std::numeric_limits<int64_t>::min();
constexpr int64_t kMax = std::numeric_limits<int64_t>::max();
bool lower_overflow = llvm::AddOverflow(lower, rhs.lower, out_lower);
bool upper_overflow = llvm::AddOverflow(upper, rhs.upper, out_upper);
if (lower_overflow || lower == kMin || rhs.lower == kMin) {
if (lower < 0 || rhs.lower < 0) {
out_lower = kMin;
} else {
out_lower = kMax;
out_upper = kMax;
}
}
if (upper_overflow || upper == kMax || rhs.upper == kMax) {
if (upper > 0 || rhs.upper > 0) {
out_upper = kMax;
} else {
out_upper = kMin;
out_lower = kMin;
}
}
return {out_lower, out_upper};
}
Interval Interval::operator*(const Interval& rhs) const {
constexpr int64_t kMin = std::numeric_limits<int64_t>::min();
constexpr int64_t kMax = std::numeric_limits<int64_t>::max();
auto mul = [&](int64_t p) {
int64_t l = lower;
int64_t u = upper;
if (p < 0) {
std::swap(l, u);
}
int64_t out_lower;
int64_t out_upper;
if (llvm::MulOverflow(l, p, out_lower) ||
(p == -1 && l == kMax)) {
out_lower = kMin;
}
if (llvm::MulOverflow(u, p, out_upper)) {
out_upper = kMax;
}
return Interval{out_lower, out_upper};
};
return mul(rhs.lower).Union(mul(rhs.upper));
}
Interval Interval::operator-() const {
int64_t ub = lower == std::numeric_limits<int64_t>::min()
? std::numeric_limits<int64_t>::max()
: -lower;
int64_t lb = upper == std::numeric_limits<int64_t>::max()
? std::numeric_limits<int64_t>::min()
: -upper;
return Interval{lb, ub};
}
Interval Interval::FloorDiv(int64_t rhs) const {
auto saturate_div = [](int64_t lhs, int64_t rhs) {
constexpr int64_t kMin = std::numeric_limits<int64_t>::min();
constexpr int64_t kMax = std::numeric_limits<int64_t>::max();
if (lhs == kMin) {
return rhs > 0 ? kMin : kMax;
}
if (lhs == kMax) {
return rhs > 0 ? kMax : kMin;
}
return llvm::divideFloorSigned(lhs, rhs);
};
int64_t a = saturate_div(lower, rhs);
int64_t b = saturate_div(upper, rhs);
return {std::min(a, b), std::max(a, b)};
}
bool operator==(const IndexingMap::Variable& lhs,
const IndexingMap::Variable& rhs) {
return lhs.bounds == rhs.bounds;
}
std::vector<IndexingMap::Variable> DimVarsFromTensorSizes(
absl::Span<const int64_t> tensor_sizes) {
std::vector<IndexingMap::Variable> ranges;
ranges.reserve(tensor_sizes.size());
for (int64_t size : tensor_sizes) {
ranges.push_back(IndexingMap::Variable{0, size - 1});
}
return ranges;
}
std::vector<IndexingMap::Variable> DimVarsFromGPUGrid(
absl::Span<const int64_t> grid_sizes) {
CHECK_EQ(grid_sizes.size(), 6)
<< "Grid must be 6-dimensional (th_x, th_y, th_z, bl_x, bl_y, bl_z)";
return {
IndexingMap::Variable{0, grid_sizes[0] - 1, kVarKindThreadX},
IndexingMap::Variable{0, grid_sizes[1] - 1, kVarKindThreadY},
IndexingMap::Variable{0, grid_sizes[2] - 1, kVarKindThreadZ},
IndexingMap::Variable{0, grid_sizes[3] - 1, kVarKindBlockX},
IndexingMap::Variable{0, grid_sizes[4] - 1, kVarKindBlockY},
IndexingMap::Variable{0, grid_sizes[5] - 1, kVarKindBlockZ},
};
}
std::vector<IndexingMap::Variable> RangeVarsFromTensorSizes(
absl::Span<const int64_t> tensor_sizes) {
std::vector<IndexingMap::Variable> ranges;
ranges.reserve(tensor_sizes.size());
for (int64_t size : tensor_sizes) {
ranges.push_back({IndexingMap::Variable{0, size - 1}});
}
return ranges;
}
IndexingMap::IndexingMap(
AffineMap affine_map, std::vector<IndexingMap::Variable> dimensions,
std::vector<IndexingMap::Variable> range_vars,
std::vector<IndexingMap::Variable> rt_vars,
absl::Span<std::pair<AffineExpr, Interval> const> constraints)
: affine_map_(affine_map),
dim_vars_(std::move(dimensions)),
range_vars_(std::move(range_vars)),
rt_vars_(std::move(rt_vars)) {
if (!VerifyVariableIntervals()) {
ResetToKnownEmpty();
return;
}
for (const auto& [expr, range] : constraints) {
AddConstraint(expr, range);
}
}
IndexingMap::IndexingMap(
AffineMap affine_map, std::vector<IndexingMap::Variable> dimensions,
std::vector<IndexingMap::Variable> range_vars,
std::vector<IndexingMap::Variable> rt_vars,
const llvm::DenseMap<AffineExpr, Interval>& constraints)
: affine_map_(affine_map),
dim_vars_(std::move(dimensions)),
range_vars_(std::move(range_vars)),
rt_vars_(std::move(rt_vars)),
constraints_(constraints) {
if (!VerifyVariableIntervals() || !VerifyConstraintIntervals()) {
ResetToKnownEmpty();
return;
}
}
IndexingMap IndexingMap::FromTensorSizes(
AffineMap affine_map, absl::Span<const int64_t> dim_upper_bounds,
absl::Span<const int64_t> symbol_upper_bounds) {
return IndexingMap{affine_map, DimVarsFromTensorSizes(dim_upper_bounds),
RangeVarsFromTensorSizes(symbol_upper_bounds),
/*rt_vars=*/{}};
}
RangeEvaluator IndexingMap::GetRangeEvaluator() const {
return RangeEvaluator(*this, GetMLIRContext());
}
const Interval& IndexingMap::GetDimensionBound(int64_t dim_id) const {
return dim_vars_[dim_id].bounds;
}
Interval& IndexingMap::GetMutableDimensionBound(int64_t dim_id) {
return dim_vars_[dim_id].bounds;
}
std::vector<Interval> IndexingMap::GetDimensionBounds() const {
std::vector<Interval> bounds;
bounds.reserve(affine_map_.getNumDims());
for (const auto& dim : dim_vars_) {
bounds.push_back(dim.bounds);
}
return bounds;
}
const Interval& IndexingMap::GetSymbolBound(int64_t symbol_id) const {
int64_t range_var_count = GetRangeVarsCount();
return symbol_id < range_var_count
? range_vars_[symbol_id].bounds
: rt_vars_[symbol_id - range_var_count].bounds;
}
Interval& IndexingMap::GetMutableSymbolBound(int64_t symbol_id) {
int64_t range_var_count = GetRangeVarsCount();
return symbol_id < range_var_count
? range_vars_[symbol_id].bounds
: rt_vars_[symbol_id - range_var_count].bounds;
}
std::vector<Interval> IndexingMap::GetSymbolBounds() const {
std::vector<Interval> bounds;
bounds.reserve(affine_map_.getNumSymbols());
for (const auto& range_var : range_vars_) {
bounds.push_back(range_var.bounds);
}
for (const auto& rt_var : rt_vars_) {
bounds.push_back(rt_var.bounds);
}
return bounds;
}
void IndexingMap::AddConstraint(mlir::AffineExpr expr, Interval range) {
if (IsKnownEmpty()) {
return;
}
if (!range.IsFeasible()) {
ResetToKnownEmpty();
return;
}
if (auto dim_expr = mlir::dyn_cast<AffineDimExpr>(expr)) {
Interval& current_range = GetMutableDimensionBound(dim_expr.getPosition());
current_range = current_range.Intersect(range);
if (!current_range.IsFeasible()) ResetToKnownEmpty();
return;
}
if (auto symbol_expr = mlir::dyn_cast<AffineSymbolExpr>(expr)) {
Interval& current_range = GetMutableSymbolBound(symbol_expr.getPosition());
current_range = current_range.Intersect(range);
if (!current_range.IsFeasible()) ResetToKnownEmpty();
return;
}
if (auto constant_expr = mlir::dyn_cast<AffineConstantExpr>(expr)) {
if (!range.Contains(constant_expr.getValue())) {
ResetToKnownEmpty();
}
return;
}
auto [it, inserted] = constraints_.insert({expr, range});
if (!inserted) {
it->second = it->second.Intersect(range);
if (!it->second.IsFeasible()) {
ResetToKnownEmpty();
}
}
}
void IndexingMap::EraseConstraint(mlir::AffineExpr expr) {
constraints_.erase(expr);
}
bool IndexingMap::ConstraintsSatisfied(
ArrayRef<AffineExpr> dim_const_exprs,
ArrayRef<AffineExpr> symbol_const_exprs) const {
CHECK(dim_const_exprs.size() == affine_map_.getNumDims());
CHECK(symbol_const_exprs.size() == affine_map_.getNumSymbols());
if (IsKnownEmpty()) {
return false;
}
for (auto& [expr, range] : constraints_) {
int64_t expr_value =
mlir::cast<AffineConstantExpr>(
expr.replaceDimsAndSymbols(dim_const_exprs, symbol_const_exprs))
.getValue();
if (expr_value < range.lower || expr_value > range.upper) {
return false;
}
}
return true;
}
SmallVector<int64_t, 4> IndexingMap::Evaluate(
ArrayRef<AffineExpr> dim_const_exprs,
ArrayRef<AffineExpr> symbol_const_exprs) const {
CHECK(dim_const_exprs.size() == GetDimensionCount());
CHECK(symbol_const_exprs.size() == GetSymbolCount());
AffineMap eval = affine_map_.replaceDimsAndSymbols(
dim_const_exprs, symbol_const_exprs, dim_const_exprs.size(),
symbol_const_exprs.size());
return eval.getConstantResults();
}
bool IndexingMap::IsSymbolConstrained(int64_t symbol_id) const {
for (const auto& [expr, _] : constraints_) {
bool result = false;
expr.walk([&](mlir::AffineExpr leaf) {
auto sym = mlir::dyn_cast<mlir::AffineSymbolExpr>(leaf);
if (sym && sym.getPosition() == symbol_id) {
result = true;
}
});
if (result) return true;
}
return false;
}
RangeEvaluator::RangeEvaluator(const IndexingMap& indexing_map,
MLIRContext* mlir_context, bool use_constraints)
: mlir_context_(mlir_context),
indexing_map_(indexing_map),
use_constraints_(use_constraints) {}
bool RangeEvaluator::IsAlwaysPositiveOrZero(mlir::AffineExpr expr) {
return ComputeExpressionRange(expr).lower >= 0;
}
bool RangeEvaluator::IsAlwaysNegativeOrZero(mlir::AffineExpr expr) {
return ComputeExpressionRange(expr).upper <= 0;
}
Interval RangeEvaluator::ComputeExpressionRange(AffineExpr expr) {
switch (expr.getKind()) {
case AffineExprKind::Constant: {
int64_t value = mlir::cast<AffineConstantExpr>(expr).getValue();
return Interval{value, value};
}
case AffineExprKind::DimId:
return indexing_map_.GetDimensionBound(
mlir::cast<AffineDimExpr>(expr).getPosition());
case AffineExprKind::SymbolId:
return indexing_map_.GetSymbolBound(
mlir::cast<AffineSymbolExpr>(expr).getPosition());
default:
break;
}
auto binary_op = mlir::dyn_cast<AffineBinaryOpExpr>(expr);
CHECK(binary_op);
auto lhs = ComputeExpressionRange(binary_op.getLHS());
auto rhs = ComputeExpressionRange(binary_op.getRHS());
Interval result;
switch (expr.getKind()) {
case AffineExprKind::Add:
result = lhs + rhs;
break;
case AffineExprKind::Mul:
result = lhs * rhs;
break;
case AffineExprKind::Mod: {
CHECK(rhs.IsPoint()) << "RHS of mod must be a constant";
int64_t m = rhs.lower;
if (0 <= lhs.lower && lhs.upper < m) {
result = lhs;
} else {
result = {0, m - 1};
}
break;
}
case AffineExprKind::FloorDiv: {
CHECK(rhs.IsPoint()) << "RHS of floor_div must be a constant";
int64_t d = rhs.lower;
int64_t a = llvm::divideFloorSigned(lhs.lower, d);
int64_t b = llvm::divideFloorSigned(lhs.upper, d);
result = {std::min(a, b), std::max(a, b)};
break;
}
default:
LOG(FATAL) << "Unsupported expression";
}
if (use_constraints_) {
auto constraint = indexing_map_.GetConstraints().find(expr);
if (constraint != indexing_map_.GetConstraints().end()) {
return result.Intersect(constraint->second);
}
}
return result;
}
MLIRContext* IndexingMap::GetMLIRContext() const {
return IsUndefined() ? nullptr : affine_map_.getContext();
}
bool operator==(const IndexingMap& lhs, const IndexingMap& rhs) {
return lhs.GetAffineMap() == rhs.GetAffineMap() &&
lhs.GetDimVars() == rhs.GetDimVars() &&
lhs.GetRangeVars() == rhs.GetRangeVars() &&
lhs.GetRTVars() == rhs.GetRTVars() &&
lhs.GetConstraints() == rhs.GetConstraints();
}
IndexingMap operator*(const IndexingMap& lhs, const IndexingMap& rhs) {
return ComposeIndexingMaps(lhs, rhs);
}
bool IndexingMap::Verify(std::ostream& out) const {
if (IsUndefined()) {
return true;
}
if (affine_map_.getNumDims() != dim_vars_.size()) {
out << "dim size must match the number of dimensions in "
"the affine map";
return false;
}
if (affine_map_.getNumSymbols() != range_vars_.size() + rt_vars_.size()) {
out << "range vars size + rt var size must match the number of "
"symbols in the affine map";
return false;
}
return true;
}
bool IndexingMap::Simplify() {
if (IsUndefined() || IsKnownEmpty()) return false;
bool constraints_were_simplified = false;
RangeEvaluator constraint_range_evaluator(*this, GetMLIRContext(),
/*use_constraints=*/false);
AffineExprSimplifier constraint_simplifier(&constraint_range_evaluator);
while (true) {
bool did_simplify = false;
did_simplify |= constraint_simplifier.SimplifyConstraintExprs(*this);
did_simplify |= constraint_simplifier.SimplifyConstraintRanges(*this);
if (!did_simplify) {
break;
}
constraints_were_simplified = true;
}
constraints_were_simplified |= MergeModConstraints();
RangeEvaluator range_evaluator(*this, GetMLIRContext(),
/*use_constraints=*/true);
AffineMap simplified_affine_map =
AffineExprSimplifier(&range_evaluator).Simplify(affine_map_);
bool affine_map_was_simplified = simplified_affine_map != affine_map_;
if (affine_map_was_simplified) {
affine_map_ = simplified_affine_map;
}
return affine_map_was_simplified || constraints_were_simplified;
}
bool AffineExprSimplifier::SimplifyConstraintExprs(IndexingMap& map) {
std::vector<AffineExpr> to_remove;
std::vector<std::pair<AffineExpr, Interval>> to_add;
for (const auto& [expr, range] : map.GetConstraints()) {
AffineExpr simplified = Simplify(expr);
Interval evaluated_range =
range_evaluator_->ComputeExpressionRange(simplified);
if (evaluated_range.upper <= range.upper &&
evaluated_range.lower >= range.lower) {
to_remove.push_back(expr);
continue;
}
if (simplified == expr) continue;
to_add.push_back({simplified, range});
to_remove.push_back(expr);
}
for (const auto& expr : to_remove) {
map.EraseConstraint(expr);
}
for (const auto& [expr, range] : to_add) {
map.AddConstraint(expr, range);
}
return !to_add.empty();
}
bool AffineExprSimplifier::SimplifyConstraintRanges(IndexingMap& map) {
std::vector<AffineExpr> to_remove;
std::vector<std::pair<AffineExpr, Interval>> to_add;
for (const auto& [expr, range] : map.GetConstraints()) {
AffineExpr simplified_expr = expr;
Interval simplified_range = range;
if (SimplifyConstraintRange(&simplified_expr, &simplified_range)) {
to_add.push_back({simplified_expr, simplified_range});
to_remove.push_back(expr);
}
}
for (const auto& expr : to_remove) {
map.EraseConstraint(expr);
}
for (const auto& [expr, range] : to_add) {
map.AddConstraint(expr, range);
}
return !to_add.empty();
}
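// Splits a sum so that sum = multiplied * multiplier_gcd + not_multiplied,
// where multiplier_gcd is the GCD of all constant multipliers; returns
// {0, 1, sum} when there is no common factor to extract.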
std::tuple<AffineExpr, int64_t, AffineExpr> AffineExprSimplifier::SplitSumByGcd(
AffineExpr sum) {
std::optional<int64_t> multiplier_gcd = std::nullopt;
AffineExpr no_multiplier = zero_;
VisitSummands(sum, [&](AffineExpr expr) {
if (auto multiplier = GetConstantRhs(expr, AffineExprKind::Mul)) {
if (multiplier_gcd.has_value()) {
multiplier_gcd = std::gcd(*multiplier_gcd, *multiplier);
} else {
multiplier_gcd = *multiplier;
}
}
});
if (multiplier_gcd.value_or(1) == 1) {
return {zero_, 1, sum};
}
auto scaled = MapSummands(sum, [&](AffineExpr expr) {
if (auto multiplier = GetConstantRhs(expr, AffineExprKind::Mul)) {
return GetLhs(expr) * (*multiplier / *multiplier_gcd);
}
no_multiplier = no_multiplier + expr;
return zero_;
});
return {scaled, *multiplier_gcd, no_multiplier};
}
namespace {
struct UsedParameters {
llvm::DenseSet<int64_t> dimension_ids;
llvm::DenseSet<int64_t> symbol_ids;
};
void GetUsedParametersImpl(const AffineExpr& expr,
UsedParameters& used_parameters) {
if (auto dim_expr = mlir::dyn_cast<AffineDimExpr>(expr)) {
used_parameters.dimension_ids.insert(dim_expr.getPosition());
return;
}
if (auto symbol_expr = mlir::dyn_cast<AffineSymbolExpr>(expr)) {
used_parameters.symbol_ids.insert(symbol_expr.getPosition());
return;
}
if (auto binary_expr = mlir::dyn_cast<AffineBinaryOpExpr>(expr)) {
GetUsedParametersImpl(binary_expr.getLHS(), used_parameters);
GetUsedParametersImpl(binary_expr.getRHS(), used_parameters);
}
}
UsedParameters GetUsedParameters(const mlir::AffineExpr& expr) {
UsedParameters used_parameters;
GetUsedParametersImpl(expr, used_parameters);
return used_parameters;
}
bool IsFunctionOfUnusedVarsOnly(const UsedParameters& used_parameters,
const SmallBitVector& unused_dims_bit_vector,
const SmallBitVector& unused_symbols_bit_vector,
bool removing_dims, bool removing_symbols) {
if (!used_parameters.dimension_ids.empty() && !removing_dims) {
return false;
}
if (!used_parameters.symbol_ids.empty() && !removing_symbols) {
return false;
}
for (int64_t dim_id : used_parameters.dimension_ids) {
if (!unused_dims_bit_vector[dim_id]) return false;
}
for (int64_t symbol_id : used_parameters.symbol_ids) {
if (!unused_symbols_bit_vector[symbol_id]) return false;
}
return true;
}
struct UnusedVariables {
SmallBitVector unused_dims;
SmallBitVector unused_symbols;
SmallVector<AffineExpr> constraints_with_unused_vars_only;
};
UnusedVariables DetectUnusedVariables(const IndexingMap& indexing_map,
bool removing_dims,
bool removing_symbols) {
AffineMap affine_map = indexing_map.GetAffineMap();
UnusedVariables unused_vars;
unused_vars.unused_dims = mlir::getUnusedDimsBitVector({affine_map});
unused_vars.unused_symbols = mlir::getUnusedSymbolsBitVector({affine_map});
SmallVector<std::pair<AffineExpr, UsedParameters>, 2>
unused_constraints_candidates;
for (const auto& [expr, range] : indexing_map.GetConstraints()) {
UsedParameters used_parameters = GetUsedParameters(expr);
if (IsFunctionOfUnusedVarsOnly(used_parameters, unused_vars.unused_dims,
unused_vars.unused_symbols, removing_dims,
removing_symbols)) {
unused_constraints_candidates.push_back({expr, used_parameters});
continue;
}
for (int64_t dim_id : used_parameters.dimension_ids) {
unused_vars.unused_dims[dim_id] = false;
}
for (int64_t symbol_id : used_parameters.symbol_ids) {
unused_vars.unused_symbols[symbol_id] = false;
}
}
for (const auto& [expr, used_parameters] : unused_constraints_candidates) {
if (IsFunctionOfUnusedVarsOnly(used_parameters, unused_vars.unused_dims,
unused_vars.unused_symbols, removing_dims,
removing_symbols)) {
unused_vars.constraints_with_unused_vars_only.push_back(expr);
}
}
return unused_vars;
}
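// Returns the concatenation of `lhs` and `rhs`.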
SmallBitVector ConcatenateBitVectors(const SmallBitVector& lhs,
const SmallBitVector& rhs) {
SmallBitVector concat(lhs.size() + rhs.size(), false);
int id = 0;
for (int i = 0; i < lhs.size(); ++i, ++id) {
concat[id] = lhs[i];
}
for (int i = 0; i < rhs.size(); ++i, ++id) {
concat[id] = rhs[i];
}
return concat;
}
}
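// Drops the variables marked in `unused_dims`/`unused_symbols` from the
// affine map, renumbers the surviving variables, and rewrites the
// constraints accordingly. Returns true if anything was removed.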
bool IndexingMap::CompressVars(const llvm::SmallBitVector& unused_dims,
const llvm::SmallBitVector& unused_symbols) {
MLIRContext* mlir_context = GetMLIRContext();
bool num_dims_changed = unused_dims.count() > 0;
bool num_symbols_changed = unused_symbols.count() > 0;
if (!num_dims_changed && !num_symbols_changed) return false;
unsigned num_dims_before = GetDimensionCount();
unsigned num_symbols_before = GetSymbolCount();
SmallVector<AffineExpr, 2> dim_replacements;
if (num_dims_changed) {
affine_map_ = mlir::compressDims(affine_map_, unused_dims);
std::vector<IndexingMap::Variable> compressed_dim_vars;
dim_replacements = SmallVector<AffineExpr, 2>(
num_dims_before, getAffineConstantExpr(0, mlir_context));
int64_t used_dims_count = 0;
for (int i = 0; i < unused_dims.size(); ++i) {
if (!unused_dims[i]) {
compressed_dim_vars.push_back(dim_vars_[i]);
dim_replacements[i] = getAffineDimExpr(used_dims_count++, mlir_context);
}
}
dim_vars_ = std::move(compressed_dim_vars);
}
SmallVector<AffineExpr, 2> symbol_replacements;
if (num_symbols_changed) {
affine_map_ = mlir::compressSymbols(affine_map_, unused_symbols);
symbol_replacements = SmallVector<AffineExpr, 2>(
num_symbols_before, getAffineConstantExpr(0, mlir_context));
std::vector<IndexingMap::Variable> compressed_range_vars;
std::vector<IndexingMap::Variable> compressed_rt_vars;
int64_t used_symbols_count = 0;
auto range_vars_count = range_vars_.size();
for (int i = 0; i < unused_symbols.size(); ++i) {
if (!unused_symbols[i]) {
if (i < range_vars_count) {
compressed_range_vars.push_back(range_vars_[i]);
} else {
compressed_rt_vars.push_back(rt_vars_[i - range_vars_count]);
}
symbol_replacements[i] =
getAffineSymbolExpr(used_symbols_count++, mlir_context);
}
}
range_vars_ = std::move(compressed_range_vars);
rt_vars_ = std::move(compressed_rt_vars);
}
std::vector<AffineExpr> to_remove;
std::vector<std::pair<AffineExpr, Interval>> to_add;
for (const auto& [expr, range] : constraints_) {
auto updated_expr =
expr.replaceDimsAndSymbols(dim_replacements, symbol_replacements);
if (updated_expr == expr) continue;
to_add.push_back({updated_expr, range});
to_remove.push_back(expr);
}
for (const auto& expr : to_remove) {
constraints_.erase(expr);
}
for (const auto& [expr, range] : to_add) {
AddConstraint(expr, range);
}
return true;
}
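// Removes symbols that are unused by both the affine map and the
// constraints; returns a bit vector marking the removed symbols (empty if
// nothing changed).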
SmallBitVector IndexingMap::RemoveUnusedSymbols() {
if (IsUndefined()) return {};
if (GetSymbolCount() == 0) return {};
  UnusedVariables unused_vars = DetectUnusedVariables(
      *this, /*removing_dims=*/false, /*removing_symbols=*/true);
for (AffineExpr expr : unused_vars.constraints_with_unused_vars_only) {
constraints_.erase(expr);
}
if (!CompressVars({}, unused_vars.unused_symbols)) {
return {};
}
return std::move(unused_vars.unused_symbols);
}
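// Turns the map into the canonical "known empty" form: all results become
// 0, dimension and range variable bounds collapse to the empty interval
// [0, -1], and all constraints are dropped.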
void IndexingMap::ResetToKnownEmpty() {
auto zero = getAffineConstantExpr(0, GetMLIRContext());
affine_map_ = AffineMap::get(
affine_map_.getNumDims(), affine_map_.getNumSymbols(),
llvm::SmallVector<AffineExpr>(affine_map_.getNumResults(), zero),
GetMLIRContext());
for (auto& dim_var : dim_vars_) {
dim_var.bounds = Interval{0, -1};
}
for (auto& range_var : range_vars_) {
range_var.bounds = Interval{0, -1};
}
constraints_.clear();
is_known_empty_ = true;
}
bool IndexingMap::VerifyVariableIntervals() {
return llvm::all_of(dim_vars_,
[](const IndexingMap::Variable& dim_var) {
return dim_var.bounds.IsFeasible();
}) &&
llvm::all_of(range_vars_,
[](const IndexingMap::Variable& range_var) {
return range_var.bounds.IsFeasible();
}) &&
llvm::all_of(rt_vars_, [](const IndexingMap::Variable& rt_var) {
return rt_var.bounds.IsFeasible();
});
}
bool IndexingMap::VerifyConstraintIntervals() {
return llvm::all_of(constraints_, [](const auto& constraint) {
return constraint.second.IsFeasible();
});
}
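// Removes dims and symbols that are unused by both the affine map and the
// constraints; returns the removal bit vectors concatenated (dims first).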
SmallBitVector IndexingMap::RemoveUnusedVars() {
if (IsUndefined()) return {};
  UnusedVariables unused_vars = DetectUnusedVariables(
      *this, /*removing_dims=*/true, /*removing_symbols=*/true);
for (AffineExpr expr : unused_vars.constraints_with_unused_vars_only) {
constraints_.erase(expr);
}
if (!CompressVars(unused_vars.unused_dims, unused_vars.unused_symbols)) {
return {};
}
return ConcatenateBitVectors(unused_vars.unused_dims,
unused_vars.unused_symbols);
}
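// Merges point constraints `x mod a in [r, r]` and `x mod b in [r, r]` on
// the same `x` into `x mod lcm(a, b) in [r, r]`; e.g. `s0 mod 2 in [0, 0]`
// and `s0 mod 3 in [0, 0]` become `s0 mod 6 in [0, 0]`. If `x` is a dim or
// symbol, its bounds are also tightened to the smallest and largest
// in-range values satisfying the congruence.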
bool IndexingMap::MergeModConstraints() {
  RangeEvaluator range_evaluator(*this, GetMLIRContext(),
                                 /*use_constraints=*/false);
bool did_simplify = false;
llvm::DenseMap<AffineExpr, llvm::SmallVector<AffineBinaryOpExpr, 2>>
grouped_constraints;
for (const auto& [expr, _] : constraints_) {
if (expr.getKind() != AffineExprKind::Mod) continue;
auto binop = mlir::cast<AffineBinaryOpExpr>(expr);
grouped_constraints[binop.getLHS()].push_back(binop);
}
for (const auto& [lhs, binops] : grouped_constraints) {
llvm::DenseMap<int64_t, llvm::SmallVector<AffineBinaryOpExpr, 2>>
mod_groups;
for (const auto& binop : binops) {
Interval mod_result = constraints_[binop];
if (mod_result.IsPoint()) {
mod_groups[mod_result.lower].push_back(binop);
}
}
if (mod_groups.empty()) continue;
Interval* interval_to_update = nullptr;
if (lhs.getKind() == AffineExprKind::DimId) {
interval_to_update = &GetMutableDimensionBound(
mlir::cast<AffineDimExpr>(lhs).getPosition());
} else if (lhs.getKind() == AffineExprKind::SymbolId) {
interval_to_update = &GetMutableSymbolBound(
mlir::cast<AffineSymbolExpr>(lhs).getPosition());
}
for (const auto& [res, ops] : mod_groups) {
int64_t div = 1;
for (const auto& op : ops) {
int64_t rhs_value =
range_evaluator.ComputeExpressionRange(op.getRHS()).lower;
div = std::lcm(div, rhs_value);
}
if (ops.size() > 1) {
for (const auto& op : ops) {
constraints_.erase(op);
}
constraints_[lhs % div] = Interval{res, res};
did_simplify = true;
}
if (interval_to_update != nullptr) {
Interval old = *interval_to_update;
int64_t l = (interval_to_update->lower / div) * div + res;
interval_to_update->lower =
l >= interval_to_update->lower ? l : l + div;
int64_t h = (interval_to_update->upper / div) * div + res;
interval_to_update->upper =
h <= interval_to_update->upper ? h : h - div;
if (*interval_to_update != old) {
did_simplify = true;
}
}
}
}
return did_simplify;
}
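// Returns the composition second(first(dims, symbols)). The symbols of the
// two maps are concatenated (second's first), both maps' constraints are
// carried over (remapped into the composed variable space), and each
// result of `first` is additionally constrained to the corresponding
// dimension bound of `second`.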
IndexingMap ComposeIndexingMaps(const IndexingMap& first,
const IndexingMap& second) {
if (second.IsUndefined() || first.IsUndefined()) {
return IndexingMap::GetUndefined();
}
MLIRContext* mlir_context = first.GetMLIRContext();
AffineMap producer_affine_map = second.GetAffineMap();
AffineMap composed_map = producer_affine_map.compose(first.GetAffineMap());
std::vector<IndexingMap::Variable> combined_range_vars;
combined_range_vars.reserve(second.GetRangeVarsCount() +
first.GetRangeVarsCount());
for (const IndexingMap::Variable& range_var :
llvm::concat<const IndexingMap::Variable>(second.GetRangeVars(),
first.GetRangeVars())) {
combined_range_vars.push_back(range_var);
}
std::vector<IndexingMap::Variable> combined_rt_vars;
combined_rt_vars.reserve(second.GetRTVarsCount() + first.GetRTVarsCount());
for (const IndexingMap::Variable& rt_var :
llvm::concat<const IndexingMap::Variable>(second.GetRTVars(),
first.GetRTVars())) {
combined_rt_vars.push_back(rt_var);
}
SmallVector<AffineExpr, 4> symbol_replacements =
GetComposedSymbolsPermutationToCorrectOrder(first, second);
if (!symbol_replacements.empty()) {
composed_map = composed_map.replaceDimsAndSymbols(
{}, symbol_replacements, composed_map.getNumDims(),
composed_map.getNumSymbols());
}
IndexingMap composed_indexing_map(composed_map, first.GetDimVars(),
std::move(combined_range_vars),
std::move(combined_rt_vars));
std::vector<AffineExpr> constraints;
std::vector<Interval> constraints_ranges;
for (const auto& [expr, range] : second.GetConstraints()) {
constraints.push_back(expr);
constraints_ranges.push_back(range);
}
auto constraints_map = AffineMap::get(producer_affine_map.getNumDims(),
producer_affine_map.getNumSymbols(),
constraints, mlir_context);
auto remapped_constraints =
constraints_map.compose(first.GetAffineMap())
.replaceDimsAndSymbols({}, symbol_replacements,
composed_indexing_map.GetDimensionCount(),
composed_indexing_map.GetSymbolCount());
for (const auto& [expr, range] :
llvm::zip(remapped_constraints.getResults(), constraints_ranges)) {
composed_indexing_map.AddConstraint(expr, range);
}
SmallVector<AffineExpr, 4> first_map_symbols_to_composed_symbols =
MapSymbolsToComposedSymbolsList(first, composed_indexing_map);
for (const auto& [expr, range] : first.GetConstraints()) {
composed_indexing_map.AddConstraint(
expr.replaceSymbols(first_map_symbols_to_composed_symbols), range);
}
for (auto [index, expr] :
llvm::enumerate(first.GetAffineMap().getResults())) {
Interval producer_dim_range =
second.GetDimensionBound(static_cast<int64_t>(index));
composed_indexing_map.AddConstraint(
expr.replaceSymbols(first_map_symbols_to_composed_symbols),
producer_dim_range);
}
return composed_indexing_map;
}
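// For every point constraint `s mod m in [r, r]` on a symbol, substitutes
// s -> s * m + r everywhere and shrinks the symbol's bounds by a factor of
// m, making the mod constraint redundant; e.g. `s0 in [0, 41]` with
// `s0 mod 6 in [3, 3]` becomes `s0 * 6 + 3` with `s0 in [0, 6]`.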
bool IndexingMap::RescaleSymbols() {
MergeModConstraints();
llvm::DenseSet<AffineExpr> to_delete;
llvm::DenseMap<AffineExpr, AffineExpr> to_replace;
for (const auto& [expr, range] : constraints_) {
if (range.lower != range.upper) continue;
auto shift_value = range.lower;
if (expr.getKind() != AffineExprKind::Mod) continue;
auto mod_expr = mlir::cast<AffineBinaryOpExpr>(expr);
auto constant_expr = mlir::dyn_cast<AffineConstantExpr>(mod_expr.getRHS());
if (!constant_expr) continue;
if (constant_expr.getValue() <= 0) continue;
auto scaling_factor = constant_expr.getValue();
if (mod_expr.getLHS().getKind() != AffineExprKind::SymbolId) continue;
auto symbol_expr = mlir::cast<AffineSymbolExpr>(mod_expr.getLHS());
if (to_replace.contains(symbol_expr)) {
continue;
}
to_replace[symbol_expr] = constant_expr * symbol_expr + shift_value;
to_delete.insert(expr);
affine_map_ = affine_map_.replace(
symbol_expr, constant_expr * symbol_expr + shift_value,
affine_map_.getNumDims(), affine_map_.getNumSymbols());
auto& symbol_range = range_vars_[symbol_expr.getPosition()].bounds;
symbol_range.lower = (symbol_range.lower - shift_value) / scaling_factor;
symbol_range.upper = (symbol_range.upper - shift_value) / scaling_factor;
}
llvm::DenseMap<mlir::AffineExpr, Interval> new_constraints;
for (const auto& [expr, range] : constraints_) {
if (!to_delete.contains(expr)) {
new_constraints[expr.replace(to_replace)] = range;
}
}
constraints_ = std::move(new_constraints);
return !to_delete.empty();
}
bool IndexingMap::IsRangeVarSymbol(mlir::AffineSymbolExpr symbol) const {
unsigned int position = symbol.getPosition();
CHECK_LE(position, GetSymbolCount());
return position < range_vars_.size();
}
bool IndexingMap::IsRTVarSymbol(mlir::AffineSymbolExpr symbol) const {
unsigned int position = symbol.getPosition();
CHECK_LE(position, GetSymbolCount());
return position >= range_vars_.size();
}
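// Returns a copy of this map in which every symbol (range and runtime
// variables alike) has been converted into a trailing dimension.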
IndexingMap IndexingMap::ConvertSymbolsToDimensions() const {
int num_symbols = GetSymbolCount();
if (IsUndefined() || IsKnownEmpty() || num_symbols == 0) {
return *this;
}
int num_dims = GetDimensionCount();
MLIRContext* mlir_context = GetMLIRContext();
int64_t num_vars = num_dims + num_symbols;
std::vector<IndexingMap::Variable> new_dim_vars;
new_dim_vars.reserve(num_vars);
llvm::append_range(new_dim_vars, GetDimVars());
SmallVector<AffineExpr> syms_replacements;
int64_t symbol_id = num_dims;
for (const IndexingMap::Variable& var :
llvm::concat<const IndexingMap::Variable>(range_vars_, rt_vars_)) {
syms_replacements.push_back(getAffineDimExpr(symbol_id++, mlir_context));
new_dim_vars.push_back(IndexingMap::Variable{var.bounds});
}
SmallVector<std::pair<AffineExpr, Interval>, 4> new_constraints;
for (const auto& [expr, range] : constraints_) {
new_constraints.push_back(
std::make_pair(expr.replaceSymbols(syms_replacements), range));
}
AffineMap canonical_map =
affine_map_.replaceDimsAndSymbols({}, syms_replacements, num_vars, 0);
  IndexingMap new_indexing_map(canonical_map, new_dim_vars,
                               /*range_vars=*/{}, /*rt_vars=*/{},
                               new_constraints);
return new_indexing_map;
}
}
} | #include "xla/service/gpu/model/indexing_map.h"
#include <cstdint>
#include <limits>
#include <memory>
#include <optional>
#include <sstream>
#include <tuple>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/hash/hash_testing.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "mlir/IR/AffineExpr.h"
#include "mlir/IR/AffineMap.h"
#include "mlir/IR/MLIRContext.h"
#include "xla/service/gpu/model/indexing_map_serialization.h"
#include "xla/service/gpu/model/indexing_test_utils.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tests/verified_hlo_module.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace xla {
namespace gpu {
namespace {
using ::mlir::AffineMap;
using ::testing::AnyOf;
using ::testing::ElementsAre;
class IndexingMapTest : public HloTestBase {
public:
IndexingMap Parse(absl::string_view indexing_map_str) {
auto indexing_map = ParseIndexingMap(indexing_map_str, &mlir_context_);
EXPECT_TRUE(indexing_map.has_value());
return *indexing_map;
}
mlir::MLIRContext mlir_context_;
};
std::vector<bool> ConvertToSTL(const llvm::SmallBitVector& bit_vector) {
std::vector<bool> result;
result.reserve(bit_vector.size());
for (int i = 0; i < bit_vector.size(); ++i) {
result.push_back(bit_vector[i]);
}
return result;
}
TEST_F(IndexingMapTest, VariableKind) {
EXPECT_EQ(ToVariableType("default"), VariableKind::kDefault);
EXPECT_EQ(ToVariableType("th_x"), VariableKind::kThreadX);
EXPECT_EQ(ToVariableType("th_y"), VariableKind::kThreadY);
EXPECT_EQ(ToVariableType("th_z"), VariableKind::kThreadZ);
EXPECT_EQ(ToVariableType("bl_x"), VariableKind::kBlockX);
EXPECT_EQ(ToVariableType("bl_y"), VariableKind::kBlockY);
EXPECT_EQ(ToVariableType("bl_z"), VariableKind::kBlockZ);
EXPECT_EQ(ToVariableType("warp"), VariableKind::kWarp);
EXPECT_EQ(ToVariableType("th_w"), VariableKind::kWarpThread);
EXPECT_EQ(ToVariableName(VariableKind::kDefault), "default");
EXPECT_EQ(ToVariableName(VariableKind::kThreadX), "th_x");
EXPECT_EQ(ToVariableName(VariableKind::kThreadY), "th_y");
EXPECT_EQ(ToVariableName(VariableKind::kThreadZ), "th_z");
EXPECT_EQ(ToVariableName(VariableKind::kBlockX), "bl_x");
EXPECT_EQ(ToVariableName(VariableKind::kBlockY), "bl_y");
EXPECT_EQ(ToVariableName(VariableKind::kBlockZ), "bl_z");
EXPECT_EQ(ToVariableName(VariableKind::kWarp), "warp");
EXPECT_EQ(ToVariableName(VariableKind::kWarpThread), "th_w");
}
TEST_F(IndexingMapTest, VerifyDimensions) {
auto indexing_map = IndexingMap::FromTensorSizes(
ParseAffineMap("(d0) -> (d0)", &mlir_context_),
      /*dim_upper_bounds=*/{10, 10}, /*symbol_upper_bounds=*/{});
std::stringstream ss;
EXPECT_FALSE(indexing_map.Verify(ss));
EXPECT_EQ(ss.str(),
"dim size must match the number of dimensions in the affine map");
}
TEST_F(IndexingMapTest, VerifySymbols) {
auto indexing_map = IndexingMap::FromTensorSizes(
ParseAffineMap("(d0) -> (d0)", &mlir_context_),
      /*dim_upper_bounds=*/{10}, /*symbol_upper_bounds=*/{10});
std::stringstream ss;
EXPECT_FALSE(indexing_map.Verify(ss));
EXPECT_EQ(ss.str(),
"range vars size + rt var size must match the number of symbols in "
"the affine map");
}
TEST_F(IndexingMapTest, RTVar) {
IndexingMap indexing_map(
ParseAffineMap("(d0, d1)[range, rt0, rt1] -> (d1, d0, range + rt0, rt1)",
&mlir_context_),
{IndexingMap::Variable{0, 99, "d0"}, IndexingMap::Variable{0, 43, "d1"}},
{IndexingMap::Variable{-99, 99, "range"}},
{IndexingMap::Variable{Interval{0, 2}},
IndexingMap::Variable({Interval{0, 7}})});
EXPECT_THAT(ToString(indexing_map), MatchIndexingString(R"(
(d0, d1)[range, rt0, rt1] -> (d1, d0, range + rt0, rt1),
domain:
d0 in [0, 99],
d1 in [0, 43],
range in [-99, 99],
rt0 in [0, 2],
rt1 in [0, 7]
)"));
}
TEST_F(IndexingMapTest, Evaluation) {
IndexingMap indexing_map = Parse(R"(
(d0, d1)[s0, s1] -> (d1, d0, s1, s0),
domain:
d0 in [0, 3],
d1 in [0, 3],
s0 in [0, 1],
s1 in [0, 1]
)");
auto results = indexing_map.Evaluate(
mlir::getAffineConstantExprs({1, 2}, &mlir_context_),
mlir::getAffineConstantExprs({3, 4}, &mlir_context_));
EXPECT_THAT(results, ElementsAre(2, 1, 4, 3));
auto feasible = indexing_map.ConstraintsSatisfied(
mlir::getAffineConstantExprs({1, 2}, &mlir_context_),
mlir::getAffineConstantExprs({3, 4}, &mlir_context_));
EXPECT_TRUE(feasible);
indexing_map.AddConstraint(ParseAffineExpr("s0 mod 4", &mlir_context_),
Interval{0, 0});
auto infeasible = indexing_map.ConstraintsSatisfied(
mlir::getAffineConstantExprs({1, 2}, &mlir_context_),
mlir::getAffineConstantExprs({5, 4}, &mlir_context_));
EXPECT_FALSE(infeasible);
}
TEST_F(IndexingMapTest, Composition_Permutation) {
IndexingMap producer = Parse(R"(
(d0, d1)[s0, s1] -> (d1, d0, s1, s0),
domain:
d0 in [0, 3],
d1 in [0, 3],
s0 in [0, 1],
s1 in [0, 1]
)");
IndexingMap consumer = Parse(R"(
(d0)[s0] -> (d0, s0),
domain:
d0 in [0, 3],
s0 in [0, 3]
)");
auto composed = ComposeIndexingMaps(consumer, producer);
EXPECT_THAT(composed, MatchIndexingMap(R"(
(d0)[s0, s1, s2] -> (s2, d0, s1, s0),
domain:
d0 in [0, 3],
s0 in [0, 1],
s1 in [0, 1],
s2 in [0, 3]
)"));
}
TEST_F(IndexingMapTest, Composition_RestrictedInterval) {
IndexingMap producer = Parse(R"(
(d0, d1)[s0, s1] -> (d1, d0, s1, s0),
domain:
d0 in [0, 4],
d1 in [0, 5],
s0 in [0, 6],
s1 in [0, 1]
)");
IndexingMap consumer = Parse(R"(
(d0)[s0] -> (d0, s0),
domain:
d0 in [0, 9],
s0 in [0, 7]
)");
auto composed = ComposeIndexingMaps(consumer, producer);
EXPECT_THAT(composed, MatchIndexingMap(R"(
(d0)[s0, s1, s2] -> (s2, d0, s1, s0),
domain:
d0 in [0, 4],
s0 in [0, 6],
s1 in [0, 1],
s2 in [0, 5]
)"));
}
TEST_F(IndexingMapTest, Composition_ProducerAndConsumerHaveConstraints) {
IndexingMap producer = Parse(R"(
(d0, d1)[s0, s1] -> (d1, d0, s1, s0),
domain:
d0 in [0, 49],
d1 in [0, 59],
s0 in [0, 69],
s1 in [0, 19],
d0 mod 8 in [0, 0],
s0 mod 3 in [1, 1]
)");
IndexingMap consumer = Parse(R"(
(d0)[s0] -> (d0, s0),
domain:
d0 in [0, 9],
s0 in [0, 7],
d0 + s0 in [0, 20],
s0 mod 4 in [0, 0]
)");
auto composed = ComposeIndexingMaps(consumer, producer);
EXPECT_THAT(composed, MatchIndexingMap(R"(
(d0)[s0, s1, s2] -> (s2, d0, s1, s0),
domain:
d0 in [0, 9],
s0 in [0, 69],
s1 in [0, 19],
s2 in [0, 7],
d0 + s2 in [0, 20],
d0 mod 8 in [0, 0],
s0 mod 3 in [1, 1],
s2 mod 4 in [0, 0]
)"));
EXPECT_TRUE(composed.Simplify());
EXPECT_THAT(composed, MatchIndexingMap(R"(
(d0)[s0, s1, s2] -> (s2, d0, s1, s0),
domain:
d0 in [0, 8],
s0 in [1, 67],
s1 in [0, 19],
s2 in [0, 4],
d0 mod 8 in [0, 0],
s0 mod 3 in [1, 1],
s2 mod 4 in [0, 0]
)"));
}
TEST_F(IndexingMapTest, Composition_RTVar) {
std::vector<IndexingMap::Variable> rt_vars{
IndexingMap::Variable{Interval{0, 0}},
IndexingMap::Variable({Interval{0, 1}}),
IndexingMap::Variable({Interval{0, 226}})};
IndexingMap producer(
ParseAffineMap(
"(d0, d1, d2)[rt0, rt1, rt2] -> (d0 + rt0, d1 + rt1, d2 + rt2)",
&mlir_context_),
{IndexingMap::Variable{{0, 0}}, IndexingMap::Variable{{0, 1}},
IndexingMap::Variable{{0, 226}}},
      /*range_vars=*/{}, std::move(rt_vars));
IndexingMap consumer(
ParseAffineMap("(d0, d1)[s] -> (0, d1, s)", &mlir_context_),
{IndexingMap::Variable{0, 0}, IndexingMap::Variable{0, 1}},
{IndexingMap::Variable{0, 31, "s"}}, {});
auto composed = ComposeIndexingMaps(consumer, producer);
EXPECT_THAT(ToString(composed), MatchIndexingString(R"(
(d0, d1)[s, rt0, rt1, rt2] -> (rt0, d1 + rt1, s + rt2),
domain:
d0 in [0, 0],
d1 in [0, 1],
s in [0, 31],
rt0 in [0, 0],
rt1 in [0, 1],
rt2 in [0, 226]
)"));
}
TEST_F(IndexingMapTest, Composition_OnlyRTVars) {
IndexingMap producer(
ParseAffineMap("(d0, d1)[s0, s1] -> (d0 + s0, d1 + 4 * s1)",
&mlir_context_),
{IndexingMap::Variable{0, 24}, IndexingMap::Variable{0, 15}}, {},
{IndexingMap::Variable{Interval{0, 2}, "ps_0"},
IndexingMap::Variable{Interval{0, 1}, "ps_1"}});
std::vector<IndexingMap::Variable> consumer_rt_vars;
IndexingMap consumer(
ParseAffineMap("(d0, d1)[s0, s1] -> (d0 + 2 * s0, d1 + 3 * s1)",
&mlir_context_),
{IndexingMap::Variable{0, 24}, IndexingMap::Variable{0, 15}}, {},
{IndexingMap::Variable{Interval{0, 25}, "cs_0"},
IndexingMap::Variable{Interval{0, 16}, "cs_1"}});
auto composed = ComposeIndexingMaps(consumer, producer);
EXPECT_THAT(ToString(composed), MatchIndexingString(R"(
(d0, d1)[ps_0, ps_1, cs_0, cs_1] ->
(d0 + cs_0 * 2 + ps_0, d1 + cs_1 * 3 + ps_1 * 4),
domain:
d0 in [0, 24],
d1 in [0, 15],
ps_0 in [0, 2],
ps_1 in [0, 1],
cs_0 in [0, 25],
cs_1 in [0, 16],
d0 + cs_0 * 2 in [0, 24],
d1 + cs_1 * 3 in [0, 15]
)"));
}
TEST_F(IndexingMapTest, RemoveUnusedVars_ConstraintUsesDim) {
auto indexing_map = Parse(R"(
(d0, d1)[s0, s1] -> (d1, s0, s1),
domain:
d0 in [0, 49],
d1 in [0, 59],
s0 in [0, 69],
s1 in [0, 19],
d0 + s0 in [1, 100],
s0 mod 3 in [0, 0]
)");
indexing_map.RemoveUnusedVars();
EXPECT_THAT(indexing_map, MatchIndexingMap(R"(
(d0, d1)[s0, s1] -> (d1, s0, s1),
domain:
d0 in [0, 49],
d1 in [0, 59],
s0 in [0, 69],
s1 in [0, 19],
d0 + s0 in [1, 100],
s0 mod 3 in [0, 0]
)"));
}
TEST_F(IndexingMapTest, RemoveUnusedVars_ConstraintUsesUnusedDim) {
auto indexing_map = Parse(R"(
(d0, d1)[s0, s1] -> (s0, d1, s1),
domain:
d0 in [0, 49],
d1 in [0, 59],
s0 in [0, 69],
s1 in [0, 19],
d0 mod 3 in [0, 0]
)");
indexing_map.RemoveUnusedVars();
EXPECT_THAT(indexing_map, MatchIndexingMap(R"(
(d0)[s0, s1] -> (s0, d0, s1),
domain:
d0 in [0, 59],
s0 in [0, 69],
s1 in [0, 19]
)"));
}
TEST_F(IndexingMapTest, RemoveUnusedSymbols_ConstraintUsesOnlyUnusedSym) {
auto indexing_map = Parse(R"(
(d0, d1)[s0, s1] -> (d0, d1, s1),
domain:
d0 in [0, 49],
d1 in [0, 59],
s0 in [0, 69],
s1 in [0, 19],
s0 mod 3 in [0, 0]
)");
indexing_map.RemoveUnusedSymbols();
EXPECT_THAT(indexing_map, MatchIndexingMap(R"(
(d0, d1)[s0] -> (d0, d1, s0),
domain:
d0 in [0, 49],
d1 in [0, 59],
s0 in [0, 19]
)"));
}
TEST_F(IndexingMapTest, RemoveUnusedVars_ConstraintsWithManyDims) {
auto indexing_map = Parse(R"(
(d0, d1, d2, d3, d4)[s0, s1, s2] -> (s0 * 4 + d1 + d3 - 42),
domain:
d0 in [0, 0],
d1 in [0, 1],
d2 in [0, 2],
d3 in [0, 3],
d4 in [0, 4],
s0 in [0, 31],
s1 in [0, 63],
s2 in [0, 95],
s0 * 4 + d1 + d3 in [24, 459],
s0 + s2 in [0, 512]
)");
auto unused_vars = indexing_map.RemoveUnusedVars();
EXPECT_THAT(indexing_map, MatchIndexingMap(R"(
(d0, d1)[s0, s1] -> (d0 + s0 * 4 + d1 - 42),
domain:
d0 in [0, 1],
d1 in [0, 3],
s0 in [0, 31],
s1 in [0, 95],
d0 + s0 * 4 + d1 in [24, 459],
s0 + s1 in [0, 512]
)"));
EXPECT_THAT(ConvertToSTL(unused_vars),
::testing::ElementsAreArray(
{true, false, true, false, true, false, true, false}));
}
TEST_F(IndexingMapTest, RemoveUnusedSymbols_ConstraintUsesSymbol) {
auto indexing_map = Parse(R"(
(d0, d1)[s0, s1] -> (d1, d0, s1),
domain:
d0 in [0, 49],
d1 in [0, 59],
s0 in [0, 69],
s1 in [0, 19],
s0 + s1 in [1, 100],
s0 mod 3 in [0, 0]
)");
indexing_map.RemoveUnusedSymbols();
EXPECT_THAT(indexing_map, MatchIndexingMap(R"(
(d0, d1)[s0, s1] -> (d1, d0, s1),
domain:
d0 in [0, 49],
d1 in [0, 59],
s0 in [0, 69],
s1 in [0, 19],
s0 + s1 in [1, 100],
s0 mod 3 in [0, 0]
)"));
}
TEST_F(IndexingMapTest, RemoveUnusedSymbols_ConstraintUsesOnlyUnusedSymbols) {
auto indexing_map = Parse(R"(
(d0, d1)[s0, s1] -> (d1, d0, s1),
domain:
d0 in [0, 49],
d1 in [0, 59],
s0 in [0, 69],
s1 in [0, 19],
s0 mod 3 in [0, 0]
)");
indexing_map.RemoveUnusedSymbols();
EXPECT_THAT(indexing_map, MatchIndexingMap(R"(
(d0, d1)[s0] -> (d1, d0, s0),
domain:
d0 in [0, 49],
d1 in [0, 59],
s0 in [0, 19]
)"));
}
TEST_F(IndexingMapTest, RemoveUnusedSymbols_ConstraintIsAConstantWithinRange) {
auto indexing_map = Parse(R"(
(d0) -> (d0),
domain:
d0 in [0, 49],
0 in [-10, 5]
)");
EXPECT_THAT(indexing_map, MatchIndexingMap(R"(
(d0) -> (d0),
domain:
d0 in [0, 49]
)"));
}
TEST_F(IndexingMapTest, KnownEmpty_CreatingIndexingMapWithInfeasibleRange) {
auto indexing_map = Parse(R"(
(d0) -> (d0),
domain:
d0 in [0, -2]
)");
EXPECT_THAT(indexing_map, MatchIndexingMap("KNOWN EMPTY"));
}
TEST_F(IndexingMapTest, KnownEmpty_AddingConstraintOutOfRange) {
auto indexing_map = Parse(R"(
(d0) -> (d0),
domain:
d0 in [0, 49],
0 in [10, 15]
)");
EXPECT_THAT(indexing_map, MatchIndexingMap("KNOWN EMPTY"));
}
TEST_F(IndexingMapTest, KnownEmpty_Composition) {
auto indexing_map = Parse("(d0) -> (d0), domain: d0 in [0, 49]");
auto known_empty = Parse("(d0) -> (d0), domain: d0 in [0, -1]");
EXPECT_THAT(known_empty, MatchIndexingMap("KNOWN EMPTY"));
EXPECT_THAT(indexing_map * known_empty, MatchIndexingMap("KNOWN EMPTY"));
EXPECT_THAT(known_empty * indexing_map, MatchIndexingMap("KNOWN EMPTY"));
EXPECT_EQ((indexing_map * known_empty).GetAffineMap().getNumResults(), 1);
EXPECT_EQ((known_empty * indexing_map).GetAffineMap().getNumResults(), 1);
}
TEST_F(IndexingMapTest,
KnownEmpty_AddingConstraintOutOfRangeAfterSimplification) {
auto indexing_map = Parse(R"(
(d0, d1)[s0, s1] -> (d1, d0, s1),
domain:
d0 in [0, 49],
d1 in [0, 59],
s0 in [0, 69],
s1 in [0, 19],
s1 floordiv 20 in [2, 2]
)");
EXPECT_TRUE(indexing_map.Simplify());
EXPECT_THAT(indexing_map, MatchIndexingMap("KNOWN EMPTY"));
}
TEST_F(IndexingMapTest, RemoveUnusedSymbols_ConstraintsWithManySymbols) {
auto indexing_map = Parse(R"(
(d0)[s0, s1, s2, s3, s4] -> (d0 * 4 + s1 + s3 - 42),
domain:
d0 in [0, 31],
s0 in [0, 0],
s1 in [0, 1],
s2 in [0, 2],
s3 in [0, 3],
s4 in [0, 4],
d0 * 4 + s1 + s3 in [24, 459]
)");
indexing_map.RemoveUnusedSymbols();
EXPECT_THAT(indexing_map, MatchIndexingMap(R"(
(d0)[s0, s1] -> (d0 * 4 + s0 + s1 - 42),
domain:
d0 in [0, 31],
s0 in [0, 1],
s1 in [0, 3],
d0 * 4 + s0 + s1 in [24, 459]
)"));
}
TEST_F(IndexingMapTest, RemoveUnusedSymbols_ConstraintsWithRTVars) {
IndexingMap indexing_map(
ParseAffineMap("(d0)[s0, s1, s2, s3, s4] -> (d0 * 4 + s1 + s3 - 42)",
&mlir_context_),
{IndexingMap::Variable{{0, 31}}},
{IndexingMap::Variable{{0, 0}}, IndexingMap::Variable{{0, 1}},
IndexingMap::Variable{{0, 2}}},
{IndexingMap::Variable{Interval{0, 3}},
IndexingMap::Variable{Interval{0, 4}}});
indexing_map.AddConstraint(
ParseAffineExpr("d0 * 4 + s1 + s3", &mlir_context_), Interval{24, 459});
indexing_map.RemoveUnusedSymbols();
EXPECT_THAT(indexing_map, MatchIndexingMap(R"(
(d0)[s0, rt0] -> (d0 * 4 + s0 + rt0 - 42),
domain:
d0 in [0, 31],
s0 in [0, 1],
rt0 in [0, 3],
d0 * 4 + s0 + rt0 in [24, 459]
)"));
}
TEST_F(IndexingMapTest, ConvertSymbolsToDimensions) {
IndexingMap indexing_map(
ParseAffineMap(
"(d0)[s0, s1, s2, s3] -> (d0 * 4 + s0 + s1 + 2 * s2 + 3 * s3 - 42)",
&mlir_context_),
{IndexingMap::Variable{{0, 31}}},
{IndexingMap::Variable{{0, 0}}, IndexingMap::Variable{{0, 1}}},
{IndexingMap::Variable{Interval{0, 3}},
IndexingMap::Variable{Interval{0, 4}}});
indexing_map.AddConstraint(
ParseAffineExpr("d0 * 4 + s0 + 2 * s2", &mlir_context_),
Interval{24, 459});
EXPECT_THAT(indexing_map.ConvertSymbolsToDimensions(), MatchIndexingMap(R"(
(d0, d1, d2, d3, d4) -> (d0 * 4 + d1 + d2 + d3 * 2 + d4 * 3 - 42),
domain:
d0 in [0, 31],
d1 in [0, 0],
d2 in [0, 1],
d3 in [0, 3],
d4 in [0, 4],
d0 * 4 + d1 + d3 * 2 in [24, 459]
)"));
}
TEST_F(IndexingMapTest, ConstraintIntervalSimplification_Sum) {
auto indexing_map = Parse(R"(
(d0) -> (d0),
domain:
d0 in [0, 99],
d0 mod 8 + 5 in [50, 54]
)");
EXPECT_TRUE(indexing_map.Simplify());
EXPECT_THAT(ToString(indexing_map), MatchIndexingString(R"(
(d0) -> (d0),
domain:
d0 in [0, 99],
d0 mod 8 in [45, 49]
)"));
}
TEST_F(IndexingMapTest,
ConstraintIntervalSimplification_Sum_IndependentOfSymbol) {
auto indexing_map = Parse(R"(
(d0)[s0, s1] -> (d0 * 6 + s0 * 3 + s1),
domain:
d0 in [0, 1999],
s0 in [0, 1],
s1 in [0, 2],
d0 * 6 + s0 * 3 + s1 in [0, 599]
)");
EXPECT_TRUE(indexing_map.Simplify());
EXPECT_THAT(ToString(indexing_map), MatchIndexingString(R"(
(d0)[s0, s1] -> (d0 * 6 + s0 * 3 + s1),
domain:
d0 in [0, 99],
s0 in [0, 1],
s1 in [0, 2]
)"));
}
TEST_F(IndexingMapTest,
ConstraintIntervalSimplification_Sum_NotIndependentOfSymbol) {
auto indexing_map = Parse(R"(
(d0)[s0, s1] -> (d0 * 6 + s0 * 3 + s1),
domain:
d0 in [0, 1999],
s0 in [0, 1],
s1 in [0, 2],
d0 * 6 + s0 * 3 + s1 in [0, 598]
)");
EXPECT_FALSE(indexing_map.Simplify());
}
TEST_F(IndexingMapTest, ConstraintIntervalSimplification_Sum_GcdGreaterOne) {
auto indexing_map = Parse(R"(
(d0)[s0] -> (d0 * 6 + s0 * 3),
domain:
d0 in [0, 1999],
s0 in [0, 1],
d0 * 6 + s0 * 3 in [0, 599]
)");
EXPECT_TRUE(indexing_map.Simplify());
EXPECT_THAT(ToString(indexing_map), MatchIndexingString(R"(
(d0)[s0] -> (d0 * 6 + s0 * 3),
domain:
d0 in [0, 99],
s0 in [0, 1]
)"));
}
TEST_F(IndexingMapTest,
ConstraintIntervalSimplification_FloorDivPositiveDivisorPositiveBounds) {
auto indexing_map = Parse(R"(
(d0) -> (d0),
domain:
d0 in [0, 99],
d0 floordiv 8 in [5, 11]
)");
EXPECT_TRUE(indexing_map.Simplify());
EXPECT_THAT(ToString(indexing_map), MatchIndexingString(R"(
(d0) -> (d0),
domain:
d0 in [40, 95]
)"));
}
TEST_F(IndexingMapTest,
ConstraintIntervalSimplification_FloorDivPositiveDivisorNegativeBounds) {
auto indexing_map = Parse(R"(
(d0)[s0] -> (d0),
domain:
d0 in [0, 99],
s0 in [-99, 99],
s0 floordiv 3 in [-11, -5]
)");
EXPECT_TRUE(indexing_map.Simplify());
EXPECT_THAT(ToString(indexing_map), MatchIndexingString(R"(
(d0)[s0] -> (d0),
domain:
d0 in [0, 99],
s0 in [-33, -13]
)"));
}
TEST_F(IndexingMapTest,
ConstraintIntervalSimplification_FloorDivNegativeDivisorNegativeBounds) {
auto indexing_map = Parse(R"(
(d0)[s0] -> (d0),
domain:
d0 in [0, 99],
s0 in [-99, 99],
s0 floordiv -3 in [-11, -5]
)");
EXPECT_TRUE(indexing_map.Simplify());
EXPECT_THAT(ToString(indexing_map), MatchIndexingString(R"(
(d0)[s0] -> (d0),
domain:
d0 in [0, 99],
s0 in [15, 35]
)"));
}
TEST_F(IndexingMapTest,
ConstraintIntervalSimplification_MulPositiveMultiplierPositiveBounds) {
auto indexing_map = Parse(R"(
(d0) -> (d0),
domain:
d0 in [0, 99],
d0 * 8 in [14, 33]
)");
EXPECT_TRUE(indexing_map.Simplify());
EXPECT_THAT(ToString(indexing_map), MatchIndexingString(R"(
(d0) -> (d0),
domain:
d0 in [2, 4]
)"));
}
TEST_F(IndexingMapTest,
ConstraintIntervalSimplification_MulPositiveMultiplierNegativeBounds) {
auto indexing_map = Parse(R"(
(d0)[s0] -> (d0),
domain:
d0 in [0, 99],
s0 in [-99, 99],
s0 * 3 in [-11, -5]
)");
EXPECT_TRUE(indexing_map.Simplify());
EXPECT_THAT(ToString(indexing_map), MatchIndexingString(R"(
(d0)[s0] -> (d0),
domain:
d0 in [0, 99],
s0 in [-3, -2]
)"));
}
TEST_F(IndexingMapTest,
ConstraintIntervalSimplification_MulNegativeMultiplierNegativeBounds) {
auto indexing_map = Parse(R"(
(d0)[s0] -> (d0),
domain:
d0 in [0, 99],
s0 in [-99, 99],
s0 * -3 in [-11, -5]
)");
EXPECT_TRUE(indexing_map.Simplify());
EXPECT_THAT(ToString(indexing_map), MatchIndexingString(R"(
(d0)[s0] -> (d0),
domain:
d0 in [0, 99],
s0 in [2, 3]
)"));
}
TEST_F(IndexingMapTest, ConstraintMerge_Mod) {
auto indexing_map = Parse(R"(
(d0)[s0, s1] -> (d0, s1, s0),
domain:
d0 in [0, 3],
s0 in [-21, -2],
s1 in [0, 10],
d0 mod 3 in [0, 0],
s0 mod 2 in [0, 0],
s0 mod 3 in [0, 0],
s1 mod 5 in [1, 1]
)");
EXPECT_TRUE(indexing_map.Simplify());
EXPECT_THAT(ToString(indexing_map), MatchIndexingString(R"(
(d0)[s0, s1] -> (d0, s1, s0),
domain:
d0 in [0, 3],
s0 in [-18, -6],
s1 in [1, 6],
d0 mod 3 in [0, 0],
s0 mod 6 in [0, 0],
s1 mod 5 in [1, 1]
)"));
}
TEST_F(IndexingMapTest, AffineMapSimplification_ConstantDims) {
auto indexing_map = Parse(R"(
(d0) -> (d0),
domain:
d0 in [5, 5]
)");
EXPECT_TRUE(indexing_map.Simplify());
EXPECT_THAT(ToString(indexing_map), MatchIndexingString(R"(
(d0) -> (5),
domain:
d0 in [5, 5]
)"));
}
TEST_F(IndexingMapTest, AffineMapSimplification_SumOrderRegression) {
auto indexing_map = Parse(R"(
(d0, d1)[s0, s1] -> (((((d0 + (d0 mod 3)) floordiv 3)
+ (s0 + ((s0 + s0) mod 3))) + (((d0 + s0) mod 3) + 0))),
domain:
d0 in [0, 9],
d1 in [0, 19],
s0 in [0, 29],
s1 in [0, 39]
)");
EXPECT_TRUE(indexing_map.Simplify());
EXPECT_FALSE(indexing_map.Simplify());
}
TEST_F(IndexingMapTest, AffineMapSimplification_SumOrderRegression2) {
auto indexing_map = Parse(R"(
(d0)[s0] -> ((((s0 + d0) + d0) floordiv 2)),
domain:
d0 in [0, 9],
s0 in [0, 19]
)");
EXPECT_TRUE(indexing_map.Simplify());
EXPECT_FALSE(indexing_map.Simplify());
}
TEST_F(IndexingMapTest, AffineMapSimplification_FloorDivRegression) {
auto indexing_map = Parse(R"(
(d0, d1) -> (((d0 floordiv 3) * 3 + d1 floordiv 2) floordiv 6),
domain:
d0 in [0, 11],
d1 in [0, 5]
)");
EXPECT_TRUE(indexing_map.Simplify());
EXPECT_THAT(ToString(indexing_map), MatchIndexingString(R"(
(d0, d1) -> (d0 floordiv 6),
domain:
d0 in [0, 11],
d1 in [0, 5]
)"));
}
TEST_F(IndexingMapTest, AffineMapSimplification_ModIsSub) {
auto indexing_map = Parse(R"(
(d0) -> (d0 mod 42),
domain:
d0 in [53, 71]
)");
EXPECT_TRUE(indexing_map.Simplify());
EXPECT_THAT(ToString(indexing_map), MatchIndexingString(R"(
(d0) -> (d0 - 42),
domain:
d0 in [53, 71]
)"));
}
TEST_F(IndexingMapTest, AffineMapSimplification_ModIsAdd) {
auto indexing_map = Parse(R"(
(d0) -> (d0 mod 5),
domain:
d0 in [-5, -1]
)");
EXPECT_TRUE(indexing_map.Simplify());
EXPECT_THAT(ToString(indexing_map), MatchIndexingString(R"(
(d0) -> (d0 + 5),
domain:
d0 in [-5, -1]
)"));
}
TEST_F(IndexingMapTest, AffineMapSimplification_ModIsNotAdd) {
auto indexing_map1 = Parse("(d0) -> (d0 mod 5), domain: d0 in [-4, 0]");
EXPECT_FALSE(indexing_map1.Simplify());
auto indexing_map2 = Parse("(d0) -> (d0 mod 5), domain: d0 in [-6, -1]");
EXPECT_FALSE(indexing_map2.Simplify());
}
TEST_F(IndexingMapTest, AffineMapSimplification_SubIsMod) {
auto indexing_map = Parse(R"(
(d0)[s0] -> (d0 - (s0 floordiv 3) * 3 + s0),
domain:
d0 in [0, 1],
s0 in [0, 3]
)");
EXPECT_TRUE(indexing_map.Simplify());
EXPECT_THAT(ToString(indexing_map), MatchIndexingString(R"(
(d0)[s0] -> (d0 + s0 mod 3),
domain:
d0 in [0, 1],
s0 in [0, 3]
)"));
}
TEST_F(IndexingMapTest, AffineMapSimplification_SubIsModMultiplied) {
auto indexing_map = Parse(R"(
(d0)[s0] -> (d0 - (s0 floordiv 3) * 12 + s0 * 7),
domain:
d0 in [0, 1],
s0 in [0, 3]
)");
EXPECT_TRUE(indexing_map.Simplify());
EXPECT_THAT(ToString(indexing_map), MatchIndexingString(R"(
(d0)[s0] -> (d0 + (s0 mod 3) * 4 + s0 * 3),
domain:
d0 in [0, 1],
s0 in [0, 3]
)"));
}
TEST_F(IndexingMapTest, AffineMapSimplification_SubIsModSum) {
auto indexing_map = Parse(R"(
(d0)[s0] -> (1 + d0 - ((s0 + 1) floordiv 3) * 3 + s0),
domain:
d0 in [0, 1],
s0 in [0, 3]
)");
EXPECT_TRUE(indexing_map.Simplify());
EXPECT_THAT(ToString(indexing_map), MatchIndexingString(R"(
(d0)[s0] -> (d0 + (s0 + 1) mod 3),
domain:
d0 in [0, 1],
s0 in [0, 3]
)"));
}
TEST_F(IndexingMapTest,
AffineMapSimplification_DivsAndModsIfSmallerThanDivisor) {
auto indexing_map = Parse(R"(
(d0, d1) -> (d0 + d1 floordiv 16, d1 mod 16),
domain:
d0 in [0, 7],
d1 in [0, 15]
)");
EXPECT_TRUE(indexing_map.Simplify());
EXPECT_THAT(ToString(indexing_map), MatchIndexingString(R"(
(d0, d1) -> (d0, d1),
domain:
d0 in [0, 7],
d1 in [0, 15]
)"));
}
TEST_F(IndexingMapTest, AffineMapSimplification_DivsAndModsWithMultipliers) {
auto indexing_map = Parse(R"(
(d0, d1, d2) -> ((d0 * 100 + d1 * 10 + d2) floordiv 100,
((d0 * 100 + d1 * 10 + d2) mod 100) floordiv 10,
d2 mod 10),
domain:
d0 in [0, 8],
d1 in [0, 8],
d2 in [0, 8]
)");
EXPECT_TRUE(indexing_map.Simplify());
EXPECT_THAT(ToString(indexing_map), MatchIndexingString(R"(
(d0, d1, d2) -> (d0, d1, d2),
domain:
d0 in [0, 8],
d1 in [0, 8],
d2 in [0, 8]
)"));
}
TEST_F(IndexingMapTest,
AffineMapSimplification_DivsAndModsWithDivisibleMultipliers) {
auto indexing_map = Parse(R"(
(d0, d1, d2) -> ((d0 * 16 + d1 * 4 + d2) floordiv 8,
(d0 * 16 + d1 * 4 + d2) mod 8),
domain:
d0 in [0, 9],
d1 in [0, 9],
d2 in [0, 9]
)");
EXPECT_TRUE(indexing_map.Simplify());
EXPECT_THAT(ToString(indexing_map), MatchIndexingString(R"(
(d0, d1, d2) -> (d0 * 2 + (d1 * 4 + d2) floordiv 8,
(d1 * 4 + d2) mod 8),
domain:
d0 in [0, 9],
d1 in [0, 9],
d2 in [0, 9]
)"));
}
TEST_F(IndexingMapTest, AffineMapSimplification_DivsAndModsWithReverse) {
auto indexing_map = Parse(R"(
(d0, d1) -> (-((d0 * -11 - d1 + 109) floordiv 11) + 9,
d0 * 11 + d1 + ((d0 * -11 - d1 + 109) floordiv 11) * 11 - 99),
domain:
d0 in [0, 7],
d1 in [0, 8]
)");
EXPECT_TRUE(indexing_map.Simplify());
EXPECT_THAT(ToString(indexing_map), MatchIndexingString(R"(
(d0, d1) -> (d0, d1),
domain:
d0 in [0, 7],
d1 in [0, 8]
)"));
}
TEST_F(IndexingMapTest, AffineMapSimplification_SimplifyReshape) {
auto indexing_map = Parse(R"(
()[s0] -> ((s0 * 128) mod 715 + ((s0 * 128) floordiv 715) * 715),
domain:
s0 in [0, 127]
)");
EXPECT_TRUE(indexing_map.Simplify());
EXPECT_THAT(ToString(indexing_map), MatchIndexingString(R"(
()[s0] -> (s0 * 128),
domain:
s0 in [0, 127]
)"));
}
TEST_F(IndexingMapTest, AffineMapSimplification_SimplifyReshape2) {
auto indexing_map = Parse(R"(
(d0, d1) -> ((d0 mod 8) * 128 + d1 + (d0 floordiv 8) * 1024),
domain:
d0 in [0, 1023],
d1 in [0, 127]
)");
EXPECT_TRUE(indexing_map.Simplify());
EXPECT_THAT(ToString(indexing_map), MatchIndexingString(R"(
(d0, d1) -> (d0 * 128 + d1),
domain:
d0 in [0, 1023],
d1 in [0, 127]
)"));
}
TEST_F(IndexingMapTest, AffineMapSimplification_SimplifyReshape3) {
auto indexing_map = Parse(R"(
(d0, d1) -> (((d1 * 2 + d0 floordiv 64) mod 3) * 256 + (d0 mod 64) * 4
+ ((d1 * 128 + d0) floordiv 192) * 768),
domain:
d0 in [0, 127],
d1 in [0, 3071]
)");
EXPECT_TRUE(indexing_map.Simplify());
EXPECT_THAT(ToString(indexing_map), MatchIndexingString(R"(
(d0, d1) -> (d0 * 4 + d1 * 512),
domain:
d0 in [0, 127],
d1 in [0, 3071]
)"));
}
TEST_F(IndexingMapTest,
       AffineMapSimplification_ModWithNegativeMultiplierDoesNotGetSimplified) {
auto indexing_map = Parse(R"(
(d0) -> ((-d0) mod 2),
domain:
d0 in [0, 127]
)");
EXPECT_FALSE(indexing_map.Simplify());
EXPECT_THAT(ToString(indexing_map), MatchIndexingString(R"(
(d0) -> ((-d0) mod 2),
domain:
d0 in [0, 127]
)"));
}
TEST_F(IndexingMapTest, AffineMapSimplification_SimplifyBitcastAndBack) {
auto indexing_map = Parse(R"(
(d0, d1) -> ((d0 floordiv 1536) * 786432
+ (((d0 * 2 + d1 floordiv 64) floordiv 3) mod 1024) * 768
+ ((d0 * 2 + d1 floordiv 64) mod 3) * 256 + (d1 mod 64) * 4),
domain:
d0 in [0, 3071],
d1 in [0, 127]
)");
EXPECT_TRUE(indexing_map.Simplify());
EXPECT_THAT(ToString(indexing_map), MatchIndexingString(R"(
(d0, d1) -> (d0 * 512 + d1 * 4),
domain:
d0 in [0, 3071],
d1 in [0, 127]
)"));
}
TEST_F(IndexingMapTest, AffineMapSimplification_SimplifyReshape_Regression) {
auto indexing_map = Parse(R"(
()[s0] -> ((s0 * 128) mod 715 + ((s0 * 64) floordiv 715) * 715),
domain:
s0 in [0, 127]
)");
EXPECT_TRUE(indexing_map.Simplify());
EXPECT_THAT(ToString(indexing_map), MatchIndexingString(R"(
()[s0] -> (((s0 * 64) floordiv 715) * 715 + (s0 * 128) mod 715),
domain:
s0 in [0, 127]
)"));
}
TEST_F(IndexingMapTest, AffineMapSimplification_DivsInSequence) {
auto indexing_map = Parse(R"(
()[s0] -> (s0 - ((s0 floordiv 2) floordiv 7) * 14 + (s0 floordiv 14) * 14),
domain:
s0 in [0, 1233]
)");
EXPECT_TRUE(indexing_map.Simplify());
EXPECT_THAT(ToString(indexing_map), MatchIndexingString(R"(
()[s0] -> (s0),
domain:
s0 in [0, 1233]
)"));
}
TEST_F(IndexingMapTest, AffineMapSimplification_DivDiv) {
auto indexing_map = Parse(R"(
()[s0, s1] -> ((s0 * 2 + s1 floordiv 64) floordiv 3),
domain:
s0 in [0, 1233],
s1 in [0, 127]
)");
EXPECT_TRUE(indexing_map.Simplify());
EXPECT_THAT(ToString(indexing_map), MatchIndexingString(R"(
()[s0, s1] -> ((s0 * 128 + s1) floordiv 192),
domain:
s0 in [0, 1233],
s1 in [0, 127]
)"));
}
TEST_F(IndexingMapTest, AffineMapSimplification_DivSumConstant) {
auto indexing_map = Parse(R"(
()[s0] -> ((s0 * 6 + 9) floordiv 18),
domain:
s0 in [0, 1233]
)");
EXPECT_TRUE(indexing_map.Simplify());
EXPECT_THAT(ToString(indexing_map), MatchIndexingString(R"(
()[s0] -> ((s0 * 2 + 3) floordiv 6),
domain:
s0 in [0, 1233]
)"));
}
TEST_F(IndexingMapTest, AffineMapSimplification_DivSumDiv) {
auto indexing_map = Parse(R"(
()[s0, s1] -> ((s0 floordiv 3 + s1 floordiv 3) floordiv 6),
domain:
s0 in [0, 1233],
s1 in [0, 127]
)");
EXPECT_FALSE(indexing_map.Simplify());
}
TEST_F(IndexingMapTest, AffineMapSimplification_NegativeDiv) {
auto indexing_map = Parse(R"(
()[s0] -> ((s0 floordiv 2) floordiv -7),
domain:
s0 in [0, 1233]
)");
EXPECT_FALSE(indexing_map.Simplify());
}
TEST_F(IndexingMapTest, AffineMapSimplification_ExtractFromMod) {
auto indexing_map = Parse(R"(
()[s0, s1, s2, s3] -> ((s0 * 458752 + s1 + s2 * 4 + s3 * 512) mod 20000),
domain:
s0 in [0, 871],
s1 in [0, 3],
s2 in [0, 127],
s3 in [0, 895]
)");
EXPECT_TRUE(indexing_map.Simplify());
EXPECT_THAT(ToString(indexing_map), MatchIndexingString(R"(
()[s0, s1, s2, s3] -> (
((s0 * 114688 + s3 * 128 + s2) mod 5000) * 4 + s1
),
domain:
s0 in [0, 871],
s1 in [0, 3],
s2 in [0, 127],
s3 in [0, 895]
)"));
}
TEST_F(IndexingMapTest,
AffineMapSimplification_ExtractFromDiv_NegativeMultiplier) {
auto indexing_map = Parse(R"(
()[s0, s1] -> ((s0 * 16 - (s1 floordiv 4) floordiv 2 + (s1 floordiv 8) * 2)
floordiv 4),
domain:
s0 in [0, 1],
s1 in [0, 127]
)");
EXPECT_TRUE(indexing_map.Simplify());
EXPECT_THAT(ToString(indexing_map), MatchIndexingString(R"(
()[s0, s1] -> (
s0 * 4 + s1 floordiv 32
),
domain:
s0 in [0, 1],
s1 in [0, 127]
)"));
}
TEST_F(IndexingMapTest, RescaleSymbols_Simple) {
auto indexing_map = Parse(R"(
(d0)[s0, s1, s2] -> (s2, d0, s1, s0 floordiv 6),
domain:
d0 in [0, 3],
s0 in [0, 6],
s1 in [0, 1],
s2 in [0, 5],
s0 mod 6 in [0, 0]
)");
EXPECT_TRUE(indexing_map.RescaleSymbols());
EXPECT_THAT(ToString(indexing_map), MatchIndexingString(R"(
(d0)[s0, s1, s2] -> (s2, d0, s1, s0),
domain:
d0 in [0, 3],
s0 in [0, 1],
s1 in [0, 1],
s2 in [0, 5]
)"));
}
TEST_F(IndexingMapTest, RescaleSymbols_WithShift) {
auto indexing_map = Parse(R"(
(d0)[s0, s1, s2] -> (s2, d0, s1, s0),
domain:
d0 in [0, 3],
s0 in [0, 41],
s1 in [0, 1],
s2 in [0, 5],
s0 mod 6 in [3, 3]
)");
EXPECT_TRUE(indexing_map.RescaleSymbols());
EXPECT_THAT(ToString(indexing_map), MatchIndexingString(R"(
(d0)[s0, s1, s2] -> (s2, d0, s1, s0 * 6 + 3),
domain:
d0 in [0, 3],
s0 in [0, 6],
s1 in [0, 1],
s2 in [0, 5]
)"));
}
TEST_F(IndexingMapTest, RescaleSymbols_TwoModConstraints) {
auto indexing_map = Parse(R"(
(d0)[s0, s1, s2] -> (s2, d0, s1, s0 floordiv 6),
domain:
d0 in [0, 3],
s0 in [0, 7],
s1 in [0, 1],
s2 in [0, 5],
s0 mod 2 in [0, 0],
s0 mod 3 in [0, 0]
)");
EXPECT_TRUE(indexing_map.RescaleSymbols());
EXPECT_THAT(ToString(indexing_map), MatchIndexingString(R"(
(d0)[s0, s1, s2] -> (s2, d0, s1, s0),
domain:
d0 in [0, 3],
s0 in [0, 1],
s1 in [0, 1],
s2 in [0, 5]
)"));
}
TEST_F(IndexingMapTest, RescaleSymbols_RescaledSymbolInOtherNonModConstraint) {
auto indexing_map = Parse(R"(
(d0)[s0, s1, s2] -> (s2, d0, s1, s0),
domain:
d0 in [0, 3],
s0 in [0, 9],
s1 in [0, 1],
s2 in [0, 5],
s0 * s2 in [0, 28],
s0 mod 6 in [3, 3]
)");
EXPECT_TRUE(indexing_map.RescaleSymbols());
EXPECT_THAT(ToString(indexing_map), MatchIndexingString(R"(
(d0)[s0, s1, s2] -> (s2, d0, s1, s0 * 6 + 3),
domain:
d0 in [0, 3],
s0 in [0, 1],
s1 in [0, 1],
s2 in [0, 5],
(s0 * 6 + 3) * s2 in [0, 28]
)"));
}
TEST_F(IndexingMapTest,
RescaleSymbols_TwoModConstraintsForTheSameSymbolWhichCannotBeMerged) {
auto indexing_map = Parse(R"(
(d0)[s0, s1, s2] -> (s2, d0, s1, s0),
domain:
d0 in [0, 3],
s0 in [0, 99],
s1 in [0, 1],
s2 in [0, 5],
s0 mod 6 in [3, 3],
s0 mod 7 in [5, 5]
)");
EXPECT_TRUE(indexing_map.RescaleSymbols());
const mlir::AffineExpr result3 = indexing_map.GetAffineMap().getResult(3);
ASSERT_THAT(indexing_map.GetConstraints(), ::testing::SizeIs(1));
const mlir::AffineExpr constraint_expr =
indexing_map.GetConstraints().begin()->first;
const Interval constraint_interval =
indexing_map.GetConstraints().begin()->second;
EXPECT_THAT(
std::make_tuple(result3, constraint_expr, constraint_interval),
AnyOf(
std::make_tuple(ParseAffineExpr("s0 * 6 + 3", &mlir_context_),
ParseAffineExpr("(s0 * 6 + 3) mod 7", &mlir_context_),
Interval{5, 5}),
std::make_tuple(ParseAffineExpr("s0 * 7 + 5", &mlir_context_),
ParseAffineExpr("(s0 * 7 + 5) mod 6", &mlir_context_),
Interval{3, 3})));
}
TEST_F(IndexingMapTest, RescaleSymbolsKeepsHashmapConsistent) {
auto indexing_map = Parse(R"(
(d0)[s0, s1, s2] -> (s2, d0, s0, s0 floordiv 6),
domain:
d0 in [0, 3],
s0 in [0, 6],
s1 in [0, 1],
s2 in [0, 5],
s0 mod 6 in [0, 0],
s0 * s1 in [0, 100]
)");
EXPECT_TRUE(indexing_map.RescaleSymbols());
for (auto& [expr, interval] : indexing_map.GetConstraints()) {
EXPECT_TRUE(indexing_map.GetConstraints().contains(expr))
<< "Don't modify the *keys* of the hashmap.";
}
}
TEST_F(IndexingMapTest, RangeEvaluatorTest) {
auto indexing_map = Parse(R"(
(d0, d1, d2, d3)[] -> (0),
domain:
d0 in [0, 9],
d1 in [-10, -1],
d2 in [-1, 2],
d3 in [0, 0]
)");
RangeEvaluator range_evaluator(indexing_map, &mlir_context_);
mlir::AffineExpr d0, d1, d2, d3;
bindDims(&mlir_context_, d0, d1, d2, d3);
EXPECT_TRUE(range_evaluator.IsAlwaysPositiveOrZero(d0));
EXPECT_FALSE(range_evaluator.IsAlwaysNegativeOrZero(d0));
EXPECT_FALSE(range_evaluator.IsAlwaysPositiveOrZero(d1));
EXPECT_TRUE(range_evaluator.IsAlwaysNegativeOrZero(d1));
EXPECT_FALSE(range_evaluator.IsAlwaysPositiveOrZero(d2));
EXPECT_FALSE(range_evaluator.IsAlwaysNegativeOrZero(d2));
EXPECT_TRUE(range_evaluator.IsAlwaysPositiveOrZero(d3));
EXPECT_TRUE(range_evaluator.IsAlwaysNegativeOrZero(d3));
}
TEST(IntervalComparisonTest, PointComparisons) {
Interval interval{12, 64};
auto point = [](int64_t n) { return Interval{n, n}; };
EXPECT_EQ(interval.Gt(point(11)), true);
EXPECT_EQ(interval.Gt(point(12)), std::nullopt);
EXPECT_EQ(interval.Gt(point(65)), false);
EXPECT_EQ(interval.Lt(point(65)), true);
EXPECT_EQ(interval.Lt(point(64)), std::nullopt);
EXPECT_EQ(interval.Lt(point(10)), false);
EXPECT_EQ(interval.Eq(point(11)), false);
EXPECT_EQ(interval.Eq(point(12)), std::nullopt);
EXPECT_EQ(interval.Eq(point(15)), std::nullopt);
EXPECT_EQ(interval.Eq(point(65)), false);
EXPECT_EQ(interval.Ne(point(11)), true);
EXPECT_EQ(interval.Ne(point(15)), std::nullopt);
EXPECT_EQ(interval.Ne(point(65)), true);
EXPECT_EQ(interval.Ge(point(12)), true);
EXPECT_EQ(interval.Ge(point(64)), std::nullopt);
EXPECT_EQ(interval.Ge(point(65)), false);
EXPECT_EQ(interval.Le(point(11)), false);
EXPECT_EQ(interval.Le(point(64)), true);
EXPECT_EQ(interval.Le(point(63)), std::nullopt);
EXPECT_EQ(interval.Le(point(65)), true);
EXPECT_EQ(point(15).Eq(point(15)), true);
EXPECT_EQ(point(15).Eq(point(16)), false);
EXPECT_EQ(point(15).Ne(point(15)), false);
EXPECT_EQ(point(15).Ne(point(16)), true);
}
TEST(IntervalComparisonTest, RangeComparisons) {
Interval interval{12, 64};
auto range = [](int64_t l, int64_t u) { return Interval{l, u}; };
EXPECT_EQ(interval.Gt(range(-10, 11)), true);
EXPECT_EQ(interval.Gt(range(-10, 12)), std::nullopt);
EXPECT_EQ(interval.Gt(interval), std::nullopt);
EXPECT_EQ(interval.Gt(range(10, 20)), std::nullopt);
EXPECT_EQ(interval.Gt(range(50, 60)), std::nullopt);
EXPECT_EQ(interval.Gt(range(64, 100)), false);
EXPECT_EQ(interval.Gt(range(65, 100)), false);
EXPECT_EQ(interval.Lt(range(65, 100)), true);
EXPECT_EQ(interval.Lt(range(64, 100)), std::nullopt);
EXPECT_EQ(interval.Lt(interval), std::nullopt);
EXPECT_EQ(interval.Lt(range(50, 60)), std::nullopt);
EXPECT_EQ(interval.Lt(range(10, 20)), std::nullopt);
EXPECT_EQ(interval.Lt(range(-10, 12)), false);
EXPECT_EQ(interval.Lt(range(-10, 11)), false);
EXPECT_EQ(interval.Eq(interval), std::nullopt);
EXPECT_EQ(interval.Eq(range(65, 100)), false);
EXPECT_EQ(interval.Eq(range(0, 11)), false);
}
MATCHER_P(IntervalIs, interval, "") {
std::pair<int64_t, int64_t> arg_pair{arg.lower, arg.upper};
return ::testing::ExplainMatchResult(
::testing::Pair(interval.lower, interval.upper), arg_pair,
result_listener);
}
TEST(IntervalMathTest, Addition) {
Interval a{12, 64};
Interval b{-100, 120};
Interval sum{12 - 100, 64 + 120};
EXPECT_THAT(a + b, IntervalIs(sum));
}
TEST(IntervalMathTest, AdditionSaturating) {
Interval a{12, 64};
Interval b{-100, 120};
Interval c{100, std::numeric_limits<int64_t>::max() - 80};
Interval any{std::numeric_limits<int64_t>::min(),
std::numeric_limits<int64_t>::max()};
Interval positive{0, std::numeric_limits<int64_t>::max()};
Interval negative{std::numeric_limits<int64_t>::min(), 0};
auto range = [](int64_t l, int64_t u) { return Interval{l, u}; };
EXPECT_THAT(positive + negative, IntervalIs(any));
EXPECT_THAT(any + any, IntervalIs(any));
EXPECT_THAT(b + any, IntervalIs(any));
EXPECT_THAT(c + any, IntervalIs(any));
EXPECT_THAT(c + positive,
IntervalIs(range(100, std::numeric_limits<int64_t>::max())));
Interval c_plus_negative{negative.lower, c.upper};
EXPECT_THAT(c + negative, IntervalIs(c_plus_negative));
Interval a_plus_c{112, std::numeric_limits<int64_t>::max() - 16};
EXPECT_THAT(a + c, IntervalIs(a_plus_c));
Interval b_plus_c{0, std::numeric_limits<int64_t>::max()};
EXPECT_THAT(b + c, IntervalIs(b_plus_c));
}
TEST(IntervalMathTest, Multiplication) {
Interval pos{10, 100};
Interval neg{-10, -1};
Interval both_small{-5, 6};
Interval both_large{-20, 1000};
auto range = [](int64_t l, int64_t u) { return Interval{l, u}; };
EXPECT_THAT(pos * neg, IntervalIs(range(-1000, -10)));
EXPECT_THAT(pos * both_small, IntervalIs(range(-500, 600)));
EXPECT_THAT(pos * both_large, IntervalIs(range(-2000, 100000)));
EXPECT_THAT(neg * both_small, IntervalIs(range(-60, 50)));
EXPECT_THAT(neg * both_large, IntervalIs(range(-10000, 200)));
EXPECT_THAT(both_small * both_large, IntervalIs(range(-5000, 6000)));
}
TEST(IntervalMathTest, MultiplicationSaturating) {
Interval any{std::numeric_limits<int64_t>::min(),
std::numeric_limits<int64_t>::max()};
Interval bit33{42, std::numeric_limits<uint32_t>::max()};
Interval bit33_sq{42 * 42, std::numeric_limits<int64_t>::max()};
EXPECT_THAT(bit33 * bit33, IntervalIs(bit33_sq));
EXPECT_THAT(any * any, IntervalIs(any));
Interval greater_41{42, std::numeric_limits<int64_t>::max()};
Interval neg_one{-1, -1};
Interval less_neg_41{std::numeric_limits<int64_t>::min(), -42};
EXPECT_THAT(greater_41 * neg_one, IntervalIs(less_neg_41));
EXPECT_THAT(less_neg_41 * neg_one, IntervalIs(greater_41));
EXPECT_THAT(any * neg_one, IntervalIs(any));
}
template <typename T>
void ExpectSupportsAbslHashAndEqAndNe(absl::Span<const T> values) {
EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly(values));
for (const T& a : values) {
for (const T& b : values) {
EXPECT_EQ(a != b, !(a == b));
}
}
}
TEST_F(IndexingMapTest, IntervalSupportsAbslHashAndEqAndNe) {
ExpectSupportsAbslHashAndEqAndNe<Interval>(
{Interval{1, 1}, Interval{0, 1}, Interval{1, 2}});
}
TEST_F(IndexingMapTest, IntervalSupportsLlvmStyleHashingAndEqAndNe) {
auto check_consistent = [](const Interval& a, const Interval& b) {
if (a == b) {
EXPECT_EQ(hash_value(a), hash_value(b));
}
if (hash_value(a) != hash_value(b)) {
EXPECT_NE(a, b);
}
EXPECT_EQ(a != b, !(a == b));
};
std::vector<Interval> intervals = {Interval{1, 1}, Interval{0, 1},
Interval{1, 2}};
for (const auto& a : intervals) {
for (const auto& b : intervals) {
check_consistent(a, b);
}
}
}
TEST_F(IndexingMapTest, DimVarSupportsAbslHashAndEqAndNe) {
ExpectSupportsAbslHashAndEqAndNe<IndexingMap::Variable>(
{IndexingMap::Variable{1, 1}, IndexingMap::Variable{0, 1},
IndexingMap::Variable{1, 2}});
}
TEST_F(IndexingMapTest, RangeVarSupportsAbslHashAndEqAndNe) {
ExpectSupportsAbslHashAndEqAndNe<IndexingMap::Variable>(
{IndexingMap::Variable{1, 1}, IndexingMap::Variable{0, 1},
IndexingMap::Variable{1, 2}});
}
TEST_F(IndexingMapTest, RTVarSupportsAbslHashAndEqAndNe) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> hlo_module,
ParseAndReturnVerifiedModule(R"(
HloModule m
ENTRY e {
ROOT %constant = s64[] constant(42)
}
)"));
ASSERT_NE(hlo_module, nullptr);
ExpectSupportsAbslHashAndEqAndNe<IndexingMap::Variable>(
{IndexingMap::Variable{Interval{1, 1}},
IndexingMap::Variable{Interval{1, 2}},
IndexingMap::Variable{Interval{1, 2}},
IndexingMap::Variable{Interval{1, 2}}});
}
TEST_F(IndexingMapTest, IndexingMapSupportsAbslHashAndEqAndNe) {
ExpectSupportsAbslHashAndEqAndNe<IndexingMap>(
{Parse(R"(
(d0, d1)[s0, s1] -> (d1, d0, s1, s0),
domain:
d0 in [0, 49],
d1 in [0, 59],
s0 in [0, 69],
s1 in [0, 79]
)"),
Parse(R"(
(d0, d1)[s0, s1] -> (d1 * 2, d0, s1, s0),
domain:
d0 in [0, 49],
d1 in [0, 59],
s0 in [0, 69],
s1 in [0, 79]
)"),
Parse(R"(
(d0, d1)[s0, s1] -> (d1, d0, s1, s0),
domain:
d0 in [0, 50],
d1 in [0, 59],
s0 in [0, 69],
s1 in [0, 79]
)"),
Parse(R"(
(d0, d1)[s0, s1] -> (d1, d0, s1, s0),
domain:
d0 in [0, 49],
d1 in [0, 59],
s0 in [0, 69],
s1 in [0, 79]
)"),
Parse(R"(
(d0, d1)[s0, s1] -> (d1, d0, s1, s0),
domain:
d0 in [0, 49],
d1 in [0, 59],
s0 in [0, 69],
s1 in [0, 79],
d0 mod 8 in [0, 0],
d0 mod 16 in [0, 0]
)"),
Parse(R"(
(d0, d1)[s0, s1] -> (d1, d0, s1, s0),
domain:
d0 in [0, 49],
d1 in [0, 59],
s0 in [0, 69],
s1 in [0, 79],
d0 mod 8 in [0, 0],
d0 mod 32 in [0, 0]
)"),
IndexingMap(
ParseAffineMap("(d0)[s0, s1, s2, s3, s4] -> (d0 * 4 + s1 + s3 - 42)",
&mlir_context_),
{IndexingMap::Variable{{0, 31}}},
{IndexingMap::Variable{{0, 0}}, IndexingMap::Variable{{0, 1}},
IndexingMap::Variable{{0, 2}}},
{IndexingMap::Variable{Interval{0, 3}},
IndexingMap::Variable{Interval{0, 4}}}),
IndexingMap(
ParseAffineMap("(d0)[s0, s1, s2, s3, s4] -> (d0 * 4 + s1 + s3 - 42)",
&mlir_context_),
{IndexingMap::Variable{{0, 31}}},
{IndexingMap::Variable{{0, 0}}, IndexingMap::Variable{{0, 1}},
IndexingMap::Variable{{0, 2}}},
{IndexingMap::Variable{Interval{0, 3}},
IndexingMap::Variable{Interval{0, 5}}})});
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/model/indexing_map.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/model/indexing_map_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
0a557562-a4da-4ac0-a621-6c54bbdd406d | cpp | tensorflow/tensorflow | all_reduce_blueconnect | third_party/xla/xla/service/gpu/transforms/all_reduce_blueconnect.cc | third_party/xla/xla/service/gpu/transforms/all_reduce_blueconnect_test.cc | #include "xla/service/gpu/transforms/all_reduce_blueconnect.h"
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <iterator>
#include <optional>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/btree_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_query.h"
#include "xla/service/collective_ops_utils.h"
#include "xla/service/computation_placer.h"
#include "xla/service/global_device_id.h"
#include "xla/service/hlo_creation_utils.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
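// Flattens the outputs of `instruction`: the instruction itself for array
// shapes, or one get-tuple-element per element for tuple shapes.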
std::vector<HloInstruction*> GetOutputs(HloInstruction& instruction) {
if (!instruction.shape().IsTuple()) {
return {&instruction};
}
std::vector<HloInstruction*> outputs;
outputs.reserve(instruction.shape().tuple_shapes_size());
HloComputation& computation = *instruction.parent();
for (int i = 0; i < instruction.shape().tuple_shapes_size(); ++i) {
outputs.push_back(computation.AddInstruction(
HloInstruction::CreateGetTupleElement(&instruction, i)));
}
return outputs;
}
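// Replica groups for the intra-host reduce-scatter/all-gather phases and
// for the cross-host all-reduce phase of the BlueConnect-style
// decomposition.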
struct DecomposedReplicaGroups {
std::vector<ReplicaGroup> scatter_gather_groups;
std::vector<ReplicaGroup> new_all_reduce_groups;
};
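// Maps a replica id from a replica group to a global device id. Supports
// the cross-replica mode (only with a single partition) and the
// flattened-id mode; returns std::nullopt otherwise.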
std::optional<GlobalDeviceId> TryConvertingReplicaIdToDeviceId(
int64_t replica_id, const DeviceAssignment& device_assignment,
CollectiveOpGroupMode collective_group_mode) {
if (collective_group_mode == CollectiveOpGroupMode::kCrossReplica) {
if (device_assignment.computation_count() != 1) {
return std::nullopt;
}
return GlobalDeviceId{device_assignment(replica_id, 0)};
} else if (collective_group_mode == CollectiveOpGroupMode::kFlattenedID) {
int partition_count = device_assignment.computation_count();
int64_t actual_replica_id = replica_id / partition_count;
int64_t partition_id = replica_id % partition_count;
return GlobalDeviceId{device_assignment(actual_replica_id, partition_id)};
}
VLOG(1) << "Skip AllReduceBlueConnect because of unsupported "
"CollectiveOpGroupMode "
<< CollectiveOpGroupModeToString(collective_group_mode);
return std::nullopt;
}
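// Attempts to decompose a single replica group: replica ids are bucketed
// by the host of their device and sorted host-by-host, then split into
// `scatter_gather_groups` (consecutive chunks, normally one per host) and
// `new_all_reduce_groups` that connect the chunks across hosts. Returns
// std::nullopt if hosts are unevenly populated, the group size does not
// divide evenly, or fewer than two scatter groups would result.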
absl::StatusOr<std::optional<DecomposedReplicaGroups>> TryDecomposeReplicaGroup(
const ReplicaGroup& replica_group,
const DeviceAssignment& device_assignment, size_t num_devices_per_host,
CollectiveOpGroupMode collective_group_mode) {
int group_size = replica_group.replica_ids_size();
TF_RET_CHECK(group_size > 0);
absl::btree_map<int, std::vector<int64_t>> replica_ids_by_host;
for (int64_t replica_id : replica_group.replica_ids()) {
std::optional<GlobalDeviceId> device_id = TryConvertingReplicaIdToDeviceId(
replica_id, device_assignment, collective_group_mode);
if (!device_id.has_value()) {
return {std::nullopt};
}
TF_RET_CHECK(*device_id >= 0);
int host_id = device_id->value() / num_devices_per_host;
replica_ids_by_host[host_id].push_back(replica_id);
}
size_t num_local_devices = replica_ids_by_host.begin()->second.size();
bool same_num_devices_on_each_host =
absl::c_all_of(replica_ids_by_host, [&](const auto& entry) {
return entry.second.size() == num_local_devices;
});
if (!same_num_devices_on_each_host) {
return {std::nullopt};
}
std::vector<int64_t> sorted_replica_group;
sorted_replica_group.reserve(group_size);
for (const auto& entry : replica_ids_by_host) {
absl::c_copy(entry.second, std::back_inserter(sorted_replica_group));
}
  // Scatter/gather groups span the devices of one host, but never fewer than
  // two devices, so the reduce-scatter/all-gather stages stay non-trivial.
  size_t scatter_group_size = std::max(num_local_devices, size_t(2));
size_t num_scatter_groups = group_size / scatter_group_size;
if ((group_size % scatter_group_size != 0) || (num_scatter_groups < 2)) {
return {std::nullopt};
}
std::vector<ReplicaGroup> scatter_gather_groups(num_scatter_groups);
std::vector<ReplicaGroup> new_all_reduce_groups(scatter_group_size);
for (size_t i = 0; i < group_size; ++i) {
int64_t replica_id = sorted_replica_group[i];
scatter_gather_groups[i / scatter_group_size].add_replica_ids(replica_id);
new_all_reduce_groups[i % scatter_group_size].add_replica_ids(replica_id);
}
return {DecomposedReplicaGroups{std::move(scatter_gather_groups),
std::move(new_all_reduce_groups)}};
}
absl::StatusOr<std::optional<DecomposedReplicaGroups>>
TryDecomposeReplicaGroups(const HloAllReduceInstruction& all_reduce,
size_t num_devices_per_host) {
const DeviceAssignment& device_assignment =
all_reduce.GetModule()->config().static_device_assignment();
absl::Span<const ReplicaGroup> replica_groups = all_reduce.replica_groups();
ReplicaGroup all_replicas;
if (replica_groups.empty()) {
for (int i = 0; i < device_assignment.replica_count(); ++i) {
all_replicas.add_replica_ids(i);
}
replica_groups = absl::MakeSpan(&all_replicas, 1);
}
TF_ASSIGN_OR_RETURN(
CollectiveOpGroupMode collective_op_group_mode,
GetCollectiveOpGroupMode(all_reduce.channel_id().has_value(),
all_reduce.use_global_device_ids()));
std::vector<ReplicaGroup> scatter_gather_groups;
std::vector<ReplicaGroup> new_all_reduce_groups;
for (const ReplicaGroup& replica_group : replica_groups) {
TF_ASSIGN_OR_RETURN(
std::optional<DecomposedReplicaGroups> decomposed_groups,
TryDecomposeReplicaGroup(replica_group, device_assignment,
num_devices_per_host,
collective_op_group_mode));
if (!decomposed_groups) return {std::nullopt};
int scatter_group_size =
decomposed_groups->scatter_gather_groups[0].replica_ids_size();
if (scatter_gather_groups.empty()) {
for (const HloInstruction* operand : all_reduce.operands()) {
TF_RET_CHECK(operand->shape().IsArray());
int64_t num_elements = ShapeUtil::ElementsIn(operand->shape());
if (num_elements % scatter_group_size != 0) {
return {std::nullopt};
}
}
scatter_gather_groups.reserve(
replica_groups.size() *
decomposed_groups->scatter_gather_groups.size());
new_all_reduce_groups.reserve(
replica_groups.size() *
decomposed_groups->new_all_reduce_groups.size());
} else if (scatter_group_size !=
scatter_gather_groups[0].replica_ids_size()) {
return {std::nullopt};
}
absl::c_move(decomposed_groups->scatter_gather_groups,
std::back_inserter(scatter_gather_groups));
absl::c_move(decomposed_groups->new_all_reduce_groups,
std::back_inserter(new_all_reduce_groups));
}
return {DecomposedReplicaGroups{std::move(scatter_gather_groups),
std::move(new_all_reduce_groups)}};
}
absl::StatusOr<bool> TryDecomposeAllReduce(HloAllReduceInstruction* all_reduce,
size_t num_devices_per_host) {
TF_RET_CHECK(all_reduce);
TF_RET_CHECK(!all_reduce->has_sharding());
HloComputation& computation = *all_reduce->parent();
PrimitiveType element_type = all_reduce->operand(0)->shape().element_type();
TF_ASSIGN_OR_RETURN(
std::optional<DecomposedReplicaGroups> decomposed_groups,
TryDecomposeReplicaGroups(*all_reduce, num_devices_per_host));
if (!decomposed_groups) return false;
std::vector<HloInstruction*> flat_operands;
flat_operands.reserve(all_reduce->operand_count());
std::vector<Shape> flat_shapes;
flat_shapes.reserve(all_reduce->operand_count());
std::vector<Shape> scattered_shapes;
scattered_shapes.reserve(all_reduce->operand_count());
int scatter_group_size =
decomposed_groups->scatter_gather_groups[0].replica_ids_size();
for (HloInstruction* operand : all_reduce->operands()) {
TF_RET_CHECK(operand->shape().IsArray());
int64_t num_elements = ShapeUtil::ElementsIn(operand->shape());
Shape flat_shape = ShapeUtil::MakeShape(element_type, {num_elements});
flat_operands.push_back(computation.AddInstruction(
HloInstruction::CreateBitcast(flat_shape, operand)));
flat_shapes.push_back(std::move(flat_shape));
scattered_shapes.push_back(ShapeUtil::MakeShape(
element_type, {num_elements / scatter_group_size}));
}
Shape reduce_scatter_shape = ShapeUtil::MakeMaybeTupleShape(scattered_shapes);
int64_t next_channel_id = hlo_query::NextChannelId(*computation.parent());
auto get_channel_id = [&]() -> std::optional<int64_t> {
if (all_reduce->channel_id().has_value()) {
return next_channel_id++;
}
return std::nullopt;
};
HloInstruction* reduce_scatter =
computation.AddInstruction(HloInstruction::CreateReduceScatter(
reduce_scatter_shape, flat_operands, all_reduce->to_apply(),
CollectiveDeviceList(decomposed_groups->scatter_gather_groups),
          /*constrain_layout=*/false, get_channel_id(),
          all_reduce->use_global_device_ids(),
          /*scatter_dimension=*/0));
HloInstruction* new_all_reduce =
computation.AddInstruction(HloInstruction::CreateAllReduce(
reduce_scatter_shape, GetOutputs(*reduce_scatter),
all_reduce->to_apply(),
CollectiveDeviceList(decomposed_groups->new_all_reduce_groups),
          /*constrain_layout=*/false, all_reduce->channel_id(),
all_reduce->use_global_device_ids()));
HloInstruction* all_gather =
computation.AddInstruction(HloInstruction::CreateAllGather(
ShapeUtil::MakeMaybeTupleShape(flat_shapes),
GetOutputs(*new_all_reduce),
          /*all_gather_dimension=*/0,
          CollectiveDeviceList(decomposed_groups->scatter_gather_groups),
          /*constrain_layout=*/false, get_channel_id(),
all_reduce->use_global_device_ids()));
std::vector<HloInstruction*> outputs = GetOutputs(*all_gather);
for (int64_t i = 0; i < outputs.size(); ++i) {
outputs[i] = computation.AddInstruction(HloInstruction::CreateBitcast(
all_reduce->operand(i)->shape(), outputs[i]));
}
HloInstruction* replacement = MaybeMakeTuple(outputs);
TF_RETURN_IF_ERROR(
all_reduce->CopyAllControlDepsTo(reduce_scatter, replacement));
TF_RETURN_IF_ERROR(all_reduce->DropAllControlDeps());
TF_RETURN_IF_ERROR(computation.ReplaceInstruction(all_reduce, replacement));
TF_RETURN_IF_ERROR(
TryDecomposeAllReduce(Cast<HloAllReduceInstruction>(new_all_reduce),
num_devices_per_host)
.status());
return true;
}
}
absl::StatusOr<bool> AllReduceBlueConnect::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
VLOG(1) << "Running AllReduceBlueConnect";
if (hlo_query::ContainsLayoutConstrainedAllReduce(*module)) {
VLOG(1)
<< "Skip AllReduceBlueConnect because the module contains all-reduce "
"with constrained layouts";
return false;
}
if (!module->config().has_static_device_assignment()) {
VLOG(1)
<< "Skip AllReduceBlueConnect because the module doesn't have static "
"device assignment";
return false;
}
std::vector<HloAllReduceInstruction*> all_reduces;
for (HloComputation* computation :
module->MakeNonfusionComputations(execution_threads)) {
for (HloInstruction* instruction : computation->instructions()) {
if (instruction->opcode() == HloOpcode::kAllReduce) {
all_reduces.push_back(Cast<HloAllReduceInstruction>(instruction));
}
}
}
bool changed = false;
for (HloAllReduceInstruction* all_reduce : all_reduces) {
TF_ASSIGN_OR_RETURN(
bool all_reduce_changed,
TryDecomposeAllReduce(all_reduce, num_devices_per_host_));
changed |= all_reduce_changed;
}
return changed;
}
} | #include "xla/service/gpu/transforms/all_reduce_blueconnect.h"
#include <cstddef>
#include <cstdint>
#include <memory>
#include <optional>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/computation_placer.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/util.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
using ::tsl::testing::IsOkAndHolds;
namespace m = ::xla::match;
using AllReduceBlueConnectTest = HloTestBase;
HloPredicate MatchChannelId(std::optional<int64_t> channel_id) {
return [channel_id](const HloInstruction* instruction) {
return instruction->channel_id() == channel_id;
};
}
void SetModuleConfig(HloModuleConfig* module_config, size_t replica_count,
size_t partition_count = 1) {
  DeviceAssignment device_assignment(/*replica_count=*/replica_count,
                                     /*computation_count=*/partition_count);
device_assignment.FillIota(0);
module_config->set_replica_count(replica_count);
module_config->set_num_partitions(partition_count);
module_config->set_static_device_assignment(device_assignment);
}
void SetModuleConfig(HloModule& module, size_t replica_count,
size_t partition_count = 1) {
SetModuleConfig(&module.mutable_config(), replica_count, partition_count);
}
TEST_F(AllReduceBlueConnectTest, OneStage) {
constexpr absl::string_view hlo_string = R"(
HloModule module
%add {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT add = f32[] add(lhs, rhs)
}
ENTRY %comp {
p0 = f32[4,4] parameter(0)
ROOT crs = f32[4,4] all-reduce(p0), to_apply=add
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
SetModuleConfig(*module, 8);
AllReduceBlueConnect pass(4);
EXPECT_THAT(pass.Run(module.get()), IsOkAndHolds(true));
std::vector<std::vector<int64_t>> scatter_gather_groups = {
{0, 1, 2, 3}, {4, 5, 6, 7}};
std::vector<std::vector<int64_t>> new_all_reduce_groups = {
{0, 4}, {1, 5}, {2, 6}, {3, 7}};
auto bitcast = m::Bitcast(m::Parameter(0)).WithShape(F32, {16});
auto reduce_scatter = m::ReduceScatter(bitcast)
.WithShape(F32, {4})
.WithReplicaGroups(scatter_gather_groups)
.WithPredicate(MatchChannelId(std::nullopt));
auto all_reduce = m::AllReduce(reduce_scatter)
.WithShape(F32, {4})
.WithReplicaGroups(new_all_reduce_groups)
.WithPredicate(MatchChannelId(std::nullopt));
auto all_gather = m::AllGather(all_reduce)
.WithShape(F32, {16})
.WithReplicaGroups(scatter_gather_groups)
.WithPredicate(MatchChannelId(std::nullopt));
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Bitcast(all_gather).WithShape(F32, {4, 4})));
}
TEST_F(AllReduceBlueConnectTest, TwoStage) {
constexpr absl::string_view hlo_string = R"(
HloModule module
%add {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT add = f32[] add(lhs, rhs)
}
ENTRY %comp {
p0 = f32[4,4] parameter(0)
ROOT crs = f32[4,4] all-reduce(p0), to_apply=add
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
SetModuleConfig(*module, 16);
AllReduceBlueConnect pass(4);
EXPECT_THAT(pass.Run(module.get()), IsOkAndHolds(true));
std::vector<std::vector<int64_t>> outer_scatter_gather_groups = {
{0, 1, 2, 3}, {4, 5, 6, 7}, {8, 9, 10, 11}, {12, 13, 14, 15}};
std::vector<std::vector<int64_t>> inner_scatter_gather_groups = {
{0, 4}, {8, 12}, {1, 5}, {9, 13}, {2, 6}, {10, 14}, {3, 7}, {11, 15}};
std::vector<std::vector<int64_t>> new_all_reduce_groups = {
{0, 8}, {4, 12}, {1, 9}, {5, 13}, {2, 10}, {6, 14}, {3, 11}, {7, 15}};
auto bitcast0 = m::Bitcast(m::Parameter(0)).WithShape(F32, {16});
auto reduce_scatter0 =
m::ReduceScatter(bitcast0).WithShape(F32, {4}).WithReplicaGroups(
outer_scatter_gather_groups);
auto bitcast1 = m::Bitcast(reduce_scatter0).WithShape(F32, {4});
auto reduce_scatter1 =
m::ReduceScatter(bitcast1).WithShape(F32, {2}).WithReplicaGroups(
inner_scatter_gather_groups);
auto all_reduce = m::AllReduce(reduce_scatter1)
.WithShape(F32, {2})
.WithReplicaGroups(new_all_reduce_groups);
auto all_gather0 = m::AllGather(all_reduce)
.WithShape(F32, {4})
.WithReplicaGroups(inner_scatter_gather_groups);
auto bitcast2 = m::Bitcast(all_gather0).WithShape(F32, {4});
auto all_gather1 =
m::AllGather(bitcast2).WithShape(F32, {16}).WithReplicaGroups(
outer_scatter_gather_groups);
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Bitcast(all_gather1).WithShape(F32, {4, 4})));
}
TEST_F(AllReduceBlueConnectTest, TwoOperands) {
constexpr absl::string_view hlo_string = R"(
HloModule module
%add {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT add = f32[] add(lhs, rhs)
}
ENTRY %comp {
p0 = f32[4,4] parameter(0)
p1 = f32[4,4,2] parameter(1)
ROOT crs = (f32[4,4], f32[4,4,2]) all-reduce(p0, p1), to_apply=add
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
SetModuleConfig(*module, 8);
AllReduceBlueConnect pass(4);
EXPECT_THAT(pass.Run(module.get()), IsOkAndHolds(true));
std::vector<std::vector<int64_t>> scatter_gather_groups = {
{0, 1, 2, 3}, {4, 5, 6, 7}};
std::vector<std::vector<int64_t>> new_all_reduce_groups = {
{0, 4}, {1, 5}, {2, 6}, {3, 7}};
auto bitcast0 = m::Bitcast(m::Parameter(0)).WithShape(F32, {16});
auto bitcast1 = m::Bitcast(m::Parameter(1)).WithShape(F32, {32});
Shape expected0 = ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeShape(F32, {4}), ShapeUtil::MakeShape(F32, {8})});
Shape expected1 = ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeShape(F32, {16}), ShapeUtil::MakeShape(F32, {32})});
auto reduce_scatter = m::ReduceScatter(bitcast0, bitcast1)
.WithShapeEqualTo(&expected0)
.WithReplicaGroups(scatter_gather_groups);
auto all_reduce = m::AllReduce(m::GetTupleElement(reduce_scatter, 0),
m::GetTupleElement(reduce_scatter, 1))
.WithShapeEqualTo(&expected0)
.WithReplicaGroups(new_all_reduce_groups);
auto all_gather = m::AllGather(m::GetTupleElement(all_reduce, 0),
m::GetTupleElement(all_reduce, 1))
.WithShapeEqualTo(&expected1)
.WithReplicaGroups(scatter_gather_groups);
auto bitcast2 =
m::Bitcast(m::GetTupleElement(all_gather, 0)).WithShape(F32, {4, 4});
auto bitcast3 =
m::Bitcast(m::GetTupleElement(all_gather, 1)).WithShape(F32, {4, 4, 2});
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Tuple(bitcast2, bitcast3)));
}
TEST_F(AllReduceBlueConnectTest, MultiplePartitionsFilecheck) {
constexpr absl::string_view hlo_string = R"(
HloModule module
%add {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT add = f32[] add(lhs, rhs)
}
ENTRY %comp {
p0 = f32[8,8] parameter(0)
ROOT crs = f32[8,8] all-reduce(p0), channel_id=1,
replica_groups={{0,1,2,3,4,5,6,7}}, use_global_device_ids=true, to_apply=add
})";
HloModuleConfig module_config;
SetModuleConfig(&module_config, 1, 8);
AllReduceBlueConnect pass(4);
RunAndFilecheckHloRewrite(hlo_string, std::move(pass), R"(
CHECK: %p0 = f32[8,8]{1,0} parameter(0)
CHECK-NEXT: [[bitcast:%[^ ]+]] = f32[64]{0} bitcast(%p0)
CHECK-NEXT: [[reduce_scatter:%[^ ]+]] = f32[16]{0} reduce-scatter([[bitcast]]), channel_id=2, replica_groups={{..0,1,2,3.,.4,5,6,7..}}, use_global_device_ids=true, dimensions={0}, to_apply=%add
CHECK-NEXT: [[all_reduce:%[^ ]+]] = f32[16]{0} all-reduce([[reduce_scatter]]), channel_id=1, replica_groups={{..0,4.,.1,5.,.2,6.,.3,7..}}, use_global_device_ids=true, to_apply=%add
CHECK-NEXT: [[all_gather:%[^ ]+]] = f32[64]{0} all-gather([[all_reduce]]), channel_id=3, replica_groups={{..0,1,2,3.,.4,5,6,7..}}, dimensions={0}, use_global_device_ids=true
CHECK-NEXT: ROOT [[output:%[^ ]+]] = f32[8,8]{1,0} bitcast([[all_gather]])
}
)",
nullptr, &module_config);
}
TEST_F(AllReduceBlueConnectTest, DifferentNumLocalDevicesWithinReplicaGroup) {
constexpr absl::string_view hlo_string = R"(
HloModule module
%add {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT add = f32[] add(lhs, rhs)
}
ENTRY %comp {
p0 = f32[4,4] parameter(0)
ROOT crs = f32[4,4] all-reduce(p0),
replica_groups={{0,1,2,7},{3,4,5,6}}, to_apply=add
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
SetModuleConfig(*module, 8);
AllReduceBlueConnect pass(4);
EXPECT_THAT(pass.Run(module.get()), IsOkAndHolds(false));
}
TEST_F(AllReduceBlueConnectTest, DifferentNumLocalDevicesAcrossReplicaGroups) {
constexpr absl::string_view hlo_string = R"(
HloModule module
%add {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT add = f32[] add(lhs, rhs)
}
ENTRY %comp {
p0 = f32[4,4] parameter(0)
ROOT crs = f32[4,4] all-reduce(p0),
replica_groups={{0,1,4,5},{2,3,6,7},{8,9,10,11},{12,13,14,15}}, to_apply=add
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
SetModuleConfig(*module, 16);
AllReduceBlueConnect pass(4);
EXPECT_THAT(pass.Run(module.get()), IsOkAndHolds(false));
}
TEST_F(AllReduceBlueConnectTest, OperandIndivisible) {
constexpr absl::string_view hlo_string = R"(
HloModule module
%add {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT add = f32[] add(lhs, rhs)
}
ENTRY %comp {
p0 = f32[4,4] parameter(0)
p1 = f32[9] parameter(1)
ROOT crs = (f32[4,4], f32[9]) all-reduce(p0, p1), to_apply=add
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
SetModuleConfig(*module, 8);
AllReduceBlueConnect pass(4);
EXPECT_THAT(pass.Run(module.get()), IsOkAndHolds(false));
}
TEST_F(AllReduceBlueConnectTest, ControlDeps) {
constexpr absl::string_view hlo_string = R"(
HloModule module
%add {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT add = f32[] add(lhs, rhs)
}
ENTRY %comp {
p0 = f32[4,4] parameter(0)
p1 = f32[4,4] parameter(1)
add = f32[4,4] add(p0, p1)
crs = f32[4,4] all-reduce(p0), to_apply=add, control-predecessors={add}
ROOT add1 = f32[4,4] add(crs, add), control-predecessors={crs}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
SetModuleConfig(*module, 8);
const HloInstruction* ar =
module->entry_computation()->root_instruction()->operand(0);
auto expected_preds = ar->control_predecessors();
auto expected_succs = ar->control_successors();
AllReduceBlueConnect pass(4);
EXPECT_THAT(pass.Run(module.get()), IsOkAndHolds(true));
std::vector<std::vector<int64_t>> scatter_gather_groups = {
{0, 1, 2, 3}, {4, 5, 6, 7}};
std::vector<std::vector<int64_t>> new_all_reduce_groups = {
{0, 4}, {1, 5}, {2, 6}, {3, 7}};
const HloInstruction *matched_rs, *matched_bitcast;
auto bitcast = m::Bitcast(m::Parameter(0)).WithShape(F32, {16});
auto reduce_scatter = m::ReduceScatter(&matched_rs, bitcast)
.WithShape(F32, {4})
.WithReplicaGroups(scatter_gather_groups);
auto all_reduce = m::AllReduce(reduce_scatter)
.WithShape(F32, {4})
.WithReplicaGroups(new_all_reduce_groups);
auto all_gather = m::AllGather(all_reduce)
.WithShape(F32, {16})
.WithReplicaGroups(scatter_gather_groups);
HloInstruction* root = module->entry_computation()->root_instruction();
ASSERT_THAT(root, GmockMatch(m::Add()));
EXPECT_THAT(
root->operand(0),
GmockMatch(
m::Bitcast(&matched_bitcast, all_gather).WithShape(F32, {4, 4})));
EXPECT_THAT(matched_rs, GmockMatch(m::Op().WithControlDeps(
absl::MakeSpan(expected_preds), {})));
EXPECT_THAT(matched_bitcast, GmockMatch(m::Op().WithControlDeps(
{}, absl::MakeSpan(expected_succs))));
}
TEST_F(AllReduceBlueConnectTest, ReduceScatterUnchanged) {
constexpr absl::string_view hlo_string = R"(
HloModule module
%add {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT add = f32[] add(lhs, rhs)
}
ENTRY %comp {
p0 = f32[8,4] parameter(0)
ROOT crs = f32[1,4] reduce-scatter(p0), dimensions={0}, to_apply=add
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
SetModuleConfig(*module, 8);
AllReduceBlueConnect pass(4);
EXPECT_THAT(pass.Run(module.get()), IsOkAndHolds(false));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/all_reduce_blueconnect.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/all_reduce_blueconnect_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
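Outside the test fixture, driving the pass looks like the sketch below. It assumes a parsed xla::HloModule whose config already carries a static device assignment (the tests arrange this via SetModuleConfig); RunBlueConnect is a hypothetical helper name, not part of the source.

#include "absl/status/statusor.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/gpu/transforms/all_reduce_blueconnect.h"

absl::StatusOr<bool> RunBlueConnect(xla::HloModule* module) {
  // Hosts with four local devices each, matching the tests above.
  xla::AllReduceBlueConnect pass(/*num_devices_per_host=*/4);
  return pass.Run(module);  // True iff at least one all-reduce was decomposed.
}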
7159b4fd-095b-4682-92bb-0b9ec2944fde | cpp | abseil/abseil-cpp | salted_seed_seq | absl/random/internal/salted_seed_seq.h | absl/random/internal/salted_seed_seq_test.cc | #ifndef ABSL_RANDOM_INTERNAL_SALTED_SEED_SEQ_H_
#define ABSL_RANDOM_INTERNAL_SALTED_SEED_SEQ_H_
#include <cstdint>
#include <cstdlib>
#include <initializer_list>
#include <iterator>
#include <memory>
#include <type_traits>
#include <utility>
#include <vector>
#include "absl/container/inlined_vector.h"
#include "absl/meta/type_traits.h"
#include "absl/random/internal/seed_material.h"
#include "absl/types/optional.h"
#include "absl/types/span.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace random_internal {
template <typename SSeq>
class SaltedSeedSeq {
public:
using inner_sequence_type = SSeq;
using result_type = typename SSeq::result_type;
SaltedSeedSeq() : seq_(absl::make_unique<SSeq>()) {}
template <typename Iterator>
SaltedSeedSeq(Iterator begin, Iterator end)
: seq_(absl::make_unique<SSeq>(begin, end)) {}
template <typename T>
SaltedSeedSeq(std::initializer_list<T> il)
: SaltedSeedSeq(il.begin(), il.end()) {}
SaltedSeedSeq(const SaltedSeedSeq&) = delete;
SaltedSeedSeq& operator=(const SaltedSeedSeq&) = delete;
SaltedSeedSeq(SaltedSeedSeq&&) = default;
SaltedSeedSeq& operator=(SaltedSeedSeq&&) = default;
template <typename RandomAccessIterator>
void generate(RandomAccessIterator begin, RandomAccessIterator end) {
using U = typename std::iterator_traits<RandomAccessIterator>::value_type;
    // Salt in place when the destination is a contiguous uint32_t range;
    // otherwise generate into a temporary buffer and copy the salted values.
    using TagType = absl::conditional_t<
(std::is_same<U, uint32_t>::value &&
(std::is_pointer<RandomAccessIterator>::value ||
std::is_same<RandomAccessIterator,
typename std::vector<U>::iterator>::value)),
ContiguousAndUint32Tag, DefaultTag>;
if (begin != end) {
generate_impl(TagType{}, begin, end, std::distance(begin, end));
}
}
template <typename OutIterator>
void param(OutIterator out) const {
seq_->param(out);
}
size_t size() const { return seq_->size(); }
private:
struct ContiguousAndUint32Tag {};
struct DefaultTag {};
template <typename Contiguous>
void generate_impl(ContiguousAndUint32Tag, Contiguous begin, Contiguous end,
size_t n) {
seq_->generate(begin, end);
const uint32_t salt = absl::random_internal::GetSaltMaterial().value_or(0);
auto span = absl::Span<uint32_t>(&*begin, n);
MixIntoSeedMaterial(absl::MakeConstSpan(&salt, 1), span);
}
template <typename RandomAccessIterator>
void generate_impl(DefaultTag, RandomAccessIterator begin,
RandomAccessIterator, size_t n) {
absl::InlinedVector<uint32_t, 8> data(n, 0);
generate_impl(ContiguousAndUint32Tag{}, data.begin(), data.end(), n);
std::copy(data.begin(), data.end(), begin);
}
std::unique_ptr<SSeq> seq_;
};
template <typename T, typename = void>
struct is_salted_seed_seq : public std::false_type {};
template <typename T>
struct is_salted_seed_seq<
T, typename std::enable_if<std::is_same<
T, SaltedSeedSeq<typename T::inner_sequence_type>>::value>::type>
: public std::true_type {};
template <
typename SSeq,
typename EnableIf = absl::enable_if_t<is_salted_seed_seq<SSeq>::value>>
SSeq MakeSaltedSeedSeq(SSeq&& seq) {
return SSeq(std::forward<SSeq>(seq));
}
template <
typename SSeq,
typename EnableIf = absl::enable_if_t<!is_salted_seed_seq<SSeq>::value>>
SaltedSeedSeq<typename std::decay<SSeq>::type> MakeSaltedSeedSeq(SSeq&& seq) {
using sseq_type = typename std::decay<SSeq>::type;
using result_type = typename sseq_type::result_type;
absl::InlinedVector<result_type, 8> data;
seq.param(std::back_inserter(data));
return SaltedSeedSeq<sseq_type>(data.begin(), data.end());
}
}
ABSL_NAMESPACE_END
}
#endif | #include "absl/random/internal/salted_seed_seq.h"
#include <iterator>
#include <random>
#include <utility>
#include <vector>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
using absl::random_internal::GetSaltMaterial;
using absl::random_internal::MakeSaltedSeedSeq;
using absl::random_internal::SaltedSeedSeq;
using testing::Eq;
using testing::Pointwise;
namespace {
template <typename Sseq>
void ConformsToInterface() {
{ Sseq default_constructed_seq; }
{
uint32_t init_array[] = {1, 3, 5, 7, 9};
Sseq iterator_constructed_seq(std::begin(init_array), std::end(init_array));
}
{ Sseq list_constructed_seq = {1, 3, 5, 7, 9, 11, 13}; }
{
uint32_t init_array[] = {1, 2, 3, 4, 5};
Sseq seq(std::begin(init_array), std::end(init_array));
EXPECT_EQ(seq.size(), ABSL_ARRAYSIZE(init_array));
std::vector<uint32_t> state_vector;
seq.param(std::back_inserter(state_vector));
EXPECT_EQ(state_vector.size(), ABSL_ARRAYSIZE(init_array));
for (int i = 0; i < state_vector.size(); i++) {
EXPECT_EQ(state_vector[i], i + 1);
}
}
{
Sseq seq;
uint32_t seeds[5];
seq.generate(std::begin(seeds), std::end(seeds));
}
}
TEST(SaltedSeedSeq, CheckInterfaces) {
ConformsToInterface<std::seed_seq>();
ConformsToInterface<SaltedSeedSeq<std::seed_seq>>();
}
TEST(SaltedSeedSeq, CheckConstructingFromOtherSequence) {
std::vector<uint32_t> seed_values(10, 1);
std::seed_seq seq(seed_values.begin(), seed_values.end());
auto salted_seq = MakeSaltedSeedSeq(std::move(seq));
EXPECT_EQ(seq.size(), salted_seq.size());
std::vector<uint32_t> param_result;
seq.param(std::back_inserter(param_result));
EXPECT_EQ(seed_values, param_result);
}
TEST(SaltedSeedSeq, SaltedSaltedSeedSeqIsNotDoubleSalted) {
uint32_t init[] = {1, 3, 5, 7, 9};
std::seed_seq seq(std::begin(init), std::end(init));
SaltedSeedSeq<std::seed_seq> salted_seq = MakeSaltedSeedSeq(std::move(seq));
uint32_t a[16];
salted_seq.generate(std::begin(a), std::end(a));
SaltedSeedSeq<std::seed_seq> salted_salted_seq =
MakeSaltedSeedSeq(std::move(salted_seq));
uint32_t b[16];
salted_salted_seq.generate(std::begin(b), std::end(b));
EXPECT_THAT(b, Pointwise(Eq(), a)) << "a[0] " << a[0];
}
TEST(SaltedSeedSeq, SeedMaterialIsSalted) {
const size_t kNumBlocks = 16;
uint32_t seed_material[kNumBlocks];
std::random_device urandom{"/dev/urandom"};
for (uint32_t& seed : seed_material) {
seed = urandom();
}
std::seed_seq seq(std::begin(seed_material), std::end(seed_material));
SaltedSeedSeq<std::seed_seq> salted_seq(std::begin(seed_material),
std::end(seed_material));
bool salt_is_available = GetSaltMaterial().has_value();
if (salt_is_available) {
uint32_t outputs[kNumBlocks];
uint32_t salted_outputs[kNumBlocks];
seq.generate(std::begin(outputs), std::end(outputs));
salted_seq.generate(std::begin(salted_outputs), std::end(salted_outputs));
EXPECT_THAT(outputs, Pointwise(testing::Ne(), salted_outputs));
}
}
TEST(SaltedSeedSeq, GenerateAcceptsDifferentTypes) {
const size_t kNumBlocks = 4;
SaltedSeedSeq<std::seed_seq> seq({1, 2, 3});
uint32_t expected[kNumBlocks];
seq.generate(std::begin(expected), std::end(expected));
{
unsigned long seed_material[kNumBlocks];
seq.generate(std::begin(seed_material), std::end(seed_material));
EXPECT_THAT(seed_material, Pointwise(Eq(), expected));
}
{
unsigned int seed_material[kNumBlocks];
seq.generate(std::begin(seed_material), std::end(seed_material));
EXPECT_THAT(seed_material, Pointwise(Eq(), expected));
}
{
uint64_t seed_material[kNumBlocks];
seq.generate(std::begin(seed_material), std::end(seed_material));
EXPECT_THAT(seed_material, Pointwise(Eq(), expected));
}
{
int64_t seed_material[kNumBlocks];
seq.generate(std::begin(seed_material), std::end(seed_material));
EXPECT_THAT(seed_material, Pointwise(Eq(), expected));
}
}
} | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/random/internal/salted_seed_seq.h | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/random/internal/salted_seed_seq_test.cc | 03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4 |
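A usage sketch grounded in the header and tests above: seed material is routed through MakeSaltedSeedSeq so the salt is mixed in before the bits reach an engine. The engine choice (std::mt19937) and the helper name MakeSaltedEngine are illustrative, not from the source.

#include <random>
#include <utility>

#include "absl/random/internal/salted_seed_seq.h"

std::mt19937 MakeSaltedEngine() {
  std::seed_seq seq{1, 3, 5, 7, 9};
  // Copies the seed_seq's parameters into a SaltedSeedSeq wrapper.
  auto salted = absl::random_internal::MakeSaltedSeedSeq(std::move(seq));
  // The engine's seed-sequence constructor calls salted.generate(), which
  // mixes per-process salt into the material when GetSaltMaterial() is set.
  return std::mt19937(salted);
}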
d6039166-212e-4c5c-a02a-ee5b64913499 | cpp | google/leveldb | memenv | helpers/memenv/memenv.cc | helpers/memenv/memenv_test.cc | #include "helpers/memenv/memenv.h"
#include <cstring>
#include <limits>
#include <map>
#include <string>
#include <vector>
#include "leveldb/env.h"
#include "leveldb/status.h"
#include "port/port.h"
#include "port/thread_annotations.h"
#include "util/mutexlock.h"
namespace leveldb {
namespace {
class FileState {
public:
FileState() : refs_(0), size_(0) {}
FileState(const FileState&) = delete;
FileState& operator=(const FileState&) = delete;
void Ref() {
MutexLock lock(&refs_mutex_);
++refs_;
}
void Unref() {
bool do_delete = false;
{
MutexLock lock(&refs_mutex_);
--refs_;
assert(refs_ >= 0);
if (refs_ <= 0) {
do_delete = true;
}
}
if (do_delete) {
delete this;
}
}
uint64_t Size() const {
MutexLock lock(&blocks_mutex_);
return size_;
}
void Truncate() {
MutexLock lock(&blocks_mutex_);
for (char*& block : blocks_) {
delete[] block;
}
blocks_.clear();
size_ = 0;
}
Status Read(uint64_t offset, size_t n, Slice* result, char* scratch) const {
MutexLock lock(&blocks_mutex_);
if (offset > size_) {
return Status::IOError("Offset greater than file size.");
}
const uint64_t available = size_ - offset;
if (n > available) {
n = static_cast<size_t>(available);
}
if (n == 0) {
*result = Slice();
return Status::OK();
}
assert(offset / kBlockSize <= std::numeric_limits<size_t>::max());
size_t block = static_cast<size_t>(offset / kBlockSize);
size_t block_offset = offset % kBlockSize;
size_t bytes_to_copy = n;
char* dst = scratch;
while (bytes_to_copy > 0) {
size_t avail = kBlockSize - block_offset;
if (avail > bytes_to_copy) {
avail = bytes_to_copy;
}
std::memcpy(dst, blocks_[block] + block_offset, avail);
bytes_to_copy -= avail;
dst += avail;
block++;
block_offset = 0;
}
*result = Slice(scratch, n);
return Status::OK();
}
Status Append(const Slice& data) {
const char* src = data.data();
size_t src_len = data.size();
MutexLock lock(&blocks_mutex_);
while (src_len > 0) {
size_t avail;
size_t offset = size_ % kBlockSize;
if (offset != 0) {
avail = kBlockSize - offset;
} else {
blocks_.push_back(new char[kBlockSize]);
avail = kBlockSize;
}
if (avail > src_len) {
avail = src_len;
}
std::memcpy(blocks_.back() + offset, src, avail);
src_len -= avail;
src += avail;
size_ += avail;
}
return Status::OK();
}
private:
enum { kBlockSize = 8 * 1024 };
~FileState() { Truncate(); }
port::Mutex refs_mutex_;
int refs_ GUARDED_BY(refs_mutex_);
mutable port::Mutex blocks_mutex_;
std::vector<char*> blocks_ GUARDED_BY(blocks_mutex_);
uint64_t size_ GUARDED_BY(blocks_mutex_);
};
class SequentialFileImpl : public SequentialFile {
public:
explicit SequentialFileImpl(FileState* file) : file_(file), pos_(0) {
file_->Ref();
}
~SequentialFileImpl() override { file_->Unref(); }
Status Read(size_t n, Slice* result, char* scratch) override {
Status s = file_->Read(pos_, n, result, scratch);
if (s.ok()) {
pos_ += result->size();
}
return s;
}
Status Skip(uint64_t n) override {
if (pos_ > file_->Size()) {
return Status::IOError("pos_ > file_->Size()");
}
const uint64_t available = file_->Size() - pos_;
if (n > available) {
n = available;
}
pos_ += n;
return Status::OK();
}
private:
FileState* file_;
uint64_t pos_;
};
class RandomAccessFileImpl : public RandomAccessFile {
public:
explicit RandomAccessFileImpl(FileState* file) : file_(file) { file_->Ref(); }
~RandomAccessFileImpl() override { file_->Unref(); }
Status Read(uint64_t offset, size_t n, Slice* result,
char* scratch) const override {
return file_->Read(offset, n, result, scratch);
}
private:
FileState* file_;
};
class WritableFileImpl : public WritableFile {
public:
WritableFileImpl(FileState* file) : file_(file) { file_->Ref(); }
~WritableFileImpl() override { file_->Unref(); }
Status Append(const Slice& data) override { return file_->Append(data); }
Status Close() override { return Status::OK(); }
Status Flush() override { return Status::OK(); }
Status Sync() override { return Status::OK(); }
private:
FileState* file_;
};
class NoOpLogger : public Logger {
public:
void Logv(const char* format, std::va_list ap) override {}
};
class InMemoryEnv : public EnvWrapper {
public:
explicit InMemoryEnv(Env* base_env) : EnvWrapper(base_env) {}
~InMemoryEnv() override {
for (const auto& kvp : file_map_) {
kvp.second->Unref();
}
}
Status NewSequentialFile(const std::string& fname,
SequentialFile** result) override {
MutexLock lock(&mutex_);
if (file_map_.find(fname) == file_map_.end()) {
*result = nullptr;
return Status::IOError(fname, "File not found");
}
*result = new SequentialFileImpl(file_map_[fname]);
return Status::OK();
}
Status NewRandomAccessFile(const std::string& fname,
RandomAccessFile** result) override {
MutexLock lock(&mutex_);
if (file_map_.find(fname) == file_map_.end()) {
*result = nullptr;
return Status::IOError(fname, "File not found");
}
*result = new RandomAccessFileImpl(file_map_[fname]);
return Status::OK();
}
Status NewWritableFile(const std::string& fname,
WritableFile** result) override {
MutexLock lock(&mutex_);
FileSystem::iterator it = file_map_.find(fname);
FileState* file;
if (it == file_map_.end()) {
file = new FileState();
file->Ref();
file_map_[fname] = file;
} else {
file = it->second;
file->Truncate();
}
*result = new WritableFileImpl(file);
return Status::OK();
}
Status NewAppendableFile(const std::string& fname,
WritableFile** result) override {
MutexLock lock(&mutex_);
    FileState** sptr = &file_map_[fname];
    FileState* file = *sptr;
    if (file == nullptr) {
      file = new FileState();
      file->Ref();
      *sptr = file;  // Record the new file in file_map_ so later opens find it.
    }
*result = new WritableFileImpl(file);
return Status::OK();
}
bool FileExists(const std::string& fname) override {
MutexLock lock(&mutex_);
return file_map_.find(fname) != file_map_.end();
}
Status GetChildren(const std::string& dir,
std::vector<std::string>* result) override {
MutexLock lock(&mutex_);
result->clear();
for (const auto& kvp : file_map_) {
const std::string& filename = kvp.first;
if (filename.size() >= dir.size() + 1 && filename[dir.size()] == '/' &&
Slice(filename).starts_with(Slice(dir))) {
result->push_back(filename.substr(dir.size() + 1));
}
}
return Status::OK();
}
void RemoveFileInternal(const std::string& fname)
EXCLUSIVE_LOCKS_REQUIRED(mutex_) {
if (file_map_.find(fname) == file_map_.end()) {
return;
}
file_map_[fname]->Unref();
file_map_.erase(fname);
}
Status RemoveFile(const std::string& fname) override {
MutexLock lock(&mutex_);
if (file_map_.find(fname) == file_map_.end()) {
return Status::IOError(fname, "File not found");
}
RemoveFileInternal(fname);
return Status::OK();
}
Status CreateDir(const std::string& dirname) override { return Status::OK(); }
Status RemoveDir(const std::string& dirname) override { return Status::OK(); }
Status GetFileSize(const std::string& fname, uint64_t* file_size) override {
MutexLock lock(&mutex_);
if (file_map_.find(fname) == file_map_.end()) {
return Status::IOError(fname, "File not found");
}
*file_size = file_map_[fname]->Size();
return Status::OK();
}
Status RenameFile(const std::string& src,
const std::string& target) override {
MutexLock lock(&mutex_);
if (file_map_.find(src) == file_map_.end()) {
return Status::IOError(src, "File not found");
}
RemoveFileInternal(target);
file_map_[target] = file_map_[src];
file_map_.erase(src);
return Status::OK();
}
Status LockFile(const std::string& fname, FileLock** lock) override {
*lock = new FileLock;
return Status::OK();
}
Status UnlockFile(FileLock* lock) override {
delete lock;
return Status::OK();
}
Status GetTestDirectory(std::string* path) override {
*path = "/test";
return Status::OK();
}
Status NewLogger(const std::string& fname, Logger** result) override {
*result = new NoOpLogger;
return Status::OK();
}
private:
typedef std::map<std::string, FileState*> FileSystem;
port::Mutex mutex_;
FileSystem file_map_ GUARDED_BY(mutex_);
};
}
Env* NewMemEnv(Env* base_env) { return new InMemoryEnv(base_env); }
} | #include "helpers/memenv/memenv.h"
#include <string>
#include <vector>
#include "gtest/gtest.h"
#include "db/db_impl.h"
#include "leveldb/db.h"
#include "leveldb/env.h"
#include "util/testutil.h"
namespace leveldb {
class MemEnvTest : public testing::Test {
public:
MemEnvTest() : env_(NewMemEnv(Env::Default())) {}
~MemEnvTest() { delete env_; }
Env* env_;
};
TEST_F(MemEnvTest, Basics) {
uint64_t file_size;
WritableFile* writable_file;
std::vector<std::string> children;
ASSERT_LEVELDB_OK(env_->CreateDir("/dir"));
ASSERT_TRUE(!env_->FileExists("/dir/non_existent"));
ASSERT_TRUE(!env_->GetFileSize("/dir/non_existent", &file_size).ok());
ASSERT_LEVELDB_OK(env_->GetChildren("/dir", &children));
ASSERT_EQ(0, children.size());
ASSERT_LEVELDB_OK(env_->NewWritableFile("/dir/f", &writable_file));
ASSERT_LEVELDB_OK(env_->GetFileSize("/dir/f", &file_size));
ASSERT_EQ(0, file_size);
delete writable_file;
ASSERT_TRUE(env_->FileExists("/dir/f"));
ASSERT_LEVELDB_OK(env_->GetFileSize("/dir/f", &file_size));
ASSERT_EQ(0, file_size);
ASSERT_LEVELDB_OK(env_->GetChildren("/dir", &children));
ASSERT_EQ(1, children.size());
ASSERT_EQ("f", children[0]);
ASSERT_LEVELDB_OK(env_->NewWritableFile("/dir/f", &writable_file));
ASSERT_LEVELDB_OK(writable_file->Append("abc"));
delete writable_file;
ASSERT_LEVELDB_OK(env_->NewAppendableFile("/dir/f", &writable_file));
ASSERT_LEVELDB_OK(env_->GetFileSize("/dir/f", &file_size));
ASSERT_EQ(3, file_size);
ASSERT_LEVELDB_OK(writable_file->Append("hello"));
delete writable_file;
ASSERT_LEVELDB_OK(env_->GetFileSize("/dir/f", &file_size));
ASSERT_EQ(8, file_size);
ASSERT_TRUE(!env_->RenameFile("/dir/non_existent", "/dir/g").ok());
ASSERT_LEVELDB_OK(env_->RenameFile("/dir/f", "/dir/g"));
ASSERT_TRUE(!env_->FileExists("/dir/f"));
ASSERT_TRUE(env_->FileExists("/dir/g"));
ASSERT_LEVELDB_OK(env_->GetFileSize("/dir/g", &file_size));
ASSERT_EQ(8, file_size);
SequentialFile* seq_file;
RandomAccessFile* rand_file;
ASSERT_TRUE(!env_->NewSequentialFile("/dir/non_existent", &seq_file).ok());
ASSERT_TRUE(!seq_file);
ASSERT_TRUE(!env_->NewRandomAccessFile("/dir/non_existent", &rand_file).ok());
ASSERT_TRUE(!rand_file);
ASSERT_TRUE(!env_->RemoveFile("/dir/non_existent").ok());
ASSERT_LEVELDB_OK(env_->RemoveFile("/dir/g"));
ASSERT_TRUE(!env_->FileExists("/dir/g"));
ASSERT_LEVELDB_OK(env_->GetChildren("/dir", &children));
ASSERT_EQ(0, children.size());
ASSERT_LEVELDB_OK(env_->RemoveDir("/dir"));
}
TEST_F(MemEnvTest, ReadWrite) {
WritableFile* writable_file;
SequentialFile* seq_file;
RandomAccessFile* rand_file;
Slice result;
char scratch[100];
ASSERT_LEVELDB_OK(env_->CreateDir("/dir"));
ASSERT_LEVELDB_OK(env_->NewWritableFile("/dir/f", &writable_file));
ASSERT_LEVELDB_OK(writable_file->Append("hello "));
ASSERT_LEVELDB_OK(writable_file->Append("world"));
delete writable_file;
ASSERT_LEVELDB_OK(env_->NewSequentialFile("/dir/f", &seq_file));
ASSERT_LEVELDB_OK(seq_file->Read(5, &result, scratch));
ASSERT_EQ(0, result.compare("hello"));
ASSERT_LEVELDB_OK(seq_file->Skip(1));
ASSERT_LEVELDB_OK(seq_file->Read(1000, &result, scratch));
ASSERT_EQ(0, result.compare("world"));
  // Reading past EOF returns an empty result rather than an error.
  ASSERT_LEVELDB_OK(
      seq_file->Read(1000, &result, scratch));
  ASSERT_EQ(0, result.size());
  // Skip is clamped at EOF; subsequent reads stay empty.
  ASSERT_LEVELDB_OK(seq_file->Skip(100));
  ASSERT_LEVELDB_OK(seq_file->Read(1000, &result, scratch));
  ASSERT_EQ(0, result.size());
delete seq_file;
ASSERT_LEVELDB_OK(env_->NewRandomAccessFile("/dir/f", &rand_file));
ASSERT_LEVELDB_OK(rand_file->Read(6, 5, &result, scratch));
ASSERT_EQ(0, result.compare("world"));
ASSERT_LEVELDB_OK(rand_file->Read(0, 5, &result, scratch));
ASSERT_EQ(0, result.compare("hello"));
ASSERT_LEVELDB_OK(rand_file->Read(10, 100, &result, scratch));
ASSERT_EQ(0, result.compare("d"));
ASSERT_TRUE(!rand_file->Read(1000, 5, &result, scratch).ok());
delete rand_file;
}
TEST_F(MemEnvTest, Locks) {
FileLock* lock;
ASSERT_LEVELDB_OK(env_->LockFile("some file", &lock));
ASSERT_LEVELDB_OK(env_->UnlockFile(lock));
}
TEST_F(MemEnvTest, Misc) {
std::string test_dir;
ASSERT_LEVELDB_OK(env_->GetTestDirectory(&test_dir));
ASSERT_TRUE(!test_dir.empty());
WritableFile* writable_file;
ASSERT_LEVELDB_OK(env_->NewWritableFile("/a/b", &writable_file));
ASSERT_LEVELDB_OK(writable_file->Sync());
ASSERT_LEVELDB_OK(writable_file->Flush());
ASSERT_LEVELDB_OK(writable_file->Close());
delete writable_file;
}
TEST_F(MemEnvTest, LargeWrite) {
const size_t kWriteSize = 300 * 1024;
char* scratch = new char[kWriteSize * 2];
std::string write_data;
for (size_t i = 0; i < kWriteSize; ++i) {
write_data.append(1, static_cast<char>(i));
}
WritableFile* writable_file;
ASSERT_LEVELDB_OK(env_->NewWritableFile("/dir/f", &writable_file));
ASSERT_LEVELDB_OK(writable_file->Append("foo"));
ASSERT_LEVELDB_OK(writable_file->Append(write_data));
delete writable_file;
SequentialFile* seq_file;
Slice result;
ASSERT_LEVELDB_OK(env_->NewSequentialFile("/dir/f", &seq_file));
ASSERT_LEVELDB_OK(seq_file->Read(3, &result, scratch));
ASSERT_EQ(0, result.compare("foo"));
size_t read = 0;
std::string read_data;
while (read < kWriteSize) {
ASSERT_LEVELDB_OK(seq_file->Read(kWriteSize - read, &result, scratch));
read_data.append(result.data(), result.size());
read += result.size();
}
ASSERT_TRUE(write_data == read_data);
delete seq_file;
delete[] scratch;
}
TEST_F(MemEnvTest, OverwriteOpenFile) {
const char kWrite1Data[] = "Write #1 data";
const size_t kFileDataLen = sizeof(kWrite1Data) - 1;
const std::string kTestFileName = testing::TempDir() + "leveldb-TestFile.dat";
ASSERT_LEVELDB_OK(WriteStringToFile(env_, kWrite1Data, kTestFileName));
RandomAccessFile* rand_file;
ASSERT_LEVELDB_OK(env_->NewRandomAccessFile(kTestFileName, &rand_file));
const char kWrite2Data[] = "Write #2 data";
ASSERT_LEVELDB_OK(WriteStringToFile(env_, kWrite2Data, kTestFileName));
Slice result;
char scratch[kFileDataLen];
ASSERT_LEVELDB_OK(rand_file->Read(0, kFileDataLen, &result, scratch));
ASSERT_EQ(0, result.compare(kWrite2Data));
delete rand_file;
}
TEST_F(MemEnvTest, DBTest) {
Options options;
options.create_if_missing = true;
options.env = env_;
DB* db;
const Slice keys[] = {Slice("aaa"), Slice("bbb"), Slice("ccc")};
const Slice vals[] = {Slice("foo"), Slice("bar"), Slice("baz")};
ASSERT_LEVELDB_OK(DB::Open(options, "/dir/db", &db));
for (size_t i = 0; i < 3; ++i) {
ASSERT_LEVELDB_OK(db->Put(WriteOptions(), keys[i], vals[i]));
}
for (size_t i = 0; i < 3; ++i) {
std::string res;
ASSERT_LEVELDB_OK(db->Get(ReadOptions(), keys[i], &res));
ASSERT_TRUE(res == vals[i]);
}
Iterator* iterator = db->NewIterator(ReadOptions());
iterator->SeekToFirst();
for (size_t i = 0; i < 3; ++i) {
ASSERT_TRUE(iterator->Valid());
ASSERT_TRUE(keys[i] == iterator->key());
ASSERT_TRUE(vals[i] == iterator->value());
iterator->Next();
}
ASSERT_TRUE(!iterator->Valid());
delete iterator;
DBImpl* dbi = reinterpret_cast<DBImpl*>(db);
ASSERT_LEVELDB_OK(dbi->TEST_CompactMemTable());
for (size_t i = 0; i < 3; ++i) {
std::string res;
ASSERT_LEVELDB_OK(db->Get(ReadOptions(), keys[i], &res));
ASSERT_TRUE(res == vals[i]);
}
delete db;
}
} | https://github.com/google/leveldb/blob/23e35d792b9154f922b8b575b12596a4d8664c65/helpers/memenv/memenv.cc | https://github.com/google/leveldb/blob/23e35d792b9154f922b8b575b12596a4d8664c65/helpers/memenv/memenv_test.cc | 23e35d792b9154f922b8b575b12596a4d8664c65 |
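Mirroring the DBTest case above, a standalone caller would wire the in-memory env into leveldb::Options as sketched below. OpenInMemoryDb is a hypothetical helper, and the "/in-mem/db" path is arbitrary since all files live in the env's map.

#include "helpers/memenv/memenv.h"
#include "leveldb/db.h"
#include "leveldb/env.h"
#include "leveldb/options.h"
#include "leveldb/status.h"

// Opens a database whose files live entirely in memory. The caller owns both
// *db and *env_out, and the env must outlive the DB.
leveldb::Status OpenInMemoryDb(leveldb::DB** db, leveldb::Env** env_out) {
  leveldb::Env* env = leveldb::NewMemEnv(leveldb::Env::Default());
  leveldb::Options options;
  options.create_if_missing = true;
  options.env = env;  // Redirect all file I/O to the InMemoryEnv.
  *env_out = env;
  return leveldb::DB::Open(options, "/in-mem/db", db);
}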
e0bafeec-b8dc-4bfa-840f-87c8a06e09cc | cpp | tensorflow/tensorflow | collective_permute_valid_iteration_annotator | third_party/xla/xla/service/gpu/transforms/collective_permute_valid_iteration_annotator.cc | third_party/xla/xla/service/gpu/transforms/collective_permute_valid_iteration_annotator_test.cc | #include "xla/service/gpu/transforms/collective_permute_valid_iteration_annotator.h"
#include "xla/literal_util.h"
#include "xla/service/collective_ops_utils.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/while_loop_analysis.h"
namespace xla {
// Returns the unique non-constant operand of `instr`; CHECK-fails if there is
// none or more than one.
static const HloInstruction* NonConstantOperand(const HloInstruction* instr) {
const HloInstruction* result = nullptr;
for (const HloInstruction* operand : instr->operands()) {
if (!operand->IsConstant()) {
if (result != nullptr) {
CHECK_EQ(result, operand);
}
result = operand;
}
}
CHECK_NE(result, nullptr);
return result;
}
// Returns the constant step of the while loop's induction variable, i.e. the
// constant addend matched in `indvar_update = add(indvar, step)`, or nullopt
// if it cannot be determined.
std::optional<int64_t> GetStep(HloInstruction* while_inst) {
std::optional<int64_t> indvar_tuple_idx =
GetLoopInductionVarTupleIdx(while_inst);
if (!indvar_tuple_idx) {
return std::nullopt;
};
auto* while_body_indvar_update =
while_inst->while_body()->root_instruction()->mutable_operand(
*indvar_tuple_idx);
auto* while_body_indvar = NonConstantOperand(while_body_indvar_update);
HloInstruction* trip_count_increase_step_instr = nullptr;
if (!Match(while_body_indvar_update,
match::AddAnyOrder(match::Op().Is(while_body_indvar),
match::Op(&trip_count_increase_step_instr)))) {
return std::nullopt;
}
return LiteralUtil::LiteralAsScalarInt64(
trip_count_increase_step_instr->literal());
}
absl::StatusOr<bool> CollectivePermuteValidIterationAnnotator::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
bool changed = false;
for (HloComputation* comp : module->computations(execution_threads)) {
for (HloInstruction* inst : comp->instructions()) {
if (inst->opcode() != HloOpcode::kCollectivePermute) {
continue;
}
if (inst->frontend_attributes().map().find(kSendRecvValidationAttr) !=
inst->frontend_attributes().map().end()) {
continue;
}
auto sourceTargetPairs = inst->source_target_pairs();
if (!IsForwardCycle(sourceTargetPairs) &&
!IsBackwardCycle(sourceTargetPairs)) {
continue;
}
VLOG(2) << "Collective permute with cycle: " << inst->ToString();
int64_t max_device_num = -1;
for (auto [source, target] : sourceTargetPairs) {
max_device_num = std::max(std::max(source, target), max_device_num);
}
int64_t num_devices = max_device_num + 1;
HloInstruction* whileOp = inst->parent()->WhileCallInstruction();
if (whileOp == nullptr) {
VLOG(2) << "No surrounding while op found. Ignoring " << inst->name();
continue;
}
if (!whileOp->frontend_attributes().map().contains(
"is_pipelined_while_loop"))
continue;
TF_ASSIGN_OR_RETURN(WhileLoopBackendConfig config,
whileOp->backend_config<WhileLoopBackendConfig>());
if (!config.has_known_trip_count()) {
VLOG(2) << "Trip count for while loop (" << whileOp->name()
<< "): unknown";
continue;
}
int64_t trip_count = config.known_trip_count().n();
std::optional<int64_t> step = GetStep(whileOp);
VLOG(2) << "Trip count for while loop (" << whileOp->name()
<< "): " << trip_count;
if (!step) {
VLOG(2) << "Could not find step for while operation";
continue;
}
VLOG(2) << "Step for while loop (" << whileOp->name() << "): " << *step;
if (*step != 1) {
VLOG(2) << "Step is not 1. Skipping...";
continue;
}
      // The k-th source-target pair is marked valid for iterations
      // [k, k + offset] of the pipelined loop.
      int64_t offset = trip_count - num_devices;
std::vector<std::pair<int64_t, int64_t>> sendRecvValidation(
sourceTargetPairs.size());
for (size_t currIdx = 0; currIdx < sourceTargetPairs.size(); currIdx++) {
sendRecvValidation[currIdx] = {currIdx, currIdx + offset};
}
if (IsBackwardCycle(sourceTargetPairs)) {
std::reverse(sendRecvValidation.begin(), sendRecvValidation.end());
}
xla::FrontendAttributes attributes;
std::string iteration_instances =
"{" +
absl::StrJoin(sendRecvValidation, ",",
[](std::string* out, std::pair<int64_t, int64_t> item) {
absl::StrAppend(out, "{", item.first, ",",
item.second, "}");
}) +
"}";
(*attributes.mutable_map())[kSendRecvValidationAttr] =
iteration_instances;
inst->add_frontend_attributes(attributes);
VLOG(1) << "Adding " << kSendRecvValidationAttr << " to " << inst->name()
<< ": " << iteration_instances;
changed = true;
}
}
return changed;
}
} | #include "xla/service/gpu/transforms/collective_permute_valid_iteration_annotator.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/pass/hlo_pass_pipeline.h"
#include "xla/service/collective_ops_utils.h"
#include "xla/service/while_loop_trip_count_annotator.h"
#include "xla/tests/hlo_test_base.h"
namespace xla {
namespace {
using CollectivePermuteValidIterationAnnotatorTest = HloTestBase;
TEST_F(CollectivePermuteValidIterationAnnotatorTest, NoChange) {
absl::string_view hlo_string = R"(
HloModule test, entry_computation_layout={()->(s32[], s32[])}
%Body (param: (s32[], s32[])) -> (s32[], s32[]) {
%param = (s32[], s32[]) parameter(0)
%i = s32[] get-tuple-element((s32[], s32[]) %param), index=1
%one = s32[] constant(1)
%i_plus_one = s32[] add(s32[] %i, s32[] %one)
%permute = s32[] collective-permute(%i_plus_one), channel_id=1, source_target_pairs={{0,1},{1,2},{2,3},{3,0}}
ROOT %tuple = (s32[], s32[]) tuple(s32[] %permute, s32[] %permute)
}
%Cond (param.1: (s32[], s32[])) -> pred[] {
%param.1 = (s32[], s32[]) parameter(0)
%i.1 = s32[] get-tuple-element((s32[], s32[]) %param.1), index=1
%trip_count = s32[] constant(10)
ROOT %done = pred[] compare(s32[] %i.1, s32[] %trip_count), direction=LT
}
ENTRY %test () -> (s32[], s32[]) {
%i_start = s32[] constant(0)
%p_start = s32[] constant(0)
%initial_tuple = (s32[], s32[]) tuple(s32[] %i_start, s32[] %p_start)
ROOT %while = (s32[], s32[]) while((s32[], s32[]) %initial_tuple), condition=%Cond, body=%Body
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string, 1, 4));
HloPassPipeline pipeline("my-pass-pipeline");
pipeline.AddPass<WhileLoopTripCountAnnotator>();
pipeline.AddPass<CollectivePermuteValidIterationAnnotator>();
TF_ASSERT_OK_AND_ASSIGN(bool changed, pipeline.Run(module.get()));
EXPECT_FALSE(changed);
HloCollectivePermuteInstruction* cp =
DynCastOrNull<HloCollectivePermuteInstruction>(
FindInstruction(module.get(), HloOpcode::kCollectivePermute));
ASSERT_NE(cp, nullptr);
auto sendRecvValidationIt =
cp->frontend_attributes().map().find(kSendRecvValidationAttr);
ASSERT_EQ(sendRecvValidationIt, cp->frontend_attributes().map().end());
}
TEST_F(CollectivePermuteValidIterationAnnotatorTest, ForwardCycle) {
absl::string_view hlo_string = R"(
HloModule test, entry_computation_layout={()->(s32[], s32[])}
%Body (param: (s32[], s32[])) -> (s32[], s32[]) {
%param = (s32[], s32[]) parameter(0)
%i = s32[] get-tuple-element((s32[], s32[]) %param), index=1
%one = s32[] constant(1)
%i_plus_one = s32[] add(s32[] %i, s32[] %one)
%permute = s32[] collective-permute(%i_plus_one), channel_id=1, source_target_pairs={{0,1},{1,2},{2,3},{3,0}}
ROOT %tuple = (s32[], s32[]) tuple(s32[] %permute, s32[] %i_plus_one)
}
%Cond (param.1: (s32[], s32[])) -> pred[] {
%param.1 = (s32[], s32[]) parameter(0)
%i.1 = s32[] get-tuple-element((s32[], s32[]) %param.1), index=1
%trip_count = s32[] constant(10)
ROOT %done = pred[] compare(s32[] %i.1, s32[] %trip_count), direction=LT
}
ENTRY %test () -> (s32[], s32[]) {
%i_start = s32[] constant(0)
%p_start = s32[] constant(0)
%initial_tuple = (s32[], s32[]) tuple(s32[] %i_start, s32[] %p_start)
ROOT %while = (s32[], s32[]) while((s32[], s32[]) %initial_tuple), condition=%Cond, body=%Body, frontend_attributes={is_pipelined_while_loop="true"}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string, 1, 4));
HloPassPipeline pipeline("my-pass-pipeline");
pipeline.AddPass<WhileLoopTripCountAnnotator>();
pipeline.AddPass<CollectivePermuteValidIterationAnnotator>();
TF_ASSERT_OK_AND_ASSIGN(bool changed, pipeline.Run(module.get()));
EXPECT_TRUE(changed);
HloCollectivePermuteInstruction* cp =
DynCastOrNull<HloCollectivePermuteInstruction>(
FindInstruction(module.get(), HloOpcode::kCollectivePermute));
ASSERT_NE(cp, nullptr);
auto sendRecvValidationIt =
cp->frontend_attributes().map().find(kSendRecvValidationAttr);
ASSERT_NE(sendRecvValidationIt, cp->frontend_attributes().map().end());
std::string sendRecvValidationAttr = sendRecvValidationIt->second;
EXPECT_EQ(sendRecvValidationAttr, "{{0,6},{1,7},{2,8},{3,9}}");
}
TEST_F(CollectivePermuteValidIterationAnnotatorTest, BackwardCycle) {
absl::string_view hlo_string = R"(
HloModule test, entry_computation_layout={()->(s32[], s32[])}
%Body (param: (s32[], s32[])) -> (s32[], s32[]) {
%param = (s32[], s32[]) parameter(0)
%i = s32[] get-tuple-element((s32[], s32[]) %param), index=1
%one = s32[] constant(1)
%i_plus_one = s32[] add(s32[] %i, s32[] %one)
%permute = s32[] collective-permute(%i_plus_one), channel_id=1, source_target_pairs={{0,3},{1,0},{2,1},{3,2}}
ROOT %tuple = (s32[], s32[]) tuple(s32[] %permute, s32[] %i_plus_one)
}
%Cond (param.1: (s32[], s32[])) -> pred[] {
%param.1 = (s32[], s32[]) parameter(0)
%i.1 = s32[] get-tuple-element((s32[], s32[]) %param.1), index=1
%trip_count = s32[] constant(10)
ROOT %done = pred[] compare(s32[] %i.1, s32[] %trip_count), direction=LT
}
ENTRY %test () -> (s32[], s32[]) {
%i_start = s32[] constant(0)
%p_start = s32[] constant(0)
%initial_tuple = (s32[], s32[]) tuple(s32[] %i_start, s32[] %p_start)
ROOT %while = (s32[], s32[]) while((s32[], s32[]) %initial_tuple), condition=%Cond, body=%Body, frontend_attributes={is_pipelined_while_loop="true"}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string, 1, 4));
HloPassPipeline pipeline("my-pass-pipeline");
pipeline.AddPass<WhileLoopTripCountAnnotator>();
pipeline.AddPass<CollectivePermuteValidIterationAnnotator>();
TF_ASSERT_OK_AND_ASSIGN(bool changed, pipeline.Run(module.get()));
EXPECT_TRUE(changed);
HloCollectivePermuteInstruction* cp =
DynCastOrNull<HloCollectivePermuteInstruction>(
FindInstruction(module.get(), HloOpcode::kCollectivePermute));
ASSERT_NE(cp, nullptr);
auto sendRecvValidationIt =
cp->frontend_attributes().map().find(kSendRecvValidationAttr);
ASSERT_NE(sendRecvValidationIt, cp->frontend_attributes().map().end());
std::string sendRecvValidationAttr = sendRecvValidationIt->second;
EXPECT_EQ(sendRecvValidationAttr, "{{3,9},{2,8},{1,7},{0,6}}");
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/collective_permute_valid_iteration_annotator.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/collective_permute_valid_iteration_annotator_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
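The tests above fix the required pass ordering: WhileLoopTripCountAnnotator must run first so the annotator can read known_trip_count from the while loop's backend config. Below is a sketch of the same wiring outside a test; AnnotateSendRecvValidation is a hypothetical helper, and the module's while loop is assumed to carry the is_pipelined_while_loop frontend attribute, as in the cycle tests.

#include "absl/status/statusor.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/pass/hlo_pass_pipeline.h"
#include "xla/service/gpu/transforms/collective_permute_valid_iteration_annotator.h"
#include "xla/service/while_loop_trip_count_annotator.h"

absl::StatusOr<bool> AnnotateSendRecvValidation(xla::HloModule* module) {
  xla::HloPassPipeline pipeline("annotate-collective-permute");
  // Must run first: records the known trip count in the loop's backend config.
  pipeline.AddPass<xla::WhileLoopTripCountAnnotator>();
  // Attaches the send/recv validation frontend attribute to cyclic
  // collective-permutes inside pipelined while loops.
  pipeline.AddPass<xla::CollectivePermuteValidIterationAnnotator>();
  return pipeline.Run(module);
}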
ad69ddc5-0993-4550-bcd1-c25e14d6bb5e | cpp | tensorflow/tensorflow | path_utils | tensorflow/core/data/service/snapshot/path_utils.cc | tensorflow/core/data/service/snapshot/path_utils_test.cc | #include "tensorflow/core/data/service/snapshot/path_utils.h"
#include <cstdint>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/numbers.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_split.h"
#include "absl/strings/string_view.h"
#include "tsl/platform/path.h"
namespace tensorflow {
namespace data {
namespace {
constexpr const char kDoneFileName[] = "DONE";
constexpr const char kErrorFileName[] = "ERROR";
constexpr const char kWorkerFileName[] = "owner_worker";
constexpr const char kSnapshotMetadataFileName[] = "snapshot.metadata";
constexpr const char kDatasetDefFileName[] = "dataset_def.proto";
constexpr const char kDatasetSpecFileName[] = "dataset_spec.pb";
constexpr const char kStreamsDirectoryName[] = "streams";
constexpr const char kSplitsDirectoryName[] = "splits";
constexpr const char kCheckpointsDirectoryName[] = "checkpoints";
constexpr const char kCommittedChunksDirectoryName[] = "chunks";
constexpr const char kUncommittedChunksDirectoryName[] = "uncommitted_chunks";
// Checkpoint file names encode -1 when the number of elements is unknown.
constexpr int64_t kUnknownNumElements = -1;
}
std::string StreamsDirectory(absl::string_view snapshot_path) {
return tsl::io::JoinPath(snapshot_path, kStreamsDirectoryName);
}
std::string StreamDirectory(absl::string_view snapshot_path,
int64_t stream_index) {
return tsl::io::JoinPath(StreamsDirectory(snapshot_path),
absl::StrCat("stream_", stream_index));
}
std::string SplitsDirectory(absl::string_view snapshot_path,
int64_t stream_index) {
return tsl::io::JoinPath(StreamDirectory(snapshot_path, stream_index),
kSplitsDirectoryName);
}
std::string SourceDirectory(absl::string_view snapshot_path,
int64_t stream_index, int64_t source_index) {
return tsl::io::JoinPath(SplitsDirectory(snapshot_path, stream_index),
absl::StrCat("source_", source_index));
}
std::string RepetitionDirectory(absl::string_view snapshot_path,
int64_t stream_index, int64_t source_index,
int64_t repetition_index) {
return tsl::io::JoinPath(
SourceDirectory(snapshot_path, stream_index, source_index),
absl::StrCat("repetition_", repetition_index));
}
std::string SplitPath(absl::string_view snapshot_path, int64_t stream_index,
int64_t source_index, int64_t repetition_index,
int64_t local_index, int64_t global_index) {
return tsl::io::JoinPath(
RepetitionDirectory(snapshot_path, stream_index, source_index,
repetition_index),
absl::StrCat("split_", local_index, "_", global_index));
}
absl::StatusOr<int64_t> ParseStreamDirectoryName(
absl::string_view stream_directory_name) {
std::vector<std::string> tokens = absl::StrSplit(stream_directory_name, '_');
int64_t stream_index = 0;
if (tokens.size() != 2 || tokens[0] != "stream" ||
!absl::SimpleAtoi(tokens[1], &stream_index) || stream_index < 0) {
return absl::InvalidArgumentError(
absl::StrCat("Invalid stream directory name: ", stream_directory_name,
". Expected stream_<stream_index>."));
}
return stream_index;
}
absl::StatusOr<int64_t> ParseSourceDirectoryName(
absl::string_view source_directory_name) {
std::vector<std::string> tokens = absl::StrSplit(source_directory_name, '_');
int64_t source_index = 0;
if (tokens.size() != 2 || tokens[0] != "source" ||
!absl::SimpleAtoi(tokens[1], &source_index) || source_index < 0) {
return absl::InvalidArgumentError(
absl::StrCat("Invalid source directory name: ", source_directory_name,
". Expected source_<source_index>."));
}
return source_index;
}
absl::StatusOr<int64_t> ParseRepetitionDirectoryName(
absl::string_view repetition_directory_name) {
std::vector<std::string> tokens =
absl::StrSplit(repetition_directory_name, '_');
int64_t repetition_index = 0;
if (tokens.size() != 2 || tokens[0] != "repetition" ||
!absl::SimpleAtoi(tokens[1], &repetition_index) || repetition_index < 0) {
return absl::InvalidArgumentError(absl::StrCat(
"Invalid repetition directory name: ", repetition_directory_name,
". Expected repetition_<repetition_index>."));
}
return repetition_index;
}
absl::StatusOr<std::pair<int64_t, int64_t>> ParseSplitFilename(
absl::string_view split_filename) {
std::vector<std::string> tokens =
absl::StrSplit(tsl::io::Basename(split_filename), '_');
int64_t local_split_index = 0, global_split_index = 0;
if (tokens.size() != 3 || tokens[0] != "split" ||
!absl::SimpleAtoi(tokens[1], &local_split_index) ||
local_split_index < 0 ||
!absl::SimpleAtoi(tokens[2], &global_split_index) ||
global_split_index < 0) {
return absl::InvalidArgumentError(absl::StrCat(
"Invalid split file name: ", split_filename,
". Expected split_<local_split_index>_<global_split_index>."));
}
if (local_split_index > global_split_index) {
return absl::InvalidArgumentError(absl::StrCat(
"Invalid split file name: ", split_filename, ". The local split index ",
local_split_index, " exceeds the global split index ",
global_split_index, "."));
}
return std::make_pair(local_split_index, global_split_index);
}
absl::StatusOr<std::pair<int64_t, int64_t>> ParseCheckpointFilename(
absl::string_view checkpoint_filename) {
std::vector<std::string> tokens = absl::StrSplit(checkpoint_filename, '_');
int64_t checkpoint_index = 0, checkpoint_num_elements = 0;
if (tokens.size() != 3 || tokens[0] != "checkpoint" ||
!absl::SimpleAtoi(tokens[1], &checkpoint_index) || checkpoint_index < 0 ||
!absl::SimpleAtoi(tokens[2], &checkpoint_num_elements) ||
(checkpoint_num_elements < 0 &&
checkpoint_num_elements != kUnknownNumElements)) {
return absl::InvalidArgumentError(absl::StrCat(
"Invalid checkpoint file name: ", checkpoint_filename,
". Expected checkpoint_<checkpoint_index>_<checkpoint_num_elements>."));
}
return std::make_pair(checkpoint_index, checkpoint_num_elements);
}
absl::StatusOr<std::tuple<int64_t, int64_t, int64_t>> ParseChunkFilename(
absl::string_view chunk_filename) {
std::vector<std::string> tokens = absl::StrSplit(chunk_filename, '_');
int64_t stream_index = 0, stream_chunk_index = 0, chunk_num_elements = 0;
if (tokens.size() != 4 || tokens[0] != "chunk" ||
!absl::SimpleAtoi(tokens[1], &stream_index) || stream_index < 0 ||
!absl::SimpleAtoi(tokens[2], &stream_chunk_index) ||
stream_chunk_index < 0 ||
!absl::SimpleAtoi(tokens[3], &chunk_num_elements) ||
(chunk_num_elements < 0 && chunk_num_elements != kUnknownNumElements)) {
return absl::InvalidArgumentError(absl::StrCat(
"Invalid chunk file name: ", chunk_filename,
". Expected "
"chunk_<stream_index>_<stream_chunk_index>_<chunk_num_elements>."));
}
return std::make_tuple(stream_index, stream_chunk_index, chunk_num_elements);
}
std::string SnapshotMetadataFilePath(absl::string_view snapshot_path) {
  return tsl::io::JoinPath(snapshot_path, kSnapshotMetadataFileName);
}
std::string DatasetDefFilePath(absl::string_view snapshot_path) {
  return tsl::io::JoinPath(snapshot_path, kDatasetDefFileName);
}
std::string DatasetSpecFilePath(absl::string_view snapshot_path) {
  return tsl::io::JoinPath(snapshot_path, kDatasetSpecFileName);
}
std::string StreamDoneFilePath(absl::string_view snapshot_path,
int64_t stream_index) {
return tsl::io::JoinPath(StreamDirectory(snapshot_path, stream_index),
kDoneFileName);
}
std::string StreamWorkerFilePath(absl::string_view snapshot_path,
int64_t stream_index) {
return StreamWorkerFilePath(StreamDirectory(snapshot_path, stream_index));
}
std::string StreamWorkerFilePath(absl::string_view stream_path) {
return tsl::io::JoinPath(stream_path, kWorkerFileName);
}
std::string SnapshotDoneFilePath(absl::string_view snapshot_path) {
return tsl::io::JoinPath(snapshot_path, kDoneFileName);
}
std::string SnapshotErrorFilePath(absl::string_view snapshot_path) {
return tsl::io::JoinPath(snapshot_path, kErrorFileName);
}
std::string CheckpointsDirectory(absl::string_view snapshot_path,
int64_t stream_index) {
return tsl::io::JoinPath(StreamDirectory(snapshot_path, stream_index),
kCheckpointsDirectoryName);
}
std::string CommittedChunksDirectory(absl::string_view snapshot_path) {
return tsl::io::JoinPath(snapshot_path, kCommittedChunksDirectoryName);
}
std::string UncommittedChunksDirectory(absl::string_view snapshot_path,
int64_t stream_index) {
return tsl::io::JoinPath(StreamDirectory(snapshot_path, stream_index),
kUncommittedChunksDirectoryName);
}
}
} | #include "tensorflow/core/data/service/snapshot/path_utils.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/test.h"
#include "tsl/protobuf/error_codes.pb.h"
namespace tensorflow {
namespace data {
namespace {
using ::testing::FieldsAre;
using ::testing::HasSubstr;
using ::testing::MatchesRegex;
using ::testing::Pair;
using tsl::testing::IsOkAndHolds;
using tsl::testing::StatusIs;
TEST(PathUtilsTest, StreamsDirectory) {
EXPECT_THAT(StreamsDirectory("/path/to/snapshot"),
MatchesRegex("/path/to/snapshot.streams"));
}
TEST(PathUtilsTest, StreamDirectory) {
EXPECT_THAT(StreamDirectory("/path/to/snapshot", 0),
MatchesRegex("/path/to/snapshot.streams.stream_0"));
}
TEST(PathUtilsTest, SplitsDirectory) {
EXPECT_THAT(SplitsDirectory("/path/to/snapshot", 0),
MatchesRegex("/path/to/snapshot.streams.stream_0.splits"));
}
TEST(PathUtilsTest, SourceDirectory) {
EXPECT_THAT(
SourceDirectory("/path/to/snapshot", 0,
1),
MatchesRegex("/path/to/snapshot.streams.stream_0.splits.source_1"));
}
TEST(PathUtilsTest, RepetitionDirectory) {
EXPECT_THAT(
RepetitionDirectory("/path/to/snapshot", 0,
1, 2),
MatchesRegex(
"/path/to/snapshot.streams.stream_0.splits.source_1.repetition_2"));
}
TEST(PathUtilsTest, SplitPath) {
EXPECT_THAT(
SplitPath("/path/to/snapshot", 0, 1,
2, 3, 4),
MatchesRegex(
"/path/to/"
"snapshot.streams.stream_0.splits.source_1.repetition_2.split_3_4"));
}
TEST(PathUtilsTest, ParseStreamDirectoryName) {
EXPECT_THAT(ParseStreamDirectoryName("stream_1"), IsOkAndHolds(1));
}
TEST(PathUtilsTest, ParseSourceDirectoryName) {
EXPECT_THAT(ParseSourceDirectoryName("source_1"), IsOkAndHolds(1));
EXPECT_THAT(ParseSourceDirectoryName(""),
StatusIs(error::INVALID_ARGUMENT,
HasSubstr("Expected source_<source_index>")));
EXPECT_THAT(ParseSourceDirectoryName("source_-1"),
StatusIs(error::INVALID_ARGUMENT,
HasSubstr("Expected source_<source_index>")));
EXPECT_THAT(ParseSourceDirectoryName("chunk_1"),
StatusIs(error::INVALID_ARGUMENT,
HasSubstr("Expected source_<source_index>")));
}
TEST(PathUtilsTest, ParseRepetitionDirectoryName) {
EXPECT_THAT(ParseRepetitionDirectoryName("repetition_1"), IsOkAndHolds(1));
EXPECT_THAT(ParseRepetitionDirectoryName(""),
StatusIs(error::INVALID_ARGUMENT,
HasSubstr("Expected repetition_<repetition_index>")));
EXPECT_THAT(ParseRepetitionDirectoryName("repetition_-1"),
StatusIs(error::INVALID_ARGUMENT,
HasSubstr("Expected repetition_<repetition_index>")));
EXPECT_THAT(ParseRepetitionDirectoryName("chunk_1"),
StatusIs(error::INVALID_ARGUMENT,
HasSubstr("Expected repetition_<repetition_index>")));
}
TEST(PathUtilsTest, InvalidStreamDirectoryName) {
EXPECT_THAT(ParseStreamDirectoryName(""),
StatusIs(error::INVALID_ARGUMENT,
HasSubstr("Expected stream_<stream_index>")));
EXPECT_THAT(ParseStreamDirectoryName("stream_-1"),
StatusIs(error::INVALID_ARGUMENT,
HasSubstr("Expected stream_<stream_index>")));
EXPECT_THAT(ParseStreamDirectoryName("chunk_1"),
StatusIs(error::INVALID_ARGUMENT,
HasSubstr("Expected stream_<stream_index>")));
}
TEST(PathUtilsTest, ParseSplitFilename) {
EXPECT_THAT(ParseSplitFilename("split_0_1"), IsOkAndHolds(Pair(0, 1)));
}
TEST(PathUtilsTest, InvalidSplitFilename) {
EXPECT_THAT(
ParseSplitFilename(""),
StatusIs(error::INVALID_ARGUMENT,
HasSubstr(
"Expected split_<local_split_index>_<global_split_index>")));
EXPECT_THAT(
ParseSplitFilename("split_123"),
StatusIs(error::INVALID_ARGUMENT,
HasSubstr(
"Expected split_<local_split_index>_<global_split_index>")));
EXPECT_THAT(
ParseSplitFilename("split_-1_(-1)"),
StatusIs(error::INVALID_ARGUMENT,
HasSubstr(
"Expected split_<local_split_index>_<global_split_index>")));
EXPECT_THAT(
ParseSplitFilename("chunk_1_2"),
StatusIs(error::INVALID_ARGUMENT,
HasSubstr(
"Expected split_<local_split_index>_<global_split_index>")));
EXPECT_THAT(
ParseSplitFilename("split_5_0"),
StatusIs(
error::INVALID_ARGUMENT,
HasSubstr(
"The local split index 5 exceeds the global split index 0")));
}
TEST(PathUtilsTest, ParseCheckpointFilename) {
EXPECT_THAT(ParseCheckpointFilename("checkpoint_0_1"),
IsOkAndHolds(Pair(0, 1)));
EXPECT_THAT(ParseCheckpointFilename("checkpoint_0_-1"),
IsOkAndHolds(Pair(0, -1)));
}
TEST(PathUtilsTest, InvalidCheckpointFilename) {
EXPECT_THAT(
ParseCheckpointFilename(""),
StatusIs(error::INVALID_ARGUMENT,
HasSubstr(
"Expected "
"checkpoint_<checkpoint_index>_<checkpoint_num_elements>")));
EXPECT_THAT(
ParseCheckpointFilename("checkpoint_123"),
StatusIs(error::INVALID_ARGUMENT,
HasSubstr(
"Expected "
"checkpoint_<checkpoint_index>_<checkpoint_num_elements>")));
EXPECT_THAT(
ParseCheckpointFilename("checkpoint_-1_(-1)"),
StatusIs(error::INVALID_ARGUMENT,
HasSubstr(
"Expected "
"checkpoint_<checkpoint_index>_<checkpoint_num_elements>")));
EXPECT_THAT(
ParseCheckpointFilename("chunk_1_2"),
StatusIs(error::INVALID_ARGUMENT,
HasSubstr(
"Expected "
"checkpoint_<checkpoint_index>_<checkpoint_num_elements>")));
}
TEST(PathUtilsTest, ParseChunkFilename) {
EXPECT_THAT(ParseChunkFilename("chunk_0_1_2"),
IsOkAndHolds(FieldsAre(0, 1, 2)));
EXPECT_THAT(ParseChunkFilename("chunk_0_1_-1"),
IsOkAndHolds(FieldsAre(0, 1, -1)));
}
TEST(PathUtilsTest, InvalidChunkFilename) {
EXPECT_THAT(ParseChunkFilename(""),
StatusIs(error::INVALID_ARGUMENT,
HasSubstr("Expected "
"chunk_<stream_index>_<stream_chunk_index>_<"
"chunk_num_elements>")));
EXPECT_THAT(ParseChunkFilename("chunk_123_0"),
StatusIs(error::INVALID_ARGUMENT,
HasSubstr("Expected "
"chunk_<stream_index>_<stream_chunk_index>_<"
"chunk_num_elements>")));
EXPECT_THAT(ParseChunkFilename("chunk_-1_(-1)_0"),
StatusIs(error::INVALID_ARGUMENT,
HasSubstr("Expected "
"chunk_<stream_index>_<stream_chunk_index>_<"
"chunk_num_elements>")));
EXPECT_THAT(ParseChunkFilename("split_1_2_3"),
StatusIs(error::INVALID_ARGUMENT,
HasSubstr("Expected "
"chunk_<stream_index>_<stream_chunk_index>_<"
"chunk_num_elements>")));
}
TEST(PathUtilsTest, StreamDoneFilePath) {
EXPECT_THAT(StreamDoneFilePath("/path/to/snapshot", 0),
MatchesRegex("/path/to/snapshot.streams.stream_0.DONE"));
}
TEST(PathUtilsTest, StreamWorkerFilePath) {
EXPECT_THAT(StreamWorkerFilePath("/path/to/snapshot", 0),
MatchesRegex("/path/to/snapshot.streams.stream_0.owner_worker"));
EXPECT_THAT(StreamWorkerFilePath("/path/to/snapshot/streams/stream_0"),
MatchesRegex("/path/to/snapshot.streams.stream_0.owner_worker"));
}
TEST(PathUtilsTest, SnapshotDoneFilePath) {
EXPECT_THAT(SnapshotDoneFilePath("/path/to/snapshot"),
MatchesRegex("/path/to/snapshot.DONE"));
}
TEST(PathUtilsTest, SnapshotErrorFilePath) {
EXPECT_THAT(SnapshotErrorFilePath("/path/to/snapshot"),
MatchesRegex("/path/to/snapshot.ERROR"));
}
TEST(PathUtilsTest, SnapshotMetadataFilePath) {
EXPECT_THAT(SnapshotMetadataFilePath("/path/to/snapshot"),
MatchesRegex("/path/to/snapshot.snapshot.metadata"));
}
TEST(PathUtilsTest, DatasetDefFilePath) {
EXPECT_THAT(DatasetDefFilePath("/path/to/snapshot"),
MatchesRegex("/path/to/snapshot.dataset_def.proto"));
}
TEST(PathUtilsTest, DatasetSpecFilePath) {
EXPECT_THAT(DatasetSpecFilePath("/path/to/snapshot"),
MatchesRegex("/path/to/snapshot.dataset_spec.pb"));
}
TEST(PathUtilsTest, CheckpointsDirectory) {
EXPECT_THAT(CheckpointsDirectory("/path/to/snapshot", 0),
MatchesRegex("/path/to/snapshot.streams.stream_0.checkpoints"));
}
TEST(PathUtilsTest, CommittedChunksDirectory) {
EXPECT_THAT(CommittedChunksDirectory("/path/to/snapshot"),
MatchesRegex("/path/to/snapshot.chunks"));
}
TEST(PathUtilsTest, UncommittedChunksDirectory) {
EXPECT_THAT(
UncommittedChunksDirectory("/path/to/snapshot", 0),
MatchesRegex("/path/to/snapshot.streams.stream_0.uncommitted_chunks"));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/data/service/snapshot/path_utils.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/data/service/snapshot/path_utils_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
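// A minimal sketch of how the helpers in the record above compose: SplitPath
// produces the nested stream/source/repetition layout and ParseSplitFilename
// recovers the indices from the basename. The standalone main() and build
// setup are assumptions for illustration; only the two functions come from
// the record.
#include <iostream>
#include <string>

#include "tensorflow/core/data/service/snapshot/path_utils.h"

int main() {
  const std::string path = tensorflow::data::SplitPath(
      "/tmp/snapshot", /*stream_index=*/0, /*source_index=*/1,
      /*repetition_index=*/2, /*local_index=*/3, /*global_index=*/7);
  // .../streams/stream_0/splits/source_1/repetition_2/split_3_7
  std::cout << path << "\n";

  // ParseSplitFilename takes the basename internally, so a full path works.
  auto indices = tensorflow::data::ParseSplitFilename(path);
  if (indices.ok()) {
    std::cout << indices->first << " " << indices->second << "\n";  // 3 7
  }
  return 0;
}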
86af0dfd-9e9d-4348-8772-56caee98f0d0 | cpp | tensorflow/tensorflow | simple_tf_op | tensorflow/lite/kernels/shim/test_op/simple_tf_op.cc | tensorflow/lite/kernels/shim/test_op/simple_tf_op_test.cc | #include "tensorflow/lite/kernels/shim/test_op/simple_tf_op.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/lite/kernels/shim/tf_op_shim.h"
namespace tflite {
namespace shim {
REGISTER_TF_OP_SHIM(SimpleOpKernel);
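// The shim macro registers the op definition derived from SimpleOpKernel; the
// kernel builder below binds its CPU implementation.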
REGISTER_KERNEL_BUILDER(
Name(SimpleOpKernel::OpName()).Device(::tensorflow::DEVICE_CPU),
SimpleOpKernel);
}
} | #include <cstdint>
#include <gtest/gtest.h>
#include "xla/tsl/lib/core/status_test_util.h"
#include "tensorflow/core/framework/fake_input.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/kernels/ops_testutil.h"
#include "tensorflow/core/platform/tstring.h"
namespace tflite {
namespace shim {
namespace {
using ::tensorflow::DT_INT64;
using ::tensorflow::DT_STRING;
using ::tensorflow::FakeInput;
using ::tensorflow::NodeDefBuilder;
using ::tensorflow::TensorShape;
using ::tensorflow::tstring;
using ::tensorflow::test::AsTensor;
using ::tensorflow::test::ExpectTensorEqual;
class SimpleOpTfTest : public ::tensorflow::OpsTestBase {};
TEST_F(SimpleOpTfTest, Output1Size_5_N_2) {
TF_ASSERT_OK(NodeDefBuilder("simple_op", "SimpleOperation")
.Attr("output1_size", 5)
.Attr("output2_suffix", "foo")
.Attr("N", 2)
.Input(FakeInput(DT_STRING))
.Input(FakeInput(2, DT_INT64))
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
AddInputFromArray<tstring>(TensorShape({}), {"abc"});
AddInputFromArray<int64_t>(TensorShape({}), {123});
AddInputFromArray<int64_t>(TensorShape({2}), {456, 789});
TF_ASSERT_OK(RunOpKernel());
ExpectTensorEqual<int>(*GetOutput(0),
AsTensor<int>({0, 1, 2, 3, 4}, {5}));
ExpectTensorEqual<float>(
*GetOutput(1), AsTensor<float>({0, 0.5, 1., 1.5, 2.}, {5}));
ExpectTensorEqual<tstring>(
*GetOutput(2), AsTensor<tstring>({"0", "1", "2", "foo"}, {4}));
ExpectTensorEqual<int64_t>(*GetOutput(3),
AsTensor<int64_t>({124}, {}));
ExpectTensorEqual<int64_t>(*GetOutput(4),
AsTensor<int64_t>({457, 790}, {2}));
}
TEST_F(SimpleOpTfTest, Output1Size_3_N_0) {
TF_ASSERT_OK(NodeDefBuilder("simple_op", "SimpleOperation")
.Attr("output1_size", 3)
.Attr("output2_suffix", "foo")
.Attr("N", 0)
.Input(FakeInput(DT_STRING))
.Input(FakeInput(0, DT_INT64))
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
AddInputFromArray<tstring>(TensorShape({}), {"abcde"});
TF_ASSERT_OK(RunOpKernel());
ExpectTensorEqual<int>(*GetOutput(0),
AsTensor<int>({0, 1, 2, 3, 4}, {5}));
ExpectTensorEqual<float>(*GetOutput(1),
AsTensor<float>({0, 0.5, 1.}, {3}));
ExpectTensorEqual<tstring>(
*GetOutput(2),
AsTensor<tstring>({"0", "1", "2", "3", "4", "foo"}, {6}));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/shim/test_op/simple_tf_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/shim/test_op/simple_tf_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
c8a63f1d-c176-4b51-8f7a-1bb28e6cba6d | cpp | tensorflow/tensorflow | permuter | tensorflow/core/common_runtime/permuter.cc | tensorflow/core/common_runtime/permuter_test.cc | #include "tensorflow/core/common_runtime/permuter.h"
#include "tensorflow/core/common_runtime/collective_rma_local.h"
#include "tensorflow/core/common_runtime/collective_util.h"
#include "tensorflow/core/common_runtime/copy_tensor.h"
#include "tensorflow/core/common_runtime/device.h"
#include "tensorflow/core/common_runtime/device_mgr.h"
#include "tensorflow/core/common_runtime/dma_helper.h"
#include "tensorflow/core/common_runtime/process_util.h"
#include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/device_base.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/notification.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
Permuter::Permuter()
: col_ctx_(nullptr), col_params_(nullptr), done_(nullptr), counter_(0) {}
StatusCallback Permuter::CheckCounterAndCallDone() {
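  // The returned callback is shared by the one send and the one recv each
  // rank performs; the user-supplied done_ fires only on the second call.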
return [this](const Status& s) {
mu_.lock();
status_.Update(s);
int counter = ++counter_;
Status status = status_;
mu_.unlock();
if (counter == 2) done_(status);
};
}
Status Permuter::InitializeCollectiveContext(
std::shared_ptr<CollectiveContext> col_ctx) {
DCHECK(col_ctx->dev_mgr);
col_ctx_ = col_ctx;
col_params_ = col_ctx->col_params.get();
return collective_util::InitializeDeviceAndLocality(
col_ctx->dev_mgr, col_ctx->device_name, &col_ctx->device,
&col_ctx->device_locality);
}
void Permuter::Run(StatusCallback done) {
if (col_params_->instance.permutation.size() !=
col_params_->instance.devices.size()) {
    done(errors::Internal("Permutation must be the same size as devices"));
    return;
  }
done_ = std::move(done);
DispatchSend(col_params_->default_rank,
col_params_->instance.permutation[col_params_->default_rank],
col_ctx_->input, CheckCounterAndCallDone());
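  // Find the rank whose permutation target is this rank; that rank is the
  // peer this instance receives from.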
for (int i = 0; i < col_params_->instance.permutation.size(); ++i) {
if (col_params_->default_rank == col_params_->instance.permutation[i]) {
DispatchRecv(i, col_params_->instance.permutation[i], col_ctx_->output,
CheckCounterAndCallDone());
}
}
}
void Permuter::DispatchSend(int src_rank, int target_rank, const Tensor* tensor,
const StatusCallback& done) {
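  // Sender and receiver derive the same buffer key from the pair
  // (src_rank, target_rank), so the matching DispatchRecv call finds this
  // tensor.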
string send_buf_key =
strings::StrCat(col_ctx_->exec_key, src_rank, target_rank);
VLOG(1) << "DispatchSend " << send_buf_key << " from_device "
<< col_ctx_->device_name << " to_device "
<< col_params_->instance.devices[target_rank]
<< " target_rank=" << target_rank << " src_rank=" << src_rank;
col_ctx_->col_exec->remote_access()->PostToPeer(
col_params_->instance.devices[target_rank],
col_params_->group.members[target_rank].task, send_buf_key,
col_ctx_->device, col_ctx_->op_ctx->op_device_context(),
col_ctx_->op_ctx->output_alloc_attr(0), tensor, col_ctx_->device_locality,
col_ctx_->op_ctx->cancellation_manager(), done);
}
void Permuter::DispatchRecv(int src_rank, int target_rank, Tensor* tensor,
const StatusCallback& done) {
string recv_buf_key =
strings::StrCat(col_ctx_->exec_key, src_rank, target_rank);
VLOG(1) << "DispatchRecv " << recv_buf_key << " to_device "
<< col_ctx_->device_name << " from_device "
<< col_params_->instance.devices[src_rank]
<< " target_rank=" << target_rank << " src_rank=" << src_rank;
col_ctx_->col_exec->remote_access()->RecvFromPeer(
col_params_->instance.devices[src_rank],
col_params_->group.members[src_rank].task,
col_params_->group.members[src_rank].is_local, recv_buf_key,
col_ctx_->device, col_ctx_->op_ctx->op_device_context(),
col_ctx_->op_ctx->output_alloc_attr(0), tensor, col_ctx_->device_locality,
0, col_ctx_->op_ctx->cancellation_manager(), done);
}
namespace {
REGISTER_COLLECTIVE(Permute, Permuter);
}
} | #include "tensorflow/core/common_runtime/permuter.h"
#include <algorithm>
#include "absl/memory/memory.h"
#include "absl/types/span.h"
#include "tensorflow/core/common_runtime/base_collective_executor.h"
#include "tensorflow/core/common_runtime/collective_rma_local.h"
#include "tensorflow/core/common_runtime/collective_test_util.h"
#include "tensorflow/core/common_runtime/device_mgr.h"
#include "tensorflow/core/common_runtime/device_resolver_local.h"
#include "tensorflow/core/common_runtime/process_util.h"
#include "tensorflow/core/common_runtime/test_collective_executor_mgr.h"
#include "tensorflow/core/common_runtime/threadpool_device.h"
#include "tensorflow/core/framework/collective.h"
#include "tensorflow/core/framework/device_attributes.pb.h"
#include "tensorflow/core/framework/fake_input.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/lib/core/notification.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/gtl/inlined_vector.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/unbounded_work_queue.h"
#include "tensorflow/core/public/session_options.h"
#include "tensorflow/core/public/version.h"
namespace tensorflow {
namespace {
class PermuterTest : public ::testing::Test {
protected:
void Init(int num_workers, int num_devices,
const std::vector<int>& permutation, DataType dtype,
const TensorShape& shape, const DeviceType& device_type,
int fail_after) {
test_env_ = CreateCollectiveTestEnv(num_workers, num_devices, device_type);
test_env_->remote_access->set_fail_after(fail_after);
for (int wi = 0; wi < num_workers; ++wi) {
for (int di = 0; di < num_devices; ++di) {
int rank = wi * num_devices + di;
instances_.push_back(std::make_unique<DeviceInstance>(
rank, permutation, dtype, shape, test_env_.get()));
}
}
}
typedef std::function<void(Tensor*)> InitFunc;
void Permute(int fail_after) {
std::atomic<int> done(0);
for (auto& di : instances_) {
SchedClosure([&di, &done] {
di->DoPermute();
++done;
});
if (fail_after > 0) {
Env::Default()->SleepForMicroseconds(100);
}
}
while (done < instances_.size()) {
Env::Default()->SleepForMicroseconds(1000);
}
}
template <typename T>
void RunTest(DataType dtype, const DeviceType& device_type, int num_workers,
int num_devices, int tensor_len, int fail_after) {
std::vector<int> permutation(num_workers * num_devices);
std::iota(permutation.begin(), permutation.end(), 0);
for (int i = 0; i < permutation.size(); i += 2) {
if (permutation.size() == i + 1) {
std::swap(permutation[i], permutation[0]);
continue;
}
std::next_permutation(permutation.begin() + i,
permutation.begin() + i + 2);
}
Init(num_workers, num_devices, permutation, dtype,
TensorShape({tensor_len}), device_type, fail_after);
gtl::InlinedVector<T, 4> expected(tensor_len * num_devices * num_workers,
0.0);
for (int di = 0; di < instances_.size(); ++di) {
instances_[di]->InitTensor(
[&permutation, &expected, di, tensor_len](Tensor* t) {
            for (int64_t i = 0; i < t->NumElements(); ++i) {
float value = pow(10, static_cast<double>(di)) * i;
t->flat<T>()(i) = value;
expected[permutation[di] * tensor_len + i] = value;
}
});
}
Permute(fail_after);
for (int di = 0; di < instances_.size(); ++di) {
if (!instances_[di]->status_.ok()) {
ASSERT_GT(fail_after, 0);
ASSERT_NE(instances_[di]->status_.message().find("Deliberate failure"),
string::npos);
continue;
}
TF_EXPECT_OK(instances_[di]->status_);
test::ExpectTensorEqual<T>(
test::AsTensor<T>(
absl::MakeSpan(expected).subspan(di * tensor_len, tensor_len)),
instances_[di]->output_tensor_);
}
}
class DeviceInstance {
public:
DeviceInstance(int rank, std::vector<int> permutation, DataType dtype,
const TensorShape& shape, CollectiveTestEnv* test_env)
: test_env_(test_env),
input_tensor_(dtype, shape),
output_tensor_(dtype, shape) {
col_params_ = CreateCollectiveParams(*test_env_, rank, "Permute",
PERMUTE_COLLECTIVE, dtype, shape);
col_params_->instance.permutation = std::move(permutation);
for (const CollGroupMember& member : col_params_->group.members) {
col_params_->instance.devices.push_back(member.device.name());
}
string dev_name = col_params_->group.members[rank].device.name();
TF_CHECK_OK(test_env_->device_mgr->LookupDevice(dev_name, &device_))
<< "Couldn't find device " << dev_name
<< " existing devices: " << test_env_->device_mgr->DebugString();
}
void InitTensor(const InitFunc& f) { f(&input_tensor_); }
void DoPermute() {
status_ = RunCollective(test_env_, col_params_.get(), device_,
&input_tensor_, &output_tensor_);
}
CollectiveTestEnv* test_env_;
Tensor input_tensor_;
Tensor output_tensor_;
Device* device_;
core::RefCountPtr<CollectiveParams> col_params_;
Status status_;
};
std::unique_ptr<CollectiveTestEnv> test_env_;
std::vector<std::unique_ptr<DeviceInstance>> instances_;
};
#define DEF_TEST(B, T, W, D, L, A) \
TEST_F(PermuterTest, \
DaTy##B##_DevTy##T##_Wkr##W##_Dev##D##_Len##L##_Abrt##A) { \
DataType dtype = DT_##B; \
switch (dtype) { \
case DT_BOOL: { \
RunTest<bool>(dtype, DEVICE_##T, W, D, L, A); \
} break; \
case DT_FLOAT: { \
RunTest<float>(dtype, DEVICE_##T, W, D, L, A); \
} break; \
case DT_DOUBLE: { \
RunTest<double>(dtype, DEVICE_##T, W, D, L, A); \
} break; \
case DT_INT32: { \
RunTest<int32>(dtype, DEVICE_##T, W, D, L, A); \
} break; \
case DT_INT64: { \
RunTest<int64_t>(dtype, DEVICE_##T, W, D, L, A); \
} break; \
default: \
LOG(FATAL) << "Unimplemented"; \
} \
}
#if !(GOOGLE_CUDA || TENSORFLOW_USE_ROCM)
DEF_TEST(FLOAT, CPU, 1, 2, 1, 0)
DEF_TEST(FLOAT, CPU, 1, 3, 3, 0)
DEF_TEST(FLOAT, CPU, 1, 7, 3, 0)
DEF_TEST(FLOAT, CPU, 1, 2, 1001, 0)
DEF_TEST(FLOAT, CPU, 2, 2, 3, 0)
DEF_TEST(FLOAT, CPU, 2, 1, 128, 0)
DEF_TEST(FLOAT, CPU, 2, 4, 128, 0)
DEF_TEST(FLOAT, CPU, 2, 8, 4095, 0)
DEF_TEST(FLOAT, CPU, 4, 4, 1045991, 0)
DEF_TEST(BOOL, CPU, 1, 4, 1, 0)
DEF_TEST(BOOL, CPU, 2, 4, 1, 0)
DEF_TEST(BOOL, CPU, 2, 4, 1001, 0)
DEF_TEST(DOUBLE, CPU, 2, 4, 128, 0)
DEF_TEST(INT32, CPU, 2, 4, 128, 0)
DEF_TEST(INT64, CPU, 2, 4, 128, 0)
DEF_TEST(FLOAT, CPU, 1, 2, 1, 1)
DEF_TEST(FLOAT, CPU, 2, 4, 128, 1)
DEF_TEST(FLOAT, CPU, 2, 4, 128, 5)
#endif
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
DEF_TEST(FLOAT, GPU, 1, 2, 1, 0)
DEF_TEST(FLOAT, GPU, 1, 7, 3, 0)
DEF_TEST(FLOAT, GPU, 1, 2, 33, 0)
DEF_TEST(FLOAT, GPU, 1, 3, 64, 0)
DEF_TEST(FLOAT, GPU, 1, 8, 1001, 0)
DEF_TEST(FLOAT, GPU, 1, 8, 4095, 0)
DEF_TEST(FLOAT, GPU, 1, 8, 1045991, 0)
DEF_TEST(BOOL, GPU, 1, 4, 1, 0)
DEF_TEST(BOOL, GPU, 1, 4, 1001, 0)
DEF_TEST(DOUBLE, GPU, 1, 8, 1001, 0)
DEF_TEST(INT64, GPU, 1, 8, 1001, 0)
DEF_TEST(FLOAT, GPU, 1, 8, 128, 6)
#endif
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/permuter.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/permuter_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
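// A standalone sketch of the completion pattern used by
// Permuter::CheckCounterAndCallDone above: each rank issues exactly one send
// and one recv, and the user callback must fire only after both finish, with
// the first error (if any) preserved. The types here are simplified
// stand-ins, not the TensorFlow StatusCallback machinery.
#include <functional>
#include <iostream>
#include <mutex>
#include <string>

class TwoWayBarrier {
 public:
  explicit TwoWayBarrier(std::function<void(const std::string&)> done)
      : done_(std::move(done)) {}

  // Returns a callback suitable for both the send and the recv completion.
  std::function<void(const std::string&)> MakeCallback() {
    return [this](const std::string& status) {
      mu_.lock();
      if (!status.empty() && error_.empty()) error_ = status;  // first error wins
      int counter = ++counter_;
      std::string error = error_;
      mu_.unlock();
      if (counter == 2) done_(error);  // fires once both halves completed
    };
  }

 private:
  std::mutex mu_;
  int counter_ = 0;
  std::string error_;
  std::function<void(const std::string&)> done_;
};

int main() {
  TwoWayBarrier barrier([](const std::string& error) {
    std::cout << (error.empty() ? "done: OK" : "done: " + error) << "\n";
  });
  auto send_done = barrier.MakeCallback();
  auto recv_done = barrier.MakeCallback();
  send_done("");  // send completed OK; done not yet fired
  recv_done("");  // second completion fires done exactly once
  return 0;
}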
3426a9d7-9880-4064-a001-25e5ea338033 | cpp | tensorflow/tensorflow | stderr_reporter | tensorflow/lite/stderr_reporter.cc | tensorflow/lite/stderr_reporter_test.cc | #include "tensorflow/lite/stderr_reporter.h"
#include <stdarg.h>
#include "tensorflow/lite/core/api/error_reporter.h"
#include "tensorflow/lite/logger.h"
#include "tensorflow/lite/minimal_logging.h"
namespace tflite {
int StderrReporter::Report(const char* format, va_list args) {
logging_internal::MinimalLogger::LogFormatted(TFLITE_LOG_ERROR, format, args);
return 0;
}
ErrorReporter* DefaultErrorReporter() {
static StderrReporter* error_reporter = new StderrReporter;
return error_reporter;
}
} | #include "tensorflow/lite/stderr_reporter.h"
#include <gtest/gtest.h>
#include "tensorflow/lite/core/api/error_reporter.h"
namespace tflite {
namespace {
void CheckWritesToStderr(ErrorReporter *error_reporter) {
#ifndef TF_LITE_STRIP_ERROR_STRINGS
testing::internal::CaptureStderr();
#endif
TF_LITE_REPORT_ERROR(error_reporter, "Test: %d", 42);
#ifndef TF_LITE_STRIP_ERROR_STRINGS
EXPECT_EQ("ERROR: Test: 42\n", testing::internal::GetCapturedStderr());
#endif
}
TEST(StderrReporterTest, DefaultErrorReporter_WritesToStderr) {
CheckWritesToStderr(DefaultErrorReporter());
}
TEST(StderrReporterTest, StderrReporter_WritesToStderr) {
StderrReporter stderr_reporter;
CheckWritesToStderr(&stderr_reporter);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/stderr_reporter.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/stderr_reporter_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
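// StderrReporter above is one implementation of the ErrorReporter interface;
// a sketch of a custom reporter that buffers messages instead of logging,
// assuming only the virtual Report(format, va_list) hook declared in
// tensorflow/lite/core/api/error_reporter.h.
#include <cstdarg>
#include <cstdio>
#include <string>

#include "tensorflow/lite/core/api/error_reporter.h"

class BufferingReporter : public tflite::ErrorReporter {
 public:
  int Report(const char* format, va_list args) override {
    char buf[1024];
    int written = vsnprintf(buf, sizeof(buf), format, args);
    if (written > 0) messages_ += std::string(buf) + "\n";
    return written;
  }
  const std::string& messages() const { return messages_; }

 private:
  std::string messages_;
};

// Usage: TF_LITE_REPORT_ERROR(&reporter, "Test: %d", 42) appends "Test: 42\n"
// to reporter.messages() instead of writing to stderr.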
5485af59-308f-45e1-9106-bff86bd14755 | cpp | abseil/abseil-cpp | status_matchers | absl/status/internal/status_matchers.cc | absl/status/status_matchers_test.cc | #include "absl/status/internal/status_matchers.h"
#include <ostream>
#include <string>
#include "gmock/gmock.h"
#include "absl/base/config.h"
#include "absl/status/status.h"
namespace absl_testing {
ABSL_NAMESPACE_BEGIN
namespace status_internal {
void StatusIsMatcherCommonImpl::DescribeTo(std::ostream* os) const {
*os << ", has a status code that ";
code_matcher_.DescribeTo(os);
*os << ", and has an error message that ";
message_matcher_.DescribeTo(os);
}
void StatusIsMatcherCommonImpl::DescribeNegationTo(std::ostream* os) const {
*os << ", or has a status code that ";
code_matcher_.DescribeNegationTo(os);
*os << ", or has an error message that ";
message_matcher_.DescribeNegationTo(os);
}
bool StatusIsMatcherCommonImpl::MatchAndExplain(
const ::absl::Status& status,
::testing::MatchResultListener* result_listener) const {
::testing::StringMatchResultListener inner_listener;
if (!code_matcher_.MatchAndExplain(status.code(), &inner_listener)) {
*result_listener << (inner_listener.str().empty()
? "whose status code is wrong"
: "which has a status code " +
inner_listener.str());
return false;
}
if (!message_matcher_.Matches(std::string(status.message()))) {
*result_listener << "whose error message is wrong";
return false;
}
return true;
}
}
ABSL_NAMESPACE_END
} | #include "absl/status/status_matchers.h"
#include "gmock/gmock.h"
#include "gtest/gtest-spi.h"
#include "gtest/gtest.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
namespace {
using ::absl_testing::IsOk;
using ::absl_testing::IsOkAndHolds;
using ::absl_testing::StatusIs;
using ::testing::Gt;
TEST(StatusMatcherTest, StatusIsOk) { EXPECT_THAT(absl::OkStatus(), IsOk()); }
TEST(StatusMatcherTest, StatusOrIsOk) {
absl::StatusOr<int> ok_int = {0};
EXPECT_THAT(ok_int, IsOk());
}
TEST(StatusMatcherTest, StatusIsNotOk) {
absl::Status error = absl::UnknownError("Smigla");
EXPECT_NONFATAL_FAILURE(EXPECT_THAT(error, IsOk()), "Smigla");
}
TEST(StatusMatcherTest, StatusOrIsNotOk) {
absl::StatusOr<int> error = absl::UnknownError("Smigla");
EXPECT_NONFATAL_FAILURE(EXPECT_THAT(error, IsOk()), "Smigla");
}
TEST(StatusMatcherTest, IsOkAndHolds) {
absl::StatusOr<int> ok_int = {4};
absl::StatusOr<absl::string_view> ok_str = {"text"};
EXPECT_THAT(ok_int, IsOkAndHolds(4));
EXPECT_THAT(ok_int, IsOkAndHolds(Gt(0)));
EXPECT_THAT(ok_str, IsOkAndHolds("text"));
}
TEST(StatusMatcherTest, IsOkAndHoldsFailure) {
absl::StatusOr<int> ok_int = {502};
absl::StatusOr<int> error = absl::UnknownError("Smigla");
absl::StatusOr<absl::string_view> ok_str = {"actual"};
EXPECT_NONFATAL_FAILURE(EXPECT_THAT(ok_int, IsOkAndHolds(0)), "502");
EXPECT_NONFATAL_FAILURE(EXPECT_THAT(error, IsOkAndHolds(0)), "Smigla");
EXPECT_NONFATAL_FAILURE(EXPECT_THAT(ok_str, IsOkAndHolds("expected")),
"actual");
}
TEST(StatusMatcherTest, StatusIs) {
absl::Status unknown = absl::UnknownError("unbekannt");
absl::Status invalid = absl::InvalidArgumentError("ungueltig");
EXPECT_THAT(absl::OkStatus(), StatusIs(absl::StatusCode::kOk));
EXPECT_THAT(absl::OkStatus(), StatusIs(0));
EXPECT_THAT(unknown, StatusIs(absl::StatusCode::kUnknown));
EXPECT_THAT(unknown, StatusIs(2));
EXPECT_THAT(unknown, StatusIs(absl::StatusCode::kUnknown, "unbekannt"));
EXPECT_THAT(invalid, StatusIs(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(invalid, StatusIs(3));
EXPECT_THAT(invalid,
StatusIs(absl::StatusCode::kInvalidArgument, "ungueltig"));
}
TEST(StatusMatcherTest, StatusOrIs) {
absl::StatusOr<int> ok = {42};
absl::StatusOr<int> unknown = absl::UnknownError("unbekannt");
absl::StatusOr<absl::string_view> invalid =
absl::InvalidArgumentError("ungueltig");
EXPECT_THAT(ok, StatusIs(absl::StatusCode::kOk));
EXPECT_THAT(ok, StatusIs(0));
EXPECT_THAT(unknown, StatusIs(absl::StatusCode::kUnknown));
EXPECT_THAT(unknown, StatusIs(2));
EXPECT_THAT(unknown, StatusIs(absl::StatusCode::kUnknown, "unbekannt"));
EXPECT_THAT(invalid, StatusIs(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(invalid, StatusIs(3));
EXPECT_THAT(invalid,
StatusIs(absl::StatusCode::kInvalidArgument, "ungueltig"));
}
TEST(StatusMatcherTest, StatusIsFailure) {
absl::Status unknown = absl::UnknownError("unbekannt");
absl::Status invalid = absl::InvalidArgumentError("ungueltig");
EXPECT_NONFATAL_FAILURE(
EXPECT_THAT(absl::OkStatus(),
StatusIs(absl::StatusCode::kInvalidArgument)),
"OK");
EXPECT_NONFATAL_FAILURE(
EXPECT_THAT(unknown, StatusIs(absl::StatusCode::kCancelled)), "UNKNOWN");
EXPECT_NONFATAL_FAILURE(
EXPECT_THAT(unknown, StatusIs(absl::StatusCode::kUnknown, "inconnu")),
"unbekannt");
EXPECT_NONFATAL_FAILURE(
EXPECT_THAT(invalid, StatusIs(absl::StatusCode::kOutOfRange)), "INVALID");
EXPECT_NONFATAL_FAILURE(
EXPECT_THAT(invalid,
StatusIs(absl::StatusCode::kInvalidArgument, "invalide")),
"ungueltig");
}
} | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/status/internal/status_matchers.cc | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/status/status_matchers_test.cc | 03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4 |
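// The StatusIs implementation above accepts arbitrary matchers for both the
// code and the message, not just the exact values exercised in the test file;
// a small sketch of that usage, assuming the same gtest/gmock setup.
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/status/status.h"
#include "absl/status/status_matchers.h"

namespace {

using ::absl_testing::StatusIs;
using ::testing::AnyOf;
using ::testing::HasSubstr;

TEST(StatusMatcherSketch, MatchersForCodeAndMessage) {
  absl::Status error = absl::DeadlineExceededError("rpc timed out after 5s");
  // code_matcher_ receives the absl::StatusCode; message_matcher_ receives
  // the message as a std::string.
  EXPECT_THAT(error, StatusIs(AnyOf(absl::StatusCode::kDeadlineExceeded,
                                    absl::StatusCode::kUnavailable),
                              HasSubstr("timed out")));
}

}  // namespace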
cabe3974-40bd-4f00-8029-9f686f21bb85 | cpp | tensorflow/tensorflow | hlo_phi_graph | third_party/xla/xla/service/hlo_phi_graph.cc | third_party/xla/xla/service/hlo_phi_graph_test.cc | #include "xla/service/hlo_phi_graph.h"
#include <queue>
namespace xla {
HloValue::Id PhiGraph::GetOptimizedId(const HloValue& value) {
Node* node = value_id_to_node_[value.id()];
CHECK(!node->mark_as_dead);
return node->value_id;
}
bool PhiGraph::InputsEqualTo(const HloValue& value,
absl::Span<const HloValue* const> inputs) {
auto iter = value_id_to_node_.find(value.id());
CHECK(iter != value_id_to_node_.end());
absl::flat_hash_set<HloValue::Id> existing_set;
for (Node* operand : iter->second->operands) {
existing_set.insert(operand->value_id);
}
absl::flat_hash_set<HloValue::Id> new_set;
for (const HloValue* input : inputs) {
new_set.insert(input->id());
}
return existing_set == new_set;
}
HloValue::Id PhiGraph::FindOptimizedValue(const HloValue::Id id) {
auto iter = value_id_to_node_.find(id);
CHECK(iter != value_id_to_node_.end());
CHECK(!iter->second->mark_as_dead);
return iter->second->value_id;
}
PhiGraph::Node* PhiGraph::CreateOrReuseNode(const HloValue& value) {
auto iter = value_id_to_node_.find(value.id());
if (iter == value_id_to_node_.end()) {
node_storage_.emplace_back(std::make_unique<Node>());
Node* node = node_storage_.back().get();
node->value_id = value.id();
value_id_to_node_[value.id()] = node;
node_to_value_id_[node].push_back(value.id());
return node;
} else {
CHECK_NE(iter->second, nullptr);
CHECK_EQ(iter->second->value_id, value.id());
return iter->second;
}
}
void PhiGraph::ReplaceNodeWith(PhiGraph::Node* node, PhiGraph::Node* replace) {
CHECK(node->is_phi);
if (node->mark_as_dead) {
return;
}
if (replace->mark_as_dead) {
auto iter = value_id_to_node_.find(replace->value_id);
CHECK(iter != value_id_to_node_.end());
return ReplaceNodeWith(node, iter->second);
}
CHECK(!replace->mark_as_dead);
for (Node* user : node->users) {
absl::c_replace(user->operands, node, replace);
}
for (Node* operand : node->operands) {
absl::c_replace(operand->users, node, replace);
}
for (HloValue::Id value_id : node_to_value_id_[node]) {
CHECK(value_id_to_node_.contains(value_id));
value_id_to_node_[value_id] = replace;
}
absl::c_copy(node_to_value_id_[node],
std::back_inserter(node_to_value_id_[replace]));
node_to_value_id_[node].clear();
node->mark_as_dead = true;
}
void PhiGraph::RegisterPhi(const HloValue& value,
absl::Span<const HloValue* const> inputs) {
Node* node = CreateOrReuseNode(value);
CHECK(value.is_phi());
node->is_phi = true;
node->operands.clear();
for (auto input : inputs) {
CHECK(input != nullptr);
Node* input_node = CreateOrReuseNode(*input);
node->operands.push_back(input_node);
}
}
std::string PhiGraph::ToString() {
std::string out = "PhiGraph: \n";
for (auto& node : node_storage_) {
absl::StrAppend(&out, node->value_id);
if (node->is_phi) {
absl::StrAppend(&out, ", phi");
}
    if (node->mark_as_dead) {
      absl::StrAppend(&out, ", dead");
    }
    absl::StrAppend(&out, ":\n");
for (Node* input : node->operands) {
absl::StrAppend(&out, " ", input->value_id, "\n");
}
}
return out;
}
void PhiGraph::Optimize() {
VLOG(2) << "Optimizing phi graph:";
XLA_VLOG_LINES(2, ToString());
for (auto& node : node_storage_) {
for (Node* input : node->operands) {
input->users.push_back(node.get());
}
}
bool changed = true;
while (changed) {
changed = false;
absl::flat_hash_set<Node*> checked_for_closure;
for (auto& node : node_storage_) {
if (!node->is_phi) {
continue;
}
if (node->mark_as_dead) {
continue;
}
Node* node_ptr = node.get();
VLOG(2) << "Optimizing: " << node_ptr->value_id;
CHECK_GE(node_ptr->operands.size(), 1);
auto it = absl::c_find(node_ptr->operands, node_ptr);
while (it != node_ptr->operands.end()) {
node_ptr->operands.erase(it);
it = absl::c_find(node_ptr->operands, node_ptr);
}
it = absl::c_find(node_ptr->users, node_ptr);
while (it != node_ptr->users.end()) {
node_ptr->users.erase(it);
it = absl::c_find(node_ptr->users, node_ptr);
}
CHECK_GE(node_ptr->operands.size(), 1);
bool all_inputs_are_same = absl::c_all_of(
node_ptr->operands,
[&](Node* elem) { return elem == node_ptr->operands[0]; });
if (all_inputs_are_same) {
VLOG(1) << "All inputs to node " << node_ptr->value_id
<< " are the same, replacing it with "
<< node_ptr->operands[0]->value_id;
ReplaceNodeWith(node_ptr, node_ptr->operands[0]);
changed = true;
continue;
}
if (checked_for_closure.contains(node_ptr)) {
continue;
}
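      // Explore the transitive operand closure of this phi; if it contains
      // exactly one non-phi value, every phi in the closure is equivalent to
      // that value.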
absl::flat_hash_set<Node*> workset;
std::queue<Node*> worklist;
Node* non_phi = nullptr;
worklist.push(node_ptr);
while (!worklist.empty()) {
Node* todo = worklist.front();
worklist.pop();
if (workset.contains(todo)) {
continue;
}
checked_for_closure.insert(todo);
workset.insert(todo);
for (Node* operand : todo->operands) {
worklist.push(operand);
}
if (!todo->is_phi) {
if (non_phi != nullptr && non_phi != todo) {
non_phi = nullptr;
break;
} else {
non_phi = todo;
}
}
}
if (non_phi != nullptr) {
for (Node* node : workset) {
if (!node->is_phi) {
CHECK_EQ(node, non_phi);
continue;
}
VLOG(1) << "Replace node " << node->value_id
<< " in the closure with node " << non_phi->value_id;
ReplaceNodeWith(node, non_phi);
changed = true;
}
}
}
}
}
} | #include "xla/service/hlo_phi_graph.h"
#include "xla/literal_util.h"
#include "tsl/platform/test.h"
namespace xla {
namespace {
class PhiGraphTest : public ::testing::Test {
protected:
HloValue NewHloValue(bool is_phi) {
static int64_t id = 0;
return HloValue(id++, dummy_inst_.get(), {}, is_phi);
}
void SetUp() override {
dummy_inst_ = HloInstruction::CreateConstant(LiteralUtil::CreateR0(0.0f));
}
std::unique_ptr<HloInstruction> dummy_inst_;
};
TEST_F(PhiGraphTest, SelfReferencingPhi) {
PhiGraph phi_graph;
HloValue A = NewHloValue(false);
HloValue B = NewHloValue(true);
phi_graph.RegisterPhi(B, {&A, &B});
phi_graph.Optimize();
EXPECT_EQ(A.id(), phi_graph.FindOptimizedValue(B.id()));
}
TEST_F(PhiGraphTest, PhiWithSameInputs) {
PhiGraph phi_graph;
HloValue A = NewHloValue(false);
HloValue B = NewHloValue(true);
phi_graph.RegisterPhi(B, {&A, &A});
phi_graph.Optimize();
EXPECT_EQ(A.id(), phi_graph.FindOptimizedValue(B.id()));
}
TEST_F(PhiGraphTest, CircularPhi) {
PhiGraph phi_graph;
HloValue A = NewHloValue(true);
HloValue B = NewHloValue(true);
HloValue C = NewHloValue(true);
HloValue D = NewHloValue(false);
phi_graph.RegisterPhi(A, {&B, &C});
phi_graph.RegisterPhi(B, {&D, &C});
phi_graph.RegisterPhi(C, {&A, &B});
phi_graph.Optimize();
EXPECT_EQ(D.id(), phi_graph.FindOptimizedValue(A.id()));
EXPECT_EQ(D.id(), phi_graph.FindOptimizedValue(B.id()));
EXPECT_EQ(D.id(), phi_graph.FindOptimizedValue(C.id()));
}
TEST_F(PhiGraphTest, NestedPhiReduction) {
PhiGraph phi_graph;
HloValue A = NewHloValue(true);
HloValue B = NewHloValue(true);
HloValue C = NewHloValue(true);
HloValue D = NewHloValue(false);
HloValue E = NewHloValue(true);
phi_graph.RegisterPhi(A, {&B, &C});
phi_graph.RegisterPhi(B, {&E, &C});
phi_graph.RegisterPhi(C, {&A, &B});
phi_graph.RegisterPhi(E, {&D, &D});
phi_graph.Optimize();
EXPECT_EQ(D.id(), phi_graph.FindOptimizedValue(A.id()));
EXPECT_EQ(D.id(), phi_graph.FindOptimizedValue(B.id()));
EXPECT_EQ(D.id(), phi_graph.FindOptimizedValue(C.id()));
EXPECT_EQ(D.id(), phi_graph.FindOptimizedValue(E.id()));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/hlo_phi_graph.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/hlo_phi_graph_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
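// A compact re-derivation of the closure rule in PhiGraph::Optimize above: a
// web of phis that transitively reaches exactly one non-phi value is
// equivalent to that value. This toy version works on a plain adjacency list
// instead of the Node structures above; the id encoding is an illustration
// only.
#include <iostream>
#include <optional>
#include <queue>
#include <set>
#include <vector>

// Ids below kFirstPhi are concrete values; the rest index into `operands`.
constexpr int kFirstPhi = 100;

std::optional<int> ReduceClosure(
    int phi, const std::vector<std::vector<int>>& operands) {
  std::set<int> seen;
  std::queue<int> worklist;
  worklist.push(phi);
  std::optional<int> non_phi;
  while (!worklist.empty()) {
    int id = worklist.front();
    worklist.pop();
    if (!seen.insert(id).second) continue;
    if (id < kFirstPhi) {
      if (non_phi && *non_phi != id) return std::nullopt;  // two sources: keep phi
      non_phi = id;
      continue;
    }
    for (int op : operands[id - kFirstPhi]) worklist.push(op);
  }
  return non_phi;
}

int main() {
  // phi 100 = phi(101, 102), phi 101 = phi(7, 102), phi 102 = phi(100, 101):
  // the whole cycle only ever reaches value 7, so every phi reduces to 7.
  std::vector<std::vector<int>> operands = {{101, 102}, {7, 102}, {100, 101}};
  std::optional<int> v = ReduceClosure(100, operands);
  std::cout << (v ? *v : -1) << "\n";  // 7
  return 0;
}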
1157f66a-b3ed-4eb8-96c5-4f7828a45e7f | cpp | tensorflow/tensorflow | variant_add_n | tensorflow/lite/kernels/variants/list_kernels/variant_add_n.cc | tensorflow/lite/kernels/variants/list_kernels/variant_add_n_test.cc | #include <algorithm>
#include <cstring>
#include <utility>
#include <vector>
#include "tensorflow/lite/array.h"
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/cpu_backend_context.h"
#include "tensorflow/lite/kernels/internal/optimized/optimized_ops.h"
#include "tensorflow/lite/kernels/internal/portable_tensor.h"
#include "tensorflow/lite/kernels/internal/runtime_shape.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/kernels/variants/list_ops_util.h"
#include "tensorflow/lite/kernels/variants/tensor_array.h"
#include "tensorflow/lite/util.h"
namespace tflite {
namespace variants {
namespace ops {
namespace add_n {
namespace {
using ::tflite::variants::TensorArray;
constexpr int kInputTensor1 = 0;
constexpr int kOutputTensor = 0;
struct OpData {
int scratch_tensor_index;
};
void* Init(TfLiteContext* context, const char* buffer, size_t length) {
auto* op_data = new OpData();
context->AddTensors(context, 1, &op_data->scratch_tensor_index);
return op_data;
}
void Free(TfLiteContext* context, void* buffer) {
delete reinterpret_cast<OpData*>(buffer);
}
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE(context, NumInputs(node) >= 2);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
const TfLiteTensor* input1;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputTensor1, &input1));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
OpData* op_data = reinterpret_cast<OpData*>(node->user_data);
TfLiteIntArrayFree(node->temporaries);
node->temporaries = TfLiteIntArrayCreate(1);
node->temporaries->data[0] = op_data->scratch_tensor_index;
TfLiteTensor* scratch_tensor;
TF_LITE_ENSURE_OK(
context, GetTemporarySafe(context, node, 0, &scratch_tensor));
scratch_tensor->type = kTfLiteNoType;
scratch_tensor->allocation_type = kTfLiteDynamic;
for (int i = kInputTensor1 + 1; i < NumInputs(node); ++i) {
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, i, &input));
TF_LITE_ENSURE_EQ(context, input->type, kTfLiteVariant);
}
output->type = kTfLiteVariant;
output->allocation_type = kTfLiteVariantObject;
return kTfLiteOk;
}
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
CpuBackendContext* cpu_backend_context =
CpuBackendContext::GetFromContext(context);
const TfLiteTensor* input1;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputTensor1, &input1));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
TfLiteTensor* scratch_tensor;
TF_LITE_ENSURE_OK(context,
GetTemporarySafe(context, node, 0, &scratch_tensor));
const TensorArray* const arr =
reinterpret_cast<const TensorArray*>(input1->data.data);
const int num_elements = arr->NumElements();
const TfLiteType t = arr->ElementType();
const int num_inputs = NumInputs(node);
IntArrayUniquePtr merged_shape = BuildTfLiteArray(*arr->ElementShape());
std::vector<const TensorArray*> input_arrs;
input_arrs.reserve(num_inputs);
input_arrs.push_back(arr);
for (int i = 1; i < num_inputs; ++i) {
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, i, &input));
const TensorArray* const arr_i =
reinterpret_cast<const TensorArray*>(input->data.data);
TF_LITE_ENSURE_EQ(context, num_elements, arr_i->NumElements());
TF_LITE_ENSURE_EQ(context, t, arr_i->ElementType());
merged_shape = variants::MergeShapesOrNull(
std::move(merged_shape), BuildTfLiteArray(*arr_i->ElementShape()));
TF_LITE_ENSURE(context, merged_shape != nullptr);
input_arrs.push_back(arr_i);
}
TF_LITE_ENSURE_OK(context, TfLiteTensorVariantRealloc<TensorArray>(
output, t, BuildTfLiteArray(0)));
TensorArray* const output_arr =
reinterpret_cast<TensorArray*>(output->data.data);
output_arr->Resize(num_elements);
for (int i = 0; i < num_elements; ++i) {
TfLiteIntArray* row_shape = nullptr;
std::vector<TfLiteTensor*> row_tensors;
for (const auto* array : input_arrs) {
const TfLiteTensor* at = array->At(i);
if (!at) continue;
if (!row_shape)
row_shape = at->dims;
else
TF_LITE_ENSURE(context, TfLiteIntArrayEqual(row_shape, at->dims));
row_tensors.push_back(const_cast<TfLiteTensor*>(at));
}
if (row_shape == nullptr) {
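      // No input list has a tensor at this index: emit zeros of the merged
      // element shape, which must therefore be fully defined.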
TF_LITE_ENSURE(context,
variants::IsShapeFullyDefined(*merged_shape.get()));
TensorUniquePtr row_output = BuildTfLiteTensor(
t, BuildTfLiteArray(*merged_shape.get()), kTfLiteDynamic);
memset(row_output->data.data, 0, row_output->bytes);
output_arr->Set(i, std::move(row_output));
continue;
}
TensorUniquePtr row_output =
BuildTfLiteTensor(t, BuildTfLiteArray(*row_shape), kTfLiteDynamic);
if (row_tensors.size() < 2) {
TfLiteTensorCopy(row_tensors[0], row_output.get());
output_arr->Set(i, std::move(row_output));
continue;
}
const int num_inputs_for_row = static_cast<int>(row_tensors.size());
const int thread_count =
std::min(std::max(1, static_cast<int>(num_inputs_for_row) / 2),
cpu_backend_context->max_num_threads());
    // The multithreaded AddN kernel needs one row-sized accumulator per
    // thread, so the scratch buffer is `thread_count` rows long.
    IntArrayUniquePtr scratch_shape = BuildTfLiteArray(
        {thread_count * static_cast<int>(NumElements(row_tensors[0]))});
    scratch_tensor->type = t;
    TF_LITE_ENSURE_OK(context,
                      context->ResizeTensor(context, scratch_tensor,
                                            scratch_shape.release()));
const RuntimeShape row_runtime_shape(row_shape->size, row_shape->data);
if (t == kTfLiteInt32) {
      VectorOfTensors<int> tensors(row_tensors);
      // Sum only the tensors actually present in this row; absent list items
      // contribute nothing.
      optimized_ops::AddN<int>(row_runtime_shape, num_inputs_for_row,
                               tensors.data(),
                               GetTensorData<int>(row_output.get()),
                               GetTensorData<int>(scratch_tensor),
                               cpu_backend_context);
} else if (t == kTfLiteFloat32) {
      VectorOfTensors<float> tensors(row_tensors);
      optimized_ops::AddN<float>(row_runtime_shape, num_inputs_for_row,
                                 tensors.data(),
                                 GetTensorData<float>(row_output.get()),
                                 GetTensorData<float>(scratch_tensor),
                                 cpu_backend_context);
} else {
TF_LITE_KERNEL_LOG(context, "Subtype is not supported for variant addn.");
return kTfLiteError;
}
TF_LITE_ENSURE(context, output_arr->Set(i, std::move(row_output)));
}
return kTfLiteOk;
}
}
}
TfLiteRegistration* Register_VARIANT_ADD_N() {
static TfLiteRegistration r = {add_n::Init, add_n::Free, add_n::Prepare,
add_n::Eval};
return &r;
}
}
}
} | #include <optional>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/core/c/c_api_types.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/kernels/variants/list_kernels/test_util.h"
#include "tensorflow/lite/kernels/variants/list_ops_lib.h"
#include "tensorflow/lite/kernels/variants/tensor_array.h"
#include "tensorflow/lite/portable_type_to_tflitetype.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace variants {
namespace ops {
namespace {
using ::testing::AllOf;
class ListAddNModel : public ListOpModel {
public:
explicit ListAddNModel(int num_inputs) {
std::vector<std::vector<int>> input_shapes(num_inputs, std::vector<int>{});
for (int i = 0; i < num_inputs; ++i) {
input_inds_.push_back(AddInput({TensorType_VARIANT, {}}));
}
output_ind_ = AddOutput({TensorType_VARIANT, {}});
SetCustomOp("VariantAddN", {}, Register_VARIANT_ADD_N);
BuildInterpreter(input_shapes);
}
const TensorArray* GetOutput() {
TfLiteTensor* tensor = interpreter_->tensor(output_ind_);
TFLITE_CHECK(tensor != nullptr && tensor->type == kTfLiteVariant &&
tensor->allocation_type == kTfLiteVariantObject);
return static_cast<const TensorArray*>(
static_cast<const VariantData*>(tensor->data.data));
}
int GetIndOfInput(int input) { return input_inds_[input]; }
private:
std::vector<int> input_inds_;
int output_ind_;
};
template <typename T>
class ListAddNTest : public ::testing::Test {};
TYPED_TEST_SUITE_P(ListAddNTest);
TYPED_TEST_P(ListAddNTest, TwoInputs_AllItemsPresent_AllSameShape) {
TfLiteType tfl_type = typeToTfLiteType<TypeParam>();
std::optional<TensorType> tensor_type = TflToTensorType(tfl_type);
ASSERT_TRUE(tensor_type.has_value());
ListAddNModel m(2);
for (const int i : {0, 1}) {
const int list_ind = m.GetIndOfInput(i);
m.PopulateListTensor(list_ind, {}, 2, tfl_type);
m.ListSetItem(list_ind, 0, {2, 2}, tfl_type,
std::vector<TypeParam>(4, 1).data());
m.ListSetItem(list_ind, 1, {2, 2}, tfl_type,
std::vector<TypeParam>(4, 1).data());
}
ASSERT_EQ(m.Invoke(), kTfLiteOk);
const TensorArray* const arr = m.GetOutput();
ASSERT_EQ(arr->NumElements(), 2);
ASSERT_EQ(arr->ElementType(), tfl_type);
EXPECT_THAT(arr->At(0), AllOf(IsAllocatedAs(tfl_type), DimsAre({2, 2}),
FilledWith<TypeParam>(2)));
EXPECT_THAT(arr->At(1), AllOf(IsAllocatedAs(tfl_type), DimsAre({2, 2}),
FilledWith<TypeParam>(2)));
}
TYPED_TEST_P(ListAddNTest, TwoInputs_AllItemsPresent_ListsContainMixedShapes) {
TfLiteType tfl_type = typeToTfLiteType<TypeParam>();
std::optional<TensorType> tensor_type = TflToTensorType(tfl_type);
ASSERT_TRUE(tensor_type.has_value());
ListAddNModel m(2);
for (const int i : {0, 1}) {
const int list_ind = m.GetIndOfInput(i);
m.PopulateListTensor(list_ind, {}, 2, tfl_type);
m.ListSetItem(list_ind, 0, {2, 2}, tfl_type,
std::vector<TypeParam>(4, 1).data());
m.ListSetItem(list_ind, 1, {3, 3}, tfl_type,
std::vector<TypeParam>(9, 1).data());
}
ASSERT_EQ(m.Invoke(), kTfLiteOk);
const TensorArray* const arr = m.GetOutput();
ASSERT_EQ(arr->NumElements(), 2);
ASSERT_EQ(arr->ElementType(), tfl_type);
EXPECT_THAT(arr->At(0), AllOf(IsAllocatedAs(tfl_type), DimsAre({2, 2}),
FilledWith<TypeParam>(2)));
EXPECT_THAT(arr->At(1), AllOf(IsAllocatedAs(tfl_type), DimsAre({3, 3}),
FilledWith<TypeParam>(2)));
}
TYPED_TEST_P(ListAddNTest, TwoInputs_NoItemsPresent_ListShapesMerge) {
TfLiteType tfl_type = typeToTfLiteType<TypeParam>();
std::optional<TensorType> tensor_type = TflToTensorType(tfl_type);
ASSERT_TRUE(tensor_type.has_value());
ListAddNModel m(2);
m.PopulateListTensor(m.GetIndOfInput(0), {2, -1}, 1, tfl_type);
m.PopulateListTensor(m.GetIndOfInput(1), {-1, 2}, 1, tfl_type);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
const TensorArray* const arr = m.GetOutput();
ASSERT_EQ(arr->NumElements(), 1);
ASSERT_EQ(arr->ElementType(), tfl_type);
EXPECT_THAT(arr->At(0), AllOf(IsAllocatedAs(tfl_type), DimsAre({2, 2}),
FilledWith<TypeParam>(0)));
}
TYPED_TEST_P(ListAddNTest, TwoInputs_NoItemsPresent_ListShapesUndefinedError) {
TfLiteType tfl_type = typeToTfLiteType<TypeParam>();
std::optional<TensorType> tensor_type = TflToTensorType(tfl_type);
ASSERT_TRUE(tensor_type.has_value());
ListAddNModel m(2);
m.PopulateListTensor(m.GetIndOfInput(0), {2, -1}, 1, tfl_type);
m.PopulateListTensor(m.GetIndOfInput(1), {-1, -1}, 1, tfl_type);
ASSERT_EQ(m.Invoke(), kTfLiteError);
}
TYPED_TEST_P(ListAddNTest, TwoInputs_SomeItemsPresent_UsesElementShape) {
TfLiteType tfl_type = typeToTfLiteType<TypeParam>();
std::optional<TensorType> tensor_type = TflToTensorType(tfl_type);
ASSERT_TRUE(tensor_type.has_value());
ListAddNModel m(2);
m.PopulateListTensor(m.GetIndOfInput(0), {}, 1, tfl_type);
m.PopulateListTensor(m.GetIndOfInput(1), {}, 1, tfl_type);
m.ListSetItem(m.GetIndOfInput(0), 0, {3, 3}, tfl_type,
std::vector<TypeParam>(9, 1).data());
ASSERT_EQ(m.Invoke(), kTfLiteOk);
const TensorArray* const arr = m.GetOutput();
ASSERT_EQ(arr->NumElements(), 1);
ASSERT_EQ(arr->ElementType(), tfl_type);
EXPECT_THAT(arr->At(0), AllOf(IsAllocatedAs(tfl_type), DimsAre({3, 3}),
FilledWith<TypeParam>(1)));
}
REGISTER_TYPED_TEST_SUITE_P(ListAddNTest,
TwoInputs_AllItemsPresent_AllSameShape,
TwoInputs_AllItemsPresent_ListsContainMixedShapes,
TwoInputs_NoItemsPresent_ListShapesMerge,
TwoInputs_SomeItemsPresent_UsesElementShape,
TwoInputs_NoItemsPresent_ListShapesUndefinedError);
using ValidTypes = ::testing::Types<int, float>;
INSTANTIATE_TYPED_TEST_SUITE_P(ListAddNTests, ListAddNTest, ValidTypes);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/variants/list_kernels/variant_add_n.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/variants/list_kernels/variant_add_n_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
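// A sketch of the shape-merging rule that MergeShapesOrNull applies in Eval
// above: -1 marks an unknown dimension, known dimensions must agree, and a
// known dimension refines an unknown one (so {2,-1} and {-1,2} merge to
// {2,2}). This standalone version uses std::vector instead of TfLiteIntArray
// and is an illustration, not the library implementation.
#include <iostream>
#include <optional>
#include <vector>

std::optional<std::vector<int>> MergeShapes(const std::vector<int>& a,
                                            const std::vector<int>& b) {
  if (a.size() != b.size()) return std::nullopt;  // ranks must match
  std::vector<int> merged(a.size());
  for (size_t i = 0; i < a.size(); ++i) {
    if (a[i] == -1) {
      merged[i] = b[i];
    } else if (b[i] == -1 || a[i] == b[i]) {
      merged[i] = a[i];
    } else {
      return std::nullopt;  // conflicting known dimensions
    }
  }
  return merged;
}

int main() {
  auto m = MergeShapes({2, -1}, {-1, 2});
  if (m) std::cout << (*m)[0] << "," << (*m)[1] << "\n";  // 2,2
  if (!MergeShapes({2, -1}, {3, -1})) std::cout << "conflict\n";
  return 0;
}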
3501a4c3-1526-4991-a582-faa2a95b8622 | cpp | tensorflow/tensorflow | perception_ops_utils | tensorflow/compiler/mlir/lite/utils/perception_ops_utils.cc | tensorflow/compiler/mlir/lite/utils/perception_ops_utils_test.cc | #include "tensorflow/compiler/mlir/lite/utils/perception_ops_utils.h"
#include <string>
#include "flatbuffers/base.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/Attributes.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/OpDefinition.h"
#include "mlir/IR/Types.h"
#include "mlir/IR/Value.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Support/LogicalResult.h"
#include "tensorflow/compiler/mlir/lite/core/c/builtin_op_data.h"
#include "tensorflow/compiler/mlir/lite/ir/tfl_ops.h"
namespace mlir {
namespace TFL {
namespace {
constexpr char kTFImplements[] = "tf._implements";
constexpr char kMaxUnpooling[] = "MaxUnpooling2D";
constexpr char kImageWarping[] = "DenseImageWarp";
inline ConstBytesAttr CustomOption(OpBuilder* builder,
const std::string& content) {
return ConstBytesAttr::get(builder->getContext(),
StringRef(content.data(), content.size()));
}
inline LogicalResult HasIntegerArrayWithSize(func::FuncOp* func,
const DictionaryAttr& attrs,
const std::string& attr_name,
int N) {
ArrayAttr array_attr =
mlir::dyn_cast_or_null<ArrayAttr>(attrs.get(attr_name));
if (array_attr == nullptr || array_attr.size() != N) {
return func->emitWarning()
<< "'" << attr_name << "' attribute for " << kMaxUnpooling
<< " must be set and has size of " << N;
}
for (Attribute integer_attr : array_attr.getValue()) {
IntegerAttr value = mlir::dyn_cast<IntegerAttr>(integer_attr);
if (!value) {
return func->emitWarning()
<< "'" << attr_name << "' attribute for " << kMaxUnpooling
<< " does not contain integer values";
}
}
return success();
}
inline LogicalResult GetIntegerArraySafe(
func::FuncOp* func, const DictionaryAttr& attrs,
const std::string& attr_name, llvm::SmallVectorImpl<int32_t>* results,
int N) {
ArrayAttr array_attr =
mlir::dyn_cast_or_null<ArrayAttr>(attrs.get(attr_name));
if (array_attr == nullptr || array_attr.size() != N) {
return func->emitError()
<< "'" << attr_name << "' attribute for " << kMaxUnpooling
<< " must be set and has size of " << N;
}
results->reserve(N);
for (Attribute integer_attr : array_attr.getValue()) {
IntegerAttr value = mlir::dyn_cast<IntegerAttr>(integer_attr);
if (!value) {
return func->emitError()
<< "'" << attr_name << "' attribute for " << kMaxUnpooling
<< " does not contain integer values";
}
results->push_back(value.getInt());
}
return success();
}
}
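// Replaces the function body with a single TFL custom op implementing
// MaxUnpooling2D, forwarding the function arguments and attaching the
// serialized pool parameters as custom options.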
LogicalResult ConvertMaxUnpoolingFunc::RewriteFunc() {
func_.eraseBody();
func_.addEntryBlock();
func_->setAttr(kTFImplements,
StringAttr::get(func_.getContext(), kMaxUnpooling));
OpBuilder builder(func_.getBody());
std::string custom_option_buffer;
if (failed(CreateCustomOptions(custom_option_buffer))) {
return failure();
}
auto op = builder.create<CustomOp>(
func_.getLoc(), func_.getFunctionType().getResults(),
func_.getArguments(), kMaxUnpooling,
CustomOption(&builder, custom_option_buffer));
builder.create<func::ReturnOp>(func_.getLoc(), op.getResults());
return success();
}
LogicalResult ConvertMaxUnpoolingFunc::VerifySignature() {
if (func_.getNumArguments() != 2) {
return func_.emitWarning()
<< "Invalid number of arguments to " << kMaxUnpooling << ": "
<< func_.getNumArguments();
}
if (func_.getFunctionType().getNumResults() != 1) {
return func_.emitWarning()
<< "Invalid number of results from " << kMaxUnpooling << ": "
<< func_.getFunctionType().getNumResults();
}
auto attrs = attr_.getAttrs();
if (failed(HasIntegerArrayWithSize(&func_, attrs, "pool_size", 2))) {
return failure();
}
if (failed(HasIntegerArrayWithSize(&func_, attrs, "strides", 2))) {
return failure();
}
auto padding = mlir::dyn_cast_or_null<StringAttr>(attrs.get("padding"));
if (!padding) {
return func_.emitWarning() << "'padding' attribute for " << kMaxUnpooling
<< " is not set or not a string";
}
if (padding.getValue() != "VALID" && padding.getValue() != "SAME") {
return func_.emitWarning()
<< "Padding for " << kMaxUnpooling << " must be 'SAME' or 'VALID'";
}
return success();
}
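// Serializes the pool_size, strides, and padding attributes into a
// TfLitePoolParams struct stored in `custom_option_buffer`.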
LogicalResult ConvertMaxUnpoolingFunc::CreateCustomOptions(
std::string& custom_option_buffer) {
auto attrs = attr_.getAttrs();
TfLitePoolParams pool_params;
llvm::SmallVector<int32_t, 2> pool_size;
if (failed(GetIntegerArraySafe(&func_, attrs, "pool_size", &pool_size, 2))) {
return failure();
}
pool_params.filter_height = pool_size[0];
pool_params.filter_width = pool_size[1];
llvm::SmallVector<int32_t, 2> strides;
if (failed(GetIntegerArraySafe(&func_, attrs, "strides", &strides, 2))) {
return failure();
}
pool_params.stride_height = strides[0];
pool_params.stride_width = strides[1];
auto padding = mlir::dyn_cast_or_null<StringAttr>(attrs.get("padding"));
if (!padding) {
return func_.emitError() << "'padding' attribute for " << kMaxUnpooling
<< " is not set or not a string";
}
if (padding.getValue() == "VALID") {
pool_params.padding = kTfLitePaddingValid;
} else if (padding.getValue() == "SAME") {
pool_params.padding = kTfLitePaddingSame;
} else {
return func_.emitError()
<< "Padding for " << kMaxUnpooling << " must be 'SAME' or 'VALID'";
}
pool_params.activation = kTfLiteActNone;
pool_params.computed.padding = TfLitePaddingValues{0, 0, 0, 0};
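  // On big-endian hosts, byte-swap each 32-bit word so the serialized bytes
  // use the little-endian layout assumed by the flatbuffer custom options.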
#if FLATBUFFERS_LITTLEENDIAN == 0
int32_t* p = reinterpret_cast<int32_t*>(&pool_params);
for (size_t i = 0; i < sizeof(TfLitePoolParams) / 4; i++, p++)
*p = flatbuffers::EndianSwap(*p);
#endif
custom_option_buffer.assign(reinterpret_cast<char*>(&pool_params),
sizeof(TfLitePoolParams));
return success();
}
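// Replaces the function body with a single TFL custom op implementing
// DenseImageWarp; this op carries no custom options.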
LogicalResult ConvertDenseImageWarpFunc::RewriteFunc() {
func_.eraseBody();
func_.addEntryBlock();
func_->setAttr(kTFImplements,
StringAttr::get(func_.getContext(), kImageWarping));
OpBuilder builder(func_.getBody());
auto op = builder.create<CustomOp>(func_.getLoc(),
func_.getFunctionType().getResults(),
func_.getArguments(), kImageWarping,
CustomOption(&builder, ""));
builder.create<func::ReturnOp>(func_.getLoc(), op.getResults());
return success();
}
LogicalResult ConvertDenseImageWarpFunc::VerifySignature() {
if (func_.getNumArguments() != 2) {
return func_.emitWarning()
<< "Invalid number of arguments to " << kImageWarping << ": "
<< func_.getNumArguments();
}
if (func_.getFunctionType().getNumResults() != 1) {
return func_.emitWarning()
<< "Invalid number of results from " << kImageWarping << ": "
<< func_.getFunctionType().getNumResults();
}
auto image_type = mlir::dyn_cast_or_null<RankedTensorType>(
func_.getFunctionType().getInput(0));
if (!image_type || !image_type.getElementType().isF32() ||
image_type.getRank() != 4) {
return func_.emitWarning() << "Image should be a 4D float tensor";
}
auto flow_type = mlir::dyn_cast_or_null<RankedTensorType>(
func_.getFunctionType().getInput(1));
if (!flow_type || !flow_type.getElementType().isF32() ||
flow_type.getRank() != 4) {
return func_.emitWarning() << "Flow should be a 4D float tensor";
}
auto output_type = mlir::dyn_cast_or_null<RankedTensorType>(
func_.getFunctionType().getResult(0));
if (!output_type || !output_type.getElementType().isF32() ||
output_type.getRank() != 4) {
return func_.emitWarning() << "Output should be a 4D float tensor";
}
return success();
}
}
} | #include "tensorflow/compiler/mlir/lite/utils/perception_ops_utils.h"
#include <memory>
#include <string>
#include <vector>
#include "mlir/Dialect/Arith/IR/Arith.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/Attributes.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/Location.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Support/LogicalResult.h"
#include "tensorflow/compiler/mlir/lite/ir/tfl_ops.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_attributes.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_dialect.h"
#include "tensorflow/core/platform/test.h"
namespace mlir {
namespace TFL {
namespace {
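// Builds a stub function annotated with the MaxUnpooling2D "tf._implements"
// attribute and the given input/output types.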
template <int NInput, int NOutput>
func::FuncOp createMaxUnpoolingFunc(
mlir::Builder* builder, const SmallVector<mlir::Type, NInput>& input_types,
const SmallVector<mlir::Type, NOutput>& output_types) {
auto func_type = builder->getFunctionType(input_types, output_types);
auto func = func::FuncOp::create(
mlir::NameLoc::get(builder->getStringAttr("fused_func")), "fused_func",
func_type, {});
func.addEntryBlock();
mlir::StringAttr attr_value = builder->getStringAttr("MaxUnpooling2D");
func->setAttr("tf._implements", attr_value);
return func;
}
func::FuncOp createMaxUnpoolingFunc(
mlir::Builder* builder, const SmallVector<int64_t, 4>& input_shape,
const SmallVector<int64_t, 4>& output_shape) {
auto input_type = RankedTensorType::get(input_shape, builder->getF32Type());
auto indices_type = RankedTensorType::get(input_shape, builder->getI64Type());
auto output_type = RankedTensorType::get(output_shape, builder->getF32Type());
SmallVector<mlir::Type, 2> input_types{input_type, indices_type};
SmallVector<mlir::Type, 1> output_types{output_type};
return createMaxUnpoolingFunc<2, 1>(builder, input_types, output_types);
}
template <int N>
ArrayAttr createInt32Array(mlir::Builder* builder, mlir::MLIRContext* context,
const SmallVector<int32_t, N>& values) {
SmallVector<Attribute, N> ret;
for (int32_t value : values) {
ret.push_back(builder->getI32IntegerAttr(value));
}
return ArrayAttr::get(context, ret);
}
template <int N>
ArrayAttr createInt64Array(mlir::Builder* builder, mlir::MLIRContext* context,
const SmallVector<int64_t, N>& values) {
SmallVector<Attribute, N> ret;
for (int64_t value : values) {
ret.push_back(builder->getI64IntegerAttr(value));
}
return ArrayAttr::get(context, ret);
}
mlir::TF::FuncAttr createMaxUnpoolingAttr(mlir::MLIRContext* context,
const std::string& padding,
const ArrayAttr& pool_size,
const ArrayAttr& strides) {
SmallVector<::mlir::NamedAttribute, 3> fields;
auto padding_id = ::mlir::StringAttr::get(context, "padding");
fields.emplace_back(padding_id, StringAttr::get(context, padding));
auto pool_size_id = ::mlir::StringAttr::get(context, "pool_size");
fields.emplace_back(pool_size_id, pool_size);
auto strides_id = ::mlir::StringAttr::get(context, "strides");
fields.emplace_back(strides_id, strides);
DictionaryAttr dict = DictionaryAttr::get(context, fields);
return TF::FuncAttr::get(context, "MaxUnpooling2D", dict);
}
}
class PerceptionUtilsTest : public ::testing::Test {
protected:
PerceptionUtilsTest() {}
void SetUp() override {
context_ = std::make_unique<mlir::MLIRContext>();
context_->loadDialect<mlir::arith::ArithDialect, mlir::func::FuncDialect,
mlir::TF::TensorFlowDialect, TensorFlowLiteDialect>();
builder_ = std::make_unique<mlir::Builder>(context_.get());
fused_max_unpooling_func_ =
createMaxUnpoolingFunc(builder_.get(), {2, 4, 4, 2}, {2, 2, 2, 2});
func_attr_ = createMaxUnpoolingAttr(
context_.get(), "SAME",
createInt32Array<2>(builder_.get(), context_.get(), {2, 2}),
createInt32Array<2>(builder_.get(), context_.get(), {2, 2}));
}
void TearDown() override {
fused_max_unpooling_func_.erase();
builder_.reset();
}
func::FuncOp fused_max_unpooling_func_;
mlir::TF::FuncAttr func_attr_;
std::unique_ptr<mlir::MLIRContext> context_;
std::unique_ptr<mlir::Builder> builder_;
};
TEST_F(PerceptionUtilsTest, VerifySignatureValid) {
mlir::TFL::ConvertMaxUnpoolingFunc convert(fused_max_unpooling_func_,
func_attr_);
EXPECT_FALSE(failed(convert.VerifySignature()));
}
TEST_F(PerceptionUtilsTest, VerifySignatureInvalid) {
auto input_type = RankedTensorType::get({1, 2, 2, 1}, builder_->getF32Type());
auto output_type =
RankedTensorType::get({1, 2, 1, 1}, builder_->getF32Type());
SmallVector<mlir::Type, 1> input_types{input_type};
SmallVector<mlir::Type, 1> output_types{output_type};
auto max_unpooling_func =
createMaxUnpoolingFunc<1, 1>(builder_.get(), input_types, output_types);
mlir::TFL::ConvertMaxUnpoolingFunc convert(max_unpooling_func, func_attr_);
EXPECT_TRUE(failed(convert.VerifySignature()));
max_unpooling_func->erase();
}
TEST_F(PerceptionUtilsTest, RewriteValid) {
mlir::TFL::ConvertMaxUnpoolingFunc convert(fused_max_unpooling_func_,
func_attr_);
EXPECT_FALSE(failed(convert.RewriteFunc()));
}
TEST_F(PerceptionUtilsTest, RewriteWrongPadding) {
auto func_attr = createMaxUnpoolingAttr(
context_.get(), "INVALID",
createInt32Array<2>(builder_.get(), context_.get(), {2, 2}),
createInt32Array<2>(builder_.get(), context_.get(), {2, 2}));
mlir::TFL::ConvertMaxUnpoolingFunc convert(fused_max_unpooling_func_,
func_attr);
EXPECT_TRUE(failed(convert.RewriteFunc()));
}
TEST_F(PerceptionUtilsTest, RewriteWrongFilter) {
auto func_attr = createMaxUnpoolingAttr(
context_.get(), "VALID",
createInt32Array<2>(builder_.get(), context_.get(), {2, 2, 2}),
createInt32Array<2>(builder_.get(), context_.get(), {2, 2}));
mlir::TFL::ConvertMaxUnpoolingFunc convert(fused_max_unpooling_func_,
func_attr);
EXPECT_TRUE(failed(convert.RewriteFunc()));
}
TEST_F(PerceptionUtilsTest, RewriteWrongStrides) {
auto func_attr = createMaxUnpoolingAttr(
context_.get(), "VALID",
createInt32Array<2>(builder_.get(), context_.get(), {2, 2}),
createInt32Array<2>(builder_.get(), context_.get(), {2, 2, 0}));
mlir::TFL::ConvertMaxUnpoolingFunc convert(fused_max_unpooling_func_,
func_attr);
EXPECT_TRUE(failed(convert.RewriteFunc()));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/lite/utils/perception_ops_utils.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/lite/utils/perception_ops_utils_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
4f642d61-6d96-4e9c-995b-78264081631f | cpp | tensorflow/tensorflow | crc32c | third_party/xla/xla/tsl/lib/hash/crc32c.cc | third_party/xla/xla/tsl/lib/hash/crc32c_test.cc | #include "xla/tsl/lib/hash/crc32c.h"
#include <stdint.h>
#include "absl/strings/cord.h"
#include "absl/strings/string_view.h"
#include "tsl/platform/types.h"
namespace tsl {
namespace crc32c {
#if defined(TF_CORD_SUPPORT)
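// Extends `crc` over every chunk of `cord` without flattening the cord.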
uint32 Extend(uint32 crc, const absl::Cord &cord) {
for (absl::string_view fragment : cord.Chunks()) {
crc = Extend(crc, fragment.data(), fragment.size());
}
return crc;
}
#endif
}
} | #include "xla/tsl/lib/hash/crc32c.h"
#include <string>
#include "absl/strings/cord.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/test.h"
#include "tsl/platform/test_benchmark.h"
#include "tsl/platform/types.h"
namespace tsl {
namespace crc32c {
TEST(CRC, StandardResults) {
char buf[32];
memset(buf, 0, sizeof(buf));
ASSERT_EQ(0x8a9136aa, Value(buf, sizeof(buf)));
memset(buf, 0xff, sizeof(buf));
ASSERT_EQ(0x62a8ab43, Value(buf, sizeof(buf)));
for (int i = 0; i < 32; i++) {
buf[i] = i;
}
ASSERT_EQ(0x46dd794e, Value(buf, sizeof(buf)));
for (int i = 0; i < 32; i++) {
buf[i] = 31 - i;
}
ASSERT_EQ(0x113fdb5c, Value(buf, sizeof(buf)));
unsigned char data[48] = {
0x01, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00,
0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x18, 0x28, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};
ASSERT_EQ(0xd9963a56, Value(reinterpret_cast<char*>(data), sizeof(data)));
ASSERT_EQ(0xdd1b19be, Value(reinterpret_cast<char*>(data), sizeof(data) - 7));
ASSERT_EQ(0x4930c4b1,
Value(reinterpret_cast<char*>(data) + 1, sizeof(data) - 4));
}
TEST(CRC, Values) { ASSERT_NE(Value("a", 1), Value("foo", 3)); }
TEST(CRC, Extend) {
ASSERT_EQ(Value("hello world", 11), Extend(Value("hello ", 6), "world", 5));
}
TEST(CRC, Mask) {
uint32 crc = Value("foo", 3);
ASSERT_NE(crc, Mask(crc));
ASSERT_NE(crc, Mask(Mask(crc)));
ASSERT_EQ(crc, Unmask(Mask(crc)));
ASSERT_EQ(crc, Unmask(Unmask(Mask(Mask(crc)))));
}
#if defined(PLATFORM_GOOGLE)
TEST(CRC, ValuesWithCord) {
ASSERT_NE(Value(absl::Cord("a")), Value(absl::Cord("foo")));
}
TEST(CRC, ExtendWithCord) {
ASSERT_EQ(Value(absl::Cord("hello world")),
Extend(Value(absl::Cord("hello ")), absl::Cord("world")));
}
#endif
static void BM_CRC(::testing::benchmark::State& state) {
int len = state.range(0);
std::string input(len, 'x');
uint32 h = 0;
for (auto s : state) {
h = Extend(h, input.data() + 1, len - 1);
}
state.SetBytesProcessed(state.iterations() * len);
VLOG(1) << h;
}
BENCHMARK(BM_CRC)->Range(1, 256 * 1024);
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/lib/hash/crc32c.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/lib/hash/crc32c_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
84a69cff-dda7-4779-a36a-8b037048acc0 | cpp | google/tensorstore | murmurhash3 | tensorstore/kvstore/neuroglancer_uint64_sharded/murmurhash3.cc | tensorstore/kvstore/neuroglancer_uint64_sharded/murmurhash3_test.cc | #include "tensorstore/kvstore/neuroglancer_uint64_sharded/murmurhash3.h"
#include <cstdint>
namespace tensorstore {
namespace neuroglancer_uint64_sharded {
namespace {
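// Finalization mix ("fmix32") from MurmurHash3; forces avalanching of the
// final accumulator bits.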
constexpr uint32_t MurmurHash3_x86_128Mix(uint32_t h) {
h ^= h >> 16;
h *= 0x85ebca6b;
h ^= h >> 13;
h *= 0xc2b2ae35;
h ^= h >> 16;
return h;
}
constexpr uint32_t RotateLeft(uint32_t x, int r) {
return (x << r) | (x >> (32 - r));
}
}
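// Computes MurmurHash3_x86_128 specialized for an 8-byte input. `h` supplies
// the 128-bit seed on entry and receives the digest on return.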
void MurmurHash3_x86_128Hash64Bits(uint64_t input, uint32_t h[4]) {
  uint32_t h1 = h[0], h2 = h[1], h3 = h[2], h4 = h[3];
const uint32_t c1 = 0x239b961b;
const uint32_t c2 = 0xab0e9789;
const uint32_t c3 = 0x38b34ae5;
const uint32_t low = static_cast<uint32_t>(input);
const uint32_t high = input >> 32;
uint32_t k2 = high * c2;
k2 = RotateLeft(k2, 16);
k2 *= c3;
h2 ^= k2;
uint32_t k1 = low * c1;
k1 = RotateLeft(k1, 15);
k1 *= c2;
h1 ^= k1;
const uint32_t len = 8;
h1 ^= len;
h2 ^= len;
h3 ^= len;
h4 ^= len;
h1 += h2;
h1 += h3;
h1 += h4;
h2 += h1;
h3 += h1;
h4 += h1;
h1 = MurmurHash3_x86_128Mix(h1);
h2 = MurmurHash3_x86_128Mix(h2);
h3 = MurmurHash3_x86_128Mix(h3);
h4 = MurmurHash3_x86_128Mix(h4);
h1 += h2;
h1 += h3;
h1 += h4;
h2 += h1;
h3 += h1;
h4 += h1;
h[0] = h1;
h[1] = h2;
h[2] = h3;
h[3] = h4;
}
}
} | #include "tensorstore/kvstore/neuroglancer_uint64_sharded/murmurhash3.h"
#include <cstdint>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
namespace {
using ::tensorstore::neuroglancer_uint64_sharded::MurmurHash3_x86_128Hash64Bits;
TEST(MurmurHash3Test, Basic) {
uint32_t h[4];
h[0] = h[1] = h[2] = h[3] = 0;
MurmurHash3_x86_128Hash64Bits(0, h);
EXPECT_THAT(h,
::testing::ElementsAre(0x00000000e028ae41, 0x000000004772b084,
0x000000004772b084, 0x000000004772b084));
h[0] = h[1] = h[2] = h[3] = 1;
MurmurHash3_x86_128Hash64Bits(0, h);
EXPECT_THAT(h,
::testing::ElementsAre(0x000000005ad58a7e, 0x0000000054337108,
0x0000000054337108, 0x0000000054337108));
h[0] = h[1] = h[2] = h[3] = 2;
MurmurHash3_x86_128Hash64Bits(0, h);
EXPECT_THAT(h,
::testing::ElementsAre(0x0000000064010da2, 0x0000000062e8bc17,
0x0000000062e8bc17, 0x0000000062e8bc17));
h[0] = h[1] = h[2] = h[3] = 0;
MurmurHash3_x86_128Hash64Bits(1, h);
EXPECT_THAT(h,
::testing::ElementsAre(0x0000000016d4ce9a, 0x00000000e8bd67d6,
0x00000000e8bd67d6, 0x00000000e8bd67d6));
h[0] = h[1] = h[2] = h[3] = 1;
MurmurHash3_x86_128Hash64Bits(1, h);
EXPECT_THAT(h,
::testing::ElementsAre(0x000000004b7ab8c6, 0x00000000eb555955,
0x00000000eb555955, 0x00000000eb555955));
h[0] = h[1] = h[2] = h[3] = 2;
MurmurHash3_x86_128Hash64Bits(1, h);
EXPECT_THAT(h,
::testing::ElementsAre(0x00000000eb2301be, 0x0000000048e12494,
0x0000000048e12494, 0x0000000048e12494));
h[0] = h[1] = h[2] = h[3] = 0;
MurmurHash3_x86_128Hash64Bits(42, h);
EXPECT_THAT(h,
::testing::ElementsAre(0x000000005119f47a, 0x00000000c20b94f9,
0x00000000c20b94f9, 0x00000000c20b94f9));
h[0] = h[1] = h[2] = h[3] = 1;
MurmurHash3_x86_128Hash64Bits(42, h);
EXPECT_THAT(h,
::testing::ElementsAre(0x00000000d6b51bca, 0x00000000a25ad86b,
0x00000000a25ad86b, 0x00000000a25ad86b));
h[0] = h[1] = h[2] = h[3] = 2;
MurmurHash3_x86_128Hash64Bits(42, h);
EXPECT_THAT(h,
::testing::ElementsAre(0x000000002d83d9c7, 0x00000000082115eb,
0x00000000082115eb, 0x00000000082115eb));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/kvstore/neuroglancer_uint64_sharded/murmurhash3.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/kvstore/neuroglancer_uint64_sharded/murmurhash3_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
f0a397fc-dd5d-4fcd-9a26-2967c72f8146 | cpp | tensorflow/tensorflow | map_and_batch_fusion | tensorflow/core/grappler/optimizers/data/map_and_batch_fusion.cc | tensorflow/core/grappler/optimizers/data/map_and_batch_fusion_test.cc | #include "tensorflow/core/grappler/optimizers/data/map_and_batch_fusion.h"
#include "absl/container/flat_hash_set.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/grappler/clusters/cluster.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/mutable_graph_view.h"
#include "tensorflow/core/grappler/op_types.h"
#include "tensorflow/core/grappler/optimizers/custom_graph_optimizer_registry.h"
#include "tensorflow/core/grappler/optimizers/data/graph_utils.h"
#include "tensorflow/core/grappler/utils.h"
#include "tensorflow/core/platform/protobuf.h"
namespace tensorflow {
namespace grappler {
namespace {
constexpr char kFusedOpName[] = "MapAndBatchDataset";
constexpr char kParallelMap[] = "ParallelMapDataset";
constexpr char kParallelMapV2[] = "ParallelMapDatasetV2";
bool IsParallelMap(const NodeDef& node) {
return node.op() == kParallelMap || node.op() == kParallelMapV2;
}
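// Builds the fused MapAndBatchDataset node, forwarding the map function and
// its captured arguments plus the batch size, num_parallel_calls, and
// drop_remainder inputs.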
NodeDef MakeMapAndBatchNode(const NodeDef& map_node, const NodeDef& batch_node,
MutableGraphView* graph) {
NodeDef new_node;
new_node.set_op(kFusedOpName);
graph_utils::SetUniqueGraphNodeName(kFusedOpName, graph->graph(), &new_node);
new_node.add_input(map_node.input(0));
int num_other_args;
if (IsParallelMap(map_node)) {
num_other_args = map_node.input_size() - 2;
} else {
num_other_args = map_node.input_size() - 1;
}
for (int i = 0; i < num_other_args; i++) {
new_node.add_input(map_node.input(i + 1));
}
new_node.add_input(batch_node.input(1));
if (map_node.op() == kParallelMap) {
NodeDef* v = graph->GetNode(map_node.input(map_node.input_size() - 1));
NodeDef* tmp = graph_utils::AddScalarConstNode<int64_t>(
v->attr().at("value").tensor().int_val(0), graph);
new_node.add_input(tmp->name());
} else if (map_node.op() == kParallelMapV2) {
new_node.add_input(map_node.input(map_node.input_size() - 1));
} else {
NodeDef* tmp = graph_utils::AddScalarConstNode<int64_t>(1, graph);
new_node.add_input(tmp->name());
}
if (batch_node.op() == "BatchDatasetV2") {
new_node.add_input(batch_node.input(2));
} else {
NodeDef* tmp = graph_utils::AddScalarConstNode<bool>(false, graph);
new_node.add_input(tmp->name());
}
for (auto key : {"f", "Targuments"}) {
graph_utils::CopyAttribute(key, map_node, &new_node);
}
graph_utils::CopyShapesAndTypesAttrs(batch_node, &new_node);
for (auto key : {"preserve_cardinality"}) {
if (gtl::FindOrNull(map_node.attr(), key)) {
graph_utils::CopyAttribute(key, map_node, &new_node);
}
}
graph_utils::MaybeSetFusedMetadata(map_node, batch_node, &new_node);
return new_node;
}
}
Status MapAndBatchFusion::OptimizeAndCollectStats(Cluster* cluster,
const GrapplerItem& item,
GraphDef* output,
OptimizationStats* stats) {
*output = item.graph;
MutableGraphView graph(output);
absl::flat_hash_set<string> nodes_to_delete;
for (const NodeDef& node : item.graph.node()) {
if (node.op() != "BatchDataset" && node.op() != "BatchDatasetV2") {
continue;
}
const NodeDef& batch_node = node;
NodeDef* node2 = graph_utils::GetInputNode(batch_node, graph);
if (node2->op() != "MapDataset" && !IsParallelMap(*node2)) {
continue;
}
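    // Skip fusion for maps that opt into the unbounded threadpool.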
if (node2->attr().find("use_unbounded_threadpool") != node2->attr().end() &&
node2->attr().at("use_unbounded_threadpool").b()) {
continue;
}
NodeDef* map_node = node2;
auto* new_node =
graph.AddNode(MakeMapAndBatchNode(*map_node, batch_node, &graph));
TF_RETURN_IF_ERROR(
graph.UpdateFanouts(batch_node.name(), new_node->name()));
nodes_to_delete.insert(map_node->name());
nodes_to_delete.insert(batch_node.name());
stats->num_changes++;
}
TF_RETURN_IF_ERROR(graph.DeleteNodes(nodes_to_delete));
return absl::OkStatus();
}
REGISTER_GRAPH_OPTIMIZER_AS(MapAndBatchFusion, "map_and_batch_fusion");
}
} | #include "tensorflow/core/grappler/optimizers/data/map_and_batch_fusion.h"
#include "tensorflow/core/framework/attr_value_util.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/optimizers/data/graph_utils.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace grappler {
namespace {
TEST(MapAndBatchFusionTest, FuseMapAndBatchNodesIntoOne) {
GrapplerItem item;
MutableGraphView graph(&item.graph);
NodeDef *start_node = graph_utils::AddScalarConstNode<int64_t>(0, &graph);
NodeDef *stop_node = graph_utils::AddScalarConstNode<int64_t>(10, &graph);
NodeDef *step_node = graph_utils::AddScalarConstNode<int64_t>(1, &graph);
std::vector<string> range_inputs(3);
range_inputs[0] = start_node->name();
range_inputs[1] = stop_node->name();
range_inputs[2] = step_node->name();
std::vector<std::pair<string, AttrValue>> range_attrs;
NodeDef *range_node = graph_utils::AddNode("", "RangeDataset", range_inputs,
range_attrs, &graph);
NodeDef *captured_input_node =
graph_utils::AddScalarConstNode<StringPiece>("hello", &graph);
NodeDef *map_node;
{
std::vector<string> map_inputs(2);
map_inputs[0] = range_node->name();
map_inputs[1] = captured_input_node->name();
std::vector<std::pair<string, AttrValue>> map_attrs(2);
AttrValue f_attr;
SetAttrValue("f", &f_attr);
map_attrs[0] = std::make_pair("f", f_attr);
AttrValue args_attr;
SetAttrValue("Targuments", &args_attr);
map_attrs[1] = std::make_pair("Targuments", args_attr);
map_node =
graph_utils::AddNode("", "MapDataset", map_inputs, map_attrs, &graph);
}
NodeDef *batch_size_node =
graph_utils::AddScalarConstNode<int64_t>(5, &graph);
NodeDef *batch_node;
{
std::vector<string> batch_inputs(2);
batch_inputs[0] = map_node->name();
batch_inputs[1] = batch_size_node->name();
std::vector<std::pair<string, AttrValue>> batch_attrs(2);
AttrValue shapes_attr;
SetAttrValue("output_shapes", &shapes_attr);
batch_attrs[0] = std::make_pair("output_shapes", shapes_attr);
AttrValue types_attr;
SetAttrValue("output_types", &types_attr);
batch_attrs[1] = std::make_pair("output_types", types_attr);
batch_node = graph_utils::AddNode("", "BatchDataset", batch_inputs,
batch_attrs, &graph);
}
MapAndBatchFusion optimizer;
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
EXPECT_FALSE(
graph_utils::ContainsGraphNodeWithName(map_node->name(), output));
EXPECT_FALSE(
graph_utils::ContainsGraphNodeWithName(batch_node->name(), output));
EXPECT_TRUE(graph_utils::ContainsNodeWithOp("MapAndBatchDataset", output));
NodeDef map_and_batch_node = output.node(
graph_utils::FindGraphNodeWithOp("MapAndBatchDataset", output));
EXPECT_EQ(map_and_batch_node.input_size(), 5);
EXPECT_EQ(map_and_batch_node.input(0), map_node->input(0));
EXPECT_EQ(map_and_batch_node.input(1), map_node->input(1));
EXPECT_EQ(map_and_batch_node.input(2), batch_node->input(1));
NodeDef num_parallel_calls_node = output.node(
graph_utils::FindGraphNodeWithName(map_and_batch_node.input(3), output));
EXPECT_EQ(num_parallel_calls_node.attr().at("value").tensor().int64_val(0),
1);
NodeDef drop_remainder_node = output.node(
graph_utils::FindGraphNodeWithName(map_and_batch_node.input(4), output));
EXPECT_EQ(drop_remainder_node.attr().at("value").tensor().bool_val(0), false);
EXPECT_TRUE(AreAttrValuesEqual(map_and_batch_node.attr().at("f"),
map_node->attr().at("f")));
EXPECT_TRUE(AreAttrValuesEqual(map_and_batch_node.attr().at("Targuments"),
map_node->attr().at("Targuments")));
EXPECT_TRUE(AreAttrValuesEqual(map_and_batch_node.attr().at("output_shapes"),
batch_node->attr().at("output_shapes")));
EXPECT_TRUE(AreAttrValuesEqual(map_and_batch_node.attr().at("output_types"),
batch_node->attr().at("output_types")));
}
TEST(MapAndBatchFusionTest, FuseMapAndBatchV2NodesIntoOne) {
GrapplerItem item;
MutableGraphView graph(&item.graph);
NodeDef *start_node = graph_utils::AddScalarConstNode<int64_t>(0, &graph);
NodeDef *stop_node = graph_utils::AddScalarConstNode<int64_t>(10, &graph);
NodeDef *step_node = graph_utils::AddScalarConstNode<int64_t>(1, &graph);
std::vector<string> range_inputs(3);
range_inputs[0] = start_node->name();
range_inputs[1] = stop_node->name();
range_inputs[2] = step_node->name();
std::vector<std::pair<string, AttrValue>> range_attrs;
NodeDef *range_node = graph_utils::AddNode("", "RangeDataset", range_inputs,
range_attrs, &graph);
NodeDef *captured_input_node =
graph_utils::AddScalarConstNode<StringPiece>("hello", &graph);
NodeDef *map_node;
{
std::vector<string> map_inputs(2);
map_inputs[0] = range_node->name();
map_inputs[1] = captured_input_node->name();
std::vector<std::pair<string, AttrValue>> map_attrs(2);
AttrValue f_attr;
SetAttrValue("f", &f_attr);
map_attrs[0] = std::make_pair("f", f_attr);
AttrValue args_attr;
SetAttrValue("Targuments", &args_attr);
map_attrs[1] = std::make_pair("Targuments", args_attr);
map_node =
graph_utils::AddNode("", "MapDataset", map_inputs, map_attrs, &graph);
}
NodeDef *batch_size_node =
graph_utils::AddScalarConstNode<int64_t>(5, &graph);
NodeDef *drop_remainder_node =
graph_utils::AddScalarConstNode<bool>(true, &graph);
NodeDef *batch_node;
{
std::vector<string> batch_inputs(3);
batch_inputs[0] = map_node->name();
batch_inputs[1] = batch_size_node->name();
batch_inputs[2] = drop_remainder_node->name();
std::vector<std::pair<string, AttrValue>> batch_attrs(2);
AttrValue shapes_attr;
SetAttrValue("output_shapes", &shapes_attr);
batch_attrs[0] = std::make_pair("output_shapes", shapes_attr);
AttrValue types_attr;
SetAttrValue("output_types", &types_attr);
batch_attrs[1] = std::make_pair("output_types", types_attr);
batch_node = graph_utils::AddNode("", "BatchDatasetV2", batch_inputs,
batch_attrs, &graph);
}
MapAndBatchFusion optimizer;
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
EXPECT_FALSE(
graph_utils::ContainsGraphNodeWithName(map_node->name(), output));
EXPECT_FALSE(
graph_utils::ContainsGraphNodeWithName(batch_node->name(), output));
EXPECT_TRUE(graph_utils::ContainsNodeWithOp("MapAndBatchDataset", output));
NodeDef map_and_batch_node = output.node(
graph_utils::FindGraphNodeWithOp("MapAndBatchDataset", output));
EXPECT_EQ(map_and_batch_node.input_size(), 5);
EXPECT_EQ(map_and_batch_node.input(0), map_node->input(0));
EXPECT_EQ(map_and_batch_node.input(1), map_node->input(1));
EXPECT_EQ(map_and_batch_node.input(2), batch_node->input(1));
NodeDef num_parallel_calls_node = output.node(
graph_utils::FindGraphNodeWithName(map_and_batch_node.input(3), output));
EXPECT_EQ(num_parallel_calls_node.attr().at("value").tensor().int64_val(0),
1);
EXPECT_EQ(map_and_batch_node.input(4), batch_node->input(2));
EXPECT_TRUE(AreAttrValuesEqual(map_and_batch_node.attr().at("f"),
map_node->attr().at("f")));
EXPECT_TRUE(AreAttrValuesEqual(map_and_batch_node.attr().at("Targuments"),
map_node->attr().at("Targuments")));
EXPECT_TRUE(AreAttrValuesEqual(map_and_batch_node.attr().at("output_shapes"),
batch_node->attr().at("output_shapes")));
EXPECT_TRUE(AreAttrValuesEqual(map_and_batch_node.attr().at("output_types"),
batch_node->attr().at("output_types")));
}
TEST(MapAndBatchFusionTest, FuseParallelMapAndBatchNodesIntoOne) {
GrapplerItem item;
MutableGraphView graph(&item.graph);
NodeDef *start_node = graph_utils::AddScalarConstNode<int64_t>(0, &graph);
NodeDef *stop_node = graph_utils::AddScalarConstNode<int64_t>(10, &graph);
NodeDef *step_node = graph_utils::AddScalarConstNode<int64_t>(1, &graph);
std::vector<string> range_inputs(3);
range_inputs[0] = start_node->name();
range_inputs[1] = stop_node->name();
range_inputs[2] = step_node->name();
std::vector<std::pair<string, AttrValue>> range_attrs;
NodeDef *range_node = graph_utils::AddNode("", "RangeDataset", range_inputs,
range_attrs, &graph);
NodeDef *captured_input_node =
graph_utils::AddScalarConstNode<StringPiece>("hello", &graph);
NodeDef *num_parallel_calls_node =
graph_utils::AddScalarConstNode<int>(2, &graph);
NodeDef *map_node;
{
std::vector<string> map_inputs(3);
map_inputs[0] = range_node->name();
map_inputs[1] = captured_input_node->name();
map_inputs[2] = num_parallel_calls_node->name();
std::vector<std::pair<string, AttrValue>> map_attrs(2);
AttrValue f_attr;
SetAttrValue("f", &f_attr);
map_attrs[0] = std::make_pair("f", f_attr);
AttrValue args_attr;
SetAttrValue("Targuments", &args_attr);
map_attrs[1] = std::make_pair("Targuments", args_attr);
map_node = graph_utils::AddNode("", "ParallelMapDataset", map_inputs,
map_attrs, &graph);
}
NodeDef *batch_size_node =
graph_utils::AddScalarConstNode<int64_t>(5, &graph);
NodeDef *batch_node;
{
std::vector<string> batch_inputs(2);
batch_inputs[0] = map_node->name();
batch_inputs[1] = batch_size_node->name();
std::vector<std::pair<string, AttrValue>> batch_attrs(2);
AttrValue shapes_attr;
SetAttrValue("output_shapes", &shapes_attr);
batch_attrs[0] = std::make_pair("output_shapes", shapes_attr);
AttrValue types_attr;
SetAttrValue("output_types", &types_attr);
batch_attrs[1] = std::make_pair("output_types", types_attr);
batch_node = graph_utils::AddNode("", "BatchDataset", batch_inputs,
batch_attrs, &graph);
}
MapAndBatchFusion optimizer;
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
EXPECT_FALSE(
graph_utils::ContainsGraphNodeWithName(map_node->name(), output));
EXPECT_FALSE(
graph_utils::ContainsGraphNodeWithName(batch_node->name(), output));
EXPECT_TRUE(graph_utils::ContainsNodeWithOp("MapAndBatchDataset", output));
NodeDef map_and_batch_node = output.node(
graph_utils::FindGraphNodeWithOp("MapAndBatchDataset", output));
EXPECT_EQ(map_and_batch_node.input_size(), 5);
EXPECT_EQ(map_and_batch_node.input(0), map_node->input(0));
EXPECT_EQ(map_and_batch_node.input(1), map_node->input(1));
EXPECT_EQ(map_and_batch_node.input(2), batch_node->input(1));
NodeDef num_parallel_calls_node2 = output.node(
graph_utils::FindGraphNodeWithName(map_and_batch_node.input(3), output));
EXPECT_EQ(num_parallel_calls_node2.attr().at("value").tensor().int64_val(0),
2);
NodeDef drop_remainder_node = output.node(
graph_utils::FindGraphNodeWithName(map_and_batch_node.input(4), output));
EXPECT_EQ(drop_remainder_node.attr().at("value").tensor().bool_val(0), false);
EXPECT_TRUE(AreAttrValuesEqual(map_and_batch_node.attr().at("f"),
map_node->attr().at("f")));
EXPECT_TRUE(AreAttrValuesEqual(map_and_batch_node.attr().at("Targuments"),
map_node->attr().at("Targuments")));
EXPECT_TRUE(AreAttrValuesEqual(map_and_batch_node.attr().at("output_shapes"),
batch_node->attr().at("output_shapes")));
EXPECT_TRUE(AreAttrValuesEqual(map_and_batch_node.attr().at("output_types"),
batch_node->attr().at("output_types")));
}
TEST(MapAndBatchFusionTest, FuseParallelMapV2AndBatchNodesIntoOne) {
GrapplerItem item;
MutableGraphView graph(&item.graph);
NodeDef *start_node = graph_utils::AddScalarConstNode<int64_t>(0, &graph);
NodeDef *stop_node = graph_utils::AddScalarConstNode<int64_t>(10, &graph);
NodeDef *step_node = graph_utils::AddScalarConstNode<int64_t>(1, &graph);
std::vector<string> range_inputs(3);
range_inputs[0] = start_node->name();
range_inputs[1] = stop_node->name();
range_inputs[2] = step_node->name();
std::vector<std::pair<string, AttrValue>> range_attrs;
NodeDef *range_node = graph_utils::AddNode("", "RangeDataset", range_inputs,
range_attrs, &graph);
NodeDef *captured_input_node =
graph_utils::AddScalarConstNode<StringPiece>("hello", &graph);
NodeDef *num_parallel_calls_node =
graph_utils::AddScalarConstNode<int64_t>(2, &graph);
NodeDef *map_node;
{
std::vector<string> map_inputs(3);
map_inputs[0] = range_node->name();
map_inputs[1] = captured_input_node->name();
map_inputs[2] = num_parallel_calls_node->name();
std::vector<std::pair<string, AttrValue>> map_attrs(2);
AttrValue f_attr;
SetAttrValue("f", &f_attr);
map_attrs[0] = std::make_pair("f", f_attr);
AttrValue args_attr;
SetAttrValue("Targuments", &args_attr);
map_attrs[1] = std::make_pair("Targuments", args_attr);
map_node = graph_utils::AddNode("", "ParallelMapDatasetV2", map_inputs,
map_attrs, &graph);
}
NodeDef *batch_size_node =
graph_utils::AddScalarConstNode<int64_t>(5, &graph);
NodeDef *batch_node;
{
std::vector<string> batch_inputs(2);
batch_inputs[0] = map_node->name();
batch_inputs[1] = batch_size_node->name();
std::vector<std::pair<string, AttrValue>> batch_attrs(2);
AttrValue shapes_attr;
SetAttrValue("output_shapes", &shapes_attr);
batch_attrs[0] = std::make_pair("output_shapes", shapes_attr);
AttrValue types_attr;
SetAttrValue("output_types", &types_attr);
batch_attrs[1] = std::make_pair("output_types", types_attr);
batch_node = graph_utils::AddNode("", "BatchDataset", batch_inputs,
batch_attrs, &graph);
}
MapAndBatchFusion optimizer;
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
EXPECT_FALSE(
graph_utils::ContainsGraphNodeWithName(map_node->name(), output));
EXPECT_FALSE(
graph_utils::ContainsGraphNodeWithName(batch_node->name(), output));
EXPECT_TRUE(graph_utils::ContainsNodeWithOp("MapAndBatchDataset", output));
NodeDef map_and_batch_node = output.node(
graph_utils::FindGraphNodeWithOp("MapAndBatchDataset", output));
EXPECT_EQ(map_and_batch_node.input_size(), 5);
EXPECT_EQ(map_and_batch_node.input(0), map_node->input(0));
EXPECT_EQ(map_and_batch_node.input(1), map_node->input(1));
EXPECT_EQ(map_and_batch_node.input(2), batch_node->input(1));
NodeDef num_parallel_calls_node2 = output.node(
graph_utils::FindGraphNodeWithName(map_and_batch_node.input(3), output));
EXPECT_EQ(num_parallel_calls_node2.attr().at("value").tensor().int64_val(0),
2);
NodeDef drop_remainder_node = output.node(
graph_utils::FindGraphNodeWithName(map_and_batch_node.input(4), output));
EXPECT_EQ(drop_remainder_node.attr().at("value").tensor().bool_val(0), false);
EXPECT_TRUE(AreAttrValuesEqual(map_and_batch_node.attr().at("f"),
map_node->attr().at("f")));
EXPECT_TRUE(AreAttrValuesEqual(map_and_batch_node.attr().at("Targuments"),
map_node->attr().at("Targuments")));
EXPECT_TRUE(AreAttrValuesEqual(map_and_batch_node.attr().at("output_shapes"),
batch_node->attr().at("output_shapes")));
EXPECT_TRUE(AreAttrValuesEqual(map_and_batch_node.attr().at("output_types"),
batch_node->attr().at("output_types")));
}
TEST(MapAndBatchFusionTest, NoChange) {
GrapplerItem item;
MutableGraphView graph(&item.graph);
NodeDef *start_node = graph_utils::AddScalarConstNode<int64_t>(0, &graph);
NodeDef *stop_node = graph_utils::AddScalarConstNode<int64_t>(10, &graph);
NodeDef *step_node = graph_utils::AddScalarConstNode<int64_t>(1, &graph);
std::vector<string> range_inputs(3);
range_inputs[0] = start_node->name();
range_inputs[1] = stop_node->name();
range_inputs[2] = step_node->name();
std::vector<std::pair<string, AttrValue>> range_attrs;
NodeDef *range_node = graph_utils::AddNode("", "RangeDataset", range_inputs,
range_attrs, &graph);
NodeDef *batch_size_node =
graph_utils::AddScalarConstNode<int64_t>(5, &graph);
std::vector<string> batch_inputs(2);
batch_inputs[0] = range_node->name();
batch_inputs[1] = batch_size_node->name();
std::vector<std::pair<string, AttrValue>> batch_attrs(2);
AttrValue shapes_attr;
SetAttrValue("output_shapes", &shapes_attr);
batch_attrs[0] = std::make_pair("output_shapes", shapes_attr);
AttrValue types_attr;
SetAttrValue("output_types", &types_attr);
batch_attrs[1] = std::make_pair("output_types", types_attr);
graph_utils::AddNode("", "BatchDataset", batch_inputs, batch_attrs, &graph);
MapAndBatchFusion optimizer;
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
EXPECT_TRUE(graph_utils::Compare(*graph.graph(), output));
}
TEST(MapAndBatchFusionTest, NoChange_UnboundedThreadpoolParallelMap) {
GrapplerItem item;
MutableGraphView graph(&item.graph);
NodeDef *start_node = graph_utils::AddScalarConstNode<int64_t>(0, &graph);
NodeDef *stop_node = graph_utils::AddScalarConstNode<int64_t>(10, &graph);
NodeDef *step_node = graph_utils::AddScalarConstNode<int64_t>(1, &graph);
std::vector<string> range_inputs(3);
range_inputs[0] = start_node->name();
range_inputs[1] = stop_node->name();
range_inputs[2] = step_node->name();
std::vector<std::pair<string, AttrValue>> range_attrs;
NodeDef *range_node = graph_utils::AddNode("", "RangeDataset", range_inputs,
range_attrs, &graph);
NodeDef *captured_input_node =
graph_utils::AddScalarConstNode<StringPiece>("hello", &graph);
NodeDef *num_parallel_calls_node =
graph_utils::AddScalarConstNode<int>(2, &graph);
NodeDef *map_node;
{
std::vector<string> map_inputs(3);
map_inputs[0] = range_node->name();
map_inputs[1] = captured_input_node->name();
map_inputs[2] = num_parallel_calls_node->name();
std::vector<std::pair<string, AttrValue>> map_attrs(3);
AttrValue f_attr;
SetAttrValue("f", &f_attr);
map_attrs[0] = std::make_pair("f", f_attr);
AttrValue args_attr;
SetAttrValue("Targuments", &args_attr);
map_attrs[1] = std::make_pair("Targuments", args_attr);
AttrValue use_unbounded_threadpool_attr;
SetAttrValue(true, &use_unbounded_threadpool_attr);
map_attrs[2] = std::make_pair("use_unbounded_threadpool",
use_unbounded_threadpool_attr);
map_node = graph_utils::AddNode("", "ParallelMapDataset", map_inputs,
map_attrs, &graph);
}
NodeDef *batch_size_node =
graph_utils::AddScalarConstNode<int64_t>(5, &graph);
NodeDef *batch_node;
{
std::vector<string> batch_inputs(2);
batch_inputs[0] = map_node->name();
batch_inputs[1] = batch_size_node->name();
std::vector<std::pair<string, AttrValue>> batch_attrs(2);
AttrValue shapes_attr;
SetAttrValue("output_shapes", &shapes_attr);
batch_attrs[0] = std::make_pair("output_shapes", shapes_attr);
AttrValue types_attr;
SetAttrValue("output_types", &types_attr);
batch_attrs[1] = std::make_pair("output_types", types_attr);
batch_node = graph_utils::AddNode("", "BatchDataset", batch_inputs,
batch_attrs, &graph);
}
MapAndBatchFusion optimizer;
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
EXPECT_TRUE(graph_utils::Compare(*graph.graph(), output));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/optimizers/data/map_and_batch_fusion.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/optimizers/data/map_and_batch_fusion_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
749cf212-4dad-46a0-ab63-46bc108d5073 | cpp | tensorflow/tensorflow | quantized_reshape_op | tensorflow/core/kernels/quantized_reshape_op.cc | tensorflow/core/kernels/quantized_reshape_op_test.cc | #include <vector>
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/register_types.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_types.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/kernels/reshape_op.h"
namespace tensorflow {
class QuantizedReshapeOp : public ReshapeOp {
public:
explicit QuantizedReshapeOp(OpKernelConstruction* c) : ReshapeOp(c) {}
void Compute(OpKernelContext* ctx) override {
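    // Perform the plain reshape first; the quantization range is then
    // forwarded unchanged as outputs 1 and 2.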
ReshapeOp::Compute(ctx);
if (!ctx->status().ok()) {
return;
}
const auto& input_min_float_tensor = ctx->input(2);
const auto& input_min_float_shape = input_min_float_tensor.shape();
OP_REQUIRES(ctx,
TensorShapeUtils::IsScalar(input_min_float_shape) ||
(TensorShapeUtils::IsVector(input_min_float_shape) &&
(input_min_float_shape.dim_size(0) == 1)),
errors::InvalidArgument(
"input_min must be a scalar or a vector of 1 element"));
const float input_min_float = input_min_float_tensor.flat<float>()(0);
const auto& input_max_float_tensor = ctx->input(3);
const auto& input_max_float_shape = input_max_float_tensor.shape();
OP_REQUIRES(ctx,
TensorShapeUtils::IsScalar(input_max_float_shape) ||
(TensorShapeUtils::IsVector(input_max_float_shape) &&
(input_max_float_shape.dim_size(0) == 1)),
errors::InvalidArgument(
"input_max must be a scalar or a vector of 1 element"));
const float input_max_float = input_max_float_tensor.flat<float>()(0);
Tensor* output_min = nullptr;
OP_REQUIRES_OK(ctx, ctx->allocate_output(1, TensorShape({}), &output_min));
output_min->flat<float>()(0) = input_min_float;
Tensor* output_max = nullptr;
OP_REQUIRES_OK(ctx, ctx->allocate_output(2, TensorShape({}), &output_max));
output_max->flat<float>()(0) = input_max_float;
}
};
#define REGISTER_CPU_KERNEL(type) \
REGISTER_KERNEL_BUILDER(Name("QuantizedReshape") \
.Device(DEVICE_CPU) \
.HostMemory("shape") \
.TypeConstraint<type>("T"), \
QuantizedReshapeOp)
REGISTER_CPU_KERNEL(::tensorflow::quint8);
REGISTER_CPU_KERNEL(::tensorflow::qint32);
#undef REGISTER_CPU_KERNEL
} | #include <functional>
#include <memory>
#include <vector>
#include "tensorflow/core/common_runtime/kernel_benchmark_testlib.h"
#include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/fake_input.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/kernels/ops_testutil.h"
#include "tensorflow/core/kernels/ops_util.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
namespace tensorflow {
class QuantizedReshapeTest : public OpsTestBase {
protected:
QuantizedReshapeTest() {}
};
TEST_F(QuantizedReshapeTest, Reshape) {
TF_ASSERT_OK(NodeDefBuilder("quantized_reshape", "QuantizedReshape")
.Input(FakeInput(DT_QUINT8))
.Input(FakeInput(DT_INT32))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
Tensor input(DT_QUINT8, {10, 20});
Tensor expected(DT_QUINT8, {5, 10, 4});
for (int i = 0; i < input.shape().num_elements(); ++i) {
input.flat<quint8>()(i) = quint8(i);
expected.flat<quint8>()(i) = quint8(i);
}
AddInputFromArray<quint8>(input.shape(), input.flat<quint8>());
AddInputFromList<int32>({3}, {5, 10, 4});
AddInputFromArray<float>(TensorShape({1}), {-10});
AddInputFromArray<float>(TensorShape({1}), {20});
TF_ASSERT_OK(RunOpKernel());
EXPECT_EQ(-10, GetOutput(1)->flat<float>()(0));
EXPECT_EQ(20, GetOutput(2)->flat<float>()(0));
test::ExpectTensorEqual<quint8>(expected, *GetOutput(0));
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/quantized_reshape_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/quantized_reshape_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
228440bd-1351-4db5-9e28-9f82e36e1b7b | cpp | google/cel-cpp | struct | extensions/protobuf/internal/struct.cc | extensions/protobuf/internal/struct_test.cc | #include "extensions/protobuf/internal/struct.h"
#include <string>
#include <type_traits>
#include <utility>
#include "google/protobuf/struct.pb.h"
#include "absl/base/nullability.h"
#include "absl/base/optimization.h"
#include "absl/functional/overload.h"
#include "absl/log/absl_check.h"
#include "absl/memory/memory.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/cord.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/types/variant.h"
#include "common/json.h"
#include "extensions/protobuf/internal/is_generated_message.h"
#include "extensions/protobuf/internal/is_message_lite.h"
#include "extensions/protobuf/internal/map_reflection.h"
#include "extensions/protobuf/internal/struct_lite.h"
#include "internal/status_macros.h"
#include "google/protobuf/descriptor.h"
#include "google/protobuf/map_field.h"
#include "google/protobuf/message.h"
#include "google/protobuf/reflection.h"
namespace cel::extensions::protobuf_internal {
namespace {
template <typename T>
std::enable_if_t<NotMessageLite<T>, google::protobuf::Message*> UpCastMessage(
T* message) {
return message;
}
template <typename T>
std::enable_if_t<IsMessageLite<T>, google::protobuf::Message*> UpCastMessage(T* message);
absl::StatusOr<absl::Nonnull<const google::protobuf::Descriptor*>> GetDescriptor(
const google::protobuf::Message& message) {
const auto* descriptor = message.GetDescriptor();
if (ABSL_PREDICT_FALSE(descriptor == nullptr)) {
return absl::InternalError(
absl::StrCat(message.GetTypeName(), " missing descriptor"));
}
return descriptor;
}
absl::StatusOr<absl::Nonnull<const google::protobuf::Reflection*>> GetReflection(
const google::protobuf::Message& message) {
const auto* reflection = message.GetReflection();
if (ABSL_PREDICT_FALSE(reflection == nullptr)) {
return absl::InternalError(
absl::StrCat(message.GetTypeName(), " missing reflection"));
}
return reflection;
}
absl::StatusOr<absl::Nonnull<const google::protobuf::FieldDescriptor*>> FindFieldByNumber(
absl::Nonnull<const google::protobuf::Descriptor*> descriptor, int number) {
const auto* field = descriptor->FindFieldByNumber(number);
if (ABSL_PREDICT_FALSE(field == nullptr)) {
return absl::InternalError(
absl::StrCat(descriptor->full_name(),
" missing descriptor for field number: ", number));
}
return field;
}
absl::StatusOr<absl::Nonnull<const google::protobuf::OneofDescriptor*>> FindOneofByName(
absl::Nonnull<const google::protobuf::Descriptor*> descriptor,
absl::string_view name) {
const auto* oneof = descriptor->FindOneofByName(name);
if (ABSL_PREDICT_FALSE(oneof == nullptr)) {
return absl::InternalError(absl::StrCat(
descriptor->full_name(), " missing descriptor for oneof: ", name));
}
return oneof;
}
absl::Status CheckFieldType(absl::Nonnull<const google::protobuf::FieldDescriptor*> field,
google::protobuf::FieldDescriptor::CppType type) {
if (ABSL_PREDICT_FALSE(field->cpp_type() != type)) {
return absl::InternalError(absl::StrCat(
field->full_name(), " has unexpected type: ", field->cpp_type_name()));
}
return absl::OkStatus();
}
absl::Status CheckFieldSingular(
absl::Nonnull<const google::protobuf::FieldDescriptor*> field) {
if (ABSL_PREDICT_FALSE(field->is_repeated() || field->is_map())) {
return absl::InternalError(absl::StrCat(
field->full_name(), " has unexpected cardinality: REPEATED"));
}
return absl::OkStatus();
}
absl::Status CheckFieldRepeated(
absl::Nonnull<const google::protobuf::FieldDescriptor*> field) {
if (ABSL_PREDICT_FALSE(!field->is_repeated() && !field->is_map())) {
return absl::InternalError(absl::StrCat(
field->full_name(), " has unexpected cardinality: SINGULAR"));
}
return absl::OkStatus();
}
absl::Status CheckFieldMap(
absl::Nonnull<const google::protobuf::FieldDescriptor*> field) {
if (ABSL_PREDICT_FALSE(!field->is_map())) {
if (field->is_repeated()) {
return absl::InternalError(
absl::StrCat(field->full_name(),
" has unexpected type: ", field->cpp_type_name()));
} else {
return absl::InternalError(absl::StrCat(
field->full_name(), " has unexpected cardinality: SINGULAR"));
}
}
return absl::OkStatus();
}
absl::Status CheckFieldEnumType(
absl::Nonnull<const google::protobuf::FieldDescriptor*> field,
absl::string_view name) {
CEL_RETURN_IF_ERROR(
CheckFieldType(field, google::protobuf::FieldDescriptor::CPPTYPE_ENUM));
if (ABSL_PREDICT_FALSE(field->enum_type()->full_name() != name)) {
return absl::InternalError(absl::StrCat(
field->full_name(),
" has unexpected type: ", field->enum_type()->full_name()));
}
return absl::OkStatus();
}
absl::Status CheckFieldMessageType(
absl::Nonnull<const google::protobuf::FieldDescriptor*> field,
absl::string_view name) {
CEL_RETURN_IF_ERROR(
CheckFieldType(field, google::protobuf::FieldDescriptor::CPPTYPE_MESSAGE));
if (ABSL_PREDICT_FALSE(field->message_type()->full_name() != name)) {
return absl::InternalError(absl::StrCat(
field->full_name(),
" has unexpected type: ", field->message_type()->full_name()));
}
return absl::OkStatus();
}
}
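// Converts a google.protobuf.Value message to Json via reflection, taking a
// fast path when the message is the generated (non-dynamic) implementation.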
absl::StatusOr<Json> DynamicValueProtoToJson(const google::protobuf::Message& message) {
ABSL_DCHECK_EQ(message.GetTypeName(), "google.protobuf.Value");
CEL_ASSIGN_OR_RETURN(const auto* desc, GetDescriptor(message));
if constexpr (NotMessageLite<google::protobuf::Value>) {
if (IsGeneratedMessage(message)) {
return GeneratedValueProtoToJson(
google::protobuf::DownCastMessage<google::protobuf::Value>(message));
}
}
CEL_ASSIGN_OR_RETURN(const auto* reflection, GetReflection(message));
CEL_ASSIGN_OR_RETURN(const auto* kind_desc, FindOneofByName(desc, "kind"));
const auto* value_desc =
reflection->GetOneofFieldDescriptor(message, kind_desc);
if (value_desc == nullptr) {
return kJsonNull;
}
switch (value_desc->number()) {
case google::protobuf::Value::kNullValueFieldNumber:
CEL_RETURN_IF_ERROR(
CheckFieldEnumType(value_desc, "google.protobuf.NullValue"));
CEL_RETURN_IF_ERROR(CheckFieldSingular(value_desc));
return kJsonNull;
case google::protobuf::Value::kNumberValueFieldNumber:
CEL_RETURN_IF_ERROR(
CheckFieldType(value_desc, google::protobuf::FieldDescriptor::CPPTYPE_DOUBLE));
CEL_RETURN_IF_ERROR(CheckFieldSingular(value_desc));
return reflection->GetDouble(message, value_desc);
case google::protobuf::Value::kStringValueFieldNumber:
CEL_RETURN_IF_ERROR(
CheckFieldType(value_desc, google::protobuf::FieldDescriptor::CPPTYPE_STRING));
CEL_RETURN_IF_ERROR(CheckFieldSingular(value_desc));
return reflection->GetCord(message, value_desc);
case google::protobuf::Value::kBoolValueFieldNumber:
CEL_RETURN_IF_ERROR(
CheckFieldType(value_desc, google::protobuf::FieldDescriptor::CPPTYPE_BOOL));
CEL_RETURN_IF_ERROR(CheckFieldSingular(value_desc));
return reflection->GetBool(message, value_desc);
case google::protobuf::Value::kStructValueFieldNumber:
CEL_RETURN_IF_ERROR(
CheckFieldMessageType(value_desc, "google.protobuf.Struct"));
CEL_RETURN_IF_ERROR(CheckFieldSingular(value_desc));
return DynamicStructProtoToJson(
reflection->GetMessage(message, value_desc));
case google::protobuf::Value::kListValueFieldNumber:
CEL_RETURN_IF_ERROR(
CheckFieldMessageType(value_desc, "google.protobuf.ListValue"));
CEL_RETURN_IF_ERROR(CheckFieldSingular(value_desc));
return DynamicListValueProtoToJson(
reflection->GetMessage(message, value_desc));
default:
return absl::InternalError(
absl::StrCat(value_desc->full_name(),
" has unexpected number: ", value_desc->number()));
}
}
absl::StatusOr<Json> DynamicListValueProtoToJson(
const google::protobuf::Message& message) {
ABSL_DCHECK_EQ(message.GetTypeName(), "google.protobuf.ListValue");
CEL_ASSIGN_OR_RETURN(const auto* desc, GetDescriptor(message));
if constexpr (NotMessageLite<google::protobuf::ListValue>) {
if (IsGeneratedMessage(message)) {
return GeneratedListValueProtoToJson(
google::protobuf::DownCastMessage<google::protobuf::ListValue>(message));
}
}
CEL_ASSIGN_OR_RETURN(const auto* reflection, GetReflection(message));
CEL_ASSIGN_OR_RETURN(
const auto* values_field,
FindFieldByNumber(desc, google::protobuf::ListValue::kValuesFieldNumber));
CEL_RETURN_IF_ERROR(
CheckFieldMessageType(values_field, "google.protobuf.Value"));
CEL_RETURN_IF_ERROR(CheckFieldRepeated(values_field));
const auto& repeated_field_ref =
reflection->GetRepeatedFieldRef<google::protobuf::Message>(message, values_field);
JsonArrayBuilder builder;
builder.reserve(repeated_field_ref.size());
for (const auto& element : repeated_field_ref) {
CEL_ASSIGN_OR_RETURN(auto value, DynamicValueProtoToJson(element));
builder.push_back(std::move(value));
}
return std::move(builder).Build();
}
absl::StatusOr<Json> DynamicStructProtoToJson(const google::protobuf::Message& message) {
ABSL_DCHECK_EQ(message.GetTypeName(), "google.protobuf.Struct");
CEL_ASSIGN_OR_RETURN(const auto* desc, GetDescriptor(message));
if constexpr (NotMessageLite<google::protobuf::Struct>) {
if (IsGeneratedMessage(message)) {
return GeneratedStructProtoToJson(
google::protobuf::DownCastMessage<google::protobuf::Struct>(message));
}
}
CEL_ASSIGN_OR_RETURN(const auto* reflection, GetReflection(message));
CEL_ASSIGN_OR_RETURN(
const auto* fields_field,
FindFieldByNumber(desc, google::protobuf::Struct::kFieldsFieldNumber));
CEL_RETURN_IF_ERROR(CheckFieldMap(fields_field));
CEL_RETURN_IF_ERROR(CheckFieldType(fields_field->message_type()->map_key(),
google::protobuf::FieldDescriptor::CPPTYPE_STRING));
CEL_RETURN_IF_ERROR(CheckFieldMessageType(
fields_field->message_type()->map_value(), "google.protobuf.Value"));
auto map_begin =
protobuf_internal::MapBegin(*reflection, message, *fields_field);
auto map_end = protobuf_internal::MapEnd(*reflection, message, *fields_field);
JsonObjectBuilder builder;
builder.reserve(
protobuf_internal::MapSize(*reflection, message, *fields_field));
for (; map_begin != map_end; ++map_begin) {
CEL_ASSIGN_OR_RETURN(
auto value,
DynamicValueProtoToJson(map_begin.GetValueRef().GetMessageValue()));
builder.insert_or_assign(absl::Cord(map_begin.GetKey().GetStringValue()),
std::move(value));
}
return std::move(builder).Build();
}
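// Converts Json back into a google.protobuf.Value message, dispatching on the
// JSON variant alternative to populate the matching member of the "kind"
// oneof.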
absl::Status DynamicValueProtoFromJson(const Json& json,
google::protobuf::Message& message) {
ABSL_DCHECK_EQ(message.GetTypeName(), "google.protobuf.Value");
CEL_ASSIGN_OR_RETURN(const auto* desc, GetDescriptor(message));
if constexpr (NotMessageLite<google::protobuf::Value>) {
if (IsGeneratedMessage(message)) {
return GeneratedValueProtoFromJson(
json, google::protobuf::DownCastMessage<google::protobuf::Value>(message));
}
}
CEL_ASSIGN_OR_RETURN(const auto* reflection, GetReflection(message));
return absl::visit(
absl::Overload(
[&message, &desc, &reflection](JsonNull) -> absl::Status {
CEL_ASSIGN_OR_RETURN(
const auto* null_value_field,
FindFieldByNumber(
desc, google::protobuf::Value::kNullValueFieldNumber));
CEL_RETURN_IF_ERROR(CheckFieldEnumType(
null_value_field, "google.protobuf.NullValue"));
CEL_RETURN_IF_ERROR(CheckFieldSingular(null_value_field));
reflection->SetEnumValue(&message, null_value_field, 0);
return absl::OkStatus();
},
[&message, &desc, &reflection](JsonBool value) -> absl::Status {
CEL_ASSIGN_OR_RETURN(
const auto* bool_value_field,
FindFieldByNumber(
desc, google::protobuf::Value::kBoolValueFieldNumber));
CEL_RETURN_IF_ERROR(CheckFieldType(
bool_value_field, google::protobuf::FieldDescriptor::CPPTYPE_BOOL));
CEL_RETURN_IF_ERROR(CheckFieldSingular(bool_value_field));
reflection->SetBool(&message, bool_value_field, value);
return absl::OkStatus();
},
[&message, &desc, &reflection](JsonNumber value) -> absl::Status {
CEL_ASSIGN_OR_RETURN(
const auto* number_value_field,
FindFieldByNumber(
desc, google::protobuf::Value::kNumberValueFieldNumber));
CEL_RETURN_IF_ERROR(CheckFieldType(
number_value_field, google::protobuf::FieldDescriptor::CPPTYPE_DOUBLE));
CEL_RETURN_IF_ERROR(CheckFieldSingular(number_value_field));
reflection->SetDouble(&message, number_value_field, value);
return absl::OkStatus();
},
[&message, &desc,
&reflection](const JsonString& value) -> absl::Status {
CEL_ASSIGN_OR_RETURN(
const auto* string_value_field,
FindFieldByNumber(
desc, google::protobuf::Value::kStringValueFieldNumber));
CEL_RETURN_IF_ERROR(CheckFieldType(
string_value_field, google::protobuf::FieldDescriptor::CPPTYPE_STRING));
CEL_RETURN_IF_ERROR(CheckFieldSingular(string_value_field));
reflection->SetString(&message, string_value_field,
static_cast<std::string>(value));
return absl::OkStatus();
},
[&message, &desc,
&reflection](const JsonArray& value) -> absl::Status {
CEL_ASSIGN_OR_RETURN(
const auto* list_value_field,
FindFieldByNumber(
desc, google::protobuf::Value::kListValueFieldNumber));
CEL_RETURN_IF_ERROR(CheckFieldMessageType(
list_value_field, "google.protobuf.ListValue"));
CEL_RETURN_IF_ERROR(CheckFieldSingular(list_value_field));
return DynamicListValueProtoFromJson(
value, *reflection->MutableMessage(&message, list_value_field));
},
[&message, &desc,
&reflection](const JsonObject& value) -> absl::Status {
CEL_ASSIGN_OR_RETURN(
const auto* struct_value_field,
FindFieldByNumber(
desc, google::protobuf::Value::kStructValueFieldNumber));
CEL_RETURN_IF_ERROR(CheckFieldMessageType(
struct_value_field, "google.protobuf.Struct"));
CEL_RETURN_IF_ERROR(CheckFieldSingular(struct_value_field));
return DynamicStructProtoFromJson(
value,
*reflection->MutableMessage(&message, struct_value_field));
}),
json);
}
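// Populates a dynamic `google.protobuf.ListValue` from a JSON array. Each JSON
// element is converted into a scratch `google.protobuf.Value`, which is then
// appended to the repeated `values` field.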
absl::Status DynamicListValueProtoFromJson(const JsonArray& json,
google::protobuf::Message& message) {
ABSL_DCHECK_EQ(message.GetTypeName(), "google.protobuf.ListValue");
CEL_ASSIGN_OR_RETURN(const auto* desc, GetDescriptor(message));
if constexpr (NotMessageLite<google::protobuf::ListValue>) {
if (IsGeneratedMessage(message)) {
return GeneratedListValueProtoFromJson(
json, google::protobuf::DownCastMessage<google::protobuf::ListValue>(message));
}
}
CEL_ASSIGN_OR_RETURN(const auto* reflection, GetReflection(message));
CEL_ASSIGN_OR_RETURN(
const auto* values_field,
FindFieldByNumber(desc, google::protobuf::ListValue::kValuesFieldNumber));
CEL_RETURN_IF_ERROR(
CheckFieldMessageType(values_field, "google.protobuf.Value"));
CEL_RETURN_IF_ERROR(CheckFieldRepeated(values_field));
auto repeated_field_ref =
reflection->GetMutableRepeatedFieldRef<google::protobuf::Message>(&message,
values_field);
repeated_field_ref.Clear();
for (const auto& element : json) {
auto scratch = absl::WrapUnique(repeated_field_ref.NewMessage());
CEL_RETURN_IF_ERROR(DynamicValueProtoFromJson(element, *scratch));
repeated_field_ref.Add(*scratch);
}
return absl::OkStatus();
}
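// Populates a dynamic `google.protobuf.Struct` from a JSON object, inserting
// (or overwriting) one `fields` map entry per JSON member.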
absl::Status DynamicStructProtoFromJson(const JsonObject& json,
google::protobuf::Message& message) {
ABSL_DCHECK_EQ(message.GetTypeName(), "google.protobuf.Struct");
CEL_ASSIGN_OR_RETURN(const auto* desc, GetDescriptor(message));
if constexpr (NotMessageLite<google::protobuf::Struct>) {
if (IsGeneratedMessage(message)) {
return GeneratedStructProtoFromJson(
json, google::protobuf::DownCastMessage<google::protobuf::Struct>(message));
}
}
CEL_ASSIGN_OR_RETURN(const auto* reflection, GetReflection(message));
CEL_ASSIGN_OR_RETURN(
const auto* fields_field,
FindFieldByNumber(desc, google::protobuf::Struct::kFieldsFieldNumber));
CEL_RETURN_IF_ERROR(CheckFieldMap(fields_field));
CEL_RETURN_IF_ERROR(CheckFieldType(fields_field->message_type()->map_key(),
google::protobuf::FieldDescriptor::CPPTYPE_STRING));
CEL_RETURN_IF_ERROR(CheckFieldMessageType(
fields_field->message_type()->map_value(), "google.protobuf.Value"));
for (const auto& entry : json) {
std::string map_key_string = static_cast<std::string>(entry.first);
google::protobuf::MapKey map_key;
map_key.SetStringValue(map_key_string);
google::protobuf::MapValueRef map_value;
protobuf_internal::InsertOrLookupMapValue(
*reflection, &message, *fields_field, map_key, &map_value);
CEL_RETURN_IF_ERROR(DynamicValueProtoFromJson(
entry.second, *map_value.MutableMessageValue()));
}
return absl::OkStatus();
}
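// The DynamicValueProtoSet* and DynamicValueProtoMutable* helpers below mirror
// the generated accessors for `google.protobuf.Value`: each validates the
// target field against the descriptor before writing through reflection, and
// each takes the generated-message fast path when the message is not dynamic.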
absl::Status DynamicValueProtoSetNullValue(google::protobuf::Message* message) {
ABSL_DCHECK_EQ(message->GetTypeName(), "google.protobuf.Value");
CEL_ASSIGN_OR_RETURN(const auto* desc, GetDescriptor(*message));
if constexpr (NotMessageLite<google::protobuf::Value>) {
if (IsGeneratedMessage(*message)) {
GeneratedValueProtoSetNullValue(
google::protobuf::DownCastMessage<google::protobuf::Value>(message));
return absl::OkStatus();
}
}
CEL_ASSIGN_OR_RETURN(const auto* reflection, GetReflection(*message));
CEL_ASSIGN_OR_RETURN(
const auto* null_value_field,
FindFieldByNumber(desc, google::protobuf::Value::kNullValueFieldNumber));
CEL_RETURN_IF_ERROR(
CheckFieldEnumType(null_value_field, "google.protobuf.NullValue"));
CEL_RETURN_IF_ERROR(CheckFieldSingular(null_value_field));
reflection->SetEnumValue(message, null_value_field, 0);
return absl::OkStatus();
}
absl::Status DynamicValueProtoSetBoolValue(bool value,
google::protobuf::Message* message) {
ABSL_DCHECK_EQ(message->GetTypeName(), "google.protobuf.Value");
CEL_ASSIGN_OR_RETURN(const auto* desc, GetDescriptor(*message));
if constexpr (NotMessageLite<google::protobuf::Value>) {
if (IsGeneratedMessage(*message)) {
GeneratedValueProtoSetBoolValue(
value, google::protobuf::DownCastMessage<google::protobuf::Value>(message));
return absl::OkStatus();
}
}
CEL_ASSIGN_OR_RETURN(const auto* reflection, GetReflection(*message));
CEL_ASSIGN_OR_RETURN(
const auto* bool_value_field,
FindFieldByNumber(desc, google::protobuf::Value::kBoolValueFieldNumber));
CEL_RETURN_IF_ERROR(
CheckFieldType(bool_value_field, google::protobuf::FieldDescriptor::CPPTYPE_BOOL));
CEL_RETURN_IF_ERROR(CheckFieldSingular(bool_value_field));
reflection->SetBool(message, bool_value_field, value);
return absl::OkStatus();
}
absl::Status DynamicValueProtoSetNumberValue(double value,
google::protobuf::Message* message) {
ABSL_DCHECK_EQ(message->GetTypeName(), "google.protobuf.Value");
CEL_ASSIGN_OR_RETURN(const auto* desc, GetDescriptor(*message));
if constexpr (NotMessageLite<google::protobuf::Value>) {
if (IsGeneratedMessage(*message)) {
GeneratedValueProtoSetNumberValue(
value, google::protobuf::DownCastMessage<google::protobuf::Value>(message));
return absl::OkStatus();
}
}
CEL_ASSIGN_OR_RETURN(const auto* reflection, GetReflection(*message));
CEL_ASSIGN_OR_RETURN(
const auto* number_value_field,
FindFieldByNumber(desc,
google::protobuf::Value::kNumberValueFieldNumber));
CEL_RETURN_IF_ERROR(CheckFieldType(number_value_field,
google::protobuf::FieldDescriptor::CPPTYPE_DOUBLE));
CEL_RETURN_IF_ERROR(CheckFieldSingular(number_value_field));
reflection->SetDouble(message, number_value_field, value);
return absl::OkStatus();
}
absl::Status DynamicValueProtoSetStringValue(absl::string_view value,
google::protobuf::Message* message) {
ABSL_DCHECK_EQ(message->GetTypeName(), "google.protobuf.Value");
CEL_ASSIGN_OR_RETURN(const auto* desc, GetDescriptor(*message));
if constexpr (NotMessageLite<google::protobuf::Value>) {
if (IsGeneratedMessage(*message)) {
GeneratedValueProtoSetStringValue(
value, google::protobuf::DownCastMessage<google::protobuf::Value>(message));
return absl::OkStatus();
}
}
CEL_ASSIGN_OR_RETURN(const auto* reflection, GetReflection(*message));
CEL_ASSIGN_OR_RETURN(
const auto* string_value_field,
FindFieldByNumber(desc,
google::protobuf::Value::kStringValueFieldNumber));
CEL_RETURN_IF_ERROR(CheckFieldType(string_value_field,
google::protobuf::FieldDescriptor::CPPTYPE_STRING));
CEL_RETURN_IF_ERROR(CheckFieldSingular(string_value_field));
reflection->SetString(message, string_value_field,
static_cast<std::string>(value));
return absl::OkStatus();
}
absl::StatusOr<google::protobuf::Message*> DynamicValueProtoMutableListValue(
google::protobuf::Message* message) {
ABSL_DCHECK_EQ(message->GetTypeName(), "google.protobuf.Value");
CEL_ASSIGN_OR_RETURN(const auto* desc, GetDescriptor(*message));
if constexpr (NotMessageLite<google::protobuf::Value> &&
NotMessageLite<google::protobuf::ListValue>) {
if (IsGeneratedMessage(*message)) {
return UpCastMessage(GeneratedValueProtoMutableListValue(
google::protobuf::DownCastMessage<google::protobuf::Value>(message)));
}
}
CEL_ASSIGN_OR_RETURN(const auto* reflection, GetReflection(*message));
CEL_ASSIGN_OR_RETURN(
const auto* list_value_field,
FindFieldByNumber(desc, google::protobuf::Value::kListValueFieldNumber));
CEL_RETURN_IF_ERROR(
CheckFieldMessageType(list_value_field, "google.protobuf.ListValue"));
CEL_RETURN_IF_ERROR(CheckFieldSingular(list_value_field));
return reflection->MutableMessage(message, list_value_field);
}
absl::StatusOr<google::protobuf::Message*> DynamicValueProtoMutableStructValue(
google::protobuf::Message* message) {
ABSL_DCHECK_EQ(message->GetTypeName(), "google.protobuf.Value");
CEL_ASSIGN_OR_RETURN(const auto* desc, GetDescriptor(*message));
if constexpr (NotMessageLite<google::protobuf::Value> &&
NotMessageLite<google::protobuf::Struct>) {
if (IsGeneratedMessage(*message)) {
return UpCastMessage(GeneratedValueProtoMutableStructValue(
google::protobuf::DownCastMessage<google::protobuf::Value>(message)));
}
}
CEL_ASSIGN_OR_RETURN(const auto* reflection, GetReflection(*message));
CEL_ASSIGN_OR_RETURN(
const auto* struct_value_field,
FindFieldByNumber(desc,
google::protobuf::Value::kStructValueFieldNumber));
CEL_RETURN_IF_ERROR(
CheckFieldMessageType(struct_value_field, "google.protobuf.Struct"));
CEL_RETURN_IF_ERROR(CheckFieldSingular(struct_value_field));
return reflection->MutableMessage(message, struct_value_field);
}
absl::StatusOr<google::protobuf::Message*> DynamicListValueProtoAddElement(
google::protobuf::Message* message) {
ABSL_DCHECK_EQ(message->GetTypeName(), "google.protobuf.ListValue");
CEL_ASSIGN_OR_RETURN(const auto* desc, GetDescriptor(*message));
if constexpr (NotMessageLite<google::protobuf::ListValue>) {
if (IsGeneratedMessage(*message)) {
return UpCastMessage(GeneratedListValueProtoAddElement(
google::protobuf::DownCastMessage<google::protobuf::ListValue>(message)));
}
}
CEL_ASSIGN_OR_RETURN(const auto* reflection, GetReflection(*message));
CEL_ASSIGN_OR_RETURN(
const auto* element_field,
FindFieldByNumber(desc, google::protobuf::ListValue::kValuesFieldNumber));
CEL_RETURN_IF_ERROR(
CheckFieldMessageType(element_field, "google.protobuf.Value"));
CEL_RETURN_IF_ERROR(CheckFieldRepeated(element_field));
return reflection->AddMessage(message, element_field);
}
absl::StatusOr<google::protobuf::Message*> DynamicStructValueProtoAddField(
absl::string_view name, google::protobuf::Message* message) {
ABSL_DCHECK_EQ(message->GetTypeName(), "google.protobuf.Struct");
CEL_ASSIGN_OR_RETURN(const auto* desc, GetDescriptor(*message));
if constexpr (NotMessageLite<google::protobuf::Struct>) {
if (IsGeneratedMessage(*message)) {
return UpCastMessage(GeneratedStructValueProtoAddField(
name, google::protobuf::DownCastMessage<google::protobuf::Struct>(message)));
}
}
CEL_ASSIGN_OR_RETURN(const auto* reflection, GetReflection(*message));
CEL_ASSIGN_OR_RETURN(
const auto* map_field,
FindFieldByNumber(desc, google::protobuf::Struct::kFieldsFieldNumber));
CEL_RETURN_IF_ERROR(CheckFieldMap(map_field));
CEL_RETURN_IF_ERROR(CheckFieldType(map_field->message_type()->map_key(),
google::protobuf::FieldDescriptor::CPPTYPE_STRING));
CEL_RETURN_IF_ERROR(CheckFieldMessageType(
map_field->message_type()->map_value(), "google.protobuf.Value"));
std::string key_string = std::string(name);
google::protobuf::MapKey key;
key.SetStringValue(key_string);
google::protobuf::MapValueRef value;
InsertOrLookupMapValue(*reflection, message, *map_field, key, &value);
return value.MutableMessageValue();
}
} | #include "extensions/protobuf/internal/struct.h"
#include <memory>
#include "google/protobuf/struct.pb.h"
#include "google/protobuf/descriptor.pb.h"
#include "absl/log/absl_check.h"
#include "absl/memory/memory.h"
#include "common/json.h"
#include "extensions/protobuf/internal/struct_lite.h"
#include "internal/testing.h"
#include "testutil/util.h"
#include "google/protobuf/descriptor.h"
#include "google/protobuf/descriptor_database.h"
#include "google/protobuf/dynamic_message.h"
#include "google/protobuf/text_format.h"
namespace cel::extensions::protobuf_internal {
namespace {
using ::absl_testing::IsOkAndHolds;
using ::google::api::expr::testutil::EqualsProto;
using ::testing::IsEmpty;
using ::testing::VariantWith;
template <typename T>
T ParseTextOrDie(absl::string_view text) {
T proto;
ABSL_CHECK(google::protobuf::TextFormat::ParseFromString(text, &proto));
return proto;
}
std::unique_ptr<google::protobuf::Message> ParseTextOrDie(
absl::string_view text, const google::protobuf::Message& prototype) {
auto message = absl::WrapUnique(prototype.New());
ABSL_CHECK(google::protobuf::TextFormat::ParseFromString(text, message.get()));
return message;
}
TEST(Value, Generated) {
google::protobuf::Value proto;
EXPECT_THAT(GeneratedValueProtoToJson(proto),
IsOkAndHolds(VariantWith<JsonNull>(kJsonNull)));
proto.set_null_value(google::protobuf::NULL_VALUE);
EXPECT_THAT(GeneratedValueProtoToJson(proto),
IsOkAndHolds(VariantWith<JsonNull>(kJsonNull)));
proto.Clear();
EXPECT_OK(GeneratedValueProtoFromJson(Json(), proto));
EXPECT_THAT(proto, EqualsProto(ParseTextOrDie<google::protobuf::Value>(
R"pb(null_value: 0)pb")));
proto.set_bool_value(true);
EXPECT_THAT(GeneratedValueProtoToJson(proto),
IsOkAndHolds(VariantWith<JsonBool>(true)));
proto.Clear();
EXPECT_OK(GeneratedValueProtoFromJson(Json(true), proto));
EXPECT_THAT(proto, EqualsProto(ParseTextOrDie<google::protobuf::Value>(
R"pb(bool_value: true)pb")));
proto.set_number_value(1.0);
EXPECT_THAT(GeneratedValueProtoToJson(proto),
IsOkAndHolds(VariantWith<JsonNumber>(1.0)));
proto.Clear();
EXPECT_OK(GeneratedValueProtoFromJson(Json(1.0), proto));
EXPECT_THAT(proto, EqualsProto(ParseTextOrDie<google::protobuf::Value>(
R"pb(number_value: 1.0)pb")));
proto.set_string_value("foo");
EXPECT_THAT(GeneratedValueProtoToJson(proto),
IsOkAndHolds(VariantWith<JsonString>(JsonString("foo"))));
proto.Clear();
EXPECT_OK(GeneratedValueProtoFromJson(Json(JsonString("foo")), proto));
EXPECT_THAT(proto, EqualsProto(ParseTextOrDie<google::protobuf::Value>(
R"pb(string_value: "foo")pb")));
proto.mutable_list_value();
EXPECT_THAT(GeneratedValueProtoToJson(proto),
IsOkAndHolds(VariantWith<JsonArray>(IsEmpty())));
proto.Clear();
EXPECT_OK(GeneratedValueProtoFromJson(Json(JsonArray()), proto));
EXPECT_THAT(proto, EqualsProto(ParseTextOrDie<google::protobuf::Value>(
R"pb(list_value: {})pb")));
proto.mutable_struct_value();
EXPECT_THAT(GeneratedValueProtoToJson(proto),
IsOkAndHolds(VariantWith<JsonObject>(IsEmpty())));
proto.Clear();
EXPECT_OK(GeneratedValueProtoFromJson(Json(JsonObject()), proto));
EXPECT_THAT(proto, EqualsProto(ParseTextOrDie<google::protobuf::Value>(
R"pb(struct_value: {})pb")));
}
TEST(Value, Dynamic) {
google::protobuf::SimpleDescriptorDatabase database;
{
google::protobuf::FileDescriptorProto fd;
google::protobuf::Value::descriptor()->file()->CopyTo(&fd);
ASSERT_TRUE(database.Add(fd));
}
google::protobuf::DescriptorPool pool(&database);
pool.AllowUnknownDependencies();
google::protobuf::DynamicMessageFactory factory(&pool);
factory.SetDelegateToGeneratedFactory(false);
std::unique_ptr<google::protobuf::Message> proto = absl::WrapUnique(
factory.GetPrototype(pool.FindMessageTypeByName("google.protobuf.Value"))
->New());
const auto* reflection = proto->GetReflection();
const auto* descriptor = proto->GetDescriptor();
EXPECT_THAT(DynamicValueProtoToJson(*proto),
IsOkAndHolds(VariantWith<JsonNull>(kJsonNull)));
reflection->SetEnumValue(proto.get(),
descriptor->FindFieldByName("null_value"), 0);
EXPECT_THAT(DynamicValueProtoToJson(*proto),
IsOkAndHolds(VariantWith<JsonNull>(kJsonNull)));
proto->Clear();
EXPECT_OK(DynamicValueProtoFromJson(Json(), *proto));
EXPECT_THAT(*proto,
EqualsProto(*ParseTextOrDie(R"pb(null_value: 0)pb", *proto)));
reflection->SetBool(proto.get(), descriptor->FindFieldByName("bool_value"),
true);
EXPECT_THAT(DynamicValueProtoToJson(*proto),
IsOkAndHolds(VariantWith<JsonBool>(true)));
proto->Clear();
EXPECT_OK(DynamicValueProtoFromJson(Json(true), *proto));
EXPECT_THAT(*proto,
EqualsProto(*ParseTextOrDie(R"pb(bool_value: true)pb", *proto)));
reflection->SetDouble(proto.get(),
descriptor->FindFieldByName("number_value"), 1.0);
EXPECT_THAT(DynamicValueProtoToJson(*proto),
IsOkAndHolds(VariantWith<JsonNumber>(1.0)));
proto->Clear();
EXPECT_OK(DynamicValueProtoFromJson(Json(1.0), *proto));
EXPECT_THAT(*proto,
EqualsProto(*ParseTextOrDie(R"pb(number_value: 1.0)pb", *proto)));
reflection->SetString(proto.get(),
descriptor->FindFieldByName("string_value"), "foo");
EXPECT_THAT(DynamicValueProtoToJson(*proto),
IsOkAndHolds(VariantWith<JsonString>(JsonString("foo"))));
proto->Clear();
EXPECT_OK(DynamicValueProtoFromJson(Json(JsonString("foo")), *proto));
EXPECT_THAT(*proto, EqualsProto(*ParseTextOrDie(R"pb(string_value: "foo")pb",
*proto)));
reflection->MutableMessage(
proto.get(), descriptor->FindFieldByName("list_value"), &factory);
EXPECT_THAT(DynamicValueProtoToJson(*proto),
IsOkAndHolds(VariantWith<JsonArray>(IsEmpty())));
proto->Clear();
EXPECT_OK(DynamicValueProtoFromJson(Json(JsonArray()), *proto));
EXPECT_THAT(*proto,
EqualsProto(*ParseTextOrDie(R"pb(list_value: {})pb", *proto)));
reflection->MutableMessage(
proto.get(), descriptor->FindFieldByName("struct_value"), &factory);
EXPECT_THAT(DynamicValueProtoToJson(*proto),
IsOkAndHolds(VariantWith<JsonObject>(IsEmpty())));
EXPECT_OK(DynamicValueProtoFromJson(Json(JsonObject()), *proto));
EXPECT_THAT(*proto,
EqualsProto(*ParseTextOrDie(R"pb(struct_value: {})pb", *proto)));
}
}
} | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/extensions/protobuf/internal/struct.cc | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/extensions/protobuf/internal/struct_test.cc | 4552db5798fb0853b131b783d8875794334fae7f |
0b50e74e-6281-418c-afaa-2bc9ca84c7fd | cpp | google/quiche | quic_crypto_client_config | quiche/quic/core/crypto/quic_crypto_client_config.cc | quiche/quic/core/crypto/quic_crypto_client_config_test.cc | #include "quiche/quic/core/crypto/quic_crypto_client_config.h"
#include <algorithm>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/base/macros.h"
#include "absl/memory/memory.h"
#include "absl/strings/match.h"
#include "absl/strings/string_view.h"
#include "openssl/ssl.h"
#include "quiche/quic/core/crypto/cert_compressor.h"
#include "quiche/quic/core/crypto/chacha20_poly1305_encrypter.h"
#include "quiche/quic/core/crypto/crypto_framer.h"
#include "quiche/quic/core/crypto/crypto_protocol.h"
#include "quiche/quic/core/crypto/crypto_utils.h"
#include "quiche/quic/core/crypto/curve25519_key_exchange.h"
#include "quiche/quic/core/crypto/key_exchange.h"
#include "quiche/quic/core/crypto/p256_key_exchange.h"
#include "quiche/quic/core/crypto/proof_verifier.h"
#include "quiche/quic/core/crypto/quic_encrypter.h"
#include "quiche/quic/core/crypto/quic_random.h"
#include "quiche/quic/core/crypto/tls_client_connection.h"
#include "quiche/quic/core/quic_connection_id.h"
#include "quiche/quic/core/quic_types.h"
#include "quiche/quic/core/quic_utils.h"
#include "quiche/quic/platform/api/quic_bug_tracker.h"
#include "quiche/quic/platform/api/quic_client_stats.h"
#include "quiche/quic/platform/api/quic_hostname_utils.h"
#include "quiche/quic/platform/api/quic_logging.h"
namespace quic {
namespace {
void RecordInchoateClientHelloReason(
QuicCryptoClientConfig::CachedState::ServerConfigState state) {
QUIC_CLIENT_HISTOGRAM_ENUM(
"QuicInchoateClientHelloReason", state,
QuicCryptoClientConfig::CachedState::SERVER_CONFIG_COUNT, "");
}
void RecordDiskCacheServerConfigState(
QuicCryptoClientConfig::CachedState::ServerConfigState state) {
QUIC_CLIENT_HISTOGRAM_ENUM(
"QuicServerInfo.DiskCacheState", state,
QuicCryptoClientConfig::CachedState::SERVER_CONFIG_COUNT, "");
}
}
QuicCryptoClientConfig::QuicCryptoClientConfig(
std::unique_ptr<ProofVerifier> proof_verifier)
: QuicCryptoClientConfig(std::move(proof_verifier), nullptr) {}
QuicCryptoClientConfig::QuicCryptoClientConfig(
std::unique_ptr<ProofVerifier> proof_verifier,
std::shared_ptr<SessionCache> session_cache)
: proof_verifier_(std::move(proof_verifier)),
session_cache_(std::move(session_cache)),
ssl_ctx_(TlsClientConnection::CreateSslCtx(
!GetQuicFlag(quic_disable_client_tls_zero_rtt))) {
QUICHE_DCHECK(proof_verifier_.get());
SetDefaults();
}
QuicCryptoClientConfig::~QuicCryptoClientConfig() {}
QuicCryptoClientConfig::CachedState::CachedState()
: server_config_valid_(false),
expiration_time_(QuicWallTime::Zero()),
generation_counter_(0) {}
QuicCryptoClientConfig::CachedState::~CachedState() {}
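// A cached entry is usable for a full (non-inchoate) client hello only if it
// holds a server config whose proof has been validated and which has not yet
// expired. Each failure reason is recorded to a histogram.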
bool QuicCryptoClientConfig::CachedState::IsComplete(QuicWallTime now) const {
if (server_config_.empty()) {
RecordInchoateClientHelloReason(SERVER_CONFIG_EMPTY);
return false;
}
if (!server_config_valid_) {
RecordInchoateClientHelloReason(SERVER_CONFIG_INVALID);
return false;
}
const CryptoHandshakeMessage* scfg = GetServerConfig();
if (!scfg) {
RecordInchoateClientHelloReason(SERVER_CONFIG_CORRUPTED);
QUICHE_DCHECK(false);
return false;
}
if (now.IsBefore(expiration_time_)) {
return true;
}
QUIC_CLIENT_HISTOGRAM_TIMES(
"QuicClientHelloServerConfig.InvalidDuration",
QuicTime::Delta::FromSeconds(now.ToUNIXSeconds() -
expiration_time_.ToUNIXSeconds()),
QuicTime::Delta::FromSeconds(60),
QuicTime::Delta::FromSeconds(20 * 24 * 3600),
50, "");
RecordInchoateClientHelloReason(SERVER_CONFIG_EXPIRED);
return false;
}
bool QuicCryptoClientConfig::CachedState::IsEmpty() const {
return server_config_.empty();
}
const CryptoHandshakeMessage*
QuicCryptoClientConfig::CachedState::GetServerConfig() const {
if (server_config_.empty()) {
return nullptr;
}
if (!scfg_) {
scfg_ = CryptoFramer::ParseMessage(server_config_);
QUICHE_DCHECK(scfg_.get());
}
return scfg_.get();
}
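// Installs a new server config (SCFG). If the bytes differ from the cached
// ones, the message is re-parsed and the previously validated proof is
// invalidated. The expiry comes from |expiry_time| when non-zero, otherwise
// from the SCFG's EXPY tag, and an already-expired config is rejected.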
QuicCryptoClientConfig::CachedState::ServerConfigState
QuicCryptoClientConfig::CachedState::SetServerConfig(
absl::string_view server_config, QuicWallTime now, QuicWallTime expiry_time,
std::string* error_details) {
const bool matches_existing = server_config == server_config_;
std::unique_ptr<CryptoHandshakeMessage> new_scfg_storage;
const CryptoHandshakeMessage* new_scfg;
if (!matches_existing) {
new_scfg_storage = CryptoFramer::ParseMessage(server_config);
new_scfg = new_scfg_storage.get();
} else {
new_scfg = GetServerConfig();
}
if (!new_scfg) {
*error_details = "SCFG invalid";
return SERVER_CONFIG_INVALID;
}
if (expiry_time.IsZero()) {
uint64_t expiry_seconds;
if (new_scfg->GetUint64(kEXPY, &expiry_seconds) != QUIC_NO_ERROR) {
*error_details = "SCFG missing EXPY";
return SERVER_CONFIG_INVALID_EXPIRY;
}
expiration_time_ = QuicWallTime::FromUNIXSeconds(expiry_seconds);
} else {
expiration_time_ = expiry_time;
}
if (now.IsAfter(expiration_time_)) {
*error_details = "SCFG has expired";
return SERVER_CONFIG_EXPIRED;
}
if (!matches_existing) {
server_config_ = std::string(server_config);
SetProofInvalid();
scfg_ = std::move(new_scfg_storage);
}
return SERVER_CONFIG_VALID;
}
void QuicCryptoClientConfig::CachedState::InvalidateServerConfig() {
server_config_.clear();
scfg_.reset();
SetProofInvalid();
}
void QuicCryptoClientConfig::CachedState::SetProof(
const std::vector<std::string>& certs, absl::string_view cert_sct,
absl::string_view chlo_hash, absl::string_view signature) {
bool has_changed = signature != server_config_sig_ ||
chlo_hash != chlo_hash_ || certs_.size() != certs.size();
if (!has_changed) {
for (size_t i = 0; i < certs_.size(); i++) {
if (certs_[i] != certs[i]) {
has_changed = true;
break;
}
}
}
if (!has_changed) {
return;
}
SetProofInvalid();
certs_ = certs;
cert_sct_ = std::string(cert_sct);
chlo_hash_ = std::string(chlo_hash);
server_config_sig_ = std::string(signature);
}
void QuicCryptoClientConfig::CachedState::Clear() {
server_config_.clear();
source_address_token_.clear();
certs_.clear();
cert_sct_.clear();
chlo_hash_.clear();
server_config_sig_.clear();
server_config_valid_ = false;
proof_verify_details_.reset();
scfg_.reset();
++generation_counter_;
}
void QuicCryptoClientConfig::CachedState::ClearProof() {
SetProofInvalid();
certs_.clear();
cert_sct_.clear();
chlo_hash_.clear();
server_config_sig_.clear();
}
void QuicCryptoClientConfig::CachedState::SetProofValid() {
server_config_valid_ = true;
}
void QuicCryptoClientConfig::CachedState::SetProofInvalid() {
server_config_valid_ = false;
++generation_counter_;
}
bool QuicCryptoClientConfig::CachedState::Initialize(
absl::string_view server_config, absl::string_view source_address_token,
const std::vector<std::string>& certs, const std::string& cert_sct,
absl::string_view chlo_hash, absl::string_view signature, QuicWallTime now,
QuicWallTime expiration_time) {
QUICHE_DCHECK(server_config_.empty());
if (server_config.empty()) {
RecordDiskCacheServerConfigState(SERVER_CONFIG_EMPTY);
return false;
}
std::string error_details;
ServerConfigState state =
SetServerConfig(server_config, now, expiration_time, &error_details);
RecordDiskCacheServerConfigState(state);
if (state != SERVER_CONFIG_VALID) {
QUIC_DVLOG(1) << "SetServerConfig failed with " << error_details;
return false;
}
chlo_hash_.assign(chlo_hash.data(), chlo_hash.size());
server_config_sig_.assign(signature.data(), signature.size());
source_address_token_.assign(source_address_token.data(),
source_address_token.size());
certs_ = certs;
cert_sct_ = cert_sct;
return true;
}
const std::string& QuicCryptoClientConfig::CachedState::server_config() const {
return server_config_;
}
const std::string& QuicCryptoClientConfig::CachedState::source_address_token()
const {
return source_address_token_;
}
const std::vector<std::string>& QuicCryptoClientConfig::CachedState::certs()
const {
return certs_;
}
const std::string& QuicCryptoClientConfig::CachedState::cert_sct() const {
return cert_sct_;
}
const std::string& QuicCryptoClientConfig::CachedState::chlo_hash() const {
return chlo_hash_;
}
const std::string& QuicCryptoClientConfig::CachedState::signature() const {
return server_config_sig_;
}
bool QuicCryptoClientConfig::CachedState::proof_valid() const {
return server_config_valid_;
}
uint64_t QuicCryptoClientConfig::CachedState::generation_counter() const {
return generation_counter_;
}
const ProofVerifyDetails*
QuicCryptoClientConfig::CachedState::proof_verify_details() const {
return proof_verify_details_.get();
}
void QuicCryptoClientConfig::CachedState::set_source_address_token(
absl::string_view token) {
source_address_token_ = std::string(token);
}
void QuicCryptoClientConfig::CachedState::set_cert_sct(
absl::string_view cert_sct) {
cert_sct_ = std::string(cert_sct);
}
void QuicCryptoClientConfig::CachedState::SetProofVerifyDetails(
ProofVerifyDetails* details) {
proof_verify_details_.reset(details);
}
void QuicCryptoClientConfig::CachedState::InitializeFrom(
const QuicCryptoClientConfig::CachedState& other) {
QUICHE_DCHECK(server_config_.empty());
QUICHE_DCHECK(!server_config_valid_);
server_config_ = other.server_config_;
source_address_token_ = other.source_address_token_;
certs_ = other.certs_;
cert_sct_ = other.cert_sct_;
chlo_hash_ = other.chlo_hash_;
server_config_sig_ = other.server_config_sig_;
server_config_valid_ = other.server_config_valid_;
expiration_time_ = other.expiration_time_;
if (other.proof_verify_details_ != nullptr) {
proof_verify_details_.reset(other.proof_verify_details_->Clone());
}
++generation_counter_;
}
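// Default algorithm preferences: Curve25519 ahead of P-256 for key exchange,
// and AES-GCM ahead of ChaCha20-Poly1305 only when AES hardware acceleration
// is available.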
void QuicCryptoClientConfig::SetDefaults() {
kexs = {kC255, kP256};
if (EVP_has_aes_hardware() == 1) {
aead = {kAESG, kCC20};
} else {
aead = {kCC20, kAESG};
}
}
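// Returns the cached state for |server_id|, creating one (possibly seeded from
// a matching canonical server's state) if none exists yet.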
QuicCryptoClientConfig::CachedState* QuicCryptoClientConfig::LookupOrCreate(
const QuicServerId& server_id) {
auto it = cached_states_.find(server_id);
if (it != cached_states_.end()) {
return it->second.get();
}
CachedState* cached = new CachedState;
cached_states_.insert(std::make_pair(server_id, absl::WrapUnique(cached)));
bool cache_populated = PopulateFromCanonicalConfig(server_id, cached);
QUIC_CLIENT_HISTOGRAM_BOOL(
"QuicCryptoClientConfig.PopulatedFromCanonicalConfig", cache_populated,
"");
return cached;
}
void QuicCryptoClientConfig::ClearCachedStates(const ServerIdFilter& filter) {
for (auto it = cached_states_.begin(); it != cached_states_.end(); ++it) {
if (filter.Matches(it->first)) it->second->Clear();
}
}
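// Builds an inchoate (minimal) CHLO: SNI, version, optional user agent and
// ALPN, plus any cached SCID and source-address token. When an X.509 proof is
// demanded, it also adds a fresh proof nonce and FNV-1a hashes of cached
// certificates so the server can compress its chain against them.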
void QuicCryptoClientConfig::FillInchoateClientHello(
const QuicServerId& server_id, const ParsedQuicVersion preferred_version,
const CachedState* cached, QuicRandom* rand, bool demand_x509_proof,
quiche::QuicheReferenceCountedPointer<QuicCryptoNegotiatedParameters>
out_params,
CryptoHandshakeMessage* out) const {
out->set_tag(kCHLO);
out->set_minimum_size(1);
if (QuicHostnameUtils::IsValidSNI(server_id.host())) {
out->SetStringPiece(kSNI, server_id.host());
}
out->SetVersion(kVER, preferred_version);
if (!user_agent_id_.empty()) {
out->SetStringPiece(kUAID, user_agent_id_);
}
if (!alpn_.empty()) {
out->SetStringPiece(kALPN, alpn_);
}
const CryptoHandshakeMessage* scfg = cached->GetServerConfig();
if (scfg != nullptr) {
absl::string_view scid;
if (scfg->GetStringPiece(kSCID, &scid)) {
out->SetStringPiece(kSCID, scid);
}
}
if (!cached->source_address_token().empty()) {
out->SetStringPiece(kSourceAddressTokenTag, cached->source_address_token());
}
if (!demand_x509_proof) {
return;
}
char proof_nonce[32];
rand->RandBytes(proof_nonce, ABSL_ARRAYSIZE(proof_nonce));
out->SetStringPiece(
kNONP, absl::string_view(proof_nonce, ABSL_ARRAYSIZE(proof_nonce)));
out->SetVector(kPDMD, QuicTagVector{kX509});
out->SetStringPiece(kCertificateSCTTag, "");
const std::vector<std::string>& certs = cached->certs();
out_params->cached_certs = certs;
if (!certs.empty()) {
std::vector<uint64_t> hashes;
hashes.reserve(certs.size());
for (auto i = certs.begin(); i != certs.end(); ++i) {
hashes.push_back(QuicUtils::FNV1a_64_Hash(*i));
}
out->SetVector(kCCRT, hashes);
}
}
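// Builds a full CHLO from a complete cached state: picks mutually supported
// AEAD and key-exchange algorithms from the SCFG, runs the client side of the
// key exchange against the server's public value, and derives the initial
// (non-forward-secure) crypters from the premaster secret and an HKDF input
// built from the connection ID, the serialized CHLO, the server config, and
// the leaf certificate.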
QuicErrorCode QuicCryptoClientConfig::FillClientHello(
const QuicServerId& server_id, QuicConnectionId connection_id,
const ParsedQuicVersion preferred_version,
const ParsedQuicVersion actual_version, const CachedState* cached,
QuicWallTime now, QuicRandom* rand,
quiche::QuicheReferenceCountedPointer<QuicCryptoNegotiatedParameters>
out_params,
CryptoHandshakeMessage* out, std::string* error_details) const {
QUICHE_DCHECK(error_details != nullptr);
QUIC_BUG_IF(quic_bug_12943_2,
!QuicUtils::IsConnectionIdValidForVersion(
connection_id, preferred_version.transport_version))
<< "FillClientHello: attempted to use connection ID " << connection_id
<< " which is invalid with version " << preferred_version;
  FillInchoateClientHello(server_id, preferred_version, cached, rand,
                          /*demand_x509_proof=*/true, out_params, out);
out->set_minimum_size(1);
const CryptoHandshakeMessage* scfg = cached->GetServerConfig();
if (!scfg) {
*error_details = "Handshake not ready";
return QUIC_CRYPTO_INTERNAL_ERROR;
}
absl::string_view scid;
if (!scfg->GetStringPiece(kSCID, &scid)) {
*error_details = "SCFG missing SCID";
return QUIC_INVALID_CRYPTO_MESSAGE_PARAMETER;
}
out->SetStringPiece(kSCID, scid);
out->SetStringPiece(kCertificateSCTTag, "");
QuicTagVector their_aeads;
QuicTagVector their_key_exchanges;
if (scfg->GetTaglist(kAEAD, &their_aeads) != QUIC_NO_ERROR ||
scfg->GetTaglist(kKEXS, &their_key_exchanges) != QUIC_NO_ERROR) {
*error_details = "Missing AEAD or KEXS";
return QUIC_INVALID_CRYPTO_MESSAGE_PARAMETER;
}
size_t key_exchange_index;
if (!FindMutualQuicTag(aead, their_aeads, &out_params->aead, nullptr) ||
!FindMutualQuicTag(kexs, their_key_exchanges, &out_params->key_exchange,
&key_exchange_index)) {
*error_details = "Unsupported AEAD or KEXS";
return QUIC_CRYPTO_NO_SUPPORT;
}
out->SetVector(kAEAD, QuicTagVector{out_params->aead});
out->SetVector(kKEXS, QuicTagVector{out_params->key_exchange});
absl::string_view public_value;
if (scfg->GetNthValue24(kPUBS, key_exchange_index, &public_value) !=
QUIC_NO_ERROR) {
*error_details = "Missing public value";
return QUIC_INVALID_CRYPTO_MESSAGE_PARAMETER;
}
absl::string_view orbit;
if (!scfg->GetStringPiece(kORBT, &orbit) || orbit.size() != kOrbitSize) {
*error_details = "SCFG missing OBIT";
return QUIC_CRYPTO_MESSAGE_PARAMETER_NOT_FOUND;
}
CryptoUtils::GenerateNonce(now, rand, orbit, &out_params->client_nonce);
out->SetStringPiece(kNONC, out_params->client_nonce);
if (!out_params->server_nonce.empty()) {
out->SetStringPiece(kServerNonceTag, out_params->server_nonce);
}
switch (out_params->key_exchange) {
case kC255:
out_params->client_key_exchange = Curve25519KeyExchange::New(
Curve25519KeyExchange::NewPrivateKey(rand));
break;
case kP256:
out_params->client_key_exchange =
P256KeyExchange::New(P256KeyExchange::NewPrivateKey());
break;
default:
QUICHE_DCHECK(false);
*error_details = "Configured to support an unknown key exchange";
return QUIC_CRYPTO_INTERNAL_ERROR;
}
if (!out_params->client_key_exchange->CalculateSharedKeySync(
public_value, &out_params->initial_premaster_secret)) {
*error_details = "Key exchange failure";
return QUIC_INVALID_CRYPTO_MESSAGE_PARAMETER;
}
out->SetStringPiece(kPUBS, out_params->client_key_exchange->public_value());
const std::vector<std::string>& certs = cached->certs();
if (certs.empty()) {
*error_details = "No certs to calculate XLCT";
return QUIC_CRYPTO_INTERNAL_ERROR;
}
out->SetValue(kXLCT, CryptoUtils::ComputeLeafCertHash(certs[0]));
out_params->hkdf_input_suffix.clear();
out_params->hkdf_input_suffix.append(connection_id.data(),
connection_id.length());
const QuicData& client_hello_serialized = out->GetSerialized();
out_params->hkdf_input_suffix.append(client_hello_serialized.data(),
client_hello_serialized.length());
out_params->hkdf_input_suffix.append(cached->server_config());
if (certs.empty()) {
*error_details = "No certs found to include in KDF";
return QUIC_CRYPTO_INTERNAL_ERROR;
}
out_params->hkdf_input_suffix.append(certs[0]);
std::string hkdf_input;
const size_t label_len = strlen(QuicCryptoConfig::kInitialLabel) + 1;
hkdf_input.reserve(label_len + out_params->hkdf_input_suffix.size());
hkdf_input.append(QuicCryptoConfig::kInitialLabel, label_len);
hkdf_input.append(out_params->hkdf_input_suffix);
std::string* subkey_secret = &out_params->initial_subkey_secret;
if (!CryptoUtils::DeriveKeys(
actual_version, out_params->initial_premaster_secret,
out_params->aead, out_params->client_nonce, out_params->server_nonce,
pre_shared_key_, hkdf_input, Perspective::IS_CLIENT,
CryptoUtils::Diversification::Pending(),
&out_params->initial_crypters, subkey_secret)) {
*error_details = "Symmetric key setup failed";
return QUIC_CRYPTO_SYMMETRIC_KEY_SETUP_FAILED;
}
return QUIC_NO_ERROR;
}
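// Extracts and caches the SCFG, source-address token, and certificate chain
// plus proof from a server message (REJ or SCUP). A server-supplied STTL is
// clamped to one week, and a proof and certificate chain must be present
// together or not at all.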
QuicErrorCode QuicCryptoClientConfig::CacheNewServerConfig(
const CryptoHandshakeMessage& message, QuicWallTime now,
    QuicTransportVersion /*version*/, absl::string_view chlo_hash,
const std::vector<std::string>& cached_certs, CachedState* cached,
std::string* error_details) {
QUICHE_DCHECK(error_details != nullptr);
absl::string_view scfg;
if (!message.GetStringPiece(kSCFG, &scfg)) {
*error_details = "Missing SCFG";
return QUIC_CRYPTO_MESSAGE_PARAMETER_NOT_FOUND;
}
QuicWallTime expiration_time = QuicWallTime::Zero();
uint64_t expiry_seconds;
if (message.GetUint64(kSTTL, &expiry_seconds) == QUIC_NO_ERROR) {
expiration_time = now.Add(QuicTime::Delta::FromSeconds(
std::min(expiry_seconds, kNumSecondsPerWeek)));
}
CachedState::ServerConfigState state =
cached->SetServerConfig(scfg, now, expiration_time, error_details);
if (state == CachedState::SERVER_CONFIG_EXPIRED) {
return QUIC_CRYPTO_SERVER_CONFIG_EXPIRED;
}
if (state != CachedState::SERVER_CONFIG_VALID) {
return QUIC_INVALID_CRYPTO_MESSAGE_PARAMETER;
}
absl::string_view token;
if (message.GetStringPiece(kSourceAddressTokenTag, &token)) {
cached->set_source_address_token(token);
}
absl::string_view proof, cert_bytes, cert_sct;
bool has_proof = message.GetStringPiece(kPROF, &proof);
bool has_cert = message.GetStringPiece(kCertificateTag, &cert_bytes);
if (has_proof && has_cert) {
std::vector<std::string> certs;
if (!CertCompressor::DecompressChain(cert_bytes, cached_certs, &certs)) {
*error_details = "Certificate data invalid";
return QUIC_INVALID_CRYPTO_MESSAGE_PARAMETER;
}
message.GetStringPiece(kCertificateSCTTag, &cert_sct);
cached->SetProof(certs, cert_sct, chlo_hash, proof);
} else {
cached->ClearProof();
if (has_proof && !has_cert) {
*error_details = "Certificate missing";
return QUIC_INVALID_CRYPTO_MESSAGE_PARAMETER;
}
if (!has_proof && has_cert) {
*error_details = "Proof missing";
return QUIC_INVALID_CRYPTO_MESSAGE_PARAMETER;
}
}
return QUIC_NO_ERROR;
}
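// Handles a REJ message: caches whatever server config and proof material it
// carries and remembers the server nonce for use in the next CHLO.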
QuicErrorCode QuicCryptoClientConfig::ProcessRejection(
const CryptoHandshakeMessage& rej, QuicWallTime now,
const QuicTransportVersion version, absl::string_view chlo_hash,
CachedState* cached,
quiche::QuicheReferenceCountedPointer<QuicCryptoNegotiatedParameters>
out_params,
std::string* error_details) {
QUICHE_DCHECK(error_details != nullptr);
if (rej.tag() != kREJ) {
*error_details = "Message is not REJ";
return QUIC_CRYPTO_INTERNAL_ERROR;
}
QuicErrorCode error =
CacheNewServerConfig(rej, now, version, chlo_hash,
out_params->cached_certs, cached, error_details);
if (error != QUIC_NO_ERROR) {
return error;
}
absl::string_view nonce;
if (rej.GetStringPiece(kServerNonceTag, &nonce)) {
out_params->server_nonce = std::string(nonce);
}
return QUIC_NO_ERROR;
}
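// Handles an SHLO: checks the server's version list for a downgrade, then
// completes the forward-secure key exchange using the server's ephemeral
// public value and derives the forward-secure crypters.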
QuicErrorCode QuicCryptoClientConfig::ProcessServerHello(
const CryptoHandshakeMessage& server_hello,
    QuicConnectionId /*connection_id*/, ParsedQuicVersion version,
const ParsedQuicVersionVector& negotiated_versions, CachedState* cached,
quiche::QuicheReferenceCountedPointer<QuicCryptoNegotiatedParameters>
out_params,
std::string* error_details) {
QUICHE_DCHECK(error_details != nullptr);
QuicErrorCode valid = CryptoUtils::ValidateServerHello(
server_hello, negotiated_versions, error_details);
if (valid != QUIC_NO_ERROR) {
return valid;
}
absl::string_view token;
if (server_hello.GetStringPiece(kSourceAddressTokenTag, &token)) {
cached->set_source_address_token(token);
}
absl::string_view shlo_nonce;
if (!server_hello.GetStringPiece(kServerNonceTag, &shlo_nonce)) {
*error_details = "server hello missing server nonce";
return QUIC_INVALID_CRYPTO_MESSAGE_PARAMETER;
}
absl::string_view public_value;
if (!server_hello.GetStringPiece(kPUBS, &public_value)) {
*error_details = "server hello missing forward secure public value";
return QUIC_INVALID_CRYPTO_MESSAGE_PARAMETER;
}
if (!out_params->client_key_exchange->CalculateSharedKeySync(
public_value, &out_params->forward_secure_premaster_secret)) {
*error_details = "Key exchange failure";
return QUIC_INVALID_CRYPTO_MESSAGE_PARAMETER;
}
std::string hkdf_input;
const size_t label_len = strlen(QuicCryptoConfig::kForwardSecureLabel) + 1;
hkdf_input.reserve(label_len + out_params->hkdf_input_suffix.size());
hkdf_input.append(QuicCryptoConfig::kForwardSecureLabel, label_len);
hkdf_input.append(out_params->hkdf_input_suffix);
if (!CryptoUtils::DeriveKeys(
version, out_params->forward_secure_premaster_secret,
out_params->aead, out_params->client_nonce,
shlo_nonce.empty() ? out_params->server_nonce : shlo_nonce,
pre_shared_key_, hkdf_input, Perspective::IS_CLIENT,
CryptoUtils::Diversification::Never(),
&out_params->forward_secure_crypters, &out_params->subkey_secret)) {
*error_details = "Symmetric key setup failed";
return QUIC_CRYPTO_SYMMETRIC_KEY_SETUP_FAILED;
}
return QUIC_NO_ERROR;
}
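// Handles an SCUP message, which delivers a refreshed server config
// mid-connection.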
QuicErrorCode QuicCryptoClientConfig::ProcessServerConfigUpdate(
const CryptoHandshakeMessage& server_config_update, QuicWallTime now,
const QuicTransportVersion version, absl::string_view chlo_hash,
CachedState* cached,
quiche::QuicheReferenceCountedPointer<QuicCryptoNegotiatedParameters>
out_params,
std::string* error_details) {
QUICHE_DCHECK(error_details != nullptr);
if (server_config_update.tag() != kSCUP) {
*error_details = "ServerConfigUpdate must have kSCUP tag.";
return QUIC_INVALID_CRYPTO_MESSAGE_TYPE;
}
return CacheNewServerConfig(server_config_update, now, version, chlo_hash,
out_params->cached_certs, cached, error_details);
}
ProofVerifier* QuicCryptoClientConfig::proof_verifier() const {
return proof_verifier_.get();
}
SessionCache* QuicCryptoClientConfig::session_cache() const {
return session_cache_.get();
}
void QuicCryptoClientConfig::set_session_cache(
std::shared_ptr<SessionCache> session_cache) {
session_cache_ = std::move(session_cache);
}
ClientProofSource* QuicCryptoClientConfig::proof_source() const {
return proof_source_.get();
}
void QuicCryptoClientConfig::set_proof_source(
std::unique_ptr<ClientProofSource> proof_source) {
proof_source_ = std::move(proof_source);
}
SSL_CTX* QuicCryptoClientConfig::ssl_ctx() const { return ssl_ctx_.get(); }
void QuicCryptoClientConfig::InitializeFrom(
const QuicServerId& server_id, const QuicServerId& canonical_server_id,
QuicCryptoClientConfig* canonical_crypto_config) {
CachedState* canonical_cached =
canonical_crypto_config->LookupOrCreate(canonical_server_id);
if (!canonical_cached->proof_valid()) {
return;
}
CachedState* cached = LookupOrCreate(server_id);
cached->InitializeFrom(*canonical_cached);
}
void QuicCryptoClientConfig::AddCanonicalSuffix(const std::string& suffix) {
canonical_suffixes_.push_back(suffix);
}
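// If |server_id|'s host ends with a configured canonical suffix, seed |cached|
// from the canonical server's state (when its proof is valid) so the first
// connection to this host can skip an inchoate handshake. Returns true if the
// entry was populated.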
bool QuicCryptoClientConfig::PopulateFromCanonicalConfig(
const QuicServerId& server_id, CachedState* cached) {
QUICHE_DCHECK(cached->IsEmpty());
size_t i = 0;
for (; i < canonical_suffixes_.size(); ++i) {
if (absl::EndsWithIgnoreCase(server_id.host(), canonical_suffixes_[i])) {
break;
}
}
if (i == canonical_suffixes_.size()) {
return false;
}
QuicServerId suffix_server_id(canonical_suffixes_[i], server_id.port());
auto it = canonical_server_map_.lower_bound(suffix_server_id);
if (it == canonical_server_map_.end() || it->first != suffix_server_id) {
canonical_server_map_.insert(
it, std::make_pair(std::move(suffix_server_id), std::move(server_id)));
return false;
}
const QuicServerId& canonical_server_id = it->second;
CachedState* canonical_state = cached_states_[canonical_server_id].get();
if (!canonical_state->proof_valid()) {
return false;
}
it->second = server_id;
cached->InitializeFrom(*canonical_state);
return true;
}
} | #include "quiche/quic/core/crypto/quic_crypto_client_config.h"
#include <string>
#include <vector>
#include "absl/strings/string_view.h"
#include "quiche/quic/core/crypto/proof_verifier.h"
#include "quiche/quic/core/quic_server_id.h"
#include "quiche/quic/core/quic_types.h"
#include "quiche/quic/core/quic_utils.h"
#include "quiche/quic/platform/api/quic_expect_bug.h"
#include "quiche/quic/platform/api/quic_test.h"
#include "quiche/quic/test_tools/crypto_test_utils.h"
#include "quiche/quic/test_tools/mock_random.h"
#include "quiche/quic/test_tools/quic_test_utils.h"
using testing::StartsWith;
namespace quic {
namespace test {
namespace {
class TestProofVerifyDetails : public ProofVerifyDetails {
~TestProofVerifyDetails() override {}
ProofVerifyDetails* Clone() const override {
return new TestProofVerifyDetails;
}
};
class OneServerIdFilter : public QuicCryptoClientConfig::ServerIdFilter {
public:
explicit OneServerIdFilter(const QuicServerId* server_id)
: server_id_(*server_id) {}
bool Matches(const QuicServerId& server_id) const override {
return server_id == server_id_;
}
private:
const QuicServerId server_id_;
};
class AllServerIdsFilter : public QuicCryptoClientConfig::ServerIdFilter {
public:
  bool Matches(const QuicServerId& /*server_id*/) const override {
return true;
}
};
}
class QuicCryptoClientConfigTest : public QuicTest {};
TEST_F(QuicCryptoClientConfigTest, CachedState_IsEmpty) {
QuicCryptoClientConfig::CachedState state;
EXPECT_TRUE(state.IsEmpty());
}
TEST_F(QuicCryptoClientConfigTest, CachedState_IsComplete) {
QuicCryptoClientConfig::CachedState state;
EXPECT_FALSE(state.IsComplete(QuicWallTime::FromUNIXSeconds(0)));
}
TEST_F(QuicCryptoClientConfigTest, CachedState_GenerationCounter) {
QuicCryptoClientConfig::CachedState state;
EXPECT_EQ(0u, state.generation_counter());
state.SetProofInvalid();
EXPECT_EQ(1u, state.generation_counter());
}
TEST_F(QuicCryptoClientConfigTest, CachedState_SetProofVerifyDetails) {
QuicCryptoClientConfig::CachedState state;
EXPECT_TRUE(state.proof_verify_details() == nullptr);
ProofVerifyDetails* details = new TestProofVerifyDetails;
state.SetProofVerifyDetails(details);
EXPECT_EQ(details, state.proof_verify_details());
}
TEST_F(QuicCryptoClientConfigTest, CachedState_InitializeFrom) {
QuicCryptoClientConfig::CachedState state;
QuicCryptoClientConfig::CachedState other;
state.set_source_address_token("TOKEN");
other.InitializeFrom(state);
EXPECT_EQ(state.server_config(), other.server_config());
EXPECT_EQ(state.source_address_token(), other.source_address_token());
EXPECT_EQ(state.certs(), other.certs());
EXPECT_EQ(1u, other.generation_counter());
}
TEST_F(QuicCryptoClientConfigTest, InchoateChlo) {
QuicCryptoClientConfig::CachedState state;
QuicCryptoClientConfig config(crypto_test_utils::ProofVerifierForTesting());
config.set_user_agent_id("quic-tester");
config.set_alpn("hq");
quiche::QuicheReferenceCountedPointer<QuicCryptoNegotiatedParameters> params(
new QuicCryptoNegotiatedParameters);
CryptoHandshakeMessage msg;
QuicServerId server_id("www.google.com", 443);
MockRandom rand;
  config.FillInchoateClientHello(server_id, QuicVersionMax(), &state, &rand,
                                 /*demand_x509_proof=*/true, params, &msg);
QuicVersionLabel cver;
EXPECT_THAT(msg.GetVersionLabel(kVER, &cver), IsQuicNoError());
EXPECT_EQ(CreateQuicVersionLabel(QuicVersionMax()), cver);
absl::string_view proof_nonce;
EXPECT_TRUE(msg.GetStringPiece(kNONP, &proof_nonce));
EXPECT_EQ(std::string(32, 'r'), proof_nonce);
absl::string_view user_agent_id;
EXPECT_TRUE(msg.GetStringPiece(kUAID, &user_agent_id));
EXPECT_EQ("quic-tester", user_agent_id);
absl::string_view alpn;
EXPECT_TRUE(msg.GetStringPiece(kALPN, &alpn));
EXPECT_EQ("hq", alpn);
EXPECT_EQ(msg.minimum_size(), 1u);
}
TEST_F(QuicCryptoClientConfigTest, InchoateChloIsNotPadded) {
QuicCryptoClientConfig::CachedState state;
QuicCryptoClientConfig config(crypto_test_utils::ProofVerifierForTesting());
config.set_pad_inchoate_hello(false);
config.set_user_agent_id("quic-tester");
config.set_alpn("hq");
quiche::QuicheReferenceCountedPointer<QuicCryptoNegotiatedParameters> params(
new QuicCryptoNegotiatedParameters);
CryptoHandshakeMessage msg;
QuicServerId server_id("www.google.com", 443);
MockRandom rand;
  config.FillInchoateClientHello(server_id, QuicVersionMax(), &state, &rand,
                                 /*demand_x509_proof=*/true, params, &msg);
EXPECT_EQ(msg.minimum_size(), 1u);
}
TEST_F(QuicCryptoClientConfigTest, PreferAesGcm) {
QuicCryptoClientConfig config(crypto_test_utils::ProofVerifierForTesting());
if (EVP_has_aes_hardware() == 1) {
EXPECT_EQ(kAESG, config.aead[0]);
} else {
EXPECT_EQ(kCC20, config.aead[0]);
}
}
TEST_F(QuicCryptoClientConfigTest, InchoateChloSecure) {
QuicCryptoClientConfig::CachedState state;
QuicCryptoClientConfig config(crypto_test_utils::ProofVerifierForTesting());
quiche::QuicheReferenceCountedPointer<QuicCryptoNegotiatedParameters> params(
new QuicCryptoNegotiatedParameters);
CryptoHandshakeMessage msg;
QuicServerId server_id("www.google.com", 443);
MockRandom rand;
  config.FillInchoateClientHello(server_id, QuicVersionMax(), &state, &rand,
                                 /*demand_x509_proof=*/true, params, &msg);
QuicTag pdmd;
EXPECT_THAT(msg.GetUint32(kPDMD, &pdmd), IsQuicNoError());
EXPECT_EQ(kX509, pdmd);
absl::string_view scid;
EXPECT_FALSE(msg.GetStringPiece(kSCID, &scid));
}
TEST_F(QuicCryptoClientConfigTest, InchoateChloSecureWithSCIDNoEXPY) {
QuicCryptoClientConfig::CachedState state;
CryptoHandshakeMessage scfg;
scfg.set_tag(kSCFG);
scfg.SetStringPiece(kSCID, "12345678");
std::string details;
QuicWallTime now = QuicWallTime::FromUNIXSeconds(1);
QuicWallTime expiry = QuicWallTime::FromUNIXSeconds(2);
state.SetServerConfig(scfg.GetSerialized().AsStringPiece(), now, expiry,
&details);
EXPECT_FALSE(state.IsEmpty());
QuicCryptoClientConfig config(crypto_test_utils::ProofVerifierForTesting());
quiche::QuicheReferenceCountedPointer<QuicCryptoNegotiatedParameters> params(
new QuicCryptoNegotiatedParameters);
CryptoHandshakeMessage msg;
QuicServerId server_id("www.google.com", 443);
MockRandom rand;
  config.FillInchoateClientHello(server_id, QuicVersionMax(), &state, &rand,
                                 /*demand_x509_proof=*/true, params, &msg);
absl::string_view scid;
EXPECT_TRUE(msg.GetStringPiece(kSCID, &scid));
EXPECT_EQ("12345678", scid);
}
TEST_F(QuicCryptoClientConfigTest, InchoateChloSecureWithSCID) {
QuicCryptoClientConfig::CachedState state;
CryptoHandshakeMessage scfg;
scfg.set_tag(kSCFG);
uint64_t future = 1;
scfg.SetValue(kEXPY, future);
scfg.SetStringPiece(kSCID, "12345678");
std::string details;
state.SetServerConfig(scfg.GetSerialized().AsStringPiece(),
QuicWallTime::FromUNIXSeconds(1),
QuicWallTime::FromUNIXSeconds(0), &details);
EXPECT_FALSE(state.IsEmpty());
QuicCryptoClientConfig config(crypto_test_utils::ProofVerifierForTesting());
quiche::QuicheReferenceCountedPointer<QuicCryptoNegotiatedParameters> params(
new QuicCryptoNegotiatedParameters);
CryptoHandshakeMessage msg;
QuicServerId server_id("www.google.com", 443);
MockRandom rand;
  config.FillInchoateClientHello(server_id, QuicVersionMax(), &state, &rand,
                                 /*demand_x509_proof=*/true, params, &msg);
absl::string_view scid;
EXPECT_TRUE(msg.GetStringPiece(kSCID, &scid));
EXPECT_EQ("12345678", scid);
}
TEST_F(QuicCryptoClientConfigTest, FillClientHello) {
QuicCryptoClientConfig::CachedState state;
QuicCryptoClientConfig config(crypto_test_utils::ProofVerifierForTesting());
quiche::QuicheReferenceCountedPointer<QuicCryptoNegotiatedParameters> params(
new QuicCryptoNegotiatedParameters);
QuicConnectionId kConnectionId = TestConnectionId(1234);
std::string error_details;
MockRandom rand;
CryptoHandshakeMessage chlo;
QuicServerId server_id("www.google.com", 443);
config.FillClientHello(server_id, kConnectionId, QuicVersionMax(),
QuicVersionMax(), &state, QuicWallTime::Zero(), &rand,
params, &chlo, &error_details);
QuicVersionLabel cver;
EXPECT_THAT(chlo.GetVersionLabel(kVER, &cver), IsQuicNoError());
EXPECT_EQ(CreateQuicVersionLabel(QuicVersionMax()), cver);
}
TEST_F(QuicCryptoClientConfigTest, FillClientHelloNoPadding) {
QuicCryptoClientConfig::CachedState state;
QuicCryptoClientConfig config(crypto_test_utils::ProofVerifierForTesting());
config.set_pad_full_hello(false);
quiche::QuicheReferenceCountedPointer<QuicCryptoNegotiatedParameters> params(
new QuicCryptoNegotiatedParameters);
QuicConnectionId kConnectionId = TestConnectionId(1234);
std::string error_details;
MockRandom rand;
CryptoHandshakeMessage chlo;
QuicServerId server_id("www.google.com", 443);
config.FillClientHello(server_id, kConnectionId, QuicVersionMax(),
QuicVersionMax(), &state, QuicWallTime::Zero(), &rand,
params, &chlo, &error_details);
QuicVersionLabel cver;
EXPECT_THAT(chlo.GetVersionLabel(kVER, &cver), IsQuicNoError());
EXPECT_EQ(CreateQuicVersionLabel(QuicVersionMax()), cver);
EXPECT_EQ(chlo.minimum_size(), 1u);
}
TEST_F(QuicCryptoClientConfigTest, ProcessServerDowngradeAttack) {
ParsedQuicVersionVector supported_versions = AllSupportedVersions();
if (supported_versions.size() == 1) {
return;
}
ParsedQuicVersionVector supported_version_vector;
for (size_t i = supported_versions.size(); i > 0; --i) {
supported_version_vector.push_back(supported_versions[i - 1]);
}
CryptoHandshakeMessage msg;
msg.set_tag(kSHLO);
msg.SetVersionVector(kVER, supported_version_vector);
QuicCryptoClientConfig::CachedState cached;
quiche::QuicheReferenceCountedPointer<QuicCryptoNegotiatedParameters>
out_params(new QuicCryptoNegotiatedParameters);
std::string error;
QuicCryptoClientConfig config(crypto_test_utils::ProofVerifierForTesting());
EXPECT_THAT(config.ProcessServerHello(
msg, EmptyQuicConnectionId(), supported_versions.front(),
supported_versions, &cached, out_params, &error),
IsError(QUIC_VERSION_NEGOTIATION_MISMATCH));
EXPECT_THAT(error, StartsWith("Downgrade attack detected: ServerVersions"));
}
TEST_F(QuicCryptoClientConfigTest, InitializeFrom) {
QuicCryptoClientConfig config(crypto_test_utils::ProofVerifierForTesting());
QuicServerId canonical_server_id("www.google.com", 443);
QuicCryptoClientConfig::CachedState* state =
config.LookupOrCreate(canonical_server_id);
state->set_source_address_token("TOKEN");
state->SetProofValid();
QuicServerId other_server_id("mail.google.com", 443);
config.InitializeFrom(other_server_id, canonical_server_id, &config);
QuicCryptoClientConfig::CachedState* other =
config.LookupOrCreate(other_server_id);
EXPECT_EQ(state->server_config(), other->server_config());
EXPECT_EQ(state->source_address_token(), other->source_address_token());
EXPECT_EQ(state->certs(), other->certs());
EXPECT_EQ(1u, other->generation_counter());
}
TEST_F(QuicCryptoClientConfigTest, Canonical) {
QuicCryptoClientConfig config(crypto_test_utils::ProofVerifierForTesting());
config.AddCanonicalSuffix(".google.com");
QuicServerId canonical_id1("www.google.com", 443);
QuicServerId canonical_id2("mail.google.com", 443);
QuicCryptoClientConfig::CachedState* state =
config.LookupOrCreate(canonical_id1);
state->set_source_address_token("TOKEN");
state->SetProofValid();
QuicCryptoClientConfig::CachedState* other =
config.LookupOrCreate(canonical_id2);
EXPECT_TRUE(state->IsEmpty());
EXPECT_EQ(state->server_config(), other->server_config());
EXPECT_EQ(state->source_address_token(), other->source_address_token());
EXPECT_EQ(state->certs(), other->certs());
EXPECT_EQ(1u, other->generation_counter());
QuicServerId different_id("mail.google.org", 443);
EXPECT_TRUE(config.LookupOrCreate(different_id)->IsEmpty());
}
TEST_F(QuicCryptoClientConfigTest, CanonicalNotUsedIfNotValid) {
QuicCryptoClientConfig config(crypto_test_utils::ProofVerifierForTesting());
config.AddCanonicalSuffix(".google.com");
QuicServerId canonical_id1("www.google.com", 443);
QuicServerId canonical_id2("mail.google.com", 443);
QuicCryptoClientConfig::CachedState* state =
config.LookupOrCreate(canonical_id1);
state->set_source_address_token("TOKEN");
EXPECT_TRUE(config.LookupOrCreate(canonical_id2)->IsEmpty());
}
TEST_F(QuicCryptoClientConfigTest, ClearCachedStates) {
QuicCryptoClientConfig config(crypto_test_utils::ProofVerifierForTesting());
struct TestCase {
TestCase(const std::string& host, QuicCryptoClientConfig* config)
: server_id(host, 443), state(config->LookupOrCreate(server_id)) {
CryptoHandshakeMessage scfg;
scfg.set_tag(kSCFG);
uint64_t future = 1;
scfg.SetValue(kEXPY, future);
scfg.SetStringPiece(kSCID, "12345678");
std::string details;
state->SetServerConfig(scfg.GetSerialized().AsStringPiece(),
QuicWallTime::FromUNIXSeconds(0),
QuicWallTime::FromUNIXSeconds(future), &details);
std::vector<std::string> certs(1);
certs[0] = "Hello Cert for " + host;
state->SetProof(certs, "cert_sct", "chlo_hash", "signature");
state->set_source_address_token("TOKEN");
state->SetProofValid();
EXPECT_EQ(2u, state->generation_counter());
}
QuicServerId server_id;
QuicCryptoClientConfig::CachedState* state;
} test_cases[] = {TestCase("www.google.com", &config),
TestCase("www.example.com", &config)};
for (const TestCase& test_case : test_cases) {
QuicCryptoClientConfig::CachedState* other =
config.LookupOrCreate(test_case.server_id);
EXPECT_EQ(test_case.state, other);
EXPECT_EQ(2u, other->generation_counter());
}
OneServerIdFilter google_com_filter(&test_cases[0].server_id);
config.ClearCachedStates(google_com_filter);
QuicCryptoClientConfig::CachedState* cleared_cache =
config.LookupOrCreate(test_cases[0].server_id);
EXPECT_EQ(test_cases[0].state, cleared_cache);
EXPECT_FALSE(cleared_cache->proof_valid());
EXPECT_TRUE(cleared_cache->server_config().empty());
EXPECT_TRUE(cleared_cache->certs().empty());
EXPECT_TRUE(cleared_cache->cert_sct().empty());
EXPECT_TRUE(cleared_cache->signature().empty());
EXPECT_EQ(3u, cleared_cache->generation_counter());
QuicCryptoClientConfig::CachedState* existing_cache =
config.LookupOrCreate(test_cases[1].server_id);
EXPECT_EQ(test_cases[1].state, existing_cache);
EXPECT_TRUE(existing_cache->proof_valid());
EXPECT_FALSE(existing_cache->server_config().empty());
EXPECT_FALSE(existing_cache->certs().empty());
EXPECT_FALSE(existing_cache->cert_sct().empty());
EXPECT_FALSE(existing_cache->signature().empty());
EXPECT_EQ(2u, existing_cache->generation_counter());
AllServerIdsFilter all_server_ids;
config.ClearCachedStates(all_server_ids);
cleared_cache = config.LookupOrCreate(test_cases[1].server_id);
EXPECT_EQ(test_cases[1].state, cleared_cache);
EXPECT_FALSE(cleared_cache->proof_valid());
EXPECT_TRUE(cleared_cache->server_config().empty());
EXPECT_TRUE(cleared_cache->certs().empty());
EXPECT_TRUE(cleared_cache->cert_sct().empty());
EXPECT_TRUE(cleared_cache->signature().empty());
EXPECT_EQ(3u, cleared_cache->generation_counter());
}
TEST_F(QuicCryptoClientConfigTest, ProcessReject) {
CryptoHandshakeMessage rej;
crypto_test_utils::FillInDummyReject(&rej);
QuicCryptoClientConfig::CachedState cached;
quiche::QuicheReferenceCountedPointer<QuicCryptoNegotiatedParameters>
out_params(new QuicCryptoNegotiatedParameters);
std::string error;
QuicCryptoClientConfig config(crypto_test_utils::ProofVerifierForTesting());
EXPECT_THAT(
config.ProcessRejection(
rej, QuicWallTime::FromUNIXSeconds(0),
AllSupportedVersionsWithQuicCrypto().front().transport_version, "",
&cached, out_params, &error),
IsQuicNoError());
}
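// A REJ advertising a kSTTL of three weeks is accepted, but the cached
// server config's lifetime is capped at one week: the state is complete
// just short of a week out, and incomplete at or beyond it.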
TEST_F(QuicCryptoClientConfigTest, ProcessRejectWithLongTTL) {
CryptoHandshakeMessage rej;
crypto_test_utils::FillInDummyReject(&rej);
QuicTime::Delta one_week = QuicTime::Delta::FromSeconds(kNumSecondsPerWeek);
int64_t long_ttl = 3 * one_week.ToSeconds();
rej.SetValue(kSTTL, long_ttl);
QuicCryptoClientConfig::CachedState cached;
quiche::QuicheReferenceCountedPointer<QuicCryptoNegotiatedParameters>
out_params(new QuicCryptoNegotiatedParameters);
std::string error;
QuicCryptoClientConfig config(crypto_test_utils::ProofVerifierForTesting());
EXPECT_THAT(
config.ProcessRejection(
rej, QuicWallTime::FromUNIXSeconds(0),
AllSupportedVersionsWithQuicCrypto().front().transport_version, "",
&cached, out_params, &error),
IsQuicNoError());
cached.SetProofValid();
EXPECT_FALSE(cached.IsComplete(QuicWallTime::FromUNIXSeconds(long_ttl)));
EXPECT_FALSE(
cached.IsComplete(QuicWallTime::FromUNIXSeconds(one_week.ToSeconds())));
EXPECT_TRUE(cached.IsComplete(
QuicWallTime::FromUNIXSeconds(one_week.ToSeconds() - 1)));
}
TEST_F(QuicCryptoClientConfigTest, ServerNonceInSHLO) {
CryptoHandshakeMessage msg;
msg.set_tag(kSHLO);
ParsedQuicVersionVector supported_versions;
ParsedQuicVersion version = AllSupportedVersions().front();
supported_versions.push_back(version);
msg.SetVersionVector(kVER, supported_versions);
QuicCryptoClientConfig config(crypto_test_utils::ProofVerifierForTesting());
QuicCryptoClientConfig::CachedState cached;
quiche::QuicheReferenceCountedPointer<QuicCryptoNegotiatedParameters>
out_params(new QuicCryptoNegotiatedParameters);
std::string error_details;
EXPECT_THAT(config.ProcessServerHello(msg, EmptyQuicConnectionId(), version,
supported_versions, &cached, out_params,
&error_details),
IsError(QUIC_INVALID_CRYPTO_MESSAGE_PARAMETER));
EXPECT_EQ("server hello missing server nonce", error_details);
}
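// The first host looked up under a registered suffix becomes that suffix's
// canonical entry; later hosts inherit its validated server config, while a
// fresh suffix with no prior canonical entry yields an empty state.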
TEST_F(QuicCryptoClientConfigTest, MultipleCanonicalEntries) {
QuicCryptoClientConfig config(crypto_test_utils::ProofVerifierForTesting());
config.AddCanonicalSuffix(".google.com");
QuicServerId canonical_server_id1("www.google.com", 443);
QuicCryptoClientConfig::CachedState* state1 =
config.LookupOrCreate(canonical_server_id1);
CryptoHandshakeMessage scfg;
scfg.set_tag(kSCFG);
scfg.SetStringPiece(kSCID, "12345678");
std::string details;
QuicWallTime now = QuicWallTime::FromUNIXSeconds(1);
QuicWallTime expiry = QuicWallTime::FromUNIXSeconds(2);
state1->SetServerConfig(scfg.GetSerialized().AsStringPiece(), now, expiry,
&details);
state1->set_source_address_token("TOKEN");
state1->SetProofValid();
EXPECT_FALSE(state1->IsEmpty());
QuicServerId canonical_server_id2("mail.google.com", 443);
QuicCryptoClientConfig::CachedState* state2 =
config.LookupOrCreate(canonical_server_id2);
EXPECT_FALSE(state2->IsEmpty());
const CryptoHandshakeMessage* const scfg2 = state2->GetServerConfig();
ASSERT_TRUE(scfg2);
EXPECT_EQ(kSCFG, scfg2->tag());
config.AddCanonicalSuffix(".example.com");
QuicServerId canonical_server_id3("www.example.com", 443);
QuicCryptoClientConfig::CachedState* state3 =
config.LookupOrCreate(canonical_server_id3);
EXPECT_TRUE(state3->IsEmpty());
const CryptoHandshakeMessage* const scfg3 = state3->GetServerConfig();
EXPECT_FALSE(scfg3);
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/crypto/quic_crypto_client_config.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/crypto/quic_crypto_client_config_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
6e92a8e9-0fdc-4b80-93d8-bd223b973557 | cpp | tensorflow/tensorflow | gpu_cost_model_stats_collection | third_party/xla/xla/service/gpu/model/gpu_cost_model_stats_collection.cc | third_party/xla/xla/service/gpu/model/gpu_cost_model_stats_collection_test.cc | #include "xla/service/gpu/model/gpu_cost_model_stats_collection.h"
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/gpu/model/gpu_performance_model.h"
#include "xla/service/gpu/model/gpu_performance_model_base.h"
#include "tsl/platform/status.h"
namespace xla {
namespace gpu {
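// Walks every computation through the GPU cost analysis and records an
// estimated runtime on each fusion instruction's backend config. The pass
// is purely diagnostic, so it always reports that the module was unchanged.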
absl::StatusOr<bool> GpuCostModelStatsCollection::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
for (auto* computation : module->MakeComputationPostOrder()) {
TF_CHECK_OK(computation->Accept(&cost_analysis_));
for (auto* fusion_instr : computation->instructions()) {
if (fusion_instr->opcode() != HloOpcode::kFusion) continue;
GpuPerformanceModel::RecordEstimatedRunTime(
fusion_instr, device_info_, &cost_analysis_,
GpuPerformanceModelOptions::ForModule(module));
}
}
return false;
}
}
} | #include "xla/service/gpu/model/gpu_cost_model_stats_collection.h"
#include <stdint.h>
#include <memory>
#include <gtest/gtest.h>
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/service/gpu/gpu_device_info_for_tests.h"
#include "xla/service/gpu/model/gpu_hlo_cost_analysis.h"
#include "xla/service/hlo_cost_analysis.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tests/verified_hlo_module.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
class GpuCostModelStatsCollectionTest : public HloTestBase {
HloCostAnalysis::ShapeSizeFunction ShapeSizeBytesFunction() const {
return [&](const Shape& shape) {
constexpr int64_t kPointerSize = 8;
return ShapeUtil::ByteSizeOf(shape, kPointerSize);
};
}
public:
GpuCostModelStatsCollection cost_model_stats_{
TestGpuDeviceInfo::RTXA6000DeviceInfo(),
GpuHloCostAnalysis::Options{ShapeSizeBytesFunction(),
{},
{},
true}};
};
TEST_F(GpuCostModelStatsCollectionTest, FusionInEntryComputation) {
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(R"(
HloModule test_module
log {
p = f32[16384]{0} parameter(0)
ROOT l = f32[16384]{0} log(p)
}
ENTRY main {
%p0 = f32[16384] parameter(0)
ROOT %res = f32[16384]{0} fusion(p0), kind=kInput, calls=log
}
)"));
EXPECT_FALSE(cost_model_stats_.Run(module.get()).value());
HloInstruction* root = module->entry_computation()->root_instruction();
TF_ASSERT_OK_AND_ASSIGN(auto gpu_config,
root->backend_config<GpuBackendConfig>());
const FusionBackendConfig& backend_config =
gpu_config.fusion_backend_config();
EXPECT_TRUE(backend_config.has_reification_cost());
EXPECT_GT(backend_config.reification_cost().end_to_end_cycles(), 0);
}
TEST_F(GpuCostModelStatsCollectionTest, FusionInWhileComputation) {
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(R"(
HloModule test_module
cond {
p = f32[16384]{0} parameter(0)
ROOT %constant.2 = pred[] constant(true)
}
log {
p = f32[16384]{0} parameter(0)
ROOT l = f32[16384]{0} log(p)
}
loop {
%p0 = f32[16384] parameter(0)
ROOT %res = f32[16384]{0} fusion(p0), kind=kInput, calls=log
}
ENTRY main {
%p0 = f32[16384] parameter(0)
ROOT %while = f32[16384] while(%p0), body=%loop, condition=%cond
})"));
EXPECT_FALSE(cost_model_stats_.Run(module.get()).value());
HloInstruction* root = module->entry_computation()
->root_instruction()
->while_body()
->root_instruction();
TF_ASSERT_OK_AND_ASSIGN(auto gpu_config,
root->backend_config<GpuBackendConfig>());
const FusionBackendConfig& backend_config =
gpu_config.fusion_backend_config();
EXPECT_TRUE(backend_config.has_reification_cost());
EXPECT_GT(backend_config.reification_cost().end_to_end_cycles(), 0);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/model/gpu_cost_model_stats_collection.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/model/gpu_cost_model_stats_collection_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
b3ef2df2-c7c5-4ea3-a7ab-56928ca266f3 | cpp | google/tensorstore | dtype | tensorstore/driver/zarr/dtype.cc | tensorstore/driver/zarr/dtype_test.cc | #include "tensorstore/driver/zarr/dtype.h"
#include <stddef.h>
#include "absl/base/optimization.h"
#include "tensorstore/data_type.h"
#include "tensorstore/internal/json_binding/json_binding.h"
#include "tensorstore/util/extents.h"
#include "tensorstore/util/quote_string.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
namespace internal_zarr {
constexpr char kDtypeBfloat16[] = "bfloat16";
constexpr char kDtypeFloat8e4m3fn[] = "float8_e4m3fn";
constexpr char kDtypeFloat8e4m3fnuz[] = "float8_e4m3fnuz";
constexpr char kDtypeFloat8e4m3b11fnuz[] = "float8_e4m3b11fnuz";
constexpr char kDtypeFloat8e5m2[] = "float8_e5m2";
constexpr char kDtypeFloat8e5m2fnuz[] = "float8_e5m2fnuz";
constexpr char kDtypeInt4[] = "int4";
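// Parses a zarr (v2) dtype string. The named dtypes checked first are
// non-standard extensions for low-precision types; everything else follows
// the NumPy typestr convention <byte order><kind><size>, e.g. "<i4" for a
// little-endian int32 or "|b1" for a byte-order-free bool.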
Result<ZarrDType::BaseDType> ParseBaseDType(std::string_view dtype) {
using D = ZarrDType::BaseDType;
if (dtype == kDtypeBfloat16) {
return D{std::string(dtype), dtype_v<::tensorstore::dtypes::bfloat16_t>,
endian::little};
}
if (dtype == kDtypeFloat8e4m3fn) {
return D{std::string(dtype),
dtype_v<::tensorstore::dtypes::float8_e4m3fn_t>, endian::little};
}
if (dtype == kDtypeFloat8e4m3fnuz) {
return D{std::string(dtype),
dtype_v<::tensorstore::dtypes::float8_e4m3fnuz_t>, endian::little};
}
if (dtype == kDtypeFloat8e4m3b11fnuz) {
return D{std::string(dtype),
dtype_v<::tensorstore::dtypes::float8_e4m3b11fnuz_t>,
endian::little};
}
if (dtype == kDtypeFloat8e5m2) {
return D{std::string(dtype), dtype_v<::tensorstore::dtypes::float8_e5m2_t>,
endian::little};
}
if (dtype == kDtypeFloat8e5m2fnuz) {
return D{std::string(dtype),
dtype_v<::tensorstore::dtypes::float8_e5m2fnuz_t>, endian::little};
}
if (dtype == kDtypeInt4) {
return D{std::string(dtype), dtype_v<::tensorstore::dtypes::int4_t>,
endian::little};
}
if (dtype.size() < 3) goto error;
{
const char endian_indicator = dtype[0];
const char type_indicator = dtype[1];
const std::string_view suffix = dtype.substr(2);
endian endian_value;
switch (endian_indicator) {
case '<':
endian_value = endian::little;
break;
case '>':
endian_value = endian::big;
break;
case '|':
endian_value = endian::native;
break;
default:
goto error;
}
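    // Byte order carries no information for single-byte and raw char/byte
    // types, so normalize those to native endianness; multi-byte numeric
    // types must spell out '<' or '>'.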
switch (type_indicator) {
case 'b':
if (suffix != "1") goto error;
ABSL_FALLTHROUGH_INTENDED;
case 'S':
case 'V':
endian_value = endian::native;
break;
case 'i':
case 'u':
if (endian_indicator == '|') {
if (suffix != "1") goto error;
endian_value = endian::native;
break;
} else if (suffix == "1") {
endian_value = endian::native;
break;
}
[[fallthrough]];
case 'f':
case 'c':
case 'm':
case 'M':
if (endian_indicator == '|') {
goto error;
}
break;
}
switch (type_indicator) {
case 'b':
return D{std::string(dtype), dtype_v<bool>, endian::native};
case 'i':
if (suffix == "1") {
return D{std::string(dtype), dtype_v<int8_t>, endian_value};
}
if (suffix == "2") {
return D{std::string(dtype), dtype_v<int16_t>, endian_value};
}
if (suffix == "4") {
return D{std::string(dtype), dtype_v<int32_t>, endian_value};
}
if (suffix == "8") {
return D{std::string(dtype), dtype_v<int64_t>, endian_value};
}
goto error;
case 'u':
if (suffix == "1") {
return D{std::string(dtype), dtype_v<uint8_t>, endian_value};
}
if (suffix == "2") {
return D{std::string(dtype), dtype_v<uint16_t>, endian_value};
}
if (suffix == "4") {
return D{std::string(dtype), dtype_v<uint32_t>, endian_value};
}
if (suffix == "8") {
return D{std::string(dtype), dtype_v<uint64_t>, endian_value};
}
goto error;
case 'f':
if (suffix == "2") {
return D{std::string(dtype),
dtype_v<::tensorstore::dtypes::float16_t>, endian_value};
}
if (suffix == "4") {
return D{std::string(dtype),
dtype_v<::tensorstore::dtypes::float32_t>, endian_value};
}
if (suffix == "8") {
return D{std::string(dtype),
dtype_v<::tensorstore::dtypes::float64_t>, endian_value};
}
goto error;
case 'c':
if (suffix == "8") {
return D{std::string(dtype),
dtype_v<::tensorstore::dtypes::complex64_t>, endian_value};
}
if (suffix == "16") {
return D{std::string(dtype),
dtype_v<::tensorstore::dtypes::complex128_t>, endian_value};
}
goto error;
case 'S':
case 'V': {
Index num_elements = 0;
for (char c : suffix) {
if (internal::MulOverflow(num_elements, Index(10), &num_elements))
goto error;
if (c < '0' || c > '9') goto error;
if (internal::AddOverflow(num_elements, Index(c - '0'),
&num_elements))
goto error;
}
return D{std::string(dtype),
(type_indicator == 'S')
? DataType(dtype_v<::tensorstore::dtypes::char_t>)
: DataType(dtype_v<::tensorstore::dtypes::byte_t>),
endian::native,
{num_elements}};
}
}
}
error:
return absl::InvalidArgumentError(
tensorstore::StrCat("Unsupported zarr dtype: ", QuoteString(dtype)));
}
namespace {
Result<ZarrDType> ParseDTypeNoDerived(const nlohmann::json& value) {
ZarrDType out;
if (value.is_string()) {
out.has_fields = false;
out.fields.resize(1);
TENSORSTORE_ASSIGN_OR_RETURN(
static_cast<ZarrDType::BaseDType&>(out.fields[0]),
ParseBaseDType(value.get<std::string>()));
return out;
}
out.has_fields = true;
auto parse_result = internal_json::JsonParseArray(
value,
[&](std::ptrdiff_t size) {
out.fields.resize(size);
return absl::OkStatus();
},
[&](const ::nlohmann::json& x, std::ptrdiff_t field_i) {
auto& field = out.fields[field_i];
return internal_json::JsonParseArray(
x,
[&](std::ptrdiff_t size) {
if (size < 2 || size > 3) {
return absl::InvalidArgumentError(tensorstore::StrCat(
"Expected array of size 2 or 3, but received: ", x.dump()));
}
return absl::OkStatus();
},
[&](const ::nlohmann::json& v, std::ptrdiff_t i) {
switch (i) {
case 0:
if (internal_json::JsonRequireValueAs(v, &field.name).ok()) {
if (!field.name.empty()) return absl::OkStatus();
}
return absl::InvalidArgumentError(tensorstore::StrCat(
"Expected non-empty string, but received: ", v.dump()));
case 1: {
std::string dtype_string;
TENSORSTORE_RETURN_IF_ERROR(
internal_json::JsonRequireValueAs(v, &dtype_string));
TENSORSTORE_ASSIGN_OR_RETURN(
static_cast<ZarrDType::BaseDType&>(field),
ParseBaseDType(dtype_string));
return absl::OkStatus();
}
case 2: {
return internal_json::JsonParseArray(
v,
[&](std::ptrdiff_t size) {
field.outer_shape.resize(size);
return absl::OkStatus();
},
[&](const ::nlohmann::json& x, std::ptrdiff_t j) {
return internal_json::JsonRequireInteger(
x, &field.outer_shape[j], true, 1,
kInfIndex);
});
}
default:
ABSL_UNREACHABLE();
}
});
});
if (!parse_result.ok()) return parse_result;
return out;
}
}
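// Fills in the derived layout for each field (full field shape, inner
// element count, byte size, and byte offset) plus the total bytes per outer
// element, rejecting duplicate field names and any size that overflows.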
absl::Status ValidateDType(ZarrDType& dtype) {
dtype.bytes_per_outer_element = 0;
for (size_t field_i = 0; field_i < dtype.fields.size(); ++field_i) {
auto& field = dtype.fields[field_i];
if (std::any_of(
dtype.fields.begin(), dtype.fields.begin() + field_i,
[&](const ZarrDType::Field& f) { return f.name == field.name; })) {
return absl::InvalidArgumentError(tensorstore::StrCat(
"Field name ", QuoteString(field.name), " occurs more than once"));
}
field.field_shape.resize(field.flexible_shape.size() +
field.outer_shape.size());
std::copy(field.flexible_shape.begin(), field.flexible_shape.end(),
std::copy(field.outer_shape.begin(), field.outer_shape.end(),
field.field_shape.begin()));
field.num_inner_elements = ProductOfExtents(span(field.field_shape));
if (field.num_inner_elements == std::numeric_limits<Index>::max()) {
return absl::InvalidArgumentError(tensorstore::StrCat(
"Product of dimensions ", span(field.field_shape), " is too large"));
}
if (internal::MulOverflow(field.num_inner_elements,
static_cast<Index>(field.dtype->size),
&field.num_bytes)) {
return absl::InvalidArgumentError("Field size in bytes is too large");
}
field.byte_offset = dtype.bytes_per_outer_element;
if (internal::AddOverflow(dtype.bytes_per_outer_element, field.num_bytes,
&dtype.bytes_per_outer_element)) {
return absl::InvalidArgumentError(
"Total number of bytes per outer array element is too large");
}
}
return absl::OkStatus();
}
Result<ZarrDType> ParseDType(const nlohmann::json& value) {
TENSORSTORE_ASSIGN_OR_RETURN(ZarrDType dtype, ParseDTypeNoDerived(value));
TENSORSTORE_RETURN_IF_ERROR(ValidateDType(dtype));
return dtype;
}
void to_json(::nlohmann::json& out, const ZarrDType::Field& field) {
using array_t = ::nlohmann::json::array_t;
if (field.outer_shape.empty()) {
out = array_t{field.name, field.encoded_dtype};
} else {
out = array_t{field.name, field.encoded_dtype, field.outer_shape};
}
}
void to_json(::nlohmann::json& out, const ZarrDType& dtype) {
if (!dtype.has_fields) {
out = dtype.fields[0].encoded_dtype;
} else {
out = dtype.fields;
}
}
TENSORSTORE_DEFINE_JSON_DEFAULT_BINDER(ZarrDType, [](auto is_loading,
const auto& options,
auto* obj, auto* j) {
if constexpr (is_loading) {
TENSORSTORE_ASSIGN_OR_RETURN(*obj, ParseDType(*j));
} else {
to_json(*j, *obj);
}
return absl::OkStatus();
})
char EndianIndicator(tensorstore::endian e) {
return e == tensorstore::endian::little ? '<' : '>';
}
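// Chooses the canonical zarr encoding for a tensorstore data type: standard
// NumPy typestrs use the platform's native byte order, while the extension
// dtypes are always encoded little-endian.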
Result<ZarrDType::BaseDType> ChooseBaseDType(DataType dtype) {
ZarrDType::BaseDType base_dtype;
base_dtype.endian = endian::native;
base_dtype.dtype = dtype;
const auto set_typestr = [&](std::string_view typestr, int size) {
if (size > 1) {
base_dtype.encoded_dtype = tensorstore::StrCat(
EndianIndicator(base_dtype.endian), typestr, size);
} else {
base_dtype.encoded_dtype = tensorstore::StrCat("|", typestr, size);
}
};
switch (dtype.id()) {
case DataTypeId::bool_t:
set_typestr("b", 1);
break;
case DataTypeId::uint8_t:
set_typestr("u", 1);
break;
case DataTypeId::uint16_t:
set_typestr("u", 2);
break;
case DataTypeId::uint32_t:
set_typestr("u", 4);
break;
case DataTypeId::uint64_t:
set_typestr("u", 8);
break;
case DataTypeId::int4_t:
base_dtype.endian = endian::little;
base_dtype.encoded_dtype = kDtypeInt4;
break;
case DataTypeId::int8_t:
set_typestr("i", 1);
break;
case DataTypeId::int16_t:
set_typestr("i", 2);
break;
case DataTypeId::int32_t:
set_typestr("i", 4);
break;
case DataTypeId::int64_t:
set_typestr("i", 8);
break;
case DataTypeId::float8_e4m3fn_t:
base_dtype.endian = endian::little;
base_dtype.encoded_dtype = kDtypeFloat8e4m3fn;
break;
case DataTypeId::float8_e4m3fnuz_t:
base_dtype.endian = endian::little;
base_dtype.encoded_dtype = kDtypeFloat8e4m3fnuz;
break;
case DataTypeId::float8_e4m3b11fnuz_t:
base_dtype.endian = endian::little;
base_dtype.encoded_dtype = kDtypeFloat8e4m3b11fnuz;
break;
case DataTypeId::float8_e5m2_t:
base_dtype.endian = endian::little;
base_dtype.encoded_dtype = kDtypeFloat8e5m2;
break;
case DataTypeId::float8_e5m2fnuz_t:
base_dtype.endian = endian::little;
base_dtype.encoded_dtype = kDtypeFloat8e5m2fnuz;
break;
case DataTypeId::float16_t:
set_typestr("f", 2);
break;
case DataTypeId::bfloat16_t:
base_dtype.endian = endian::little;
base_dtype.encoded_dtype = kDtypeBfloat16;
break;
case DataTypeId::float32_t:
set_typestr("f", 4);
break;
case DataTypeId::float64_t:
set_typestr("f", 8);
break;
case DataTypeId::complex64_t:
set_typestr("c", 8);
break;
case DataTypeId::complex128_t:
set_typestr("c", 16);
break;
default:
return absl::InvalidArgumentError(
tensorstore::StrCat("Data type not supported: ", dtype));
}
return base_dtype;
}
}
} | #include "tensorstore/driver/zarr/dtype.h"
#include <stdint.h>
#include <cstddef>
#include <cstdint>
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include <nlohmann/json.hpp>
#include "tensorstore/data_type.h"
#include "tensorstore/driver/zarr/metadata_testutil.h"
#include "tensorstore/index.h"
#include "tensorstore/internal/json_gtest.h"
#include "tensorstore/util/endian.h"
#include "tensorstore/util/status_testutil.h"
#include "tensorstore/util/str_cat.h"
namespace {
using ::tensorstore::DataType;
using ::tensorstore::dtype_v;
using ::tensorstore::endian;
using ::tensorstore::Index;
using ::tensorstore::kInfIndex;
using ::tensorstore::MatchesStatus;
using ::tensorstore::internal_zarr::ChooseBaseDType;
using ::tensorstore::internal_zarr::ParseBaseDType;
using ::tensorstore::internal_zarr::ParseDType;
using ::tensorstore::internal_zarr::ZarrDType;
void CheckBaseDType(std::string dtype, DataType r, endian e,
std::vector<Index> flexible_shape) {
EXPECT_THAT(ParseBaseDType(dtype), ::testing::Optional(ZarrDType::BaseDType{
dtype, r, e, flexible_shape}))
<< dtype;
}
TEST(ParseBaseDType, Success) {
CheckBaseDType("|b1", dtype_v<bool>, endian::native, {});
CheckBaseDType("<b1", dtype_v<bool>, endian::native, {});
CheckBaseDType(">b1", dtype_v<bool>, endian::native, {});
CheckBaseDType("|S150", dtype_v<char>, endian::native, {150});
CheckBaseDType(">S150", dtype_v<char>, endian::native, {150});
CheckBaseDType("<S150", dtype_v<char>, endian::native, {150});
CheckBaseDType("|S9223372036854775807", dtype_v<char>, endian::native,
{9223372036854775807});
CheckBaseDType("|V150", dtype_v<std::byte>, endian::native, {150});
CheckBaseDType("<V150", dtype_v<std::byte>, endian::native, {150});
CheckBaseDType(">V150", dtype_v<std::byte>, endian::native, {150});
CheckBaseDType("|i1", dtype_v<std::int8_t>, endian::native, {});
CheckBaseDType("<i1", dtype_v<std::int8_t>, endian::native, {});
CheckBaseDType(">i1", dtype_v<std::int8_t>, endian::native, {});
CheckBaseDType("|u1", dtype_v<std::uint8_t>, endian::native, {});
CheckBaseDType("<u1", dtype_v<std::uint8_t>, endian::native, {});
CheckBaseDType(">u1", dtype_v<std::uint8_t>, endian::native, {});
CheckBaseDType("<i2", dtype_v<std::int16_t>, endian::little, {});
CheckBaseDType("<i4", dtype_v<std::int32_t>, endian::little, {});
CheckBaseDType("<i8", dtype_v<std::int64_t>, endian::little, {});
CheckBaseDType("<u2", dtype_v<std::uint16_t>, endian::little, {});
CheckBaseDType("<u4", dtype_v<std::uint32_t>, endian::little, {});
CheckBaseDType("<u8", dtype_v<std::uint64_t>, endian::little, {});
CheckBaseDType(">i2", dtype_v<std::int16_t>, endian::big, {});
CheckBaseDType(">i4", dtype_v<std::int32_t>, endian::big, {});
CheckBaseDType(">i8", dtype_v<std::int64_t>, endian::big, {});
CheckBaseDType(">u2", dtype_v<std::uint16_t>, endian::big, {});
CheckBaseDType(">u4", dtype_v<std::uint32_t>, endian::big, {});
CheckBaseDType(">u8", dtype_v<std::uint64_t>, endian::big, {});
CheckBaseDType("float8_e4m3fn", dtype_v<tensorstore::dtypes::float8_e4m3fn_t>,
endian::little, {});
CheckBaseDType("float8_e4m3fnuz",
dtype_v<tensorstore::dtypes::float8_e4m3fnuz_t>,
endian::little, {});
CheckBaseDType("float8_e4m3b11fnuz",
dtype_v<tensorstore::dtypes::float8_e4m3b11fnuz_t>,
endian::little, {});
CheckBaseDType("float8_e5m2", dtype_v<tensorstore::dtypes::float8_e5m2_t>,
endian::little, {});
CheckBaseDType("float8_e5m2fnuz",
dtype_v<tensorstore::dtypes::float8_e5m2fnuz_t>,
endian::little, {});
CheckBaseDType("<f2", dtype_v<tensorstore::dtypes::float16_t>, endian::little,
{});
CheckBaseDType("bfloat16", dtype_v<tensorstore::dtypes::bfloat16_t>,
endian::little, {});
CheckBaseDType("<f4", dtype_v<tensorstore::dtypes::float32_t>, endian::little,
{});
CheckBaseDType("<f8", dtype_v<tensorstore::dtypes::float64_t>, endian::little,
{});
CheckBaseDType(">f2", dtype_v<tensorstore::dtypes::float16_t>, endian::big,
{});
CheckBaseDType(">f4", dtype_v<tensorstore::dtypes::float32_t>, endian::big,
{});
CheckBaseDType(">f8", dtype_v<tensorstore::dtypes::float64_t>, endian::big,
{});
CheckBaseDType("<c8", dtype_v<tensorstore::dtypes::complex64_t>,
endian::little, {});
CheckBaseDType("<c16", dtype_v<tensorstore::dtypes::complex128_t>,
endian::little, {});
CheckBaseDType(">c8", dtype_v<tensorstore::dtypes::complex64_t>, endian::big,
{});
CheckBaseDType(">c16", dtype_v<tensorstore::dtypes::complex128_t>,
endian::big, {});
}
TEST(ParseBaseDType, Failure) {
EXPECT_THAT(ParseBaseDType(""),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Unsupported zarr dtype: \"\""));
EXPECT_THAT(ParseBaseDType("|f4"),
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(ParseBaseDType("|f8"),
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(ParseBaseDType("|c8"),
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(ParseBaseDType("|c16"),
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(ParseBaseDType("|b2"),
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(ParseBaseDType("|i2"),
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(ParseBaseDType("<i9"),
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(ParseBaseDType("<u9"),
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(ParseBaseDType("<S"),
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(ParseBaseDType("|S999999999999999999999999999"),
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(ParseBaseDType("|S9223372036854775808"),
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(ParseBaseDType("|Sa"),
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(ParseBaseDType("|S "),
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(ParseBaseDType("<f5"),
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(ParseBaseDType("<c5"),
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(ParseBaseDType("<m8"),
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(ParseBaseDType("<M8"),
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(ParseBaseDType("<X5"),
MatchesStatus(absl::StatusCode::kInvalidArgument));
}
void CheckDType(const ::nlohmann::json& json, const ZarrDType& expected) {
SCOPED_TRACE(json.dump());
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto dtype, ParseDType(json));
EXPECT_EQ(expected, dtype);
EXPECT_EQ(json, ::nlohmann::json(dtype));
}
TEST(ParseDType, SimpleStringBool) {
CheckDType("|b1", ZarrDType{
false,
{
{{
"|b1",
dtype_v<bool>,
endian::native,
{},
},
{},
"",
{},
1,
0,
1},
},
1,
});
}
TEST(ParseDType, SingleNamedFieldChar) {
CheckDType(::nlohmann::json::array_t{{"x", "|S10"}},
ZarrDType{
true,
{
{{
"|S10",
dtype_v<char>,
endian::native,
{10},
},
{},
"x",
{10},
10,
0,
10},
},
10,
});
}
TEST(ParseDType, TwoNamedFieldsCharAndInt) {
CheckDType(
::nlohmann::json::array_t{{"x", "|S10", {2, 3}}, {"y", "<i2", {5}}},
ZarrDType{
true,
{
{{
"|S10",
dtype_v<char>,
endian::native,
{10},
},
{2, 3},
"x",
{2, 3, 10},
10 * 2 * 3,
0,
10 * 2 * 3},
{{
"<i2",
dtype_v<std::int16_t>,
endian::little,
{},
},
{5},
"y",
{5},
5,
10 * 2 * 3,
2 * 5},
},
10 * 2 * 3 + 2 * 5,
});
}
TEST(ParseDType, FieldSpecTooShort) {
EXPECT_THAT(ParseDType(::nlohmann::json::array_t{{"x"}}),
MatchesStatus(
absl::StatusCode::kInvalidArgument,
"Error parsing value at position 0: "
"Expected array of size 2 or 3, but received: \\[\"x\"\\]"));
}
TEST(ParseDType, FieldSpecTooLong) {
EXPECT_THAT(ParseDType(::nlohmann::json::array_t{{"x", "<i2", {2, 3}, 5}}),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Error parsing value at position 0: "
"Expected array of size 2 or 3, but received: "
"\\[\"x\",\"<i2\",\\[2,3\\],5\\]"));
}
TEST(ParseDType, InvalidFieldName) {
EXPECT_THAT(ParseDType(::nlohmann::json::array_t{{3, "<i2"}}),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Error parsing value at position 0: "
"Error parsing value at position 0: "
"Expected non-empty string, but received: 3"));
}
TEST(ParseDType, EmptyFieldName) {
EXPECT_THAT(ParseDType(::nlohmann::json::array_t{{"", "<i2"}}),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Error parsing value at position 0: "
"Error parsing value at position 0: "
"Expected non-empty string, but received: \"\""));
}
TEST(ParseDType, DuplicateFieldName) {
EXPECT_THAT(ParseDType(::nlohmann::json::array_t{{"x", "<i2"}, {"x", "<u2"}}),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Field name \"x\" occurs more than once"));
}
TEST(ParseDType, NonStringFieldBaseDType) {
EXPECT_THAT(ParseDType(::nlohmann::json::array_t{{"x", 3}}),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Error parsing value at position 0: "
"Error parsing value at position 1: "
"Expected string, but received: 3"));
}
TEST(ParseDType, InvalidFieldBaseDType) {
EXPECT_THAT(ParseDType(::nlohmann::json::array_t{{"x", "<X2"}}),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Error parsing value at position 0: "
"Error parsing value at position 1: "
"Unsupported zarr dtype: \"<X2\""));
}
TEST(ParseDType, ProductOfDimensionsOverflow) {
EXPECT_THAT(ParseDType(::nlohmann::json::array_t{
{"x", "|i1", {kInfIndex, kInfIndex}}}),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Product of dimensions .* is too large"));
}
TEST(ParseDType, FieldSizeInBytesOverflow) {
EXPECT_THAT(ParseDType(::nlohmann::json::array_t{{"x", "<f8", {kInfIndex}}}),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Field size in bytes is too large"));
}
TEST(ParseDType, BytesPerOuterElementOverflow) {
EXPECT_THAT(
ParseDType(::nlohmann::json::array_t{{"x", "<i2", {kInfIndex}},
{"y", "<i2", {kInfIndex}}}),
MatchesStatus(
absl::StatusCode::kInvalidArgument,
"Total number of bytes per outer array element is too large"));
}
TEST(ChooseBaseDTypeTest, RoundTrip) {
constexpr tensorstore::DataType kSupportedDataTypes[] = {
dtype_v<bool>,
dtype_v<uint8_t>,
dtype_v<uint16_t>,
dtype_v<uint32_t>,
dtype_v<uint64_t>,
dtype_v<int8_t>,
dtype_v<int16_t>,
dtype_v<int32_t>,
dtype_v<int64_t>,
dtype_v<::tensorstore::dtypes::float8_e4m3fn_t>,
dtype_v<::tensorstore::dtypes::float8_e4m3fnuz_t>,
dtype_v<::tensorstore::dtypes::float8_e4m3b11fnuz_t>,
dtype_v<::tensorstore::dtypes::float8_e5m2_t>,
dtype_v<::tensorstore::dtypes::float8_e5m2fnuz_t>,
dtype_v<::tensorstore::dtypes::float16_t>,
dtype_v<::tensorstore::dtypes::bfloat16_t>,
dtype_v<::tensorstore::dtypes::float32_t>,
dtype_v<::tensorstore::dtypes::float64_t>,
dtype_v<::tensorstore::dtypes::complex64_t>,
dtype_v<::tensorstore::dtypes::complex128_t>,
};
for (auto dtype : kSupportedDataTypes) {
SCOPED_TRACE(tensorstore::StrCat("dtype=", dtype));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto base_zarr_dtype,
ChooseBaseDType(dtype));
EXPECT_EQ(dtype, base_zarr_dtype.dtype);
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto parsed, ParseBaseDType(base_zarr_dtype.encoded_dtype));
EXPECT_EQ(dtype, parsed.dtype);
EXPECT_EQ(base_zarr_dtype.endian, parsed.endian);
EXPECT_EQ(base_zarr_dtype.flexible_shape, parsed.flexible_shape);
EXPECT_EQ(base_zarr_dtype.encoded_dtype, parsed.encoded_dtype);
}
}
TEST(ChooseBaseDTypeTest, Invalid) {
struct X {};
EXPECT_THAT(ChooseBaseDType(dtype_v<X>),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Data type not supported: .*"));
EXPECT_THAT(ChooseBaseDType(dtype_v<::tensorstore::dtypes::string_t>),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Data type not supported: string"));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/driver/zarr/dtype.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/driver/zarr/dtype_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
c0ebcd67-aa31-4bde-8d1d-ac3a89a6019f | cpp | tensorflow/tensorflow | adaptive_shared_batch_scheduler | tensorflow/core/kernels/batching_util/adaptive_shared_batch_scheduler.h | tensorflow/core/kernels/batching_util/adaptive_shared_batch_scheduler_test.cc | #ifndef TENSORFLOW_CORE_KERNELS_BATCHING_UTIL_ADAPTIVE_SHARED_BATCH_SCHEDULER_H_
#define TENSORFLOW_CORE_KERNELS_BATCHING_UTIL_ADAPTIVE_SHARED_BATCH_SCHEDULER_H_
#include <algorithm>
#include <atomic>
#include <functional>
#include <memory>
#include <random>
#include <unordered_map>
#include <vector>
#include "absl/types/optional.h"
#include "tensorflow/core/kernels/batching_util/batch_scheduler.h"
#include "tensorflow/core/kernels/batching_util/periodic_function.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/core/threadpool.h"
#include "tensorflow/core/platform/byte_order.h"
#include "tensorflow/core/platform/cpu_info.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/thread_annotations.h"
#include "tensorflow/core/platform/threadpool_interface.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/profiler/lib/connected_traceme.h"
namespace tensorflow {
namespace serving {
namespace internal {
template <typename TaskType>
class ASBSBatch;
template <typename TaskType>
class ASBSQueue;
}
template <typename TaskType>
class AdaptiveSharedBatchScheduler
: public std::enable_shared_from_this<
AdaptiveSharedBatchScheduler<TaskType>> {
public:
~AdaptiveSharedBatchScheduler() {
if (owned_batch_thread_pool_) {
delete batch_thread_pool_;
}
}
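  // Tuning knobs. The adaptive concurrency target starts at
  // initial_in_flight_batches_limit, is re-tuned after every
  // batches_to_average_over processed batches, and stays clamped to
  // [min_in_flight_batches_limit, num_batch_threads].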
struct Options {
string thread_pool_name = {"batch_threads"};
int64_t num_batch_threads = port::MaxParallelism();
thread::ThreadPool* thread_pool = nullptr;
int64_t min_in_flight_batches_limit = 1;
int64_t full_batch_scheduling_boost_micros = 0;
Env* env = Env::Default();
double initial_in_flight_batches_limit = 3;
int64_t batches_to_average_over = 1000;
bool fifo_scheduling = false;
};
static Status Create(
const Options& options,
std::shared_ptr<AdaptiveSharedBatchScheduler<TaskType>>* scheduler);
struct QueueOptions {
int max_batch_size = 1000;
absl::optional<int> max_input_task_size = absl::nullopt;
absl::optional<int> max_tasks_per_batch = absl::nullopt;
int max_enqueued_batches = 10;
int64_t batch_timeout_micros = 0;
std::function<Status(std::unique_ptr<TaskType>* input_task, int first_size,
int max_batch_size,
std::vector<std::unique_ptr<TaskType>>* output_tasks)>
split_input_task_func;
bool disable_padding = false;
};
using BatchProcessor = std::function<void(std::unique_ptr<Batch<TaskType>>)>;
Status AddQueue(const QueueOptions& options,
BatchProcessor process_batch_callback,
std::unique_ptr<BatchScheduler<TaskType>>* queue);
double in_flight_batches_limit() {
mutex_lock l(mu_);
return in_flight_batches_limit_;
}
private:
friend class internal::ASBSQueue<TaskType>;
explicit AdaptiveSharedBatchScheduler(const Options& options);
void CallbackWrapper(const internal::ASBSBatch<TaskType>* batch,
BatchProcessor callback, bool is_express);
void MaybeScheduleNextBatch() TF_EXCLUSIVE_LOCKS_REQUIRED(mu_);
void MaybeScheduleNextBatchFIFO() TF_EXCLUSIVE_LOCKS_REQUIRED(mu_);
void MaybeScheduleClosedBatches();
void MaybeScheduleClosedBatchesLocked() TF_EXCLUSIVE_LOCKS_REQUIRED(mu_);
void MaybeScheduleClosedBatchesLockedFIFO() TF_EXCLUSIVE_LOCKS_REQUIRED(mu_);
void MaybeAdjustInflightLimit() TF_EXCLUSIVE_LOCKS_REQUIRED(mu_);
void AddBatch(const internal::ASBSBatch<TaskType>* batch);
void RemoveQueue(const internal::ASBSQueue<TaskType>* queue);
Env* GetEnv() const { return options_.env; }
const Options options_;
std::vector<const internal::ASBSBatch<TaskType>*> batches_ TF_GUARDED_BY(mu_);
std::deque<const internal::ASBSBatch<TaskType>*> fifo_batches_
TF_GUARDED_BY(mu_);
std::unordered_map<const internal::ASBSQueue<TaskType>*, BatchProcessor>
queues_and_callbacks_ TF_GUARDED_BY(mu_);
mutex mu_;
thread::ThreadPool* batch_thread_pool_;
bool owned_batch_thread_pool_ = false;
double in_flight_batches_limit_ TF_GUARDED_BY(mu_);
int64_t in_flight_batches_ TF_GUARDED_BY(mu_) = 0;
int64_t in_flight_express_batches_ TF_GUARDED_BY(mu_) = 0;
std::default_random_engine rand_engine_;
std::uniform_real_distribution<double> rand_double_;
int64_t batch_count_ TF_GUARDED_BY(mu_) = 0;
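  // Latency bookkeeping for the adaptive limit. The scheduler hill-climbs
  // in_flight_batches_limit_: two latency improvements in a row double the
  // step size, an isolated improvement halves it, and a regression flips
  // the step direction.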
struct DelayStats {
int64_t batch_latency_sum = 0;
double last_avg_latency_ms = 0;
bool last_latency_decreased = false;
int step_direction = 1;
};
DelayStats batch_delay_stats_ TF_GUARDED_BY(mu_);
constexpr static double kMaxStepSizeMultiplier = 0.125;
constexpr static double kMinStepSizeMultiplier = 0.0078125;
double step_size_multiplier_ TF_GUARDED_BY(mu_) = kMaxStepSizeMultiplier;
AdaptiveSharedBatchScheduler(const AdaptiveSharedBatchScheduler&) = delete;
void operator=(const AdaptiveSharedBatchScheduler&) = delete;
};
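// Minimal usage sketch (illustrative only; `MyTask` stands in for any
// BatchTask subclass defined by the caller):
//
//   std::shared_ptr<AdaptiveSharedBatchScheduler<MyTask>> scheduler;
//   TF_RETURN_IF_ERROR(AdaptiveSharedBatchScheduler<MyTask>::Create(
//       AdaptiveSharedBatchScheduler<MyTask>::Options(), &scheduler));
//   std::unique_ptr<BatchScheduler<MyTask>> queue;
//   TF_RETURN_IF_ERROR(scheduler->AddQueue(
//       {}, [](std::unique_ptr<Batch<MyTask>> batch) { /* process batch */ },
//       &queue));
//   std::unique_ptr<MyTask> task(new MyTask(/*...*/));
//   TF_RETURN_IF_ERROR(queue->Schedule(&task));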
namespace internal {
template <typename TaskType>
class ASBSQueue : public BatchScheduler<TaskType> {
public:
using QueueOptions =
typename AdaptiveSharedBatchScheduler<TaskType>::QueueOptions;
ASBSQueue(std::shared_ptr<AdaptiveSharedBatchScheduler<TaskType>> scheduler,
const QueueOptions& options);
~ASBSQueue() override;
Status Schedule(std::unique_ptr<TaskType>* task) override;
size_t NumEnqueuedTasks() const override;
size_t SchedulingCapacity() const override;
void ReleaseBatch(const ASBSBatch<TaskType>* batch);
size_t max_task_size() const override { return options_.max_batch_size; }
private:
size_t SchedulingCapacityLocked() const TF_EXCLUSIVE_LOCKS_REQUIRED(mu_);
static uint64 NewTraceMeContextIdForBatch();
std::shared_ptr<AdaptiveSharedBatchScheduler<TaskType>> scheduler_;
const QueueOptions options_;
ASBSBatch<TaskType>* current_batch_ TF_GUARDED_BY(mu_) = nullptr;
int64_t num_enqueued_batches_ TF_GUARDED_BY(mu_) = 0;
int64_t num_enqueued_tasks_ TF_GUARDED_BY(mu_) = 0;
mutable mutex mu_;
ASBSQueue(const ASBSQueue&) = delete;
void operator=(const ASBSQueue&) = delete;
};
template <typename TaskType>
class ASBSBatch : public Batch<TaskType> {
public:
ASBSBatch(ASBSQueue<TaskType>* queue, int64_t creation_time_micros,
int64_t batch_timeout_micros, uint64 traceme_context_id)
: queue_(queue),
creation_time_micros_(creation_time_micros),
schedulable_time_micros_(creation_time_micros + batch_timeout_micros),
traceme_context_id_(traceme_context_id) {}
~ASBSBatch() override {}
ASBSQueue<TaskType>* queue() const { return queue_; }
int64_t creation_time_micros() const { return creation_time_micros_; }
int64_t schedulable_time_micros() const { return schedulable_time_micros_; }
uint64 traceme_context_id() const { return traceme_context_id_; }
private:
ASBSQueue<TaskType>* queue_;
const int64_t creation_time_micros_;
const int64_t schedulable_time_micros_;
const uint64 traceme_context_id_;
ASBSBatch(const ASBSBatch&) = delete;
void operator=(const ASBSBatch&) = delete;
};
}
template <typename TaskType>
constexpr double AdaptiveSharedBatchScheduler<TaskType>::kMaxStepSizeMultiplier;
template <typename TaskType>
constexpr double AdaptiveSharedBatchScheduler<TaskType>::kMinStepSizeMultiplier;
template <typename TaskType>
Status AdaptiveSharedBatchScheduler<TaskType>::Create(
const Options& options,
std::shared_ptr<AdaptiveSharedBatchScheduler<TaskType>>* scheduler) {
if (options.num_batch_threads < 1) {
return errors::InvalidArgument("num_batch_threads must be positive; was ",
options.num_batch_threads);
}
if (options.min_in_flight_batches_limit < 1) {
return errors::InvalidArgument(
"min_in_flight_batches_limit must be >= 1; was ",
options.min_in_flight_batches_limit);
}
if (options.min_in_flight_batches_limit > options.num_batch_threads) {
return errors::InvalidArgument(
"min_in_flight_batches_limit (", options.min_in_flight_batches_limit,
") must be <= num_batch_threads (", options.num_batch_threads, ")");
}
if (options.full_batch_scheduling_boost_micros < 0) {
return errors::InvalidArgument(
"full_batch_scheduling_boost_micros can't be negative; was ",
options.full_batch_scheduling_boost_micros);
}
if (options.initial_in_flight_batches_limit > options.num_batch_threads) {
return errors::InvalidArgument(
"initial_in_flight_batches_limit (",
options.initial_in_flight_batches_limit,
") should not be larger than num_batch_threads (",
options.num_batch_threads, ")");
}
if (options.initial_in_flight_batches_limit <
options.min_in_flight_batches_limit) {
return errors::InvalidArgument("initial_in_flight_batches_limit (",
options.initial_in_flight_batches_limit,
"must be >= min_in_flight_batches_limit (",
options.min_in_flight_batches_limit, ")");
}
if (options.batches_to_average_over < 1) {
return errors::InvalidArgument(
"batches_to_average_over should be "
"greater than or equal to 1; was ",
options.batches_to_average_over);
}
scheduler->reset(new AdaptiveSharedBatchScheduler<TaskType>(options));
return absl::OkStatus();
}
template <typename TaskType>
AdaptiveSharedBatchScheduler<TaskType>::AdaptiveSharedBatchScheduler(
const Options& options)
: options_(options),
in_flight_batches_limit_(options.initial_in_flight_batches_limit),
rand_double_(0.0, 1.0) {
std::random_device device;
rand_engine_.seed(device());
if (options.thread_pool == nullptr) {
owned_batch_thread_pool_ = true;
batch_thread_pool_ = new thread::ThreadPool(
GetEnv(), options.thread_pool_name, options.num_batch_threads);
} else {
owned_batch_thread_pool_ = false;
batch_thread_pool_ = options.thread_pool;
}
}
template <typename TaskType>
Status AdaptiveSharedBatchScheduler<TaskType>::AddQueue(
const QueueOptions& options, BatchProcessor process_batch_callback,
std::unique_ptr<BatchScheduler<TaskType>>* queue) {
if (options.max_batch_size <= 0) {
return errors::InvalidArgument("max_batch_size must be positive; was ",
options.max_batch_size);
}
if (options.max_enqueued_batches <= 0) {
return errors::InvalidArgument(
"max_enqueued_batches must be positive; was ",
options.max_enqueued_batches);
}
if (options.max_input_task_size.has_value()) {
if (options.max_input_task_size.value() < options.max_batch_size) {
return errors::InvalidArgument(
"max_input_task_size must be larger than or equal to max_batch_size;"
"got max_input_task_size as ",
options.max_input_task_size.value(), " and max_batch_size as ",
options.max_batch_size);
}
}
internal::ASBSQueue<TaskType>* asbs_queue_raw;
queue->reset(asbs_queue_raw = new internal::ASBSQueue<TaskType>(
this->shared_from_this(), options));
mutex_lock l(mu_);
queues_and_callbacks_[asbs_queue_raw] = process_batch_callback;
return absl::OkStatus();
}
template <typename TaskType>
void AdaptiveSharedBatchScheduler<TaskType>::AddBatch(
const internal::ASBSBatch<TaskType>* batch) {
mutex_lock l(mu_);
if (options_.fifo_scheduling) {
fifo_batches_.push_back(batch);
} else {
batches_.push_back(batch);
}
int64_t delay_micros =
batch->schedulable_time_micros() - GetEnv()->NowMicros();
if (delay_micros <= 0) {
MaybeScheduleNextBatch();
return;
}
GetEnv()->SchedClosureAfter(
delay_micros, [this, lifetime_preserver = this->shared_from_this()] {
mutex_lock l(mu_);
MaybeScheduleNextBatch();
});
}
template <typename TaskType>
void AdaptiveSharedBatchScheduler<TaskType>::RemoveQueue(
const internal::ASBSQueue<TaskType>* queue) {
mutex_lock l(mu_);
queues_and_callbacks_.erase(queue);
}
template <typename TaskType>
void AdaptiveSharedBatchScheduler<TaskType>::MaybeScheduleNextBatchFIFO() {
const internal::ASBSBatch<TaskType>* batch = *fifo_batches_.begin();
if (batch->schedulable_time_micros() > GetEnv()->NowMicros()) {
return;
}
fifo_batches_.pop_front();
batch->queue()->ReleaseBatch(batch);
batch_thread_pool_->Schedule(std::bind(
&AdaptiveSharedBatchScheduler<TaskType>::CallbackWrapper, this, batch,
      queues_and_callbacks_[batch->queue()], /*is_express=*/false));
in_flight_batches_++;
}
template <typename TaskType>
void AdaptiveSharedBatchScheduler<
TaskType>::MaybeScheduleClosedBatchesLockedFIFO() {
int available_threads =
static_cast<int>(options_.num_batch_threads - in_flight_batches_ -
in_flight_express_batches_);
for (auto it = fifo_batches_.begin();
it != fifo_batches_.end() && available_threads > 0;
it = fifo_batches_.begin()) {
if ((*it)->IsClosed()) {
const internal::ASBSBatch<TaskType>* batch = *it;
fifo_batches_.pop_front();
batch->queue()->ReleaseBatch(batch);
batch_thread_pool_->Schedule(
std::bind(&AdaptiveSharedBatchScheduler<TaskType>::CallbackWrapper,
          this, batch, queues_and_callbacks_[batch->queue()], /*is_express=*/true));
in_flight_express_batches_++;
available_threads--;
} else {
break;
}
}
}
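// Picks the next batch to run among those whose timeout has elapsed. Lower
// score wins: a batch's score is its creation time minus a boost
// proportional to how full it is, so older and fuller batches go first.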
template <typename TaskType>
void AdaptiveSharedBatchScheduler<TaskType>::MaybeScheduleNextBatch() {
bool batch_empty =
options_.fifo_scheduling ? fifo_batches_.empty() : batches_.empty();
if (batch_empty || in_flight_batches_ >= in_flight_batches_limit_) return;
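  // The limit is fractional; honor the fractional part probabilistically so
  // that long-run concurrency matches the target.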
if (in_flight_batches_limit_ - in_flight_batches_ < 1 &&
rand_double_(rand_engine_) >
in_flight_batches_limit_ - in_flight_batches_) {
return;
}
if (options_.fifo_scheduling) {
MaybeScheduleNextBatchFIFO();
return;
}
auto best_it = batches_.end();
double best_score = (std::numeric_limits<double>::max)();
int64_t now_micros = GetEnv()->NowMicros();
for (auto it = batches_.begin(); it != batches_.end(); it++) {
if ((*it)->schedulable_time_micros() > now_micros) continue;
const double score =
(*it)->creation_time_micros() -
options_.full_batch_scheduling_boost_micros * (*it)->size() /
static_cast<double>((*it)->queue()->max_task_size());
if (best_it == batches_.end() || score < best_score) {
best_score = score;
best_it = it;
}
}
if (best_it == batches_.end()) return;
const internal::ASBSBatch<TaskType>* batch = *best_it;
batches_.erase(best_it);
batch->queue()->ReleaseBatch(batch);
batch_thread_pool_->Schedule(
std::bind(&AdaptiveSharedBatchScheduler<TaskType>::CallbackWrapper, this,
                batch, queues_and_callbacks_[batch->queue()], /*is_express=*/false));
in_flight_batches_++;
}
template <typename TaskType>
void AdaptiveSharedBatchScheduler<TaskType>::MaybeScheduleClosedBatches() {
mutex_lock l(mu_);
MaybeScheduleClosedBatchesLocked();
}
template <typename TaskType>
void AdaptiveSharedBatchScheduler<
TaskType>::MaybeScheduleClosedBatchesLocked() {
if (options_.fifo_scheduling) {
MaybeScheduleClosedBatchesLockedFIFO();
return;
}
int available_threads =
static_cast<int>(options_.num_batch_threads - in_flight_batches_ -
in_flight_express_batches_);
for (auto it = batches_.begin();
it != batches_.end() && available_threads > 0;) {
if ((*it)->IsClosed()) {
const internal::ASBSBatch<TaskType>* batch = *it;
it = batches_.erase(it);
batch->queue()->ReleaseBatch(batch);
batch_thread_pool_->Schedule(
std::bind(&AdaptiveSharedBatchScheduler<TaskType>::CallbackWrapper,
          this, batch, queues_and_callbacks_[batch->queue()], /*is_express=*/true));
in_flight_express_batches_++;
available_threads--;
} else {
++it;
}
}
}
template <typename TaskType>
void AdaptiveSharedBatchScheduler<TaskType>::CallbackWrapper(
const internal::ASBSBatch<TaskType>* batch,
AdaptiveSharedBatchScheduler<TaskType>::BatchProcessor callback,
bool is_express) {
tsl::profiler::TraceMeConsumer trace_me(
[&] {
return profiler::TraceMeEncode(
"ProcessBatch", {{"batch_size_before_padding", batch->size()},
{"_r", 2} });
},
tsl::profiler::ContextType::kAdaptiveSharedBatchScheduler,
batch->traceme_context_id());
const int64_t start_time = batch->creation_time_micros();
callback(std::unique_ptr<Batch<TaskType>>(
const_cast<internal::ASBSBatch<TaskType>*>(batch)));
int64_t end_time = GetEnv()->NowMicros();
mutex_lock l(mu_);
if (is_express) {
in_flight_express_batches_--;
MaybeScheduleClosedBatchesLocked();
return;
}
in_flight_batches_--;
batch_count_++;
batch_delay_stats_.batch_latency_sum += end_time - start_time;
MaybeAdjustInflightLimit();
MaybeScheduleNextBatch();
}
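// Once per batches_to_average_over batches, compares the window's average
// batch latency with the previous window and nudges the in-flight limit up
// or down by step_size_multiplier_, clamped to the configured bounds.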
template <typename TaskType>
void AdaptiveSharedBatchScheduler<TaskType>::MaybeAdjustInflightLimit() {
if (batch_count_ == options_.batches_to_average_over) {
double current_avg_latency_ms =
(batch_delay_stats_.batch_latency_sum / 1000.) / batch_count_;
bool current_latency_decreased =
current_avg_latency_ms < batch_delay_stats_.last_avg_latency_ms;
if (current_latency_decreased) {
step_size_multiplier_ *=
(batch_delay_stats_.last_latency_decreased ? 2 : 0.5);
step_size_multiplier_ =
std::min(step_size_multiplier_, kMaxStepSizeMultiplier);
step_size_multiplier_ =
std::max(step_size_multiplier_, kMinStepSizeMultiplier);
} else {
batch_delay_stats_.step_direction = -batch_delay_stats_.step_direction;
}
in_flight_batches_limit_ += batch_delay_stats_.step_direction *
in_flight_batches_limit_ *
step_size_multiplier_;
in_flight_batches_limit_ =
std::min(in_flight_batches_limit_,
static_cast<double>(options_.num_batch_threads));
in_flight_batches_limit_ =
std::max(in_flight_batches_limit_,
static_cast<double>(options_.min_in_flight_batches_limit));
batch_delay_stats_.last_avg_latency_ms = current_avg_latency_ms;
batch_delay_stats_.last_latency_decreased = current_latency_decreased;
batch_count_ = 0;
batch_delay_stats_.batch_latency_sum = 0;
}
}
namespace internal {
template <typename TaskType>
ASBSQueue<TaskType>::ASBSQueue(
std::shared_ptr<AdaptiveSharedBatchScheduler<TaskType>> scheduler,
const QueueOptions& options)
: scheduler_(scheduler), options_(options) {}
template <typename TaskType>
ASBSQueue<TaskType>::~ASBSQueue() {
const int kSleepMicros = 1000;
for (;;) {
{
mutex_lock l(mu_);
if (num_enqueued_batches_ == 0) {
break;
}
}
scheduler_->GetEnv()->SleepForMicroseconds(kSleepMicros);
}
scheduler_->RemoveQueue(this);
}
template <typename TaskType>
Status ASBSQueue<TaskType>::Schedule(std::unique_ptr<TaskType>* task) {
size_t size = (*task)->size();
if (options_.split_input_task_func == nullptr &&
size > options_.max_batch_size) {
return errors::InvalidArgument("Task size ", size,
" is larger than maximum batch size ",
options_.max_batch_size);
}
if (options_.max_input_task_size.has_value() &&
(size > options_.max_input_task_size.value())) {
return errors::InvalidArgument("Task size ", size,
" is larger than max input task size ",
options_.max_input_task_size.value());
}
std::vector<std::unique_ptr<TaskType>> tasks_to_schedule;
std::vector<ASBSBatch<TaskType>*> new_batches;
bool closed_batch = false;
{
mutex_lock l(mu_);
if (size > SchedulingCapacityLocked()) {
return errors::Unavailable("The batch scheduling queue is full");
}
int remaining_batch_size =
current_batch_ == nullptr
? options_.max_batch_size
: options_.max_batch_size - current_batch_->size();
if (options_.split_input_task_func == nullptr ||
size <= remaining_batch_size) {
tasks_to_schedule.push_back(std::move(*task));
} else {
TF_RETURN_IF_ERROR(options_.split_input_task_func(
task, remaining_batch_size, options_.max_batch_size,
&tasks_to_schedule));
}
for (auto& task : tasks_to_schedule) {
if (current_batch_ &&
current_batch_->size() + task->size() > options_.max_batch_size) {
current_batch_->Close();
closed_batch = true;
current_batch_ = nullptr;
}
if (!current_batch_) {
num_enqueued_batches_++;
current_batch_ = new ASBSBatch<TaskType>(
this, scheduler_->GetEnv()->NowMicros(),
options_.batch_timeout_micros, NewTraceMeContextIdForBatch());
new_batches.push_back(current_batch_);
}
tsl::profiler::TraceMeProducer trace_me(
[task_size = task->size()] {
return profiler::TraceMeEncode(
"ASBSQueue::Schedule",
{{"batching_input_task_size", task_size}});
},
tsl::profiler::ContextType::kAdaptiveSharedBatchScheduler,
this->current_batch_->traceme_context_id());
current_batch_->AddTask(std::move(task));
num_enqueued_tasks_++;
bool reached_max_tasks =
(options_.max_tasks_per_batch.has_value() &&
current_batch_->num_tasks() >= options_.max_tasks_per_batch.value());
if (current_batch_->size() == options_.max_batch_size ||
reached_max_tasks) {
current_batch_->Close();
closed_batch = true;
current_batch_ = nullptr;
}
}
}
for (auto* batch : new_batches) {
scheduler_->AddBatch(batch);
}
if (closed_batch) {
scheduler_->MaybeScheduleClosedBatches();
}
return absl::OkStatus();
}
template <typename TaskType>
void ASBSQueue<TaskType>::ReleaseBatch(const ASBSBatch<TaskType>* batch) {
mutex_lock l(mu_);
num_enqueued_batches_--;
num_enqueued_tasks_ -= batch->num_tasks();
if (batch == current_batch_) {
current_batch_->Close();
current_batch_ = nullptr;
}
}
template <typename TaskType>
size_t ASBSQueue<TaskType>::NumEnqueuedTasks() const {
mutex_lock l(mu_);
return num_enqueued_tasks_;
}
template <typename TaskType>
size_t ASBSQueue<TaskType>::SchedulingCapacity() const {
mutex_lock l(mu_);
return SchedulingCapacityLocked();
}
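// Remaining capacity is the free space in the currently open batch plus a
// full max_batch_size for every batch slot still available under
// max_enqueued_batches.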
template <typename TaskType>
size_t ASBSQueue<TaskType>::SchedulingCapacityLocked() const {
const int current_batch_capacity =
current_batch_ ? options_.max_batch_size - current_batch_->size() : 0;
const int spare_batches =
options_.max_enqueued_batches - num_enqueued_batches_;
return spare_batches * options_.max_batch_size + current_batch_capacity;
}
template <typename TaskType>
uint64 ASBSQueue<TaskType>::NewTraceMeContextIdForBatch() {
static std::atomic<uint64> traceme_context_id(0);
return traceme_context_id.fetch_add(1, std::memory_order_relaxed);
}
}
}
}
#endif | #include "tensorflow/core/kernels/batching_util/adaptive_shared_batch_scheduler.h"
#include "tensorflow/core/kernels/batching_util/fake_clock_env.h"
#include "tensorflow/core/lib/core/notification.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/gtl/cleanup.h"
#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace serving {
namespace anonymous {
class FakeTask : public BatchTask {
public:
explicit FakeTask(size_t size) : size_(size) {}
~FakeTask() override = default;
size_t size() const override { return size_; }
void set_size(size_t size) { size_ = size; }
private:
size_t size_;
FakeTask(const FakeTask&) = delete;
void operator=(const FakeTask&) = delete;
};
Status ScheduleTask(size_t task_size, BatchScheduler<FakeTask>* scheduler) {
std::unique_ptr<FakeTask> task(new FakeTask(task_size));
Status status = scheduler->Schedule(&task);
CHECK_EQ(status.ok(), task == nullptr);
return status;
}
std::unique_ptr<Thread> CreateFakeClockAdvancerThread(
test_util::FakeClockEnv* env, Notification* start, Notification* stop) {
return std::unique_ptr<Thread>(Env::Default()->StartThread(
{}, "FakeClockAdvancerThread", [env, start, stop] {
start->WaitForNotification();
while (!stop->HasBeenNotified()) {
env->AdvanceByMicroseconds(10);
Env::Default()->SleepForMicroseconds(10);
}
}));
}
TEST(AdaptiveSharedBatchSchedulerTest, BadOptions) {
using Scheduler = AdaptiveSharedBatchScheduler<FakeTask>;
std::shared_ptr<Scheduler> scheduler;
Scheduler::Options options;
options.num_batch_threads = 0;
EXPECT_FALSE(Scheduler::Create(options, &scheduler).ok());
options = Scheduler::Options();
options.initial_in_flight_batches_limit = 0.5;
EXPECT_FALSE(Scheduler::Create(options, &scheduler).ok());
options = Scheduler::Options();
options.num_batch_threads = 5;
options.initial_in_flight_batches_limit = 8;
EXPECT_FALSE(Scheduler::Create(options, &scheduler).ok());
options = Scheduler::Options();
options.batches_to_average_over = -5;
EXPECT_FALSE(Scheduler::Create(options, &scheduler).ok());
options = Scheduler::Options();
options.min_in_flight_batches_limit = 0;
EXPECT_FALSE(Scheduler::Create(options, &scheduler).ok());
options = Scheduler::Options();
options.min_in_flight_batches_limit = 5;
options.num_batch_threads = 3;
EXPECT_FALSE(Scheduler::Create(options, &scheduler).ok());
options = Scheduler::Options();
options.initial_in_flight_batches_limit = 1;
options.min_in_flight_batches_limit = 2;
options.num_batch_threads = 3;
EXPECT_FALSE(Scheduler::Create(options, &scheduler).ok());
}
TEST(AdaptiveSharedBatchSchedulerTest, InFlightBatchesLimit) {
AdaptiveSharedBatchScheduler<FakeTask>::Options options;
options.initial_in_flight_batches_limit = 2;
options.batches_to_average_over = 1000;
mutex mu;
int processed_batches = 0;
Notification finish_processing;
auto queue_callback = [&mu, &processed_batches, &finish_processing](
std::unique_ptr<Batch<FakeTask>> batch) {
ASSERT_TRUE(batch->IsClosed());
EXPECT_GT(batch->num_tasks(), 0);
mu.lock();
int batch_num = ++processed_batches;
mu.unlock();
if (batch_num == 2) {
Env::Default()->SleepForMicroseconds(1000);
finish_processing.Notify();
}
if (batch_num == 3) {
ASSERT_TRUE(finish_processing.HasBeenNotified());
}
finish_processing.WaitForNotification();
};
std::shared_ptr<AdaptiveSharedBatchScheduler<FakeTask>> scheduler;
TF_ASSERT_OK(
AdaptiveSharedBatchScheduler<FakeTask>::Create(options, &scheduler));
std::unique_ptr<BatchScheduler<FakeTask>> queue;
TF_ASSERT_OK(scheduler->AddQueue({}, queue_callback, &queue));
TF_ASSERT_OK(ScheduleTask(100, queue.get()));
while (queue->NumEnqueuedTasks() > 0) {
}
TF_ASSERT_OK(ScheduleTask(100, queue.get()));
while (queue->NumEnqueuedTasks() > 0) {
}
TF_ASSERT_OK(ScheduleTask(100, queue.get()));
}
TEST(AdaptiveSharedBatchSchedulerTest, InFlightBatchesLimitTuning) {
test_util::FakeClockEnv env(Env::Default());
Notification start_teardown, stop_teardown;
std::unique_ptr<Thread> teardown_thread =
CreateFakeClockAdvancerThread(&env, &start_teardown, &stop_teardown);
{
AdaptiveSharedBatchScheduler<FakeTask>::Options options;
options.env = &env;
options.initial_in_flight_batches_limit = 2;
options.batches_to_average_over = 1;
auto queue_callback = [&env](std::unique_ptr<Batch<FakeTask>> batch) {
ASSERT_TRUE(batch->IsClosed());
switch (batch->size()) {
case 0:
env.AdvanceByMicroseconds(10);
break;
case 1:
env.AdvanceByMicroseconds(15);
break;
case 2:
env.AdvanceByMicroseconds(10);
break;
case 3:
env.AdvanceByMicroseconds(11);
break;
}
};
std::shared_ptr<AdaptiveSharedBatchScheduler<FakeTask>> scheduler;
TF_ASSERT_OK(
AdaptiveSharedBatchScheduler<FakeTask>::Create(options, &scheduler));
std::unique_ptr<BatchScheduler<FakeTask>> queue;
TF_ASSERT_OK(scheduler->AddQueue({}, queue_callback, &queue));
TF_ASSERT_OK(ScheduleTask(0, queue.get()));
double in_flight_batches_limit = 2;
while (scheduler->in_flight_batches_limit() == in_flight_batches_limit) {
}
EXPECT_LT(scheduler->in_flight_batches_limit(), in_flight_batches_limit);
in_flight_batches_limit = scheduler->in_flight_batches_limit();
TF_ASSERT_OK(ScheduleTask(1, queue.get()));
while (scheduler->in_flight_batches_limit() == in_flight_batches_limit) {
}
EXPECT_GT(scheduler->in_flight_batches_limit(), in_flight_batches_limit);
in_flight_batches_limit = scheduler->in_flight_batches_limit();
TF_ASSERT_OK(ScheduleTask(2, queue.get()));
while (scheduler->in_flight_batches_limit() == in_flight_batches_limit) {
}
EXPECT_GT(scheduler->in_flight_batches_limit(), in_flight_batches_limit);
in_flight_batches_limit = scheduler->in_flight_batches_limit();
TF_ASSERT_OK(ScheduleTask(3, queue.get()));
while (scheduler->in_flight_batches_limit() == in_flight_batches_limit) {
}
EXPECT_LT(scheduler->in_flight_batches_limit(), in_flight_batches_limit);
start_teardown.Notify();
}
stop_teardown.Notify();
}
TEST(AdaptiveSharedBatchSchedulerTest, FullBatchSchedulingBoostMicros) {
test_util::FakeClockEnv env(Env::Default());
Notification start_teardown, stop_teardown;
std::unique_ptr<Thread> teardown_thread =
CreateFakeClockAdvancerThread(&env, &start_teardown, &stop_teardown);
{
AdaptiveSharedBatchScheduler<FakeTask>::Options options;
options.env = &env;
options.initial_in_flight_batches_limit = 1;
options.num_batch_threads = 1;
options.batches_to_average_over = 1000;
options.full_batch_scheduling_boost_micros = 100;
mutex mu;
int processed_batches = 0;
Notification finish_processing;
auto queue_callback = [&mu, &processed_batches, &finish_processing](
std::unique_ptr<Batch<FakeTask>> batch) {
ASSERT_TRUE(batch->IsClosed());
finish_processing.WaitForNotification();
mutex_lock l(mu);
processed_batches++;
switch (processed_batches) {
case 1:
EXPECT_EQ(100, batch->size());
break;
case 2:
EXPECT_EQ(50, batch->size());
break;
case 3:
EXPECT_EQ(900, batch->size());
break;
case 4:
EXPECT_EQ(200, batch->size());
break;
default:
EXPECT_TRUE(false) << "Should only have 4 batches";
}
};
std::shared_ptr<AdaptiveSharedBatchScheduler<FakeTask>> scheduler;
TF_ASSERT_OK(
AdaptiveSharedBatchScheduler<FakeTask>::Create(options, &scheduler));
AdaptiveSharedBatchScheduler<FakeTask>::QueueOptions queue_options;
std::unique_ptr<BatchScheduler<FakeTask>> queue1;
std::unique_ptr<BatchScheduler<FakeTask>> queue2;
queue_options.max_batch_size = 1000;
TF_ASSERT_OK(scheduler->AddQueue(queue_options, queue_callback, &queue1));
queue_options.max_batch_size = 100;
TF_ASSERT_OK(scheduler->AddQueue(queue_options, queue_callback, &queue2));
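    // Each batch's effective start time is reduced by roughly
    // full_batch_scheduling_boost_micros * size / max_batch_size, so after
    // the initial 100-batch the expected order is: 50 (20 - 50 = -30),
    // 900 (65 - 90 = -25), then 200 (0 - 20 = -20).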
    TF_ASSERT_OK(ScheduleTask(100, queue1.get()));
    // Wait until this first batch occupies the single batch thread; its
    // callback blocks on finish_processing.
    while (queue1->NumEnqueuedTasks() > 0) {
    }
TF_ASSERT_OK(ScheduleTask(100, queue1.get()));
env.AdvanceByMicroseconds(10);
TF_ASSERT_OK(ScheduleTask(100, queue1.get()));
env.AdvanceByMicroseconds(10);
TF_ASSERT_OK(ScheduleTask(50, queue2.get()));
env.AdvanceByMicroseconds(45);
TF_ASSERT_OK(ScheduleTask(900, queue1.get()));
finish_processing.Notify();
start_teardown.Notify();
}
stop_teardown.Notify();
}
TEST(AdaptiveSharedBatchSchedulerTest, FIFO) {
test_util::FakeClockEnv env(Env::Default());
Notification start_teardown, stop_teardown;
std::unique_ptr<Thread> teardown_thread =
CreateFakeClockAdvancerThread(&env, &start_teardown, &stop_teardown);
{
AdaptiveSharedBatchScheduler<FakeTask>::Options options;
options.env = &env;
options.initial_in_flight_batches_limit = 1;
options.num_batch_threads = 1;
options.batches_to_average_over = 1000;
options.full_batch_scheduling_boost_micros = 0;
options.fifo_scheduling = true;
mutex mu;
int processed_batches = 0;
Notification finish_processing;
auto queue_callback = [&mu, &processed_batches, &finish_processing](
std::unique_ptr<Batch<FakeTask>> batch) {
ASSERT_TRUE(batch->IsClosed());
finish_processing.WaitForNotification();
mutex_lock l(mu);
processed_batches++;
switch (processed_batches) {
case 1:
EXPECT_EQ(100, batch->size());
break;
case 2:
EXPECT_EQ(200, batch->size());
break;
case 3:
EXPECT_EQ(50, batch->size());
break;
case 4:
EXPECT_EQ(900, batch->size());
break;
default:
EXPECT_TRUE(false) << "Should only have 4 batches";
}
};
std::shared_ptr<AdaptiveSharedBatchScheduler<FakeTask>> scheduler;
TF_ASSERT_OK(
AdaptiveSharedBatchScheduler<FakeTask>::Create(options, &scheduler));
AdaptiveSharedBatchScheduler<FakeTask>::QueueOptions queue_options;
std::unique_ptr<BatchScheduler<FakeTask>> queue1;
std::unique_ptr<BatchScheduler<FakeTask>> queue2;
queue_options.max_batch_size = 1000;
TF_ASSERT_OK(scheduler->AddQueue(queue_options, queue_callback, &queue1));
queue_options.max_batch_size = 100;
TF_ASSERT_OK(scheduler->AddQueue(queue_options, queue_callback, &queue2));
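    // With fifo_scheduling, batches run strictly in creation order: the
    // merged 200 batch (t=30), the 50 batch (t=50), then the 900 batch
    // (t=95), regardless of batch fullness.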
TF_ASSERT_OK(ScheduleTask(100, queue1.get()));
env.AdvanceByMicroseconds(30);
TF_ASSERT_OK(ScheduleTask(100, queue1.get()));
env.AdvanceByMicroseconds(10);
TF_ASSERT_OK(ScheduleTask(100, queue1.get()));
env.AdvanceByMicroseconds(10);
TF_ASSERT_OK(ScheduleTask(50, queue2.get()));
env.AdvanceByMicroseconds(45);
TF_ASSERT_OK(ScheduleTask(900, queue1.get()));
finish_processing.Notify();
start_teardown.Notify();
}
stop_teardown.Notify();
}
TEST(AdaptiveSharedBatchSchedulerTest, DeleteQueue) {
AdaptiveSharedBatchScheduler<FakeTask>::Options options;
options.initial_in_flight_batches_limit = 1;
options.num_batch_threads = 1;
options.batches_to_average_over = 1000;
mutex mu;
int processed_batches = 0;
Notification finish_processing;
auto queue_callback = [&mu, &processed_batches, &finish_processing](
std::unique_ptr<Batch<FakeTask>> batch) {
ASSERT_TRUE(batch->IsClosed());
EXPECT_GT(batch->num_tasks(), 0);
finish_processing.WaitForNotification();
mu.lock();
processed_batches++;
mu.unlock();
};
auto processed_checker = gtl::MakeCleanup([&mu, &processed_batches] {
mutex_lock l(mu);
EXPECT_EQ(processed_batches, 2);
});
std::shared_ptr<AdaptiveSharedBatchScheduler<FakeTask>> scheduler;
TF_ASSERT_OK(
AdaptiveSharedBatchScheduler<FakeTask>::Create(options, &scheduler));
std::unique_ptr<BatchScheduler<FakeTask>> queue;
TF_ASSERT_OK(scheduler->AddQueue({}, queue_callback, &queue));
  TF_ASSERT_OK(ScheduleTask(100, queue.get()));
  // Wait for the first batch to start processing before enqueueing a second
  // one that will still be pending when `queue` is destroyed.
  while (queue->NumEnqueuedTasks() > 0) {
  }
TF_ASSERT_OK(ScheduleTask(100, queue.get()));
Env::Default()->SchedClosureAfter(
1000, [&finish_processing] { finish_processing.Notify(); });
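  // Destroying `queue` at the end of this scope blocks until the remaining
  // batch is processed (after the deferred notification above fires); the
  // cleanup object then verifies that both batches ran.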
}
TEST(AdaptiveSharedBatchSchedulerTest, QueueCapacityInfo) {
AdaptiveSharedBatchScheduler<FakeTask>::Options options;
options.initial_in_flight_batches_limit = 1;
options.batches_to_average_over = 1000;
mutex mu;
int processed_batches = 0;
Notification finish_processing;
auto queue_callback = [&mu, &processed_batches, &finish_processing](
std::unique_ptr<Batch<FakeTask>> batch) {
ASSERT_TRUE(batch->IsClosed());
EXPECT_GT(batch->num_tasks(), 0);
mu.lock();
int batch_num = ++processed_batches;
mu.unlock();
if (batch_num == 1) {
finish_processing.WaitForNotification();
}
};
std::shared_ptr<AdaptiveSharedBatchScheduler<FakeTask>> scheduler;
TF_ASSERT_OK(
AdaptiveSharedBatchScheduler<FakeTask>::Create(options, &scheduler));
std::unique_ptr<BatchScheduler<FakeTask>> queue;
TF_ASSERT_OK(scheduler->AddQueue({}, queue_callback, &queue));
  TF_ASSERT_OK(ScheduleTask(100, queue.get()));
  // Wait until the first batch starts processing; its callback blocks.
  while (queue->NumEnqueuedTasks() > 0) {
  }
TF_ASSERT_OK(ScheduleTask(100, queue.get()));
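  // With the default queue options (max_batch_size = 1000 and, presumably,
  // 10 max enqueued batches), one open batch holding 100 leaves capacity for
  // 9 empty batches plus 900 free in the open one.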
EXPECT_EQ(queue->NumEnqueuedTasks(), 1);
EXPECT_EQ(queue->SchedulingCapacity(), 9 * 1000 + 900);
TF_ASSERT_OK(ScheduleTask(100, queue.get()));
TF_ASSERT_OK(ScheduleTask(200, queue.get()));
EXPECT_EQ(queue->NumEnqueuedTasks(), 3);
EXPECT_EQ(queue->SchedulingCapacity(), 9 * 1000 + 600);
TF_ASSERT_OK(ScheduleTask(700, queue.get()));
EXPECT_EQ(queue->NumEnqueuedTasks(), 1);
EXPECT_EQ(queue->SchedulingCapacity(), 9 * 1000 + 300);
finish_processing.Notify();
}
TEST(AdaptiveSharedBatchSchedulerTest, FullBatches) {
std::shared_ptr<AdaptiveSharedBatchScheduler<FakeTask>> scheduler;
TF_ASSERT_OK(AdaptiveSharedBatchScheduler<FakeTask>::Create({}, &scheduler));
auto queue_callback = [](std::unique_ptr<Batch<FakeTask>> batch) {
ASSERT_TRUE(batch->IsClosed());
};
AdaptiveSharedBatchScheduler<FakeTask>::QueueOptions queue_options;
queue_options.max_batch_size = 100;
queue_options.batch_timeout_micros = 1000000000000;
std::unique_ptr<BatchScheduler<FakeTask>> queue;
TF_ASSERT_OK(scheduler->AddQueue(queue_options, queue_callback, &queue));
TF_ASSERT_OK(ScheduleTask(100, queue.get()));
}
TEST(AdaptiveSharedBatchSchedulerTest, TruncateBatches) {
mutex mu;
int processed_batches = 0;
auto queue_callback =
[&mu, &processed_batches](std::unique_ptr<Batch<FakeTask>> batch) {
ASSERT_TRUE(batch->IsClosed());
mutex_lock l(mu);
++processed_batches;
};
std::shared_ptr<AdaptiveSharedBatchScheduler<FakeTask>> scheduler;
TF_ASSERT_OK(AdaptiveSharedBatchScheduler<FakeTask>::Create({}, &scheduler));
std::unique_ptr<BatchScheduler<FakeTask>> queue;
AdaptiveSharedBatchScheduler<FakeTask>::QueueOptions queue_options;
queue_options.max_batch_size = 100;
queue_options.batch_timeout_micros = 1000000;
queue_options.split_input_task_func =
[](std::unique_ptr<FakeTask>* input_task, int first_size, int max_size,
std::vector<std::unique_ptr<FakeTask>>* output_tasks) {
EXPECT_EQ(first_size, 70);
output_tasks->push_back(std::move(*input_task));
int remaining_size = output_tasks->back()->size() - first_size;
output_tasks->back()->set_size(first_size);
while (remaining_size > 0) {
int task_size = std::min(remaining_size, max_size);
output_tasks->emplace_back(new FakeTask(task_size));
remaining_size -= task_size;
}
return absl::OkStatus();
};
TF_ASSERT_OK(scheduler->AddQueue(queue_options, queue_callback, &queue));
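  // The 30-size task leaves room for 70 in the open batch, so the 350-size
  // task is split into 70 + 100 + 100 + 80, yielding 4 batches in total.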
TF_ASSERT_OK(ScheduleTask(30, queue.get()));
TF_ASSERT_OK(ScheduleTask(350, queue.get()));
  // Spin until all 4 batches produced by the split have been processed.
  while (true) {
    mutex_lock l(mu);
    if (processed_batches == 4) break;
  }
}
TEST(AdaptiveSharedBatchSchedulerTest, MaxTasksPerBatch) {
mutex mu;
int processed_batches = 0;
auto queue_callback =
[&mu, &processed_batches](std::unique_ptr<Batch<FakeTask>> batch) {
ASSERT_TRUE(batch->IsClosed());
mutex_lock l(mu);
++processed_batches;
};
std::shared_ptr<AdaptiveSharedBatchScheduler<FakeTask>> scheduler;
TF_ASSERT_OK(AdaptiveSharedBatchScheduler<FakeTask>::Create({}, &scheduler));
std::unique_ptr<BatchScheduler<FakeTask>> queue;
AdaptiveSharedBatchScheduler<FakeTask>::QueueOptions queue_options;
queue_options.max_batch_size = 100;
queue_options.batch_timeout_micros = 1000000;
queue_options.max_tasks_per_batch = 2;
TF_ASSERT_OK(scheduler->AddQueue(queue_options, queue_callback, &queue));
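  // max_tasks_per_batch = 2 closes a batch after every second task, so the
  // six 10-size tasks below produce 3 batches.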
TF_ASSERT_OK(ScheduleTask(10, queue.get()));
EXPECT_EQ(queue->NumEnqueuedTasks(), 1);
TF_ASSERT_OK(ScheduleTask(10, queue.get()));
EXPECT_EQ(queue->NumEnqueuedTasks(), 0);
TF_ASSERT_OK(ScheduleTask(10, queue.get()));
TF_ASSERT_OK(ScheduleTask(10, queue.get()));
TF_ASSERT_OK(ScheduleTask(10, queue.get()));
TF_ASSERT_OK(ScheduleTask(10, queue.get()));
  // Spin until all 3 batches have been processed.
  while (true) {
    mutex_lock l(mu);
    if (processed_batches == 3) break;
  }
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/batching_util/adaptive_shared_batch_scheduler.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/batching_util/adaptive_shared_batch_scheduler_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
a69130c5-914d-471f-85d3-3c9c8caa6645 | cpp | google/tensorstore | open_mode | tensorstore/open_mode.cc | tensorstore/open_mode_test.cc | #include "tensorstore/open_mode.h"
#include <ostream>
#include "absl/status/status.h"
namespace tensorstore {
std::string_view to_string(ReadWriteMode mode) {
switch (mode) {
case ReadWriteMode::dynamic:
return "dynamic";
case ReadWriteMode::read:
return "read";
case ReadWriteMode::write:
return "write";
case ReadWriteMode::read_write:
return "read_write";
default:
return "<unknown>";
}
}
std::ostream& operator<<(std::ostream& os, ReadWriteMode mode) {
return os << to_string(mode);
}
std::ostream& operator<<(std::ostream& os, OpenMode mode) {
const char* sep = "";
constexpr const char* kSep = "|";
if (!!(mode & OpenMode::open)) {
os << "open";
sep = kSep;
}
if (!!(mode & OpenMode::create)) {
os << sep << "create";
sep = kSep;
}
if (!!(mode & OpenMode::delete_existing)) {
os << sep << "delete_existing";
sep = kSep;
}
if (!!(mode & OpenMode::assume_metadata)) {
os << sep << "assume_metadata";
sep = kSep;
}
return os;
}
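// Illustrative: streaming (OpenMode::open | OpenMode::create) yields
// "open|create".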
namespace internal {
absl::Status ValidateSupportsRead(ReadWriteMode mode) {
return !(mode & ReadWriteMode::read)
? absl::InvalidArgumentError("Source does not support reading.")
: absl::Status();
}
absl::Status ValidateSupportsWrite(ReadWriteMode mode) {
return !(mode & ReadWriteMode::write)
? absl::InvalidArgumentError(
"Destination does not support writing.")
: absl::Status();
}
absl::Status ValidateSupportsModes(ReadWriteMode mode,
ReadWriteMode required_modes) {
if ((mode & required_modes) != required_modes) {
if (!!(required_modes & ReadWriteMode::read) &&
!(mode & ReadWriteMode::read)) {
return absl::InvalidArgumentError("Read mode not supported");
}
if (!!(required_modes & ReadWriteMode::write) &&
!(mode & ReadWriteMode::write)) {
return absl::InvalidArgumentError("Write mode not supported");
}
}
return absl::OkStatus();
}
}
} | #include "tensorstore/open_mode.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorstore/util/str_cat.h"
namespace {
using ::tensorstore::OpenMode;
using ::tensorstore::ReadWriteMode;
using ::tensorstore::StrCat;
static_assert(ReadWriteMode::read_write ==
(ReadWriteMode::read | ReadWriteMode::write));
static_assert((ReadWriteMode::read_write & ReadWriteMode::read) ==
ReadWriteMode::read);
static_assert(!ReadWriteMode::dynamic);
static_assert(tensorstore::internal::StaticReadWriteMask(ReadWriteMode::read) ==
ReadWriteMode::read);
static_assert(tensorstore::internal::StaticReadWriteMask(
ReadWriteMode::write) == ReadWriteMode::write);
static_assert(tensorstore::internal::StaticReadWriteMask(
ReadWriteMode::dynamic) == ReadWriteMode::read_write);
static_assert(tensorstore::internal::IsModePossible(ReadWriteMode::read,
ReadWriteMode::dynamic));
static_assert(tensorstore::internal::IsModePossible(ReadWriteMode::read,
ReadWriteMode::read));
static_assert(tensorstore::internal::IsModePossible(ReadWriteMode::write,
ReadWriteMode::dynamic));
static_assert(tensorstore::internal::IsModePossible(ReadWriteMode::write,
ReadWriteMode::write));
static_assert(tensorstore::internal::IsModePossible(ReadWriteMode::read_write,
ReadWriteMode::dynamic));
static_assert(tensorstore::internal::IsModePossible(ReadWriteMode::read_write,
ReadWriteMode::read_write));
static_assert(!tensorstore::internal::IsModePossible(ReadWriteMode::dynamic,
ReadWriteMode::dynamic));
static_assert(!tensorstore::internal::IsModePossible(
ReadWriteMode::read, ReadWriteMode::read_write));
static_assert(!tensorstore::internal::IsModePossible(ReadWriteMode::read,
ReadWriteMode::write));
static_assert(!tensorstore::internal::IsModePossible(ReadWriteMode::write,
ReadWriteMode::read));
static_assert(!tensorstore::internal::IsModePossible(
ReadWriteMode::write, ReadWriteMode::read_write));
static_assert(!tensorstore::internal::IsModePossible(ReadWriteMode::read_write,
ReadWriteMode::read));
static_assert(!tensorstore::internal::IsModePossible(ReadWriteMode::read_write,
ReadWriteMode::write));
TEST(ReadWriteModeTest, PrintToOstream) {
EXPECT_EQ("dynamic", StrCat(ReadWriteMode::dynamic));
EXPECT_EQ("read", StrCat(ReadWriteMode::read));
EXPECT_EQ("write", StrCat(ReadWriteMode::write));
EXPECT_EQ("read_write", StrCat(ReadWriteMode::read_write));
EXPECT_EQ("<unknown>", StrCat(static_cast<ReadWriteMode>(10)));
}
TEST(OpenTest, PrintToOstream) {
EXPECT_EQ("", StrCat(OpenMode{}));
EXPECT_EQ("open", StrCat(OpenMode::open));
EXPECT_EQ("create", StrCat(OpenMode::create));
EXPECT_EQ("open|create", StrCat(OpenMode::open | OpenMode::create));
EXPECT_EQ("open|assume_metadata",
StrCat(OpenMode::open | OpenMode::assume_metadata));
EXPECT_EQ("create|delete_existing",
StrCat(OpenMode::create | OpenMode::delete_existing));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/open_mode.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/open_mode_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
e27221c5-0b3b-45d4-8e65-4e62b0ab5c84 | cpp | tensorflow/tensorflow | fuzzy_matcher | third_party/xla/xla/service/fuzzy_matcher.h | third_party/xla/xla/service/fuzzy_matcher_test.cc | #ifndef XLA_SERVICE_FUZZY_MATCHER_H_
#define XLA_SERVICE_FUZZY_MATCHER_H_
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/pattern_matcher.h"
namespace xla {
namespace fm {
template <typename Pattern>
auto OptConvert(Pattern pattern) {
auto shared = match::SharedSubpattern(pattern);
return match::AnyOf<HloInstruction>(match::Convert(shared), shared);
}
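// The matchers generated below therefore tolerate an optional convert, e.g.
// fm::Divide(x, y) matches both divide(x, y) and convert(divide(x, y)).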
#define XLA_FUZZY_UNOP_PATTERN(NAME) \
template <typename HloInstructionType> \
inline auto NAME(HloInstructionType** matched_inst) { \
return OptConvert(match::Op(matched_inst).WithOpcode(HloOpcode::k##NAME)); \
} \
\
template <typename Arg> \
inline auto NAME(Arg&& arg) { \
return OptConvert(match::Op() \
.WithOpcode(HloOpcode::k##NAME) \
.WithOperand(0, std::forward<Arg>(arg))); \
} \
\
template <typename HloInstructionType, typename Arg> \
inline auto NAME(HloInstructionType** matched_inst, Arg&& arg) { \
return OptConvert(match::Op(matched_inst) \
.WithOpcode(HloOpcode::k##NAME) \
.WithOperand(0, std::forward<Arg>(arg))); \
}
XLA_FUZZY_UNOP_PATTERN(Tanh)
XLA_FUZZY_UNOP_PATTERN(Exp)
XLA_FUZZY_UNOP_PATTERN(Broadcast)
#undef XLA_FUZZY_UNOP_PATTERN
#define XLA_FUZZY_BINOP_PATTERN(NAME) \
template <typename HloInstructionType, typename Lhs, typename Rhs> \
inline auto NAME(HloInstructionType** matched_inst, Lhs&& lhs, Rhs&& rhs) { \
return OptConvert(match::Op(matched_inst) \
.WithOpcode(HloOpcode::k##NAME) \
.WithOperand(0, std::forward<Lhs>(lhs)) \
.WithOperand(1, std::forward<Rhs>(rhs))); \
} \
template <typename Lhs, typename Rhs> \
inline auto NAME(Lhs&& lhs, Rhs&& rhs) { \
return OptConvert(match::Op() \
.WithOpcode(HloOpcode::k##NAME) \
.WithOperand(0, std::forward<Lhs>(lhs)) \
.WithOperand(1, std::forward<Rhs>(rhs))); \
}
XLA_FUZZY_BINOP_PATTERN(Dot)
XLA_FUZZY_BINOP_PATTERN(Divide)
XLA_FUZZY_BINOP_PATTERN(Subtract)
XLA_FUZZY_BINOP_PATTERN(Multiply)
XLA_FUZZY_BINOP_PATTERN(Reduce)
#undef XLA_FUZZY_BINOP_PATTERN
#define XLA_FUZZY_TERNOP_PATTERN(NAME) \
template <typename Arg0, typename Arg1, typename Arg2> \
inline auto NAME(Arg0&& arg0, Arg1&& arg1, Arg2&& arg2) { \
return OptConvert(match::Op() \
.WithOpcode(HloOpcode::k##NAME) \
.WithOperand(0, std::forward<Arg0>(arg0)) \
.WithOperand(1, std::forward<Arg1>(arg1)) \
.WithOperand(2, std::forward<Arg2>(arg2))); \
} \
\
template <typename HloInstructionType, typename Arg0, typename Arg1, \
typename Arg2> \
inline auto NAME(HloInstructionType** matched_inst, Arg0&& arg0, \
Arg1&& arg1, Arg2&& arg2) { \
return OptConvert(match::Op(matched_inst) \
.WithOpcode(HloOpcode::k##NAME) \
.WithOperand(0, std::forward<Arg0>(arg0)) \
.WithOperand(1, std::forward<Arg1>(arg1)) \
.WithOperand(2, std::forward<Arg2>(arg2))); \
}
XLA_FUZZY_TERNOP_PATTERN(Select);
#undef XLA_FUZZY_TERNOP_PATTERN
}
}
#endif | #include "xla/service/fuzzy_matcher.h"
#include <gtest/gtest.h>
#include "xla/service/pattern_matcher.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace xla {
namespace {
using FuzzyMatcherTest = HloTestBase;
TEST_F(FuzzyMatcherTest, IgnoreConvert) {
constexpr char kModuleStr[] = R"(
HloModule test_module
ENTRY test {
x = f16[8,3] parameter(0)
y = f16[8,3] parameter(1)
div = f16[8,3] divide(x, y)
ROOT convert = f32[8,3] convert(div)
})";
TF_ASSERT_OK_AND_ASSIGN(auto hlo_module,
ParseAndReturnVerifiedModule(kModuleStr));
auto* root = hlo_module->entry_computation()->root_instruction();
EXPECT_TRUE(
Match(root, fm::Divide(match::Parameter(0), match::Parameter(1))));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/fuzzy_matcher.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/fuzzy_matcher_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
d7178847-5f31-485b-8ced-289d86dd036a | cpp | google/tensorstore | absl_time | tensorstore/serialization/absl_time.cc | tensorstore/internal/json_binding/absl_time_test.cc | #include "tensorstore/serialization/absl_time.h"
#include <cstdint>
#include <limits>
#include "absl/time/time.h"
#include "tensorstore/serialization/serialization.h"
namespace tensorstore {
namespace serialization {
bool Serializer<absl::Duration>::Encode(EncodeSink& sink,
const absl::Duration& value) {
int64_t rep_hi = absl::time_internal::GetRepHi(value);
uint32_t rep_lo = absl::time_internal::GetRepLo(value);
return serialization::EncodeTuple(sink, rep_hi, rep_lo);
}
bool Serializer<absl::Duration>::Decode(DecodeSource& source,
absl::Duration& value) {
int64_t rep_hi;
uint32_t rep_lo;
using absl::time_internal::kTicksPerSecond;
if (!serialization::DecodeTuple(source, rep_hi, rep_lo)) return false;
if (rep_lo >= kTicksPerSecond &&
(rep_lo != std::numeric_limits<uint32_t>::max() ||
(rep_hi != std::numeric_limits<int64_t>::min() &&
rep_hi != std::numeric_limits<int64_t>::max()))) {
source.Fail(serialization::DecodeError("Invalid time representation"));
return false;
}
value = absl::time_internal::MakeDuration(rep_hi, rep_lo);
return true;
}
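// rep_lo normally lies in [0, kTicksPerSecond); the only exception admitted
// above is the +/- infinite-duration encoding (rep_hi at the int64 limits
// with rep_lo == uint32 max).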
bool Serializer<absl::Time>::Encode(EncodeSink& sink, const absl::Time& value) {
return serialization::Encode(sink, value - absl::UnixEpoch());
}
bool Serializer<absl::Time>::Decode(DecodeSource& source, absl::Time& value) {
absl::Duration d;
if (!serialization::Decode(source, d)) return false;
value = absl::UnixEpoch() + d;
return true;
}
}
} | #include "tensorstore/internal/json_binding/absl_time.h"
#include <memory>
#include <utility>
#include <gtest/gtest.h>
#include "absl/time/civil_time.h"
#include "absl/time/time.h"
#include <nlohmann/json_fwd.hpp>
#include "tensorstore/internal/json/json.h"
#include "tensorstore/internal/json_binding/bindable.h"
#include "tensorstore/internal/json_binding/gtest.h"
#include "tensorstore/internal/json_gtest.h"
#include "tensorstore/json_serialization_options_base.h"
namespace jb = tensorstore::internal_json_binding;
namespace {
TEST(AbslTimeJsonBinder, Roundtrips) {
const absl::TimeZone utc = absl::UTCTimeZone();
const absl::CivilSecond cs(2015, 2, 3, 4, 5, 6);
tensorstore::TestJsonBinderRoundTrip<absl::Time>(
{
{absl::FromCivil(cs, utc), "2015-02-03T04:05:06+00:00"},
{absl::FromCivil(absl::CivilMinute(cs), utc),
"2015-02-03T04:05:00+00:00"},
{absl::FromCivil(absl::CivilHour(cs), utc),
"2015-02-03T04:00:00+00:00"},
{absl::FromCivil(absl::CivilDay(cs), utc),
"2015-02-03T00:00:00+00:00"},
{absl::FromCivil(absl::CivilMonth(cs), utc),
"2015-02-01T00:00:00+00:00"},
{absl::FromCivil(absl::CivilYear(cs), utc),
"2015-01-01T00:00:00+00:00"},
},
jb::Rfc3339TimeBinder);
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/serialization/absl_time.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/json_binding/absl_time_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
4eeac038-fc93-4cc4-828c-47132cc75b65 | cpp | tensorflow/tensorflow | collective_order | tensorflow/core/graph/collective_order.cc | tensorflow/core/graph/collective_order_test.cc | #include "tensorflow/core/graph/collective_order.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "tensorflow/core/graph/algorithm.h"
namespace tensorflow {
namespace {
Status DiscoverDataDependencies(
const Graph* graph, std::vector<Node*>* collective_nodes,
std::vector<int32>* instance_keys,
absl::flat_hash_map<Node*, absl::flat_hash_set<int32>>* data_dependencies) {
Status s;
auto node_leave = [collective_nodes, instance_keys, data_dependencies,
&s](Node* node) {
int32_t instance_key;
bool enter_node =
node->IsCollective() && node->type_string() == "CollectiveReduce";
if (enter_node) {
Status get_attr_status =
GetNodeAttr(node->attrs(), "instance_key", &instance_key);
s.Update(get_attr_status);
collective_nodes->push_back(node);
instance_keys->push_back(instance_key);
VLOG(2) << "collective node " << node->DebugString();
}
data_dependencies->reserve(data_dependencies->size() + 1 +
node->out_edges().size());
const auto& node_deps = (*data_dependencies)[node];
for (const Edge* out_edge : node->out_edges()) {
auto& child_deps = (*data_dependencies)[out_edge->dst()];
child_deps.insert(node_deps.begin(), node_deps.end());
if (enter_node && s.ok()) {
child_deps.insert(instance_key);
}
}
};
ReverseDFS(*graph, nullptr, node_leave);
return s;
}
Status CreateControlDependencies(
const std::vector<Node*>& collective_nodes,
const std::vector<int32>& instance_keys,
absl::flat_hash_map<Node*, absl::flat_hash_set<int32>>* data_dependencies,
absl::flat_hash_map<Node*, absl::flat_hash_set<Node*>>* dependency_edges) {
absl::flat_hash_map<Node*, absl::flat_hash_set<Node*>> all_paths;
for (int i = 0; i < collective_nodes.size() - 1; i++) {
if (!collective_nodes[i]->IsCollective() ||
collective_nodes[i]->type_string() != "CollectiveReduce") {
return errors::Internal("Unexpected node ",
collective_nodes[i]->DebugString());
}
const auto& deps_i = (*data_dependencies)[collective_nodes[i]];
for (int j = i + 1; j < collective_nodes.size(); j++) {
if (collective_nodes[i]->requested_device() !=
collective_nodes[j]->requested_device()) {
continue;
}
if (instance_keys[i] == instance_keys[j]) {
return errors::Internal("Unexpected same instance_key ",
instance_keys[i],
" on 2 nodes with the same device ",
collective_nodes[i]->requested_device());
}
const auto& deps_j = (*data_dependencies)[collective_nodes[j]];
if (deps_i.find(instance_keys[j]) == deps_i.end() &&
deps_j.find(instance_keys[i]) == deps_j.end()) {
int src_idx = instance_keys[i] > instance_keys[j] ? i : j;
int dst_idx = instance_keys[i] > instance_keys[j] ? j : i;
Node* src_node = collective_nodes[src_idx];
Node* dst_node = collective_nodes[dst_idx];
VLOG(1) << "Adding control dependency from node " << src_node->name()
<< " instance " << instance_keys[src_idx] << " to node "
<< dst_node->name() << " instance " << instance_keys[dst_idx];
(*dependency_edges)[src_node].insert(dst_node);
auto& src_paths = all_paths[src_node];
src_paths.insert(dst_node);
for (Node* downstream_node : all_paths[dst_node]) {
src_paths.insert(downstream_node);
}
}
}
}
for (int i = 0; i < collective_nodes.size(); ++i) {
Node* node = collective_nodes[i];
auto& neighbor_set = (*dependency_edges)[node];
std::vector<Node*> neighbor_list(neighbor_set.begin(), neighbor_set.end());
for (int j = 0; j < neighbor_list.size(); ++j) {
Node* n1 = neighbor_list[j];
if (n1 == nullptr) continue;
auto& n1_paths = all_paths[n1];
for (int k = 0; k < neighbor_list.size(); ++k) {
Node* n2 = neighbor_list[k];
if (j == k || n2 == nullptr) continue;
if (n1_paths.find(n2) != n1_paths.end()) {
neighbor_set.erase(n2);
neighbor_list[k] = nullptr;
}
}
}
}
return absl::OkStatus();
}
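// Of two independent collectives on the same device, the one with the larger
// instance_key is ordered first; control edges already implied transitively
// are pruned by the second loop above.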
Status InsertControlDependencies(
Graph* graph, GraphCollectiveOrder order_type,
const absl::flat_hash_map<Node*, absl::flat_hash_set<Node*>>&
dependency_edges) {
if (order_type == GraphCollectiveOrder::kEdges) {
for (const auto& pair : dependency_edges) {
Node* src_node = pair.first;
for (Node* dst_node : pair.second) {
graph->AddControlEdge(src_node, dst_node);
}
}
} else if (order_type == GraphCollectiveOrder::kAttrs) {
absl::flat_hash_map<Node*, absl::flat_hash_set<int32>> wait_for;
for (const auto& pair : dependency_edges) {
int32_t src_instance;
TF_RETURN_IF_ERROR(
GetNodeAttr(pair.first->attrs(), "instance_key", &src_instance));
for (Node* dst_node : pair.second) {
wait_for[dst_node].insert(src_instance);
}
}
for (const auto& pair : wait_for) {
std::vector<int32> wait_for_list(pair.second.begin(), pair.second.end());
pair.first->ClearAttr("wait_for");
pair.first->AddAttr("wait_for", wait_for_list);
}
} else {
return errors::Internal("Unexpected GraphCollectiveOrder type ",
static_cast<int>(order_type));
}
return absl::OkStatus();
}
}
Status OrderCollectives(Graph* graph, GraphCollectiveOrder order_type) {
std::vector<Node*> collective_nodes;
std::vector<int32> instance_keys;
absl::flat_hash_map<Node*, absl::flat_hash_set<int32>> data_dependencies;
TF_RETURN_IF_ERROR(DiscoverDataDependencies(
graph, &collective_nodes, &instance_keys, &data_dependencies));
if (collective_nodes.empty()) return absl::OkStatus();
absl::flat_hash_map<Node*, absl::flat_hash_set<Node*>> dependency_edges;
TF_RETURN_IF_ERROR(CreateControlDependencies(
collective_nodes, instance_keys, &data_dependencies, &dependency_edges));
return InsertControlDependencies(graph, order_type, dependency_edges);
}
} | #include "tensorflow/core/graph/collective_order.h"
#include <gmock/gmock.h>
#include "tensorflow/core/common_runtime/graph_def_builder_util.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/graph/graph_def_builder.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
using ::testing::UnorderedElementsAreArray;
REGISTER_OP("TestParams").Output("o: float");
void VerifyGraph(const Graph& graph,
const std::vector<string>& expected_collective_nodes,
const std::vector<std::pair<string, string>>&
expected_collective_control_edges) {
std::vector<string> actual_collective_nodes;
std::vector<std::pair<string, string>> actual_collective_control_edges;
for (const Node* src : graph.nodes()) {
if (!src->IsCollective()) {
continue;
}
actual_collective_nodes.push_back(src->name());
for (const Edge* edge : src->out_edges()) {
VLOG(2) << "collective edge " << edge->src()->name() << " -> "
<< edge->dst()->name();
if (!edge->IsControlEdge() || edge->dst()->name() == "_SINK") {
continue;
}
actual_collective_control_edges.emplace_back(src->name(),
edge->dst()->name());
}
}
EXPECT_THAT(actual_collective_nodes,
UnorderedElementsAreArray(expected_collective_nodes));
EXPECT_THAT(actual_collective_control_edges,
UnorderedElementsAreArray(expected_collective_control_edges));
}
void VerifyAttrs(
const Graph& graph,
const std::unordered_map<string, std::vector<int32>> wait_for_map) {
for (const Node* node : graph.nodes()) {
if (node->IsCollective() ||
wait_for_map.find(node->name()) == wait_for_map.end()) {
continue;
}
std::vector<int32> wait_for_actual;
TF_EXPECT_OK(GetNodeAttr(node->attrs(), "wait_for", &wait_for_actual));
auto wait_for_expected = wait_for_map.at(node->name());
EXPECT_THAT(wait_for_actual, UnorderedElementsAreArray(wait_for_expected));
}
}
Node* CollectiveReduceNode(GraphDefBuilder* builder, Node* input,
const string& name, const string& device,
int instance_key) {
Node* collective_node =
ops::UnaryOp("CollectiveReduce", input,
builder->opts()
.WithName(name)
.WithDevice(device)
.WithAttr("T", DT_FLOAT)
.WithAttr("group_size", 2)
.WithAttr("group_key", 1)
.WithAttr("instance_key", instance_key)
.WithAttr("merge_op", "Add")
.WithAttr("final_op", "Id")
.WithAttr("subdiv_offsets", {1}));
return collective_node;
}
std::unique_ptr<Graph> InitGraph() {
GraphDefBuilder builder(GraphDefBuilder::kFailImmediately);
const string dev0 = "/job:localhost/replica:0/task:0/device:CPU:0";
const string dev1 = "/job:localhost/replica:0/task:0/device:CPU:1";
Node* a = ops::SourceOp("TestParams",
builder.opts().WithName("a").WithDevice(dev0));
Node* b = ops::SourceOp("TestParams",
builder.opts().WithName("b").WithDevice(dev1));
Node* c1_0 = CollectiveReduceNode(&builder, a, "c1_0", dev0, 1);
Node* c1_1 = CollectiveReduceNode(&builder, b, "c1_1", dev1, 1);
Node* id0 = ops::UnaryOp(
"Identity", c1_0,
builder.opts().WithName("id0").WithDevice(dev0).WithAttr("T", DT_FLOAT));
Node* id1 = ops::UnaryOp(
"Identity", c1_1,
builder.opts().WithName("id1").WithDevice(dev1).WithAttr("T", DT_FLOAT));
CollectiveReduceNode(&builder, id0, "c2_0", dev0, 2);
CollectiveReduceNode(&builder, id1, "c2_1", dev1, 2);
CollectiveReduceNode(&builder, id0, "c3_0", dev0, 3);
CollectiveReduceNode(&builder, id1, "c3_1", dev1, 3);
std::unique_ptr<Graph> graph = absl::make_unique<Graph>(OpRegistry::Global());
Status s = GraphDefBuilderToGraph(builder, graph.get());
if (!s.ok()) {
LOG(FATAL) << "Error building graph " << s;
}
return graph;
}
TEST(CollectiveOrderTest, SimpleOrder) {
std::unique_ptr<Graph> graph = InitGraph();
TF_EXPECT_OK(OrderCollectives(graph.get(), GraphCollectiveOrder::kEdges));
VerifyGraph(*graph, {"c1_0", "c1_1", "c2_0", "c2_1", "c3_0", "c3_1"},
{{"c3_0", "c2_0"}, {"c3_1", "c2_1"}});
}
TEST(CollectiveOrderTest, SimpleOrderAttr) {
std::unique_ptr<Graph> graph = InitGraph();
TF_EXPECT_OK(OrderCollectives(graph.get(), GraphCollectiveOrder::kAttrs));
VerifyAttrs(*graph, {{"c2_0", {3}}, {"c2_1", {3}}});
}
std::unique_ptr<Graph> InitGraph2() {
GraphDefBuilder builder(GraphDefBuilder::kFailImmediately);
const string dev0 = "/job:localhost/replica:0/task:0/device:CPU:0";
Node* a = ops::SourceOp("TestParams",
builder.opts().WithName("a").WithDevice(dev0));
Node* c1 = CollectiveReduceNode(&builder, a, "c1", dev0, 1);
CollectiveReduceNode(&builder, c1, "c4", dev0, 4);
Node* id = ops::UnaryOp(
"Identity", c1,
builder.opts().WithName("id").WithDevice(dev0).WithAttr("T", DT_FLOAT));
CollectiveReduceNode(&builder, id, "c2", dev0, 2);
CollectiveReduceNode(&builder, id, "c3", dev0, 3);
std::unique_ptr<Graph> graph = absl::make_unique<Graph>(OpRegistry::Global());
Status s = GraphDefBuilderToGraph(builder, graph.get());
if (!s.ok()) {
LOG(FATAL) << "Error building graph " << s;
}
return graph;
}
TEST(CollectiveOrderTest, SimpleOrder2) {
std::unique_ptr<Graph> graph = InitGraph2();
TF_EXPECT_OK(OrderCollectives(graph.get(), GraphCollectiveOrder::kEdges));
VerifyGraph(*graph, {"c1", "c2", "c3", "c4"}, {{"c4", "c3"}, {"c3", "c2"}});
}
std::unique_ptr<Graph> InitGraphForPruning() {
GraphDefBuilder builder(GraphDefBuilder::kFailImmediately);
const string dev0 = "/job:localhost/replica:0/task:0/device:CPU:0";
Node* w = ops::SourceOp("TestParams",
builder.opts().WithName("w").WithDevice(dev0));
Node* x = ops::SourceOp("TestParams",
builder.opts().WithName("x").WithDevice(dev0));
Node* y = ops::SourceOp("TestParams",
builder.opts().WithName("y").WithDevice(dev0));
Node* z = ops::SourceOp("TestParams",
builder.opts().WithName("z").WithDevice(dev0));
CollectiveReduceNode(&builder, w, "c1", dev0, 1);
CollectiveReduceNode(&builder, x, "c2", dev0, 2);
CollectiveReduceNode(&builder, y, "c3", dev0, 3);
CollectiveReduceNode(&builder, z, "c4", dev0, 4);
std::unique_ptr<Graph> graph = absl::make_unique<Graph>(OpRegistry::Global());
Status s = GraphDefBuilderToGraph(builder, graph.get());
if (!s.ok()) {
LOG(FATAL) << "Error building graph " << s;
}
return graph;
}
TEST(CollectiveOrderTest, Pruning) {
std::unique_ptr<Graph> graph = InitGraphForPruning();
TF_EXPECT_OK(OrderCollectives(graph.get(), GraphCollectiveOrder::kAttrs));
VerifyAttrs(*graph, {{"c3", {4}}, {"c2", {3}}, {"c1", {2}}});
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/graph/collective_order.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/graph/collective_order_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
4c6920cc-3aba-4b4e-867d-febd43cab405 | cpp | tensorflow/tensorflow | llvm_compiler | third_party/xla/xla/service/llvm_compiler.cc | third_party/xla/xla/tests/llvm_compiler_test.cc | #include "xla/service/llvm_compiler.h"
#include <memory>
#include <utility>
#include <vector>
#include "absl/status/statusor.h"
#include "absl/strings/str_format.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_module_group.h"
#include "xla/service/executable.h"
#include "xla/service/stream_pool.h"
#include "tsl/platform/denormal.h"
#include "tsl/platform/statusor.h"
#include "tsl/profiler/lib/scoped_annotation.h"
#ifdef __FAST_MATH__
#error "Don't build XLA with -ffast-math"
#endif
namespace xla {
absl::StatusOr<std::vector<std::unique_ptr<Executable>>> LLVMCompiler::Compile(
std::unique_ptr<HloModuleGroup> module_group,
std::vector<std::vector<se::StreamExecutor*>> stream_execs,
const CompileOptions& options) {
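  // Assumption: denormal flushing is disabled for the duration of compilation
  // so host-side constant folding is not affected by TensorFlow's per-thread
  // floating-point environment.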
tsl::port::ScopedDontFlushDenormal dont_flush_denormals;
std::vector<std::unique_ptr<Executable>> result;
std::vector<std::unique_ptr<HloModule>> modules =
module_group->ConsumeModules();
for (size_t i = 0; i < modules.size(); i++) {
tsl::profiler::ScopedAnnotation annotation{[&] {
return absl::StrFormat("XlaCompile:#module=%s,program_id=%d#",
modules[i]->name(), modules[i]->unique_id());
}};
TF_ASSIGN_OR_RETURN(modules[i], RunHloPasses(std::move(modules[i]),
stream_execs[i][0], options));
TF_ASSIGN_OR_RETURN(
std::unique_ptr<Executable> executable,
RunBackend(std::move(modules[i]), stream_execs[i][0], options));
result.push_back(std::move(executable));
}
return std::move(result);
}
} | #include "xla/service/llvm_compiler.h"
#include <cstdint>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/status/status.h"
#include "llvm/IR/Module.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module_group.h"
#include "xla/literal_util.h"
#include "xla/service/backend.h"
#include "xla/stream_executor/device_description.h"
#include "xla/stream_executor/stream_executor.h"
#include "xla/test_helpers.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/casts.h"
#include "tsl/platform/test.h"
#include "tsl/platform/threadpool.h"
namespace xla {
namespace {
using LLVMCompilerTest = HloTestBase;
const char* const kHloText = R"(
HloModule Add
ENTRY main {
constant.0 = f32[] constant(42.0)
constant.1 = f32[] constant(43.0)
ROOT add.0 = f32[] add(constant.0, constant.1)
}
)";
TEST_F(LLVMCompilerTest, HooksTest) {
int pre_opt_hook_call_count = 0;
int post_opt_hook_call_count = 0;
auto pre_opt_hook = [&pre_opt_hook_call_count](const llvm::Module&) {
++pre_opt_hook_call_count;
return absl::OkStatus();
};
auto post_opt_hook = [&post_opt_hook_call_count](const llvm::Module&) {
++post_opt_hook_call_count;
return absl::OkStatus();
};
auto hlo_module = ParseAndReturnVerifiedModule(kHloText).value();
LLVMCompiler* compiler =
tensorflow::down_cast<xla::LLVMCompiler*>(backend().compiler());
compiler->SetPreOptimizationHook(pre_opt_hook);
compiler->SetPostOptimizationHook(post_opt_hook);
ASSERT_TRUE(compiler
->RunBackend(std::move(hlo_module),
backend().default_stream_executor(),
nullptr)
.ok());
EXPECT_EQ(1, pre_opt_hook_call_count);
EXPECT_EQ(1, post_opt_hook_call_count);
}
TEST_F(LLVMCompilerTest, DISABLED_MultiModuleCompilation) {
auto hlo_module = ParseAndReturnVerifiedModule(kHloText).value();
auto hlo_module2 = ParseAndReturnVerifiedModule(kHloText).value();
std::vector<std::unique_ptr<HloModule>> modules;
modules.push_back(std::move(hlo_module));
modules.push_back(std::move(hlo_module2));
auto module_group =
std::make_unique<HloModuleGroup>("test_module_group", std::move(modules));
std::vector<std::vector<se::StreamExecutor*>> executors;
executors.push_back({backend().default_stream_executor()});
executors.push_back({backend().default_stream_executor()});
EXPECT_IS_OK(backend().compiler()->Compile(std::move(module_group),
std::move(executors),
backend().memory_allocator()));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/llvm_compiler.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tests/llvm_compiler_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
335f1012-0c74-4c51-87eb-9de4ab987744 | cpp | google/cel-cpp | expr | base/ast_internal/expr.cc | base/ast_internal/expr_test.cc | #include "base/ast_internal/expr.h"
#include <memory>
#include <vector>
#include "absl/base/no_destructor.h"
#include "absl/functional/overload.h"
#include "absl/types/variant.h"
namespace cel::ast_internal {
namespace {
const Type& default_type() {
static absl::NoDestructor<Type> type;
return *type;
}
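// Deep-copies the recursive TypeKind variant: a nested Type held by
// unique_ptr is cloned; every other alternative is copied by value.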
TypeKind CopyImpl(const TypeKind& other) {
return absl::visit(absl::Overload(
[](const std::unique_ptr<Type>& other) -> TypeKind {
return std::make_unique<Type>(*other);
},
[](const auto& other) -> TypeKind {
return other;
}),
other);
}
}
const Extension::Version& Extension::Version::DefaultInstance() {
static absl::NoDestructor<Version> instance;
return *instance;
}
const Extension& Extension::DefaultInstance() {
static absl::NoDestructor<Extension> instance;
return *instance;
}
Extension::Extension(const Extension& other)
: id_(other.id_),
affected_components_(other.affected_components_),
version_(std::make_unique<Version>(*other.version_)) {}
Extension& Extension::operator=(const Extension& other) {
id_ = other.id_;
affected_components_ = other.affected_components_;
version_ = std::make_unique<Version>(*other.version_);
return *this;
}
const Type& ListType::elem_type() const {
if (elem_type_ != nullptr) {
return *elem_type_;
}
return default_type();
}
bool ListType::operator==(const ListType& other) const {
return elem_type() == other.elem_type();
}
const Type& MapType::key_type() const {
if (key_type_ != nullptr) {
return *key_type_;
}
return default_type();
}
const Type& MapType::value_type() const {
if (value_type_ != nullptr) {
return *value_type_;
}
return default_type();
}
bool MapType::operator==(const MapType& other) const {
return key_type() == other.key_type() && value_type() == other.value_type();
}
const Type& FunctionType::result_type() const {
if (result_type_ != nullptr) {
return *result_type_;
}
return default_type();
}
bool FunctionType::operator==(const FunctionType& other) const {
return result_type() == other.result_type() && arg_types_ == other.arg_types_;
}
const Type& Type::type() const {
auto* value = absl::get_if<std::unique_ptr<Type>>(&type_kind_);
if (value != nullptr) {
if (*value != nullptr) return **value;
}
return default_type();
}
Type::Type(const Type& other) : type_kind_(CopyImpl(other.type_kind_)) {}
Type& Type::operator=(const Type& other) {
type_kind_ = CopyImpl(other.type_kind_);
return *this;
}
FunctionType::FunctionType(const FunctionType& other)
: result_type_(std::make_unique<Type>(other.result_type())),
arg_types_(other.arg_types()) {}
FunctionType& FunctionType::operator=(const FunctionType& other) {
result_type_ = std::make_unique<Type>(other.result_type());
arg_types_ = other.arg_types();
return *this;
}
} | #include "base/ast_internal/expr.h"
#include <memory>
#include <string>
#include <utility>
#include "absl/types/variant.h"
#include "common/expr.h"
#include "internal/testing.h"
namespace cel {
namespace ast_internal {
namespace {
TEST(AstTest, ListTypeMutableConstruction) {
ListType type;
type.mutable_elem_type() = Type(PrimitiveType::kBool);
EXPECT_EQ(absl::get<PrimitiveType>(type.elem_type().type_kind()),
PrimitiveType::kBool);
}
TEST(AstTest, MapTypeMutableConstruction) {
MapType type;
type.mutable_key_type() = Type(PrimitiveType::kBool);
type.mutable_value_type() = Type(PrimitiveType::kBool);
EXPECT_EQ(absl::get<PrimitiveType>(type.key_type().type_kind()),
PrimitiveType::kBool);
EXPECT_EQ(absl::get<PrimitiveType>(type.value_type().type_kind()),
PrimitiveType::kBool);
}
TEST(AstTest, MapTypeComparatorKeyType) {
MapType type;
type.mutable_key_type() = Type(PrimitiveType::kBool);
EXPECT_FALSE(type == MapType());
}
TEST(AstTest, MapTypeComparatorValueType) {
MapType type;
type.mutable_value_type() = Type(PrimitiveType::kBool);
EXPECT_FALSE(type == MapType());
}
TEST(AstTest, FunctionTypeMutableConstruction) {
FunctionType type;
type.mutable_result_type() = Type(PrimitiveType::kBool);
EXPECT_EQ(absl::get<PrimitiveType>(type.result_type().type_kind()),
PrimitiveType::kBool);
}
TEST(AstTest, FunctionTypeComparatorArgTypes) {
FunctionType type;
type.mutable_arg_types().emplace_back(Type());
EXPECT_FALSE(type == FunctionType());
}
TEST(AstTest, ListTypeDefaults) { EXPECT_EQ(ListType().elem_type(), Type()); }
TEST(AstTest, MapTypeDefaults) {
EXPECT_EQ(MapType().key_type(), Type());
EXPECT_EQ(MapType().value_type(), Type());
}
TEST(AstTest, FunctionTypeDefaults) {
EXPECT_EQ(FunctionType().result_type(), Type());
}
TEST(AstTest, TypeDefaults) {
EXPECT_EQ(Type().null(), nullptr);
EXPECT_EQ(Type().primitive(), PrimitiveType::kPrimitiveTypeUnspecified);
EXPECT_EQ(Type().wrapper(), PrimitiveType::kPrimitiveTypeUnspecified);
EXPECT_EQ(Type().well_known(), WellKnownType::kWellKnownTypeUnspecified);
EXPECT_EQ(Type().list_type(), ListType());
EXPECT_EQ(Type().map_type(), MapType());
EXPECT_EQ(Type().function(), FunctionType());
EXPECT_EQ(Type().message_type(), MessageType());
EXPECT_EQ(Type().type_param(), ParamType());
EXPECT_EQ(Type().type(), Type());
EXPECT_EQ(Type().error_type(), ErrorType());
EXPECT_EQ(Type().abstract_type(), AbstractType());
}
TEST(AstTest, TypeComparatorTest) {
Type type;
type.set_type_kind(std::make_unique<Type>(PrimitiveType::kBool));
EXPECT_TRUE(type == Type(std::make_unique<Type>(PrimitiveType::kBool)));
EXPECT_FALSE(type == Type(PrimitiveType::kBool));
EXPECT_FALSE(type == Type(std::unique_ptr<Type>()));
EXPECT_FALSE(type == Type(std::make_unique<Type>(PrimitiveType::kInt64)));
}
TEST(AstTest, ExprMutableConstruction) {
Expr expr;
expr.mutable_const_expr().set_bool_value(true);
ASSERT_TRUE(expr.has_const_expr());
EXPECT_TRUE(expr.const_expr().bool_value());
expr.mutable_ident_expr().set_name("expr");
ASSERT_TRUE(expr.has_ident_expr());
EXPECT_FALSE(expr.has_const_expr());
EXPECT_EQ(expr.ident_expr().name(), "expr");
expr.mutable_select_expr().set_field("field");
ASSERT_TRUE(expr.has_select_expr());
EXPECT_FALSE(expr.has_ident_expr());
EXPECT_EQ(expr.select_expr().field(), "field");
expr.mutable_call_expr().set_function("function");
ASSERT_TRUE(expr.has_call_expr());
EXPECT_FALSE(expr.has_select_expr());
EXPECT_EQ(expr.call_expr().function(), "function");
expr.mutable_list_expr();
EXPECT_TRUE(expr.has_list_expr());
EXPECT_FALSE(expr.has_call_expr());
expr.mutable_struct_expr().set_name("name");
ASSERT_TRUE(expr.has_struct_expr());
EXPECT_EQ(expr.struct_expr().name(), "name");
EXPECT_FALSE(expr.has_list_expr());
expr.mutable_comprehension_expr().set_accu_var("accu_var");
ASSERT_TRUE(expr.has_comprehension_expr());
EXPECT_FALSE(expr.has_list_expr());
EXPECT_EQ(expr.comprehension_expr().accu_var(), "accu_var");
}
TEST(AstTest, ReferenceConstantDefaultValue) {
Reference reference;
EXPECT_EQ(reference.value(), Constant());
}
TEST(AstTest, TypeCopyable) {
Type type = Type(PrimitiveType::kBool);
Type type2 = type;
EXPECT_TRUE(type2.has_primitive());
EXPECT_EQ(type2, type);
type = Type(ListType(std::make_unique<Type>(PrimitiveType::kBool)));
type2 = type;
EXPECT_TRUE(type2.has_list_type());
EXPECT_EQ(type2, type);
type = Type(MapType(std::make_unique<Type>(PrimitiveType::kBool),
std::make_unique<Type>(PrimitiveType::kBool)));
type2 = type;
EXPECT_TRUE(type2.has_map_type());
EXPECT_EQ(type2, type);
type = Type(FunctionType(std::make_unique<Type>(PrimitiveType::kBool), {}));
type2 = type;
EXPECT_TRUE(type2.has_function());
EXPECT_EQ(type2, type);
type = Type(AbstractType("optional", {Type(PrimitiveType::kBool)}));
type2 = type;
EXPECT_TRUE(type2.has_abstract_type());
EXPECT_EQ(type2, type);
}
TEST(AstTest, TypeMoveable) {
Type type = Type(PrimitiveType::kBool);
Type type2 = type;
Type type3 = std::move(type);
EXPECT_TRUE(type2.has_primitive());
EXPECT_EQ(type2, type3);
type = Type(ListType(std::make_unique<Type>(PrimitiveType::kBool)));
type2 = type;
type3 = std::move(type);
EXPECT_TRUE(type2.has_list_type());
EXPECT_EQ(type2, type3);
type = Type(MapType(std::make_unique<Type>(PrimitiveType::kBool),
std::make_unique<Type>(PrimitiveType::kBool)));
type2 = type;
type3 = std::move(type);
EXPECT_TRUE(type2.has_map_type());
EXPECT_EQ(type2, type3);
type = Type(FunctionType(std::make_unique<Type>(PrimitiveType::kBool), {}));
type2 = type;
type3 = std::move(type);
EXPECT_TRUE(type2.has_function());
EXPECT_EQ(type2, type3);
type = Type(AbstractType("optional", {Type(PrimitiveType::kBool)}));
type2 = type;
type3 = std::move(type);
EXPECT_TRUE(type2.has_abstract_type());
EXPECT_EQ(type2, type3);
}
TEST(AstTest, NestedTypeKindCopyAssignable) {
ListType list_type(std::make_unique<Type>(PrimitiveType::kBool));
ListType list_type2;
list_type2 = list_type;
EXPECT_EQ(list_type2, list_type);
MapType map_type(std::make_unique<Type>(PrimitiveType::kBool),
std::make_unique<Type>(PrimitiveType::kBool));
MapType map_type2;
map_type2 = map_type;
AbstractType abstract_type(
"abstract", {Type(PrimitiveType::kBool), Type(PrimitiveType::kBool)});
AbstractType abstract_type2;
abstract_type2 = abstract_type;
EXPECT_EQ(abstract_type2, abstract_type);
FunctionType function_type(
std::make_unique<Type>(PrimitiveType::kBool),
{Type(PrimitiveType::kBool), Type(PrimitiveType::kBool)});
FunctionType function_type2;
function_type2 = function_type;
EXPECT_EQ(function_type2, function_type);
}
TEST(AstTest, ExtensionSupported) {
SourceInfo source_info;
source_info.mutable_extensions().push_back(
Extension("constant_folding", nullptr, {}));
EXPECT_EQ(source_info.extensions()[0],
Extension("constant_folding", nullptr, {}));
}
TEST(AstTest, ExtensionEquality) {
Extension extension1("constant_folding", nullptr, {});
EXPECT_EQ(extension1, Extension("constant_folding", nullptr, {}));
EXPECT_NE(extension1,
Extension("constant_folding",
std::make_unique<Extension::Version>(1, 0), {}));
EXPECT_NE(extension1, Extension("constant_folding", nullptr,
{Extension::Component::kRuntime}));
EXPECT_EQ(extension1,
Extension("constant_folding",
std::make_unique<Extension::Version>(0, 0), {}));
}
}
}
} | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/base/ast_internal/expr.cc | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/base/ast_internal/expr_test.cc | 4552db5798fb0853b131b783d8875794334fae7f |
51f65671-4b5c-44ae-919f-d1b236832132 | cpp | google/arolla | generic_operator_overload_condition | arolla/expr/operator_loader/generic_operator_overload_condition.cc | arolla/expr/operator_loader/generic_operator_overload_condition_test.cc | #include "arolla/expr/operator_loader/generic_operator_overload_condition.h"
#include <cstdint>
#include <utility>
#include <vector>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "arolla/expr/eval/model_executor.h"
#include "arolla/expr/expr.h"
#include "arolla/expr/expr_node.h"
#include "arolla/expr/tuple_expr_operator.h"
#include "arolla/io/wildcard_input_loader.h"
#include "arolla/memory/optional_value.h"
#include "arolla/qtype/qtype.h"
#include "arolla/qtype/qtype_traits.h"
#include "arolla/qtype/tuple_qtype.h"
#include "arolla/qtype/typed_value.h"
#include "arolla/util/status_macros_backport.h"
namespace arolla::operator_loader {
using ::arolla::expr::BindOp;
using ::arolla::expr::CompileModelExecutor;
using ::arolla::expr::ExprNodePtr;
using ::arolla::expr::MakeTupleOperator;
using ::arolla::expr::ModelEvaluationOptions;
absl::StatusOr<GenericOperatorOverloadConditionFn>
MakeGenericOperatorOverloadConditionFn(
absl::Span<const ExprNodePtr> prepared_condition_exprs) {
ASSIGN_OR_RETURN(auto expr, BindOp(MakeTupleOperator::Make(),
prepared_condition_exprs, {}));
auto accessor = [](QTypePtr input_tuple_qtype, absl::string_view) {
return input_tuple_qtype;
};
ASSIGN_OR_RETURN(auto input_loader,
WildcardInputLoader<QTypePtr>::Build(accessor));
ASSIGN_OR_RETURN(auto model_executor,
CompileModelExecutor<TypedValue>(expr, *input_loader));
const auto test_input_qtype = MakeTupleQType({});
const auto expected_output_qtype = MakeTupleQType(
std::vector(prepared_condition_exprs.size(), GetQType<OptionalUnit>()));
ASSIGN_OR_RETURN(
auto actual_output,
model_executor.ExecuteOnHeap(ModelEvaluationOptions{}, test_input_qtype));
if (actual_output.GetType() != expected_output_qtype) {
return absl::FailedPreconditionError(absl::StrFormat(
"unexpected return qtype: expected %s, got %s",
expected_output_qtype->name(), actual_output.GetType()->name()));
}
return [model_executor = std::move(model_executor)](
QTypePtr input_tuple_qtype) -> absl::StatusOr<std::vector<bool>> {
ASSIGN_OR_RETURN(auto qvalue,
model_executor.ExecuteOnHeap(ModelEvaluationOptions{},
input_tuple_qtype));
const int64_t n = qvalue.GetFieldCount();
std::vector<bool> result(n);
for (int64_t i = 0; i < n; ++i) {
result[i] = qvalue.GetField(i).UnsafeAs<OptionalUnit>().present;
}
return result;
};
}
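// Illustrative usage: condition_fn(MakeTupleQType({GetQType<Unit>()}))
// returns one bool per prepared condition expression.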
} | #include "arolla/expr/operator_loader/generic_operator_overload_condition.h"
#include <vector>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/status/status.h"
#include "absl/status/status_matchers.h"
#include "absl/status/statusor.h"
#include "arolla/expr/expr.h"
#include "arolla/expr/expr_node.h"
#include "arolla/qtype/base_types.h"
#include "arolla/qtype/qtype.h"
#include "arolla/qtype/qtype_traits.h"
#include "arolla/qtype/tuple_qtype.h"
#include "arolla/util/unit.h"
namespace arolla::operator_loader {
namespace {
using ::absl_testing::IsOkAndHolds;
using ::absl_testing::StatusIs;
using ::arolla::expr::CallOp;
using ::arolla::expr::ExprNodePtr;
using ::arolla::expr::Leaf;
using ::arolla::expr::Literal;
class GenericOperatorOverloadConditionTest : public ::testing::Test {
protected:
static absl::StatusOr<ExprNodePtr> Arg(int n) {
return CallOp("qtype.get_field_qtype",
{Leaf("input_tuple_qtype"), Literal(n)});
}
static absl::StatusOr<ExprNodePtr> Equal(absl::StatusOr<ExprNodePtr> lhs,
absl::StatusOr<ExprNodePtr> rhs) {
return CallOp("core.equal", {lhs, rhs});
}
static absl::StatusOr<ExprNodePtr> NotEqual(absl::StatusOr<ExprNodePtr> lhs,
absl::StatusOr<ExprNodePtr> rhs) {
return CallOp("core.not_equal", {lhs, rhs});
}
static absl::StatusOr<ExprNodePtr> And(absl::StatusOr<ExprNodePtr> lhs,
absl::StatusOr<ExprNodePtr> rhs) {
return CallOp("core.presence_and", {lhs, rhs});
}
};
TEST_F(GenericOperatorOverloadConditionTest, Empty) {
ASSERT_OK_AND_ASSIGN(auto condition_fn,
MakeGenericOperatorOverloadConditionFn({}));
EXPECT_THAT(condition_fn(MakeTupleQType({})),
IsOkAndHolds(std::vector<bool>()));
}
TEST_F(GenericOperatorOverloadConditionTest, SingleCondition) {
ASSERT_OK_AND_ASSIGN(auto condition_expr,
NotEqual(Arg(0), Literal(GetNothingQType())));
ASSERT_OK_AND_ASSIGN(
auto condition_fn,
MakeGenericOperatorOverloadConditionFn({condition_expr}));
EXPECT_THAT(condition_fn(MakeTupleQType({})),
IsOkAndHolds(std::vector({false})));
EXPECT_THAT(condition_fn(MakeTupleQType({GetNothingQType()})),
IsOkAndHolds(std::vector({false})));
EXPECT_THAT(condition_fn(MakeTupleQType({GetQType<Unit>()})),
IsOkAndHolds(std::vector({true})));
}
TEST_F(GenericOperatorOverloadConditionTest, MultipleConditions) {
ASSERT_OK_AND_ASSIGN(auto condition_expr_1,
And(And(NotEqual(Arg(0), Literal(GetNothingQType())),
NotEqual(Arg(1), Literal(GetNothingQType()))),
NotEqual(Arg(0), Arg(1))));
ASSERT_OK_AND_ASSIGN(auto condition_expr_2,
And(And(NotEqual(Arg(0), Literal(GetNothingQType())),
NotEqual(Arg(1), Literal(GetNothingQType()))),
Equal(Arg(0), Arg(1))));
ASSERT_OK_AND_ASSIGN(auto condition_fn,
MakeGenericOperatorOverloadConditionFn(
{condition_expr_1, condition_expr_2}));
EXPECT_THAT(condition_fn(MakeTupleQType({})),
IsOkAndHolds(std::vector({false, false})));
EXPECT_THAT(condition_fn(MakeTupleQType({GetNothingQType()})),
IsOkAndHolds(std::vector({false, false})));
EXPECT_THAT(
condition_fn(MakeTupleQType({GetQType<Unit>(), GetQType<Unit>()})),
IsOkAndHolds(std::vector({false, true})));
EXPECT_THAT(condition_fn(MakeTupleQType({GetQType<Unit>(), GetQType<int>()})),
IsOkAndHolds(std::vector({true, false})));
}
TEST_F(GenericOperatorOverloadConditionTest, UnexpectedReturnQType) {
ASSERT_OK_AND_ASSIGN(auto condition_expr_1,
NotEqual(Arg(0), Literal(GetNothingQType())));
ASSERT_OK_AND_ASSIGN(auto condition_expr_2, Arg(1));
EXPECT_THAT(MakeGenericOperatorOverloadConditionFn(
{condition_expr_1, condition_expr_2}),
StatusIs(absl::StatusCode::kFailedPrecondition,
"unexpected return qtype: expected "
"tuple<OPTIONAL_UNIT,OPTIONAL_UNIT>, got "
"tuple<OPTIONAL_UNIT,QTYPE>"));
}
}
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/expr/operator_loader/generic_operator_overload_condition.cc | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/expr/operator_loader/generic_operator_overload_condition_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
d31ea08a-dfb2-4f54-8dcb-90625354d274 | cpp | tensorflow/tensorflow | internal | tensorflow/lite/delegates/gpu/common/memory_management/internal.cc | tensorflow/lite/delegates/gpu/common/memory_management/internal_test.cc | #include "tensorflow/lite/delegates/gpu/common/memory_management/internal.h"
#include <algorithm>
#include <cstddef>
#include <vector>
#include "tensorflow/lite/delegates/gpu/common/memory_management/types.h"
#include "tensorflow/lite/delegates/gpu/common/types.h"
namespace tflite {
namespace gpu {
bool CompareBySize(const TensorUsageWithIndex<size_t>& first,
const TensorUsageWithIndex<size_t>& second) {
return first.usage_record->tensor_size > second.usage_record->tensor_size;
}
bool IsCoveringObject(const uint2& first_object, const uint2& second_object) {
return first_object.x >= second_object.x && first_object.y >= second_object.y;
}
bool IsCoveringObject(const uint3& first_object, const uint3& second_object) {
return first_object.x >= second_object.x &&
first_object.y >= second_object.y && first_object.z >= second_object.z;
}
size_t AbsDiffInElements(const size_t first_size, const size_t second_size) {
return first_size >= second_size ? first_size - second_size
: second_size - first_size;
}
size_t AbsDiffInElements(const uint2& first_size, const uint2& second_size) {
const size_t first_elements_cnt = first_size.y * first_size.x;
const size_t second_elements_cnt = second_size.y * second_size.x;
return first_elements_cnt >= second_elements_cnt
? first_elements_cnt - second_elements_cnt
: second_elements_cnt - first_elements_cnt;
}
size_t AbsDiffInElements(const uint3& first_size, const uint3& second_size) {
const size_t first_elements_cnt = first_size.z * first_size.y * first_size.x;
const size_t second_elements_cnt =
second_size.z * second_size.y * second_size.x;
return first_elements_cnt >= second_elements_cnt
? first_elements_cnt - second_elements_cnt
: second_elements_cnt - first_elements_cnt;
}
std::vector<TaskProfile> CalculateTaskProfiles(
const std::vector<TensorUsageRecord<size_t>>& usage_records) {
TaskId num_tasks = 0;
for (size_t i = 0; i < usage_records.size(); ++i) {
num_tasks = std::max(num_tasks, usage_records[i].last_task + 1);
}
std::vector<TaskProfile> task_profiles(num_tasks);
for (size_t rec_id = 0; rec_id < usage_records.size(); ++rec_id) {
for (TaskId task_id = usage_records[rec_id].first_task;
task_id <= usage_records[rec_id].last_task; ++task_id) {
task_profiles[task_id].emplace_back(&usage_records[rec_id], rec_id);
}
}
for (auto& task_profile : task_profiles) {
std::stable_sort(task_profile.begin(), task_profile.end(), CompareBySize);
}
return task_profiles;
}
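// Worked example (illustrative): for usage_records {{16, 0, 1}, {8, 1, 2}}
// (tensor_size, first_task, last_task), num_tasks == 3 and the profiles are
//   task 0: {rec 0}           (only record 0 is alive)
//   task 1: {rec 0, rec 1}    (both alive; ordered by tensor_size, descending)
//   task 2: {rec 1}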
std::vector<size_t> CalculatePositionalMaximums(
const std::vector<TensorUsageRecord<size_t>>& usage_records) {
std::vector<TaskProfile> task_profiles = CalculateTaskProfiles(usage_records);
std::vector<size_t> positional_max;
for (const auto& task_profile : task_profiles) {
size_t i = 0;
for (; i < task_profile.size() && i < positional_max.size(); ++i) {
positional_max[i] = std::max(positional_max[i],
task_profile[i].usage_record->tensor_size);
}
for (; i < task_profile.size(); ++i) {
positional_max.push_back(task_profile[i].usage_record->tensor_size);
}
}
return positional_max;
}
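// Continuing the sketch above: the positional maximums of those profiles are
// {max(16, 16, 8), max(8)} == {16, 8}; position i holds the largest
// tensor_size ever seen at slot i of any task profile.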
}
} | #include "tensorflow/lite/delegates/gpu/common/memory_management/internal.h"
#include <cstddef>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/delegates/gpu/common/memory_management/types.h"
namespace tflite {
namespace gpu {
namespace {
using ::testing::ElementsAre;
TEST(TaskProfileTest, EmptyRecords) {
std::vector<TaskProfile> task_profiles = CalculateTaskProfiles({});
EXPECT_TRUE(task_profiles.empty());
std::vector<size_t> positional_max = CalculatePositionalMaximums({});
EXPECT_TRUE(positional_max.empty());
}
TEST(TaskProfileTest, OneRecord) {
std::vector<TensorUsageRecord<size_t>> usage_records{
{16, 0, 1}};
const std::vector<std::vector<size_t>> correct_idx = {{0}, {0}};
std::vector<TaskProfile> task_profiles = CalculateTaskProfiles(usage_records);
ASSERT_EQ(task_profiles.size(), correct_idx.size());
for (size_t i = 0; i < task_profiles.size(); ++i) {
ASSERT_EQ(task_profiles[i].size(), correct_idx[i].size());
for (size_t j = 0; j < task_profiles[i].size(); ++j) {
ASSERT_EQ(task_profiles[i][j].usage_record,
&usage_records[correct_idx[i][j]]);
ASSERT_EQ(task_profiles[i][j].idx, correct_idx[i][j]);
}
}
std::vector<size_t> positional_max =
CalculatePositionalMaximums(usage_records);
EXPECT_THAT(positional_max, ElementsAre(16));
}
TEST(TaskProfileTest, ChainRecords) {
std::vector<TensorUsageRecord<size_t>> usage_records{
{16, 0, 1},
{8, 1, 2},
{64, 2, 3},
{32, 3, 4},
{8, 4, 5},
};
const std::vector<std::vector<size_t>> correct_idx = {{0}, {0, 1}, {2, 1},
{2, 3}, {3, 4}, {4}};
std::vector<TaskProfile> task_profiles = CalculateTaskProfiles(usage_records);
ASSERT_EQ(task_profiles.size(), correct_idx.size());
for (size_t i = 0; i < task_profiles.size(); ++i) {
ASSERT_EQ(task_profiles[i].size(), correct_idx[i].size());
for (size_t j = 0; j < task_profiles[i].size(); ++j) {
ASSERT_EQ(task_profiles[i][j].usage_record,
&usage_records[correct_idx[i][j]]);
ASSERT_EQ(task_profiles[i][j].idx, correct_idx[i][j]);
}
}
std::vector<size_t> positional_max =
CalculatePositionalMaximums(usage_records);
EXPECT_THAT(positional_max, ElementsAre(64, 32));
}
TEST(TaskProfileTest, ComplexRecords) {
std::vector<TensorUsageRecord<size_t>> usage_records{
{32, 0, 1},
{32, 1, 4},
{8, 2, 5},
{16, 3, 5},
{8, 4, 5},
{64, 5, 7},
{8, 6, 8},
{8, 7, 8},
{16, 8, 9}};
const std::vector<std::vector<size_t>> correct_idx = {
{0}, {0, 1}, {1, 2}, {1, 3, 2}, {1, 3, 2, 4},
{5, 3, 2, 4}, {5, 6}, {5, 6, 7}, {8, 6, 7}, {8}};
std::vector<TaskProfile> task_profiles = CalculateTaskProfiles(usage_records);
ASSERT_EQ(task_profiles.size(), correct_idx.size());
for (size_t i = 0; i < task_profiles.size(); ++i) {
ASSERT_EQ(task_profiles[i].size(), correct_idx[i].size());
for (size_t j = 0; j < task_profiles[i].size(); ++j) {
ASSERT_EQ(task_profiles[i][j].usage_record,
&usage_records[correct_idx[i][j]]);
ASSERT_EQ(task_profiles[i][j].idx, correct_idx[i][j]);
}
}
std::vector<size_t> positional_max =
CalculatePositionalMaximums(usage_records);
EXPECT_THAT(positional_max, ElementsAre(64, 32, 8, 8));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/common/memory_management/internal.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/common/memory_management/internal_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
91e75353-5a71-453d-bbaa-606e96f59b4f | cpp | tensorflow/tensorflow | cc_op_gen | tensorflow/cc/framework/cc_op_gen.cc | tensorflow/cc/framework/cc_op_gen_test.cc | #include "tensorflow/cc/framework/cc_op_gen.h"
#include <memory>
#include <unordered_map>
#include <unordered_set>
#include <vector>
#include "absl/strings/escaping.h"
#include "tensorflow/cc/framework/cc_op_gen_util.h"
#include "tensorflow/core/framework/api_def.pb.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/attr_value_util.h"
#include "tensorflow/core/framework/op_def_util.h"
#include "tensorflow/core/framework/op_gen_lib.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/lib/gtl/map_util.h"
#include "tensorflow/core/lib/hash/hash.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/public/version.h"
namespace tensorflow {
namespace cc_op {
namespace {
const int kRightMargin = 79;
string GetConstructorDecl(const OpInfo& op_info, StringPiece op_name_prefix,
bool include_attr) {
const string prefix = strings::StrCat(op_name_prefix, op_info.op_name, "(");
string c_decl;
for (int i = 0; i < op_info.arg_types.size(); ++i) {
if (i > 0) strings::StrAppend(&c_decl, ", ");
strings::StrAppend(&c_decl, op_info.arg_types[i], " ",
op_info.arg_names[i]);
}
if (include_attr && op_info.has_optional_attrs) {
strings::StrAppend(&c_decl, ", const ", op_info.op_name, "::Attrs& attrs");
}
strings::StrAppend(&c_decl, ")");
return WordWrap(prefix, c_decl, kRightMargin);
}
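// Illustration (hypothetical values): with op_name "Foo", arg_types
// {"const ::tensorflow::Scope&", "::tensorflow::Input"} and arg_names
// {"scope", "images"}, this yields a declaration along the lines of
//   Foo(const ::tensorflow::Scope& scope, ::tensorflow::Input images)
// word-wrapped to kRightMargin columns.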
void WriteClassDecl(const OpInfo& op_info, WritableFile* h) {
string class_decl = op_info.comment;
strings::StrAppend(&class_decl, "class ", op_info.op_name, " {\n");
strings::StrAppend(&class_decl, " public:\n");
if (op_info.has_optional_attrs) {
strings::StrAppend(&class_decl, op_info.GetOpAttrStruct());
}
strings::StrAppend(&class_decl, " ",
GetConstructorDecl(op_info, "", false),
";\n");
if (op_info.has_optional_attrs) {
strings::StrAppend(&class_decl, " ",
GetConstructorDecl(op_info, "", true),
";\n");
}
if (op_info.output_types.empty()) {
strings::StrAppend(&class_decl,
" operator ::tensorflow::Operation() const { "
"return operation; }\n");
} else if (op_info.output_types.size() == 1) {
if (op_info.is_list_output[0]) {
strings::StrAppend(&class_decl,
" ::tensorflow::Output operator[](size_t index) "
"const { return ",
op_info.output_names[0], "[index]; }\n\n");
} else {
strings::StrAppend(&class_decl,
" operator ::tensorflow::Output() const { return ",
op_info.output_names[0], "; }\n");
strings::StrAppend(&class_decl,
" operator ::tensorflow::Input() const { return ",
op_info.output_names[0], "; }\n");
strings::StrAppend(&class_decl,
" ::tensorflow::Node* node() const { return ",
op_info.output_names[0], ".node(); }\n");
}
}
if (op_info.has_optional_attrs) {
strings::StrAppend(&class_decl, "\n");
for (int i = 0; i < op_info.graph_op_def.attr_size(); ++i) {
const auto& attr(op_info.graph_op_def.attr(i));
const auto& api_def_attr(op_info.api_def.attr(i));
if ((op_info.inferred_input_attrs.find(attr.name()) !=
op_info.inferred_input_attrs.end()) ||
!api_def_attr.has_default_value()) {
continue;
}
const auto entry = AttrTypeName(attr.type());
const auto attr_type_name = entry.first;
const bool use_const = entry.second;
const string camel_case_name = ToCamelCase(api_def_attr.rename_to());
const string suffix =
(camel_case_name == op_info.op_name || camel_case_name == "Attrs")
? "_"
: "";
const string attr_func_def = strings::StrCat(
camel_case_name, suffix, "(", use_const ? "const " : "",
attr_type_name, use_const ? "&" : "");
strings::StrAppend(&class_decl, " static Attrs ", attr_func_def,
" x) {\n");
strings::StrAppend(&class_decl, " return Attrs().", camel_case_name,
suffix, "(x);\n");
strings::StrAppend(&class_decl, " }\n");
}
}
strings::StrAppend(&class_decl, "\n Operation operation;\n");
for (int i = 0; i < op_info.output_types.size(); ++i) {
strings::StrAppend(&class_decl, " ", op_info.output_types[i], " ",
op_info.output_names[i], ";\n");
}
strings::StrAppend(&class_decl, "};\n");
if (!op_info.aliases.empty()) {
for (const auto& alias : op_info.aliases) {
strings::StrAppend(&class_decl, "typedef ", op_info.op_name, " ", alias,
";\n");
}
}
strings::StrAppend(&class_decl, "\n");
TF_CHECK_OK(h->Append(class_decl));
}
void GetOutput(const OpInfo& op_info, string* out) {
const string scope_str = op_info.arg_names[0];
string return_on_error =
strings::StrCat("if (!", scope_str, ".ok()) return;");
strings::StrAppend(out, " this->operation = Operation(ret);\n");
if (op_info.graph_op_def.output_arg_size() == 0) {
strings::StrAppend(out, " return;\n");
return;
}
if (op_info.graph_op_def.output_arg_size() == 1) {
if (op_info.is_list_output[0]) {
strings::StrAppend(out,
" for (int32 i = 0; i < ret->num_outputs(); ++i)\n");
strings::StrAppend(out, " this->", op_info.output_names[0],
".push_back(Output(ret, i));\n");
} else {
strings::StrAppend(out, " this->", op_info.output_names[0],
" = Output(ret, 0);\n");
}
return;
}
strings::StrAppend(out, " ::tensorflow::NameRangeMap _outputs_range;\n");
strings::StrAppend(out,
" ::tensorflow::Status _status_ = "
"::tensorflow::NameRangesForNode(*ret, ret->op_def(), "
"nullptr, &_outputs_range);\n");
strings::StrAppend(out, " if (!_status_.ok()) {\n", " ", scope_str,
".UpdateStatus(_status_);\n", " return;\n");
strings::StrAppend(out, " }\n\n");
for (int i = 0; i < op_info.graph_op_def.output_arg_size(); ++i) {
const string arg_range = strings::StrCat(
"_outputs_range[\"", op_info.graph_op_def.output_arg(i).name(), "\"]");
if (op_info.is_list_output[i]) {
strings::StrAppend(out, " for (int32 i = ", arg_range, ".first; i < ",
arg_range, ".second; ++i)\n");
strings::StrAppend(out, " this->", op_info.output_names[i],
".push_back(Output(ret, i));\n");
} else {
strings::StrAppend(out, " this->", op_info.output_names[i],
" = Output(ret, ", arg_range, ".first);\n");
}
}
}
string GetConstructorBody(const OpInfo& op_info) {
const string scope_str = op_info.arg_names[0];
string body;
string return_on_error =
strings::StrCat("if (!", scope_str, ".ok()) return;");
strings::StrAppend(&body, " ", return_on_error, "\n");
for (int i = 0; i < op_info.graph_op_def.input_arg_size(); ++i) {
const auto& arg(op_info.graph_op_def.input_arg(i));
const auto& api_def_arg(op_info.api_def.in_arg(i));
strings::StrAppend(
&body, " auto _", api_def_arg.rename_to(), " = ::tensorflow::ops::",
ArgIsList(arg) ? "AsNodeOutList" : "AsNodeOut", "(", scope_str, ", ",
AvoidCPPKeywords(api_def_arg.rename_to()), ");\n");
strings::StrAppend(&body, " ", return_on_error, "\n");
}
strings::StrAppend(&body, " ::tensorflow::Node* ret;\n");
strings::StrAppend(&body, " const auto unique_name = ", scope_str,
".GetUniqueNameForOp(\"", op_info.op_name, "\");\n");
strings::StrAppend(
&body, " auto builder = ::tensorflow::NodeBuilder(unique_name, \"",
op_info.graph_op_def.name(), "\")\n");
const string spaces = " ";
for (int i = 0; i < op_info.api_def.in_arg_size(); ++i) {
const auto& arg(op_info.api_def.in_arg(i));
strings::StrAppend(&body, spaces, ".Input(_", arg.rename_to(), ")\n");
}
for (int i = 0; i < op_info.api_def.attr_size(); ++i) {
const auto& graph_attr(op_info.graph_op_def.attr(i));
const auto& api_def_attr(op_info.api_def.attr(i));
if (op_info.inferred_input_attrs.find(api_def_attr.name()) !=
op_info.inferred_input_attrs.end()) {
continue;
}
const string attr_name =
api_def_attr.has_default_value()
? strings::StrCat("attrs.", api_def_attr.rename_to(), "_")
: AvoidCPPKeywords(api_def_attr.rename_to());
strings::StrAppend(&body, spaces, ".Attr(\"", graph_attr.name(), "\", ",
attr_name, ")\n");
}
strings::StrAppend(&body, " ;\n");
strings::StrAppend(&body, " ", scope_str, ".UpdateBuilder(&builder);\n");
strings::StrAppend(&body, " ", scope_str, ".UpdateStatus(builder.Finalize(",
scope_str, ".graph(), &ret));\n");
strings::StrAppend(&body, " ", return_on_error, "\n");
strings::StrAppend(&body, " ", scope_str, ".UpdateStatus(", scope_str,
".DoShapeInference(ret));\n");
GetOutput(op_info, &body);
return body;
}
void WriteClassDef(const OpInfo& op_info, WritableFile* cc) {
string class_def;
strings::StrAppend(
&class_def,
GetConstructorDecl(op_info, strings::StrCat(op_info.op_name, "::"),
true),
" {\n");
strings::StrAppend(&class_def, GetConstructorBody(op_info));
strings::StrAppend(&class_def, "}\n\n");
if (op_info.has_optional_attrs) {
strings::StrAppend(
&class_def,
GetConstructorDecl(op_info, strings::StrCat(op_info.op_name, "::"),
false));
strings::StrAppend(&class_def, "\n : ", op_info.op_name, "(");
int i = 0;
for (; i < op_info.arg_names.size(); ++i) {
if (i > 0) strings::StrAppend(&class_def, ", ");
strings::StrAppend(&class_def, op_info.arg_names[i]);
}
if (i > 0) strings::StrAppend(&class_def, ", ");
strings::StrAppend(&class_def, op_info.op_name, "::Attrs()");
strings::StrAppend(&class_def, ") {}\n\n");
}
TF_CHECK_OK(cc->Append(class_def));
}
void WriteCCOp(const OpDef& graph_op_def, const ApiDef& api_def,
const std::vector<string>& aliases, WritableFile* h,
WritableFile* cc) {
OpInfo op_info(graph_op_def, api_def, aliases);
WriteClassDecl(op_info, h);
WriteClassDef(op_info, cc);
}
void StartFiles(bool internal, const string& dot_h_fname, WritableFile* h,
WritableFile* cc, string* op_header_guard) {
const string header =
R"header(
#include "tensorflow/cc/framework/ops.h"
#include "tensorflow/cc/framework/scope.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/lib/gtl/array_slice.h"
)header";
const string namespace_begin = internal ? R"namespace(
namespace tensorflow {
namespace ops {
namespace internal {
)namespace"
: R"namespace(
namespace tensorflow {
namespace ops {
)namespace";
const string op_header = GetPath(dot_h_fname);
*op_header_guard = ToGuard(op_header);
const string cc_header = strings::StrCat(
R"include(
#include "tensorflow/cc/ops/const_op.h"
)include",
"#include \"", op_header, "\"\n", namespace_begin);
const string filename = GetFilename(dot_h_fname);
  const string doxygen = strings::StrCat("/// @defgroup ", filename, " ",
                                         ToTitle(filename), "\n", "/// @{\n\n");
  TF_CHECK_OK(h->Append(
      strings::StrCat("// This file is MACHINE GENERATED! Do not edit.\n\n"
"#ifndef ",
*op_header_guard,
"\n"
"#define ",
*op_header_guard, "\n\n")));
TF_CHECK_OK(h->Append(header));
TF_CHECK_OK(h->Append(namespace_begin));
TF_CHECK_OK(h->Append(doxygen));
TF_CHECK_OK(cc->Append(cc_header));
}
void FinishFiles(bool internal, WritableFile* h, WritableFile* cc,
const string& op_header_guard) {
const string footer = internal ? R"footer(}
}
}
)footer"
:
R"footer(
}
}
)footer";
TF_CHECK_OK(h->Append(footer));
TF_CHECK_OK(
h->Append(strings::StrCat("\n#endif ", "
TF_CHECK_OK(cc->Append(footer));
TF_CHECK_OK(cc->Close());
TF_CHECK_OK(h->Close());
}
string MakeInternal(const string& fname) {
auto dot_pos = fname.rfind('.');
if (dot_pos == string::npos) {
return strings::StrCat(fname, "_internal");
} else {
return strings::StrCat(fname.substr(0, dot_pos), "_internal",
fname.substr(dot_pos));
}
}
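// e.g. MakeInternal("ops/array_ops.h") == "ops/array_ops_internal.h", and the
// extension-less MakeInternal("array_ops") == "array_ops_internal".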
}
void WriteCCOps(const OpList& ops, const ApiDefMap& api_def_map,
const string& dot_h_fname, const string& dot_cc_fname) {
Env* env = Env::Default();
std::unique_ptr<WritableFile> h = nullptr;
std::unique_ptr<WritableFile> cc = nullptr;
TF_CHECK_OK(env->NewWritableFile(dot_h_fname, &h));
TF_CHECK_OK(env->NewWritableFile(dot_cc_fname, &cc));
string op_header_guard;
StartFiles(false, dot_h_fname, h.get(), cc.get(), &op_header_guard);
std::unique_ptr<WritableFile> internal_h = nullptr;
std::unique_ptr<WritableFile> internal_cc = nullptr;
const string internal_dot_h_fname = MakeInternal(dot_h_fname);
TF_CHECK_OK(env->NewWritableFile(internal_dot_h_fname, &internal_h));
TF_CHECK_OK(env->NewWritableFile(MakeInternal(dot_cc_fname), &internal_cc));
string internal_op_header_guard;
  StartFiles(true, internal_dot_h_fname, internal_h.get(),
internal_cc.get(), &internal_op_header_guard);
for (const auto& graph_op_def : ops.op()) {
if (graph_op_def.has_deprecation() &&
graph_op_def.deprecation().version() <= TF_GRAPH_DEF_VERSION) {
continue;
}
if (graph_op_def.name() == "Const") continue;
const auto* api_def = api_def_map.GetApiDef(graph_op_def.name());
std::vector<string> aliases;
if (api_def->visibility() == ApiDef::SKIP) continue;
for (int endpoint_i = 1; endpoint_i < api_def->endpoint_size();
++endpoint_i) {
aliases.push_back(api_def->endpoint(endpoint_i).name());
}
if (api_def->visibility() == ApiDef::HIDDEN) {
WriteCCOp(graph_op_def, *api_def, aliases, internal_h.get(),
internal_cc.get());
continue;
}
WriteCCOp(graph_op_def, *api_def, aliases, h.get(), cc.get());
}
FinishFiles(false, h.get(), cc.get(), op_header_guard);
  FinishFiles(true, internal_h.get(), internal_cc.get(),
internal_op_header_guard);
}
}
} | #include "tensorflow/cc/framework/cc_op_gen.h"
#include "tensorflow/core/framework/op_def.pb.h"
#include "tensorflow/core/framework/op_gen_lib.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/io/path.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
constexpr char kBaseOpDef[] = R"(
op {
name: "Foo"
input_arg {
name: "images"
description: "Images to process."
}
input_arg {
name: "dim"
description: "Description for dim."
type: DT_FLOAT
}
output_arg {
name: "output"
description: "Description for output."
type: DT_FLOAT
}
attr {
name: "T"
type: "type"
description: "Type for images"
allowed_values {
list {
type: DT_UINT8
type: DT_INT8
}
}
default_value {
i: 1
}
}
summary: "Summary for op Foo."
description: "Description for op Foo."
}
)";
void ExpectHasSubstr(StringPiece s, StringPiece expected) {
EXPECT_TRUE(absl::StrContains(s, expected))
<< "'" << s << "' does not contain '" << expected << "'";
}
void ExpectDoesNotHaveSubstr(StringPiece s, StringPiece expected) {
EXPECT_FALSE(absl::StrContains(s, expected))
<< "'" << s << "' contains '" << expected << "'";
}
void ExpectSubstrOrder(const string& s, const string& before,
const string& after) {
int before_pos = s.find(before);
int after_pos = s.find(after);
ASSERT_NE(std::string::npos, before_pos);
ASSERT_NE(std::string::npos, after_pos);
EXPECT_LT(before_pos, after_pos)
<< before << " is not before " << after << " in " << s;
}
void GenerateCcOpFiles(Env* env, const OpList& ops,
const ApiDefMap& api_def_map, string* h_file_text,
string* internal_h_file_text) {
const string& tmpdir = testing::TmpDir();
const auto h_file_path = io::JoinPath(tmpdir, "test.h");
const auto cc_file_path = io::JoinPath(tmpdir, "test.cc");
const auto internal_h_file_path = io::JoinPath(tmpdir, "test_internal.h");
const auto internal_cc_file_path = io::JoinPath(tmpdir, "test_internal.cc");
cc_op::WriteCCOps(ops, api_def_map, h_file_path, cc_file_path);
TF_ASSERT_OK(ReadFileToString(env, h_file_path, h_file_text));
TF_ASSERT_OK(
ReadFileToString(env, internal_h_file_path, internal_h_file_text));
}
TEST(CcOpGenTest, TestVisibilityChangedToHidden) {
const string api_def = R"(
op {
graph_op_name: "Foo"
visibility: HIDDEN
}
)";
Env* env = Env::Default();
OpList op_defs;
protobuf::TextFormat::ParseFromString(kBaseOpDef, &op_defs);
ApiDefMap api_def_map(op_defs);
string h_file_text, internal_h_file_text;
GenerateCcOpFiles(env, op_defs, api_def_map, &h_file_text,
&internal_h_file_text);
ExpectHasSubstr(h_file_text, "class Foo");
ExpectDoesNotHaveSubstr(internal_h_file_text, "class Foo");
TF_ASSERT_OK(api_def_map.LoadApiDef(api_def));
GenerateCcOpFiles(env, op_defs, api_def_map, &h_file_text,
&internal_h_file_text);
ExpectHasSubstr(internal_h_file_text, "class Foo");
ExpectDoesNotHaveSubstr(h_file_text, "class Foo");
}
TEST(CcOpGenTest, TestArgNameChanges) {
const string api_def = R"(
op {
graph_op_name: "Foo"
arg_order: "dim"
arg_order: "images"
}
)";
Env* env = Env::Default();
OpList op_defs;
protobuf::TextFormat::ParseFromString(kBaseOpDef, &op_defs);
ApiDefMap api_def_map(op_defs);
string cc_file_text, h_file_text;
string internal_cc_file_text, internal_h_file_text;
GenerateCcOpFiles(env, op_defs, api_def_map, &h_file_text,
&internal_h_file_text);
ExpectSubstrOrder(h_file_text, "Input images", "Input dim");
TF_ASSERT_OK(api_def_map.LoadApiDef(api_def));
GenerateCcOpFiles(env, op_defs, api_def_map, &h_file_text,
&internal_h_file_text);
ExpectSubstrOrder(h_file_text, "Input dim", "Input images");
}
TEST(CcOpGenTest, TestEndpoints) {
const string api_def = R"(
op {
graph_op_name: "Foo"
endpoint {
name: "Foo1"
}
endpoint {
name: "Foo2"
}
}
)";
Env* env = Env::Default();
OpList op_defs;
protobuf::TextFormat::ParseFromString(kBaseOpDef, &op_defs);
ApiDefMap api_def_map(op_defs);
string cc_file_text, h_file_text;
string internal_cc_file_text, internal_h_file_text;
GenerateCcOpFiles(env, op_defs, api_def_map, &h_file_text,
&internal_h_file_text);
ExpectHasSubstr(h_file_text, "class Foo {");
ExpectDoesNotHaveSubstr(h_file_text, "class Foo1");
ExpectDoesNotHaveSubstr(h_file_text, "class Foo2");
TF_ASSERT_OK(api_def_map.LoadApiDef(api_def));
GenerateCcOpFiles(env, op_defs, api_def_map, &h_file_text,
&internal_h_file_text);
ExpectHasSubstr(h_file_text, "class Foo1");
ExpectHasSubstr(h_file_text, "typedef Foo1 Foo2");
ExpectDoesNotHaveSubstr(h_file_text, "class Foo {");
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/cc/framework/cc_op_gen.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/cc/framework/cc_op_gen_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
04d8e833-ef24-4c6a-94a9-6f57be904037 | cpp | google/arolla | ops_util | arolla/array/ops_util.h | arolla/array/ops_util_test.cc | #ifndef AROLLA_ARRAY_OPS_UTIL_H_
#define AROLLA_ARRAY_OPS_UTIL_H_
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <iterator>
#include <tuple>
#include <type_traits>
#include <utility>
#include "absl/log/check.h"
#include "arolla/array/array.h"
#include "arolla/array/id_filter.h"
#include "arolla/dense_array/dense_array.h"
#include "arolla/dense_array/ops/util.h"
#include "arolla/memory/optional_value.h"
#include "arolla/memory/raw_buffer_factory.h"
#include "arolla/util/meta.h"
#include "arolla/util/view_types.h"
namespace arolla::array_ops_internal {
inline void empty_missing_fn(int64_t, int64_t) {}
template <bool ConvertToDense, class ArgList>
class ArrayOpsUtil;
template <bool ConvertToDense, class... Ts>
class ArrayOpsUtil<ConvertToDense, meta::type_list<Ts...>> {
public:
explicit ArrayOpsUtil(int64_t size, const AsArray<Ts>&... args,
RawBufferFactory* buf_factory = GetHeapBufferFactory())
: size_(size) {
DCHECK(((size == args.size()) && ... && true));
if constexpr (ConvertToDense) {
dense_ = std::make_tuple(args.ToDenseForm(buf_factory).dense_data()...);
} else {
default_valid_ = !(BoundsValidIds<Ts>(args) || ...);
if (default_valid_) {
default_values_ = std::make_tuple(
MaybeUnwrapOptional<Ts>(args.missing_id_value())...);
}
if (IsSameIdFilter(args...)) {
ids_ = First(args...).id_filter();
dense_ = std::make_tuple(args.dense_data()...);
} else {
if (!default_valid_) {
IdFilter full(IdFilter::kFull);
ids_ = IdFilter::UpperBoundIntersect(
(BoundsValidIds<Ts>(args) ? args.id_filter() : full)...);
} else {
ids_ = IdFilter::UpperBoundMerge(First(args...).size(), buf_factory,
args.id_filter()...);
}
dense_ = std::make_tuple(
args.WithIds(ids_, args.missing_id_value(), buf_factory)
.dense_data()...);
}
}
}
template <class Fn, class RepeatedFn, class MissedFn>
void Iterate(int64_t from, int64_t to, Fn&& fn, MissedFn&& missing_fn,
RepeatedFn&& repeated_fn) const {
return Iterate(std::forward<Fn>(fn), std::forward<RepeatedFn>(repeated_fn),
std::forward<MissedFn>(missing_fn),
std::index_sequence_for<Ts...>{}, from, to);
}
template <class Fn, class MissedFn>
void Iterate(int64_t from, int64_t to, Fn&& fn, MissedFn&& missing_fn) const {
auto repeated_fn = [&](int64_t id, int64_t count, view_type_t<Ts>... args) {
for (int64_t i = 0; i < count; ++i) fn(id + i, args...);
};
return Iterate(fn, std::move(repeated_fn),
std::forward<MissedFn>(missing_fn),
std::index_sequence_for<Ts...>{}, from, to);
}
template <class Fn>
void Iterate(int64_t from, int64_t to, Fn&& fn) const {
Iterate(from, to, std::forward<Fn>(fn), empty_missing_fn);
}
template <class Fn>
void IterateSimple(Fn&& fn) const {
return IterateSimple(std::forward<Fn>(fn),
std::index_sequence_for<Ts...>{});
}
int64_t size() const { return size_; }
int64_t PresentCountUpperEstimate() const {
if (ids_.type() == IdFilter::kFull || default_valid_) {
return size_;
} else {
return ids_.ids().size();
}
}
private:
using DenseUtil = dense_ops_internal::DenseOpsUtil<meta::type_list<Ts...>>;
template <class Fn, class RepeatedFn, class MissedFn, size_t... Is>
void Iterate(Fn&& fn, RepeatedFn&& repeated_fn, MissedFn&& missing_fn,
std::index_sequence<Is...>, uint64_t from, uint64_t to) const {
DCHECK_GE(from, 0);
DCHECK_GE(to, from);
if (ids_.type() == IdFilter::kFull) {
DenseUtil::Iterate(
[&](int64_t id, bool valid, view_type_t<Ts>... args) {
if (valid) {
fn(id, args...);
} else {
missing_fn(id, 1);
}
},
from, to, std::get<Is>(dense_)...);
return;
}
DCHECK(!ConvertToDense) << "If ConvertToDense=true, `ids_` must be full";
if constexpr (!ConvertToDense) {
auto defaultFn = [&](int64_t id, int64_t row_count) {
if (default_valid_) {
repeated_fn(id, row_count, std::get<Is>(default_values_)...);
} else {
missing_fn(id, row_count);
}
};
auto ids_iter = std::lower_bound(ids_.ids().begin(), ids_.ids().end(),
from + ids_.ids_offset());
int64_t offset_from = std::distance(ids_.ids().begin(), ids_iter);
auto iter_to = std::lower_bound(ids_.ids().begin(), ids_.ids().end(),
to + ids_.ids_offset());
int64_t offset_to = std::distance(ids_.ids().begin(), iter_to);
int64_t id = from;
const int64_t* ids = ids_.ids().begin();
DenseUtil::Iterate(
[&](int64_t offset, bool valid, view_type_t<Ts>... args) {
int64_t new_id = ids[offset] - ids_.ids_offset();
if (id < new_id) defaultFn(id, new_id - id);
if (valid) {
fn(new_id, args...);
} else {
missing_fn(new_id, 1);
}
id = new_id + 1;
},
offset_from, offset_to, std::get<Is>(dense_)...);
if (id < to) defaultFn(id, to - id);
}
}
template <class Fn, size_t... Is>
void IterateSimple(Fn&& fn, std::index_sequence<Is...>) const {
if (ids_.type() == IdFilter::kFull) {
DenseUtil::IterateFromZero(
[&](int64_t id, bool valid, view_type_t<Ts>... args) {
if (valid) fn(id, args...);
},
size_, std::get<Is>(dense_)...);
return;
}
DCHECK(!ConvertToDense) << "If ConvertToDense=true, `ids_` must be full";
if constexpr (!ConvertToDense) {
int64_t id = 0;
const int64_t* ids = ids_.ids().begin();
DenseUtil::IterateFromZero(
[&](int64_t offset, bool valid, view_type_t<Ts>... args) {
int64_t new_id = ids[offset] - ids_.ids_offset();
if (default_valid_ && id < new_id) {
while (id < new_id) {
fn(id++, std::get<Is>(default_values_)...);
}
}
if (valid) fn(new_id, args...);
id = new_id + 1;
},
ids_.ids().size(), std::get<Is>(dense_)...);
if (default_valid_) {
while (id < size_) {
fn(id++, std::get<Is>(default_values_)...);
}
}
}
}
template <class Arg, class A>
static bool BoundsValidIds(const A& arg) {
return !is_optional_v<Arg> && !arg.HasMissingIdValue();
}
template <class A, class... As>
static bool IsSameIdFilter(const A& a, const As&... as) {
return ((a.id_filter().IsSame(as.id_filter()) && ... && true));
}
template <class A, class... As>
static const A& First(const A& a, const As&...) {
return a;
}
template <class To, class From>
static const To& MaybeUnwrapOptional(const OptionalValue<From>& v) {
if constexpr (!is_optional_v<To>) {
DCHECK(v.present);
return v.value;
} else {
return v;
}
}
int64_t size_;
IdFilter ids_{IdFilter::kFull};
std::tuple<AsDenseArray<Ts>...> dense_;
bool default_valid_;
std::tuple<Ts...> default_values_;
};
template <bool ConvertToDense>
class ArrayOpsUtil<ConvertToDense, meta::type_list<>> {
public:
explicit ArrayOpsUtil(int64_t size, RawBufferFactory* = nullptr)
: size_(size) {}
template <class Fn, class RepeatedFn, class MissedFn>
void Iterate(int64_t from, int64_t to, Fn&&, MissedFn&&,
RepeatedFn&& repeated_fn) const {
repeated_fn(from, to - from);
}
template <class Fn, class MissedFn>
void Iterate(int64_t from, int64_t to, Fn&& fn, MissedFn&&) const {
for (int64_t i = from; i < to; ++i) fn(i);
}
template <class Fn>
void Iterate(int64_t from, int64_t to, Fn&& fn) const {
for (int64_t i = from; i < to; ++i) fn(i);
}
template <class Fn>
void IterateSimple(Fn&& fn) const {
for (int64_t i = 0; i < size_; ++i) fn(i);
}
int64_t size() const { return size_; }
int64_t PresentCountUpperEstimate() const { return size_; }
private:
int64_t size_;
};
template <class OptionalityList, class TypeList>
struct ApplyOptionalityToTypes;
template <class... Os, class... Ts>
struct ApplyOptionalityToTypes<meta::type_list<Os...>, meta::type_list<Ts...>> {
using types =
meta::type_list<std::conditional_t<is_optional_v<Os>,
::arolla::OptionalValue<Ts>, Ts>...>;
};
}
namespace arolla {
template <class Fn, class T, class... Ts>
void ArraysIterate(Fn&& fn, const Array<T>& first_array,
const Array<Ts>&... arrays) {
static_assert(meta::function_traits<Fn>::arity == sizeof...(arrays) + 2);
using arg_list = typename array_ops_internal::ApplyOptionalityToTypes<
meta::tail_t<typename meta::function_traits<Fn>::arg_types>,
meta::type_list<T, Ts...>>::types;
array_ops_internal::ArrayOpsUtil<false, arg_list> util(
first_array.size(), first_array, arrays...);
util.IterateSimple(std::forward<Fn>(fn));
}
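// Usage sketch (array names `ax`, `ay` illustrative): the optionality of the
// callback parameters selects the iteration mask. With
//   ArraysIterate([](int64_t id, OptionalValue<int> x, int y) { ... }, ax, ay);
// `y` is required and `x` may be absent, so rows where any required argument
// is missing are skipped entirely.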
template <class Fn, class T, class... Ts>
void ArraysIterateDense(Fn&& fn, const Array<T>& first_array,
const Array<Ts>&... arrays) {
static_assert(meta::function_traits<Fn>::arity == sizeof...(arrays) + 2);
using arg_list = typename array_ops_internal::ApplyOptionalityToTypes<
meta::tail_t<typename meta::function_traits<Fn>::arg_types>,
meta::type_list<T, Ts...>>::types;
array_ops_internal::ArrayOpsUtil<true, arg_list> util(first_array.size(),
first_array, arrays...);
util.IterateSimple(std::forward<Fn>(fn));
}
}
#endif | #include "arolla/array/ops_util.h"
#include <cstdint>
#include <optional>
#include <string>
#include <vector>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
#include "arolla/array/array.h"
#include "arolla/array/id_filter.h"
#include "arolla/dense_array/dense_array.h"
#include "arolla/memory/buffer.h"
#include "arolla/memory/optional_value.h"
#include "arolla/util/bytes.h"
#include "arolla/util/meta.h"
#include "arolla/util/text.h"
using ::testing::ElementsAre;
namespace arolla::array_ops_internal {
TEST(ArrayOpsUtilTest, IterateConst) {
ArrayOpsUtil<false, meta::type_list<int, int>> util(6, Array<int>(6, 3),
Array<int>(6, 7));
{
std::vector<int64_t> ids;
std::vector<int> vx;
std::vector<int> vy;
util.Iterate(1, 4, [&](int64_t id, int x, int y) {
ids.push_back(id);
vx.push_back(x);
vy.push_back(y);
});
EXPECT_THAT(ids, ElementsAre(1, 2, 3));
EXPECT_THAT(vx, ElementsAre(3, 3, 3));
EXPECT_THAT(vy, ElementsAre(7, 7, 7));
}
{
int fn_count = 0;
int missing_fn_count = 0;
int repeated_fn_count = 0;
auto fn = [&](int64_t, int, int) { fn_count++; };
auto missing_fn = [&](int64_t, int64_t) { missing_fn_count++; };
auto repeated_fn = [&](int64_t first_id, int64_t count, int x, int y) {
EXPECT_EQ(first_id, 1);
EXPECT_EQ(count, 3);
EXPECT_EQ(x, 3);
EXPECT_EQ(y, 7);
repeated_fn_count++;
};
util.Iterate(1, 4, fn, missing_fn, repeated_fn);
EXPECT_EQ(fn_count, 0);
EXPECT_EQ(missing_fn_count, 0);
EXPECT_EQ(repeated_fn_count, 1);
}
}
TEST(ArrayOpsUtilTest, IterateSparse) {
auto q1 = CreateArray<int>(20, {3, 7, 8, 10}, {1, 2, 3, 4});
auto q2 = CreateArray<int>(20, {4, 8, 10, 11}, {1, 2, 3, 4});
std::vector<std::string> res;
auto fn = [&](int64_t id, int x, int y) {
res.push_back(absl::StrFormat("single(%d, %d, %d)", id, x, y));
};
auto missing_fn = [&](int64_t id, int64_t count) {
res.push_back(absl::StrFormat("missing(%d, %d)", id, count));
};
auto repeated_fn = [&](int64_t id, int64_t count, int x, int y) {
res.push_back(absl::StrFormat("repeated(%d, %d, %d, %d)", id, count, x, y));
};
ArrayOpsUtil<false, meta::type_list<int, int>> util(20, q1, q2);
util.Iterate(0, 15, fn, missing_fn, repeated_fn);
EXPECT_THAT(
res, ElementsAre("missing(0, 4)", "missing(4, 1)", "missing(5, 3)",
"single(8, 3, 2)", "missing(9, 1)", "single(10, 4, 3)",
"missing(11, 1)", "missing(12, 3)"));
}
TEST(ArrayOpsUtilTest, IterateSparseWithMissedIdValue) {
Array<int> q1(20, IdFilter(20, CreateBuffer<int64_t>({3, 7, 8, 10})),
CreateDenseArray<int>({1, 2, 3, 4}), 9);
auto q2 = CreateArray<int>(20, {4, 8, 10, 11}, {1, 2, 3, 4});
std::vector<std::string> res;
auto fn = [&](int64_t id, int x, OptionalValue<int> y) {
if (y.present) {
res.push_back(absl::StrFormat("single(%d, %d, %d)", id, x, y.value));
} else {
res.push_back(absl::StrFormat("single(%d, %d, NA)", id, x));
}
};
auto missing_fn = [&](int64_t id, int64_t count) {
res.push_back(absl::StrFormat("missing(%d, %d)", id, count));
};
auto repeated_fn = [&](int64_t id, int64_t count, int x,
OptionalValue<int> y) {
if (y.present) {
res.push_back(
absl::StrFormat("repeated(%d, %d, %d, %d)", id, count, x, y.value));
} else {
res.push_back(absl::StrFormat("repeated(%d, %d, %d, NA)", id, count, x));
}
};
ArrayOpsUtil<false, meta::type_list<int, OptionalValue<int>>> util(20, q1,
q2);
util.Iterate(0, 15, fn, missing_fn, repeated_fn);
EXPECT_THAT(res, ElementsAre("repeated(0, 3, 9, NA)", "single(3, 1, NA)",
"single(4, 9, 1)", "repeated(5, 2, 9, NA)",
"single(7, 2, NA)", "single(8, 3, 2)",
"repeated(9, 1, 9, NA)", "single(10, 4, 3)",
"single(11, 9, 4)", "repeated(12, 3, 9, NA)"));
}
TEST(ArrayOpsUtilTest, ArraysIterate) {
Array<int> array_x = CreateArray<int>({5, 4, std::nullopt, 2, 1});
Array<int> array_y =
CreateArray<int>({3, std::nullopt, 3, 1, 3}).ToSparseForm();
std::vector<int64_t> ids;
std::vector<OptionalValue<int>> vx;
std::vector<int> vy;
ArraysIterate(
[&](int64_t id, OptionalValue<int> x, int y) {
ids.push_back(id);
vx.push_back(x);
vy.push_back(y);
},
array_x, array_y);
EXPECT_THAT(ids, ElementsAre(0, 2, 3, 4));
EXPECT_THAT(vx, ElementsAre(5, std::nullopt, 2, 1));
EXPECT_THAT(vy, ElementsAre(3, 3, 1, 3));
}
TEST(ArrayOpsUtilTest, ArraysIterateDense) {
Array<int> array_x = CreateArray<int>({5, 4, std::nullopt, 2, 1});
Array<int> array_y =
CreateArray<int>({3, std::nullopt, 3, 1, 3}).ToSparseForm();
std::vector<int64_t> ids;
std::vector<OptionalValue<int>> vx;
std::vector<int> vy;
ArraysIterateDense(
[&](int64_t id, OptionalValue<int> x, int y) {
ids.push_back(id);
vx.push_back(x);
vy.push_back(y);
},
array_x, array_y);
EXPECT_THAT(ids, ElementsAre(0, 2, 3, 4));
EXPECT_THAT(vx, ElementsAre(5, std::nullopt, 2, 1));
EXPECT_THAT(vy, ElementsAre(3, 3, 1, 3));
}
TEST(ArrayOpsUtilTest, ArraysIterateStrings) {
Array<Text> array_x = CreateArray<Text>(
{Text("5"), Text("4"), std::nullopt, Text("2"), Text("1")});
Array<Bytes> array_y =
CreateArray<Bytes>(
{Bytes("3"), std::nullopt, Bytes("3"), Bytes("1"), Bytes("3")})
.ToSparseForm();
std::vector<int64_t> ids;
std::vector<OptionalValue<absl::string_view>> vx;
std::vector<absl::string_view> vy;
ArraysIterate(
[&](int64_t id, OptionalValue<absl::string_view> x, absl::string_view y) {
ids.push_back(id);
vx.push_back(x);
vy.push_back(y);
},
array_x, array_y);
EXPECT_THAT(ids, ElementsAre(0, 2, 3, 4));
EXPECT_THAT(vx, ElementsAre("5", std::nullopt, "2", "1"));
EXPECT_THAT(vy, ElementsAre("3", "3", "1", "3"));
}
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/array/ops_util.h | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/array/ops_util_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
436d8490-15c6-4601-8e7a-e454d6599354 | cpp | tensorflow/tensorflow | xla_compiler_options_util | tensorflow/compiler/jit/xla_compiler_options_util.cc | tensorflow/compiler/jit/xla_compiler_options_util_test.cc | #include "tensorflow/compiler/jit/xla_compiler_options_util.h"
#include "xla/pjrt/pjrt_client.h"
#include "xla/tsl/framework/device_id_utils.h"
#include "tensorflow/core/framework/function.h"
namespace tensorflow {
namespace {
using XlaDeviceCompiler =
DeviceCompiler<xla::LocalExecutable, xla::LocalClient>;
using PjRtDeviceCompiler =
DeviceCompiler<xla::PjRtLoadedExecutable, xla::PjRtClient>;
inline void LogOptions(const XlaCompiler::Options& options) {
VLOG(2) << "XlaCompiler::Options[device_type=" << options.device_type
<< ",device_ordinal=" << options.device_ordinal
<< ",client=" << options.client << ",flib_def=" << options.flib_def
<< ",graph_def_version=" << options.graph_def_version
<< ",options.shape_determination_fns.layout_preference_fn?="
<< (options.shape_determination_fns.layout_preference_fn != nullptr)
<< ",options.shape_determination_fns.shape_representation_fn?="
<< (options.shape_determination_fns.shape_representation_fn !=
nullptr)
<< ",allow_cpu_custom_calls=" << options.allow_cpu_custom_calls
<< ",populate_resource_manager=" << options.populate_resource_manager
<< ",alias_passthrough_params=" << options.alias_passthrough_params
<< ",detailed_logging=" << options.detailed_logging << "]";
}
}
XlaCompiler::Options GenerateCompilerOptions(
const XlaDeviceCompiler& xla_device_compiler,
const FunctionLibraryRuntime& function_library, DeviceBase* device,
se::Stream* stream, const XlaPlatformInfo& platform_info,
bool has_ref_vars) {
XlaCompiler::Options options;
options.client = static_cast<xla::LocalClient*>(xla_device_compiler.client());
if (stream != nullptr) {
options.device_ordinal = stream->parent()->device_ordinal();
}
options.device_type = xla_device_compiler.device_type();
options.flib_def = function_library.GetFunctionLibraryDefinition();
options.graph_def_version = function_library.graph_def_version();
options.allow_cpu_custom_calls =
(platform_info.platform_id() == se::host::kHostPlatformId);
options.device_allocator = GetAllocator(device, stream, platform_info);
if (platform_info.xla_device_metadata()) {
options.shape_determination_fns =
platform_info.xla_device_metadata()->default_shape_determination_fns();
}
options.alias_passthrough_params =
!has_ref_vars && !platform_info.is_on_xla_device();
LogOptions(options);
return options;
}
XlaCompiler::Options GenerateCompilerOptionsForTfrtTpu(
const XlaDeviceCompiler& xla_device_compiler,
const FunctionLibraryRuntime& function_library) {
XlaCompiler::Options options;
options.device_type = xla_device_compiler.device_type();
options.flib_def = function_library.GetFunctionLibraryDefinition();
options.graph_def_version = function_library.graph_def_version();
options.allow_cpu_custom_calls = false;
options.alias_passthrough_params = false;
return options;
}
XlaCompiler::Options GenerateCompilerOptionsForPjRt(
const FunctionLibraryRuntime& function_library,
const DeviceBase* device_base, const XlaPlatformInfo& platform_info,
const DeviceCompiler<xla::PjRtLoadedExecutable, xla::PjRtClient>*
pjrt_device_compiler) {
return GenerateCompilerOptionsForPjRt(
function_library.GetFunctionLibraryDefinition(),
function_library.graph_def_version(), device_base, platform_info,
pjrt_device_compiler);
}
XlaCompiler::Options GenerateCompilerOptionsForPjRt(
const FunctionLibraryDefinition* function_library_def,
int graph_def_version, const DeviceBase* device_base,
const XlaPlatformInfo& platform_info,
const PjRtDeviceCompiler* pjrt_device_compiler) {
XlaCompiler::Options options;
absl::StatusOr<int> platform_device_id =
tsl::GetPlatformDeviceIdFromDeviceParsedName(
device_base->parsed_name(),
DeviceType(tensorflow::down_cast<const Device*>(device_base)
->device_type()));
if (platform_device_id.ok()) {
options.device_ordinal = *platform_device_id;
} else {
options.device_ordinal = device_base->parsed_name().id;
}
options.flib_def = function_library_def;
options.graph_def_version = graph_def_version;
if (const auto* metadata = platform_info.xla_device_metadata();
metadata != nullptr) {
options.device_type = metadata->jit_device_type();
options.shape_determination_fns =
metadata->default_shape_determination_fns();
} else if (const auto* metadata = platform_info.pjrt_device_metadata();
metadata != nullptr) {
options.device_type = metadata->jit_device_type();
options.shape_determination_fns =
metadata->default_shape_determination_fns();
} else if (pjrt_device_compiler != nullptr) {
options.device_type = pjrt_device_compiler->device_type();
}
options.allow_cpu_custom_calls = false;
options.alias_passthrough_params = false;
LogOptions(options);
return options;
}
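// Note: when no platform device id can be resolved from the parsed device
// name, the ordinal falls back to the parsed id itself, so e.g.
// "/device:CPU:0" yields device_ordinal == 0 on either path.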
XlaCompiler::CompileOptions GenerateCompileOptions(
bool has_ref_vars, bool may_alias_resource_update) {
XlaCompiler::CompileOptions compile_options;
compile_options.is_entry_computation = true;
compile_options.always_return_tuple = false;
compile_options.alias_resource_update =
!has_ref_vars && may_alias_resource_update;
return compile_options;
}
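// In effect, alias_resource_update is true only for the combination
// (has_ref_vars == false, may_alias_resource_update == true); every other
// combination yields false, as exercised by the GenerateCompileOptions test.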
} | #include "tensorflow/compiler/jit/xla_compiler_options_util.h"
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include <gtest/gtest.h>
#include "tensorflow/compiler/jit/flags.h"
#include "tensorflow/compiler/jit/pjrt_device_compiler_client.h"
#include "tensorflow/compiler/jit/test_util.h"
#include "tensorflow/compiler/jit/xla_platform_info.h"
#include "tensorflow/compiler/tf2xla/xla_compiler.h"
#include "tensorflow/compiler/tf2xla/xla_helpers.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
#include "xla/client/client_library.h"
#include "xla/pjrt/pjrt_client.h"
#include "xla/xla_data.pb.h"
#include "tensorflow/core/framework/device.h"
#include "tensorflow/core/framework/device_base.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/tpu/tpu_defs.h"
namespace tensorflow {
namespace {
using XlaDeviceCompiler =
DeviceCompiler<xla::LocalExecutable, xla::LocalClient>;
using XlaDeviceExecutablePersistor =
DeviceExecutablePersistor<xla::LocalExecutable, xla::LocalClient>;
using PjRtDeviceCompiler =
DeviceCompiler<xla::PjRtLoadedExecutable, xla::PjRtClient>;
using PjRtDeviceExecutablePersistor =
DeviceExecutablePersistor<xla::PjRtLoadedExecutable, xla::PjRtClient>;
XlaDeviceCompiler* CreateXlaDeviceCompiler(DeviceType device_type,
xla::LocalClient* local_client) {
auto persistor = std::make_unique<XlaDeviceExecutablePersistor>(
XlaDeviceExecutablePersistor::Config(), device_type);
auto compiler_client =
std::make_unique<XlaDeviceCompilerClient>(local_client);
return new XlaDeviceCompiler(std::move(persistor),
std::move(compiler_client));
}
PjRtDeviceCompiler* CreatePjRtDeviceCompiler(DeviceType device_type,
xla::PjRtClient* pjrt_client) {
auto persistor = std::make_unique<PjRtDeviceExecutablePersistor>(
PjRtDeviceExecutablePersistor::Config(), device_type);
auto compiler_client =
std::make_unique<PjRtDeviceCompilerClient>(pjrt_client);
return new PjRtDeviceCompiler(std::move(persistor),
std::move(compiler_client));
}
std::vector<XlaShapeLayoutHelpers::ShapeDeterminationFns>
GetShapeDeterminationFns() {
XlaHelpers::ShapeRepresentationFn shape_representation_fn =
[](const TensorShape&, DataType, bool, XlaLayoutPreference) {
return xla::Shape();
};
XlaShapeLayoutHelpers::LayoutPreferenceFn layout_preference_fn =
[](const TensorShape&, DataType, std::optional<XlaArgument::Kind>) {
return tensorflow::XlaLayoutPreference::kTpuPreferLinearLayout;
};
return {XlaShapeLayoutHelpers::ShapeDeterminationFns{
layout_preference_fn, shape_representation_fn}};
}
std::unique_ptr<XlaDevice::Metadata> CreateXlaDeviceMetadata(
DeviceType compilation_device_type) {
return std::make_unique<XlaDevice::Metadata>(
0, nullptr, compilation_device_type,
GetShapeDeterminationFns(), XlaDevice::PaddedShapeFn(),
false);
}
std::unique_ptr<PjRtBaseDevice::Metadata> CreatePjRtDeviceMetadata(
DeviceType compilation_device_type) {
return std::make_unique<PjRtBaseDevice::Metadata>(compilation_device_type,
GetShapeDeterminationFns());
}
class XlaCompilerOptionsTest : public ::testing::Test {
protected:
void SetUp() override {
tensorflow::GetXlaDeviceFlags()->tf_xla_enable_xla_devices = true;
}
DeviceSetup device_setup_;
};
TEST_F(XlaCompilerOptionsTest, PjRtOptionsXlaDevice) {
device_setup_.AddDevicesAndSetUp({DEVICE_XLA_GPU});
Device* device = device_setup_.GetDevice(DEVICE_XLA_GPU);
DeviceType compilation_device_type = DeviceType(DEVICE_GPU_XLA_JIT);
se::Platform::Id platform_id = nullptr;
auto xla_device_metadata = CreateXlaDeviceMetadata(compilation_device_type);
std::shared_ptr<se::DeviceMemoryAllocator> custom_allocator;
XlaPlatformInfo platform_info(
compilation_device_type, platform_id, xla_device_metadata.get(),
nullptr, custom_allocator);
XlaCompiler::Options options = GenerateCompilerOptionsForPjRt(
*device_setup_.flr(), device, platform_info,
nullptr);
EXPECT_EQ(options.device_type, compilation_device_type);
EXPECT_EQ(options.device_ordinal, 0);
EXPECT_NE(options.flib_def, nullptr);
EXPECT_EQ(options.graph_def_version, TF_GRAPH_DEF_VERSION);
EXPECT_FALSE(options.allow_cpu_custom_calls);
EXPECT_FALSE(options.alias_passthrough_params);
TF_ASSERT_OK_AND_ASSIGN(
auto shape, options.shape_determination_fns.shape_representation_fn(
TensorShape(), DT_FLOAT, false,
tensorflow::XlaLayoutPreference::kTpuPreferLinearLayout));
EXPECT_EQ(shape, xla::Shape());
EXPECT_EQ(options.shape_determination_fns.layout_preference_fn(
TensorShape(), DT_FLOAT, std::nullopt),
tensorflow::XlaLayoutPreference::kTpuPreferLinearLayout);
}
TEST_F(XlaCompilerOptionsTest, PjRtOptionsPjRtBaseDevice) {
device_setup_.AddDevicesAndSetUp({DEVICE_CPU});
Device* device = device_setup_.GetDevice(DEVICE_CPU);
DeviceType compilation_device_type = DeviceType(DEVICE_CPU_XLA_JIT);
auto pjrt_device_metadata = CreatePjRtDeviceMetadata(compilation_device_type);
XlaPlatformInfo platform_info(
compilation_device_type, nullptr,
nullptr,
pjrt_device_metadata.get(),
nullptr);
XlaCompiler::Options options = GenerateCompilerOptionsForPjRt(
*device_setup_.flr(), device, platform_info,
nullptr);
EXPECT_EQ(options.device_type, compilation_device_type);
EXPECT_EQ(options.device_ordinal, 0);
EXPECT_NE(options.flib_def, nullptr);
EXPECT_EQ(options.graph_def_version, TF_GRAPH_DEF_VERSION);
EXPECT_FALSE(options.allow_cpu_custom_calls);
EXPECT_FALSE(options.alias_passthrough_params);
TF_ASSERT_OK_AND_ASSIGN(
auto shape, options.shape_determination_fns.shape_representation_fn(
TensorShape(), DT_FLOAT, false,
tensorflow::XlaLayoutPreference::kTpuPreferLinearLayout));
EXPECT_EQ(shape, xla::Shape());
EXPECT_EQ(options.shape_determination_fns.layout_preference_fn(
TensorShape(), DT_FLOAT, std::nullopt),
tensorflow::XlaLayoutPreference::kTpuPreferLinearLayout);
}
TEST_F(XlaCompilerOptionsTest, PjRtOptionsNonXlaDevice) {
device_setup_.AddDevicesAndSetUp({DEVICE_CPU});
Device* device = device_setup_.GetDevice(DEVICE_CPU);
DeviceType compilation_device_type = DeviceType(DEVICE_CPU_XLA_JIT);
XlaPlatformInfo platform_info(compilation_device_type,
nullptr,
nullptr,
nullptr,
nullptr);
auto pjrt_device_compiler =
CreatePjRtDeviceCompiler(compilation_device_type, nullptr);
core::ScopedUnref pjrt_device_compiler_ref(pjrt_device_compiler);
XlaCompiler::Options options = GenerateCompilerOptionsForPjRt(
*device_setup_.flr(), device, platform_info, pjrt_device_compiler);
EXPECT_EQ(options.device_type, compilation_device_type);
EXPECT_EQ(options.device_ordinal, 0);
EXPECT_NE(options.flib_def, nullptr);
EXPECT_EQ(options.graph_def_version, TF_GRAPH_DEF_VERSION);
EXPECT_FALSE(options.allow_cpu_custom_calls);
EXPECT_FALSE(options.alias_passthrough_params);
TF_ASSERT_OK_AND_ASSIGN(
auto shape, options.shape_determination_fns.shape_representation_fn(
TensorShape(), DT_FLOAT, false,
tensorflow::XlaLayoutPreference::kNoPreference));
xla::ShapeProto shape_proto;
shape_proto.set_element_type(xla::PrimitiveType::F32);
shape_proto.mutable_layout();
EXPECT_EQ(shape, xla::Shape(shape_proto));
EXPECT_EQ(options.shape_determination_fns.layout_preference_fn(
TensorShape(), DT_FLOAT, std::nullopt),
tensorflow::XlaLayoutPreference::kNoPreference);
}
TEST_F(XlaCompilerOptionsTest, XlaOptions) {
device_setup_.AddDevicesAndSetUp({DEVICE_XLA_GPU});
Device* device = device_setup_.GetDevice(DEVICE_XLA_GPU);
xla::LocalClient* client = xla::ClientLibrary::LocalClientOrDie();
DeviceType device_type = DeviceType(DEVICE_XLA_GPU);
DeviceType compilation_device_type = DeviceType(DEVICE_GPU_XLA_JIT);
auto xla_device_compiler =
CreateXlaDeviceCompiler(compilation_device_type, client);
core::ScopedUnref xla_device_compiler_ref(xla_device_compiler);
se::Platform::Id platform_id = se::host::kHostPlatformId;
auto xla_device_metadata = CreateXlaDeviceMetadata(compilation_device_type);
std::shared_ptr<se::DeviceMemoryAllocator> custom_allocator;
XlaPlatformInfo platform_info(
device_type, platform_id, xla_device_metadata.get(),
nullptr, custom_allocator);
XlaCompiler::Options options =
GenerateCompilerOptions(*xla_device_compiler, *device_setup_.flr(),
device, nullptr, platform_info, false);
EXPECT_EQ(options.device_type, compilation_device_type);
EXPECT_NE(options.flib_def, nullptr);
EXPECT_EQ(options.graph_def_version, TF_GRAPH_DEF_VERSION);
EXPECT_TRUE(options.allow_cpu_custom_calls);
EXPECT_NE(options.device_allocator, nullptr);
EXPECT_FALSE(options.alias_passthrough_params);
TF_ASSERT_OK_AND_ASSIGN(
auto shape, options.shape_determination_fns.shape_representation_fn(
TensorShape(), DT_FLOAT, false,
tensorflow::XlaLayoutPreference::kTpuPreferLinearLayout));
EXPECT_EQ(shape, xla::Shape());
EXPECT_EQ(options.shape_determination_fns.layout_preference_fn(
TensorShape(), DT_FLOAT, std::nullopt),
tensorflow::XlaLayoutPreference::kTpuPreferLinearLayout);
}
TEST_F(XlaCompilerOptionsTest, XlaOptionsHasRefVarsNoXlaDeviceMetadata) {
device_setup_.AddDevicesAndSetUp({DEVICE_CPU});
Device* device = device_setup_.GetDevice(DEVICE_CPU);
xla::LocalClient* client = xla::ClientLibrary::LocalClientOrDie();
DeviceType device_type = DeviceType(DEVICE_CPU);
DeviceType compilation_device_type = DeviceType(DEVICE_CPU_XLA_JIT);
auto xla_device_compiler =
CreateXlaDeviceCompiler(compilation_device_type, client);
core::ScopedUnref xla_device_compiler_ref(xla_device_compiler);
se::Platform::Id platform_id = se::host::kHostPlatformId;
std::shared_ptr<se::DeviceMemoryAllocator> custom_allocator;
XlaPlatformInfo platform_info(
device_type, platform_id, nullptr,
nullptr, custom_allocator);
XlaCompiler::Options options =
GenerateCompilerOptions(*xla_device_compiler, *device_setup_.flr(),
device, nullptr, platform_info, false);
EXPECT_EQ(options.device_type, compilation_device_type);
EXPECT_NE(options.flib_def, nullptr);
EXPECT_EQ(options.graph_def_version, TF_GRAPH_DEF_VERSION);
EXPECT_TRUE(options.allow_cpu_custom_calls);
EXPECT_NE(options.device_allocator, nullptr);
EXPECT_TRUE(options.alias_passthrough_params);
TF_ASSERT_OK_AND_ASSIGN(
auto shape, options.shape_determination_fns.shape_representation_fn(
TensorShape(), DT_FLOAT, false,
tensorflow::XlaLayoutPreference::kNoPreference));
xla::ShapeProto shape_proto;
shape_proto.set_element_type(xla::PrimitiveType::F32);
shape_proto.mutable_layout();
EXPECT_EQ(shape, xla::Shape(shape_proto));
EXPECT_EQ(options.shape_determination_fns.layout_preference_fn(
TensorShape(), DT_FLOAT, std::nullopt),
tensorflow::XlaLayoutPreference::kNoPreference);
}
TEST_F(XlaCompilerOptionsTest, TfRtTpuOptions) {
device_setup_.AddDevicesAndSetUp({DEVICE_TPU_NODE});
xla::LocalClient* client = xla::ClientLibrary::LocalClientOrDie();
DeviceType compilation_device_type = DeviceType(DEVICE_TPU_XLA_JIT);
auto xla_device_compiler =
CreateXlaDeviceCompiler(compilation_device_type, client);
core::ScopedUnref xla_device_compiler_ref(xla_device_compiler);
XlaCompiler::Options options = GenerateCompilerOptionsForTfrtTpu(
*xla_device_compiler, *device_setup_.flr());
EXPECT_EQ(options.device_type, compilation_device_type);
EXPECT_NE(options.flib_def, nullptr);
EXPECT_EQ(options.graph_def_version, TF_GRAPH_DEF_VERSION);
EXPECT_FALSE(options.allow_cpu_custom_calls);
EXPECT_FALSE(options.alias_passthrough_params);
}
TEST_F(XlaCompilerOptionsTest, GenerateCompileOptions) {
XlaCompiler::CompileOptions option1 = GenerateCompileOptions(
false, false);
EXPECT_TRUE(option1.is_entry_computation);
EXPECT_FALSE(option1.always_return_tuple);
EXPECT_FALSE(option1.alias_resource_update);
XlaCompiler::CompileOptions option2 = GenerateCompileOptions(
false, true);
EXPECT_TRUE(option2.alias_resource_update);
XlaCompiler::CompileOptions option3 = GenerateCompileOptions(
true, false);
EXPECT_FALSE(option3.alias_resource_update);
XlaCompiler::CompileOptions option4 = GenerateCompileOptions(
true, true);
EXPECT_FALSE(option4.alias_resource_update);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/jit/xla_compiler_options_util.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/jit/xla_compiler_options_util_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
42871c5d-e2a7-4409-b141-155d8cd6cb8c | cpp | tensorflow/tensorflow | batched_gather_scatter_normalizer | third_party/xla/xla/service/batched_gather_scatter_normalizer.cc | third_party/xla/xla/service/batched_gather_scatter_normalizer_test.cc | #include "xla/service/batched_gather_scatter_normalizer.h"
#include <cstdint>
#include <iterator>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_format.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/primitive_util.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
namespace xla {
namespace {
bool IsBatchGather(const HloGatherInstruction* gather) {
const auto& dims = gather->gather_dimension_numbers();
return !dims.operand_batching_dims().empty();
}
bool IsBatchScatter(const HloScatterInstruction* scatter) {
const auto& dims = scatter->scatter_dimension_numbers();
return !dims.input_batching_dims().empty();
}
PrimitiveType PromoteTypeForSize(PrimitiveType type, int64_t size) {
if (!primitive_util::IsIntegralType(type) ||
primitive_util::FitsInIntegralType(size, type)) {
return type;
}
if (primitive_util::FitsInIntegralType(size, PrimitiveType::S32)) {
return PrimitiveType::S32;
}
return PrimitiveType::S64;
}
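// `indices_are_sorted` can only be preserved when the iota components
// prepended for the batching dimensions are emitted in ascending dimension
// order and the updated index map remains sorted; otherwise the rewritten
// indices may no longer be sorted.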
bool GetUpdatedIndicesAreSorted(bool indices_are_sorted,
absl::Span<const int64_t> indices_batching_dims,
absl::Span<const int64_t> updated_index_map) {
return indices_are_sorted && absl::c_is_sorted(indices_batching_dims) &&
absl::c_is_sorted(updated_index_map);
}
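// Materializes the implicit batching dimensions as explicit index components:
// one iota per batching dimension is created along `index_vector_dim` and
// concatenated in front of the original `indices`. If `index_vector_dim` is
// past the last dimension, the indices are first reshaped to append a
// trailing size-1 dimension.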
HloInstruction* CreateConcatIndices(
HloInstruction* inst, HloInstruction* indices, int64_t index_vector_dim,
absl::Span<const int64_t> indices_batching_dims,
BatchedGatherScatterNormalizer* normalizer) {
PrimitiveType element_type = indices->shape().element_type();
for (int64_t indices_batching_dim : indices_batching_dims) {
element_type = PromoteTypeForSize(
element_type, indices->shape().dimensions(indices_batching_dim));
}
if (element_type != indices->shape().element_type()) {
Shape indices_shape = indices->shape();
indices_shape.set_element_type(element_type);
indices = inst->parent()->AddInstruction(
HloInstruction::CreateConvert(indices_shape, indices));
}
Shape iota_shape = indices->shape();
const bool index_vector_dim_on_last_dim =
index_vector_dim == iota_shape.rank();
if (index_vector_dim_on_last_dim) {
std::vector<int64_t> dimensions(iota_shape.dimensions().begin(),
iota_shape.dimensions().end());
dimensions.push_back(1);
iota_shape = ShapeUtil::MakeShape(element_type, dimensions);
indices = inst->AddInstruction(
HloInstruction::CreateReshape(iota_shape, indices));
}
iota_shape.set_dimensions(index_vector_dim, 1);
normalizer->UpdateLayout(&iota_shape);
std::vector<HloInstruction*> indices_to_concat;
indices_to_concat.reserve(indices_batching_dims.size() + 1);
for (int64_t indices_batching_dim : indices_batching_dims) {
indices_to_concat.push_back(inst->parent()->AddInstruction(
HloInstruction::CreateIota(iota_shape, indices_batching_dim)));
}
indices_to_concat.push_back(indices);
Shape concat_shape = iota_shape;
concat_shape.set_dimensions(
index_vector_dim,
indices_batching_dims.size() +
(index_vector_dim_on_last_dim
? 1
: indices->shape().dimensions(index_vector_dim)));
normalizer->UpdateLayout(&concat_shape);
return inst->AddInstruction(HloInstruction::CreateConcatenate(
concat_shape, indices_to_concat, index_vector_dim));
}
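// Rewrites a gather that carries operand/start_indices batching dimensions
// into an equivalent non-batched gather: the batching dimensions are
// prepended to start_index_map, folded into collapsed_slice_dims, and matched
// by the iota components concatenated onto the indices.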
absl::StatusOr<HloInstruction*> NormalizeBatchGather(
HloGatherInstruction* gather, BatchedGatherScatterNormalizer* normalizer) {
HloInstruction* gather_operand = gather->mutable_operand(0);
HloInstruction* gather_indices = gather->mutable_operand(1);
const auto& dims = gather->gather_dimension_numbers();
CHECK_EQ(dims.operand_batching_dims_size(),
dims.start_indices_batching_dims_size());
std::vector<int64_t> start_index_map(dims.operand_batching_dims().begin(),
dims.operand_batching_dims().end());
absl::c_copy(dims.start_index_map(), std::back_inserter(start_index_map));
gather_indices =
CreateConcatIndices(gather, gather_indices, dims.index_vector_dim(),
dims.start_indices_batching_dims(), normalizer);
std::vector<int64_t> collapsed_slice_dims(dims.collapsed_slice_dims().begin(),
dims.collapsed_slice_dims().end());
absl::c_copy(dims.operand_batching_dims(),
std::back_inserter(collapsed_slice_dims));
absl::c_sort(collapsed_slice_dims);
GatherDimensionNumbers updated_dims =
HloGatherInstruction::MakeGatherDimNumbers(
dims.offset_dims(), collapsed_slice_dims, start_index_map,
dims.index_vector_dim());
return gather->AddInstruction(HloInstruction::CreateGather(
gather->shape(), gather_operand, gather_indices, updated_dims,
gather->gather_slice_sizes(),
GetUpdatedIndicesAreSorted(gather->indices_are_sorted(),
dims.start_indices_batching_dims(),
start_index_map)));
}
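// Scatter counterpart of NormalizeBatchGather: the input batching dimensions
// are prepended to scatter_dims_to_operand_dims and folded into
// inserted_window_dims, with matching iota components concatenated onto the
// scatter indices.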
absl::StatusOr<HloInstruction*> NormalizeBatchScatter(
HloScatterInstruction* scatter,
BatchedGatherScatterNormalizer* normalizer) {
auto scatter_operands = scatter->scatter_operands();
HloInstruction* scatter_indices = scatter->scatter_indices();
auto scatter_updates = scatter->scatter_updates();
const auto& dims = scatter->scatter_dimension_numbers();
CHECK_EQ(dims.input_batching_dims_size(),
dims.scatter_indices_batching_dims_size());
std::vector<int64_t> scatter_dims_to_operand_dims(
dims.input_batching_dims().begin(), dims.input_batching_dims().end());
absl::c_copy(dims.scatter_dims_to_operand_dims(),
std::back_inserter(scatter_dims_to_operand_dims));
scatter_indices =
CreateConcatIndices(scatter, scatter_indices, dims.index_vector_dim(),
dims.scatter_indices_batching_dims(), normalizer);
std::vector<int64_t> inserted_window_dims(dims.inserted_window_dims().begin(),
dims.inserted_window_dims().end());
absl::c_copy(dims.input_batching_dims(),
std::back_inserter(inserted_window_dims));
absl::c_sort(inserted_window_dims);
ScatterDimensionNumbers updated_dims =
HloScatterInstruction::MakeScatterDimNumbers(
dims.update_window_dims(), inserted_window_dims,
scatter_dims_to_operand_dims, dims.index_vector_dim());
return scatter->AddInstruction(HloInstruction::CreateScatter(
scatter->shape(), scatter_operands, scatter_indices, scatter_updates,
scatter->to_apply(), updated_dims,
GetUpdatedIndicesAreSorted(scatter->indices_are_sorted(),
dims.scatter_indices_batching_dims(),
scatter_dims_to_operand_dims),
scatter->unique_indices()));
}
}
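// Called by the expander framework for each instruction that matched
// InstructionMatchesPattern (below); only gathers/scatters with explicit
// batching dimensions are expected here, anything else is an error.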
absl::StatusOr<HloInstruction*>
BatchedGatherScatterNormalizer::ExpandInstruction(HloInstruction* inst) {
if (inst->opcode() == HloOpcode::kGather) {
auto* gather = DynCast<HloGatherInstruction>(inst);
return NormalizeBatchGather(gather, this);
}
if (inst->opcode() == HloOpcode::kScatter) {
auto* scatter = DynCast<HloScatterInstruction>(inst);
return NormalizeBatchScatter(scatter, this);
}
return absl::InvalidArgumentError(absl::StrFormat(
"Instruction: %s is not a batch gather or scatter.", inst->ToString()));
}
bool BatchedGatherScatterNormalizer::InstructionMatchesPattern(
HloInstruction* inst) {
if (inst->opcode() == HloOpcode::kGather) {
auto* gather = DynCast<HloGatherInstruction>(inst);
return IsBatchGather(gather);
}
if (inst->opcode() == HloOpcode::kScatter) {
auto* scatter = DynCast<HloScatterInstruction>(inst);
return IsBatchScatter(scatter);
}
return false;
}
} | #include "xla/service/batched_gather_scatter_normalizer.h"
#include <optional>
#include <gtest/gtest.h>
#include "absl/strings/string_view.h"
#include "xla/tests/hlo_test_base.h"
namespace xla {
namespace {
class BatchedGatherScatterNormalizerTest : public HloTestBase {};
TEST_F(BatchedGatherScatterNormalizerTest, NormalizeBatchGather) {
constexpr absl::string_view kModuleStr = R"(
HloModule StringifyGather, entry_computation_layout={(f32[50,49,48,47,46,512]{5,4,3,2,1,0}, s64[10,9,8,7,5,512]{5,4,3,2,1,0})->f32[10,9,8,7,30,29,28,27,26,512]{9,8,7,6,5,4,3,2,1,0}}
ENTRY %Gather (input_tensor: f32[50,49,48,47,46,512], start_indices: s64[10,9,8,7,5,512]) -> f32[10,9,8,7,30,29,28,27,26,512] {
%input_tensor = f32[50,49,48,47,46,512]{5,4,3,2,1,0} parameter(0)
%start_indices = s64[10,9,8,7,5,512]{5,4,3,2,1,0} parameter(1)
ROOT %gather = f32[10,9,8,7,30,29,28,27,26,512]{9,8,7,6,5,4,3,2,1,0}
gather(f32[50,49,48,47,46,512]{5,4,3,2,1,0} %input_tensor, s64[10,9,8,7,5,512]{5,4,3,2,1,0} %start_indices),
offset_dims={4,5,6,7,8}, collapsed_slice_dims={}, start_index_map={0,1,2,3,4}, operand_batching_dims={5},
start_indices_batching_dims={5}, index_vector_dim=4, slice_sizes={30,29,28,27,26,1}
})";
RunAndFilecheckHloRewrite(kModuleStr, BatchedGatherScatterNormalizer(), R"(
CHECK: %[[IOTA:.*]] = s64[10,9,8,7,1,512]{{.*}} iota(), iota_dimension=5
CHECK: %[[INDICES_CONCAT:.*]] = s64[10,9,8,7,6,512]{{.*}} concatenate(%[[IOTA]], %start_indices)
CHECK: ROOT %[[GATHER:.*]] = f32[10,9,8,7,30,29,28,27,26,512]{{.*}} gather(
CHECK-SAME: %input_tensor, %[[INDICES_CONCAT]]),
CHECK-SAME: offset_dims={4,5,6,7,8},
CHECK-SAME: collapsed_slice_dims={5},
CHECK-SAME: start_index_map={5,0,1,2,3,4},
CHECK-SAME: index_vector_dim=4,
CHECK-SAME: slice_sizes={30,29,28,27,26,1}
)");
}
TEST_F(BatchedGatherScatterNormalizerTest, NormalizeBatchGather2) {
constexpr absl::string_view kModuleStr = R"(
HloModule StringifyGather, entry_computation_layout={(f32[50,49,48,47,46,512,1024,100]{7,6,5,4,3,2,1,0}, s64[10,9,8,7,6,512,1024]{6,5,4,3,2,1,0})->f32[10,9,8,7,30,29,28,27,26,512,1024]{10,9,8,7,6,5,4,3,2,1,0}}
ENTRY %Gather (input_tensor: f32[50,49,48,47,46,512,1024,100], start_indices: s64[10,9,8,7,6,512,1024]) -> f32[10,9,8,7,30,29,28,27,26,512,1024] {
%input_tensor = f32[50,49,48,47,46,512,1024,100]{7,6,5,4,3,2,1,0} parameter(0)
%start_indices = s64[10,9,8,7,6,512,1024]{6,5,4,3,2,1,0} parameter(1)
ROOT %gather = f32[10,9,8,7,30,29,28,27,26,512,1024]{10,9,8,7,6,5,4,3,2,1,0}
gather(f32[50,49,48,47,46,512,1024,100]{7,6,5,4,3,2,1,0} %input_tensor, s64[10,9,8,7,6,512,1024]{6,5,4,3,2,1,0} %start_indices),
offset_dims={4,5,6,7,8}, collapsed_slice_dims={7}, start_index_map={0,1,2,3,4,7}, operand_batching_dims={5,6},
start_indices_batching_dims={5,6}, index_vector_dim=4, slice_sizes={30,29,28,27,26,1,1,1}
})";
RunAndFilecheckHloRewrite(kModuleStr, BatchedGatherScatterNormalizer(), R"(
CHECK: %[[IOTA1:.*]] = s64[10,9,8,7,1,512,1024]{{.*}} iota(), iota_dimension=5
CHECK: %[[IOTA2:.*]] = s64[10,9,8,7,1,512,1024]{{.*}} iota(), iota_dimension=6
CHECK: %[[INDICES_CONCAT:.*]] = s64[10,9,8,7,8,512,1024]{{.*}} concatenate(%[[IOTA1]], %[[IOTA2]], %start_indices)
CHECK: ROOT %[[GATHER:.*]] = f32[10,9,8,7,30,29,28,27,26,512,1024]{{.*}} gather(
CHECK-SAME: %input_tensor, %[[INDICES_CONCAT]]),
CHECK-SAME: offset_dims={4,5,6,7,8},
CHECK-SAME: collapsed_slice_dims={5,6,7},
CHECK-SAME: start_index_map={5,6,0,1,2,3,4,7},
CHECK-SAME: index_vector_dim=4,
CHECK-SAME: slice_sizes={30,29,28,27,26,1,1,1}
)");
}
TEST_F(BatchedGatherScatterNormalizerTest,
NormalizeBatchGatherIndicesBecomeUnsorted) {
constexpr absl::string_view kModuleStr = R"(
HloModule StringifyGather, entry_computation_layout={(f32[2,3,4,512]{3,2,1,0}, s64[3,4,1]{2,1,0})->f32[3,4,5]{2,1,0}}
ENTRY %Gather (input_tensor: f32[2,3,4,512], start_indices: s64[3,4,1]) -> f32[3,4,5] {
%input_tensor = f32[2,3,4,512]{3,2,1,0} parameter(0)
%start_indices = s64[3,4,1]{2,1,0} parameter(1)
ROOT %gather = f32[3,4,5]{2,1,0}
gather(f32[2,3,4,512]{3,2,1,0} %input_tensor, s64[3,4,1]{2,1,0} %start_indices),
offset_dims={2}, collapsed_slice_dims={1}, start_index_map={1}, operand_batching_dims={0,2},
start_indices_batching_dims={0,1}, index_vector_dim=2, slice_sizes={1,1,1,5},
indices_are_sorted=true
})";
RunAndFilecheckHloRewrite(kModuleStr, BatchedGatherScatterNormalizer(), R"(
CHECK: %[[IOTA1:.*]] = s64[3,4,1]{{.*}} iota(), iota_dimension=0
CHECK: %[[IOTA2:.*]] = s64[3,4,1]{{.*}} iota(), iota_dimension=1
CHECK: %[[INDICES_CONCAT:.*]] = s64[3,4,3]{{.*}} concatenate(%[[IOTA1]], %[[IOTA2]], %start_indices)
CHECK: ROOT %[[GATHER:.*]] = f32[3,4,5]{{.*}} gather(
CHECK-SAME: %input_tensor, %[[INDICES_CONCAT]]),
CHECK-SAME: offset_dims={2},
CHECK-SAME: collapsed_slice_dims={0,1,2},
CHECK-SAME: start_index_map={0,2,1},
CHECK-SAME: index_vector_dim=2,
CHECK-SAME: slice_sizes={1,1,1,5}
CHECK-NOT: indices_are_sorted
)");
}
TEST_F(BatchedGatherScatterNormalizerTest,
NormalizeBatchGatherIndicesBecomeUnsorted2) {
constexpr absl::string_view kModuleStr = R"(
HloModule StringifyGather, entry_computation_layout={(f32[2,3,4,512]{3,2,1,0}, s64[3,2,1]{2,1,0})->f32[3,2,5]{2,1,0}}
ENTRY %Gather (input_tensor: f32[2,3,4,512], start_indices: s64[3,2,1]) -> f32[3,2,5] {
%input_tensor = f32[2,3,4,512]{3,2,1,0} parameter(0)
%start_indices = s64[3,2,1]{2,1,0} parameter(1)
ROOT %gather = f32[3,2,5]{2,1,0}
gather(f32[2,3,4,512]{3,2,1,0} %input_tensor, s64[3,2,1]{2,1,0} %start_indices),
offset_dims={2}, collapsed_slice_dims={2}, start_index_map={2}, operand_batching_dims={0,1},
start_indices_batching_dims={1,0}, index_vector_dim=2, slice_sizes={1,1,1,5},
indices_are_sorted=true
})";
RunAndFilecheckHloRewrite(kModuleStr, BatchedGatherScatterNormalizer(), R"(
CHECK: %[[IOTA1:.*]] = s64[3,2,1]{{.*}} iota(), iota_dimension=1
CHECK: %[[IOTA2:.*]] = s64[3,2,1]{{.*}} iota(), iota_dimension=0
CHECK: %[[INDICES_CONCAT:.*]] = s64[3,2,3]{{.*}} concatenate(%[[IOTA1]], %[[IOTA2]], %start_indices)
CHECK: ROOT %[[GATHER:.*]] = f32[3,2,5]{{.*}} gather(
CHECK-SAME: %input_tensor, %[[INDICES_CONCAT]]),
CHECK-SAME: offset_dims={2},
CHECK-SAME: collapsed_slice_dims={0,1,2},
CHECK-SAME: start_index_map={0,1,2},
CHECK-SAME: index_vector_dim=2,
CHECK-SAME: slice_sizes={1,1,1,5}
CHECK-NOT: indices_are_sorted
)");
}
TEST_F(BatchedGatherScatterNormalizerTest,
NormalizeBatchGatherIndicesRemainSorted) {
constexpr absl::string_view kModuleStr = R"(
HloModule StringifyGather, entry_computation_layout={(f32[2,3,4,512]{3,2,1,0}, s64[2,3,1]{2,1,0})->f32[2,3,5]{2,1,0}}
ENTRY %Gather (input_tensor: f32[2,3,4,512], start_indices: s64[2,3,1]) -> f32[2,3,5] {
%input_tensor = f32[2,3,4,512]{3,2,1,0} parameter(0)
%start_indices = s64[2,3,1]{2,1,0} parameter(1)
ROOT %gather = f32[2,3,5]{2,1,0}
gather(f32[2,3,4,512]{3,2,1,0} %input_tensor, s64[2,3,1]{2,1,0} %start_indices),
offset_dims={2}, collapsed_slice_dims={2}, start_index_map={2}, operand_batching_dims={0,1},
start_indices_batching_dims={0,1}, index_vector_dim=2, slice_sizes={1,1,1,5},
indices_are_sorted=true
})";
RunAndFilecheckHloRewrite(kModuleStr, BatchedGatherScatterNormalizer(), R"(
CHECK: %[[IOTA1:.*]] = s64[2,3,1]{{.*}} iota(), iota_dimension=0
CHECK: %[[IOTA2:.*]] = s64[2,3,1]{{.*}} iota(), iota_dimension=1
CHECK: %[[INDICES_CONCAT:.*]] = s64[2,3,3]{{.*}} concatenate(%[[IOTA1]], %[[IOTA2]], %start_indices)
CHECK: ROOT %[[GATHER:.*]] = f32[2,3,5]{{.*}} gather(
CHECK-SAME: %input_tensor, %[[INDICES_CONCAT]]),
CHECK-SAME: offset_dims={2},
CHECK-SAME: collapsed_slice_dims={0,1,2},
CHECK-SAME: start_index_map={0,1,2},
CHECK-SAME: index_vector_dim=2,
CHECK-SAME: slice_sizes={1,1,1,5}
CHECK-SAME: indices_are_sorted=true
)");
}
TEST_F(BatchedGatherScatterNormalizerTest,
NormalizeBatchGatherIndicesRemainUnsorted) {
constexpr absl::string_view kModuleStr = R"(
HloModule StringifyGather, entry_computation_layout={(f32[2,3,4,512]{3,2,1,0}, s64[2,3,1]{2,1,0})->f32[2,3,5]{2,1,0}}
ENTRY %Gather (input_tensor: f32[2,3,4,512], start_indices: s64[2,3,1]) -> f32[2,3,5] {
%input_tensor = f32[2,3,4,512]{3,2,1,0} parameter(0)
%start_indices = s64[2,3,1]{2,1,0} parameter(1)
ROOT %gather = f32[2,3,5]{2,1,0}
gather(f32[2,3,4,512]{3,2,1,0} %input_tensor, s64[2,3,1]{2,1,0} %start_indices),
offset_dims={2}, collapsed_slice_dims={2}, start_index_map={2}, operand_batching_dims={0,1},
start_indices_batching_dims={0,1}, index_vector_dim=2, slice_sizes={1,1,1,5},
indices_are_sorted=false
})";
RunAndFilecheckHloRewrite(kModuleStr, BatchedGatherScatterNormalizer(), R"(
CHECK: %[[IOTA1:.*]] = s64[2,3,1]{{.*}} iota(), iota_dimension=0
CHECK: %[[IOTA2:.*]] = s64[2,3,1]{{.*}} iota(), iota_dimension=1
CHECK: %[[INDICES_CONCAT:.*]] = s64[2,3,3]{{.*}} concatenate(%[[IOTA1]], %[[IOTA2]], %start_indices)
CHECK: ROOT %[[GATHER:.*]] = f32[2,3,5]{{.*}} gather(
CHECK-SAME: %input_tensor, %[[INDICES_CONCAT]]),
CHECK-SAME: offset_dims={2},
CHECK-SAME: collapsed_slice_dims={0,1,2},
CHECK-SAME: start_index_map={0,1,2},
CHECK-SAME: index_vector_dim=2,
CHECK-SAME: slice_sizes={1,1,1,5}
CHECK-NOT: indices_are_sorted
)");
}
TEST_F(BatchedGatherScatterNormalizerTest, NormalizeBatchGatherDimSizeZero) {
constexpr absl::string_view kModuleStr = R"(
HloModule StringifyGather, entry_computation_layout={(f32[50,49,48,47,46,0]{5,4,3,2,1,0}, s64[10,9,8,7,5,0]{5,4,3,2,1,0})->f32[10,9,8,7,30,29,28,27,26,0]{9,8,7,6,5,4,3,2,1,0}}
ENTRY %Gather (input_tensor: f32[50,49,48,47,46,0], start_indices: s64[10,9,8,7,5,0]) -> f32[10,9,8,7,30,29,28,27,26,0] {
%input_tensor = f32[50,49,48,47,46,0]{5,4,3,2,1,0} parameter(0)
%start_indices = s64[10,9,8,7,5,0]{5,4,3,2,1,0} parameter(1)
ROOT %gather = f32[10,9,8,7,30,29,28,27,26,0]{9,8,7,6,5,4,3,2,1,0}
gather(f32[50,49,48,47,46,0]{5,4,3,2,1,0} %input_tensor, s64[10,9,8,7,5,0]{5,4,3,2,1,0} %start_indices),
offset_dims={4,5,6,7,8}, collapsed_slice_dims={}, start_index_map={0,1,2,3,4}, operand_batching_dims={5},
start_indices_batching_dims={5}, index_vector_dim=4, slice_sizes={30,29,28,27,26,0}
})";
RunAndFilecheckHloRewrite(kModuleStr, BatchedGatherScatterNormalizer(), R"(
CHECK: %[[IOTA:.*]] = s64[10,9,8,7,1,0]{{.*}} iota(), iota_dimension=5
CHECK: %[[INDICES_CONCAT:.*]] = s64[10,9,8,7,6,0]{{.*}} concatenate(%[[IOTA]], %start_indices)
CHECK: ROOT %[[GATHER:.*]] = f32[10,9,8,7,30,29,28,27,26,0]{{.*}} gather(
CHECK-SAME: %input_tensor, %[[INDICES_CONCAT]]),
CHECK-SAME: offset_dims={4,5,6,7,8},
CHECK-SAME: collapsed_slice_dims={5},
CHECK-SAME: start_index_map={5,0,1,2,3,4},
CHECK-SAME: index_vector_dim=4,
CHECK-SAME: slice_sizes={30,29,28,27,26,0}
)");
}
TEST_F(BatchedGatherScatterNormalizerTest, NormalizeBatchScatter) {
constexpr absl::string_view kModuleStr = R"(
HloModule StringifyScatter, entry_computation_layout={(f32[50,49,48,47,46,512]{5,4,3,2,1,0}, s64[10,9,8,7,5,512]{5,4,3,2,1,0}, f32[10,9,8,7,30,29,28,27,26,512]{9,8,7,6,5,4,3,2,1,0})->f32[50,49,48,47,46,512]{5,4,3,2,1,0}}
%add_F32.v3 (lhs: f32[], rhs: f32[]) -> f32[] {
%lhs = f32[] parameter(0)
%rhs = f32[] parameter(1)
ROOT %add = f32[] add(f32[] %lhs, f32[] %rhs)
}
ENTRY %Scatter (input_tensor: f32[50,49,48,47,46,512], scatter_indices: s64[10,9,8,7,5,512], updates: f32[10,9,8,7,30,29,28,27,26,512]) -> f32[50,49,48,47,46,512] {
%input_tensor = f32[50,49,48,47,46,512]{5,4,3,2,1,0} parameter(0)
%scatter_indices = s64[10,9,8,7,5,512]{5,4,3,2,1,0} parameter(1)
%updates = f32[10,9,8,7,30,29,28,27,26,512]{9,8,7,6,5,4,3,2,1,0} parameter(2)
ROOT %scatter = f32[50,49,48,47,46,512]{5,4,3,2,1,0} scatter(
f32[50,49,48,47,46,512]{5,4,3,2,1,0} %input_tensor,
s64[10,9,8,7,5,512]{5,4,3,2,1,0} %scatter_indices,
f32[10,9,8,7,30,29,28,27,26,512]{9,8,7,6,5,4,3,2,1,0} %updates),
update_window_dims={4,5,6,7,8}, inserted_window_dims={},
scatter_dims_to_operand_dims={0,1,2,3,4}, input_batching_dims={5},
scatter_indices_batching_dims={5}, index_vector_dim=4, to_apply=%add_F32.v3
})";
RunAndFilecheckHloRewrite(kModuleStr, BatchedGatherScatterNormalizer(), R"(
CHECK: %[[IOTA:.*]] = s64[10,9,8,7,1,512]{{.*}} iota(), iota_dimension=5
CHECK: %[[INDICES_CONCAT:.*]] = s64[10,9,8,7,6,512]{{.*}} concatenate(%[[IOTA]], %scatter_indices)
CHECK: ROOT %[[SCATTER:.*]] = f32[50,49,48,47,46,512]{{.*}} scatter(
CHECK-SAME: %input_tensor, %[[INDICES_CONCAT]], %updates),
CHECK-SAME: update_window_dims={4,5,6,7,8},
CHECK-SAME: inserted_window_dims={5},
CHECK-SAME: scatter_dims_to_operand_dims={5,0,1,2,3,4},
CHECK-SAME: index_vector_dim=4,
CHECK-SAME: to_apply=%add_F32.v3
)");
}
TEST_F(BatchedGatherScatterNormalizerTest, NormalizeBatchScatter2) {
constexpr absl::string_view kModuleStr = R"(
HloModule StringifyScatter, entry_computation_layout={(f32[50,49,48,47,46,512,1024,100]{7,6,5,4,3,2,1,0}, s64[10,9,8,7,6,512,1024]{6,5,4,3,2,1,0}, f32[10,9,8,7,30,29,28,27,26,512,1024]{10,9,8,7,6,5,4,3,2,1,0})->f32[50,49,48,47,46,512,1024,100]{7,6,5,4,3,2,1,0}}
%add_F32.v3 (lhs: f32[], rhs: f32[]) -> f32[] {
%lhs = f32[] parameter(0)
%rhs = f32[] parameter(1)
ROOT %add = f32[] add(f32[] %lhs, f32[] %rhs)
}
ENTRY %Scatter (input_tensor: f32[50,49,48,47,46,512,1024,100], scatter_indices: s64[10,9,8,7,6,512,1024], updates: f32[10,9,8,7,30,29,28,27,26,512,1024]) -> f32[50,49,48,47,46,512,1024,100] {
%input_tensor = f32[50,49,48,47,46,512,1024,100]{7,6,5,4,3,2,1,0} parameter(0)
%scatter_indices = s64[10,9,8,7,6,512,1024]{6,5,4,3,2,1,0} parameter(1)
%updates = f32[10,9,8,7,30,29,28,27,26,512,1024]{10,9,8,7,6,5,4,3,2,1,0} parameter(2)
ROOT %scatter = f32[50,49,48,47,46,512,1024,100]{7,6,5,4,3,2,1,0} scatter(
f32[50,49,48,47,46,512,1024,100]{7,6,5,4,3,2,1,0} %input_tensor,
s64[10,9,8,7,6,512,1024]{6,5,4,3,2,1,0} %scatter_indices,
f32[10,9,8,7,30,29,28,27,26,512,1024]{10,9,8,7,6,5,4,3,2,1,0} %updates),
update_window_dims={4,5,6,7,8}, inserted_window_dims={7},
scatter_dims_to_operand_dims={0,1,2,3,4,7}, input_batching_dims={5,6},
scatter_indices_batching_dims={5,6}, index_vector_dim=4, to_apply=%add_F32.v3
})";
RunAndFilecheckHloRewrite(kModuleStr, BatchedGatherScatterNormalizer(), R"(
CHECK: %[[IOTA1:.*]] = s64[10,9,8,7,1,512,1024]{{.*}} iota(), iota_dimension=5
CHECK: %[[IOTA2:.*]] = s64[10,9,8,7,1,512,1024]{{.*}} iota(), iota_dimension=6
CHECK: %[[INDICES_CONCAT:.*]] = s64[10,9,8,7,8,512,1024]{{.*}} concatenate(%[[IOTA1]], %[[IOTA2]], %scatter_indices)
CHECK: ROOT %[[SCATTER:.*]] = f32[50,49,48,47,46,512,1024,100]{{.*}} scatter(
CHECK-SAME: %input_tensor, %[[INDICES_CONCAT]], %updates),
CHECK-SAME: update_window_dims={4,5,6,7,8},
CHECK-SAME: inserted_window_dims={5,6,7},
CHECK-SAME: scatter_dims_to_operand_dims={5,6,0,1,2,3,4,7},
CHECK-SAME: index_vector_dim=4,
CHECK-SAME: to_apply=%add_F32.v3
)");
}
TEST_F(BatchedGatherScatterNormalizerTest,
NormalizeBatchScatterIndicesRemainSorted) {
constexpr absl::string_view kModuleStr = R"(
HloModule StringifyScatter, entry_computation_layout={(f32[2,3,4,512]{3,2,1,0}, s64[2,3,1]{2,1,0}, f32[2,3,5]{2,1,0})->f32[2,3,4,512]{3,2,1,0}}
%add_F32.v3 (lhs: f32[], rhs: f32[]) -> f32[] {
%lhs = f32[] parameter(0)
%rhs = f32[] parameter(1)
ROOT %add = f32[] add(f32[] %lhs, f32[] %rhs)
}
ENTRY %Scatter (input_tensor: f32[2,3,4,512], scatter_indices: s64[2,3,1], updates: f32[2,3,5]) -> f32[2,3,4,512] {
%input_tensor = f32[2,3,4,512]{3,2,1,0} parameter(0)
%scatter_indices = s64[2,3,1]{2,1,0} parameter(1)
%updates = f32[2,3,5]{2,1,0} parameter(2)
ROOT %scatter = f32[2,3,4,512]{3,2,1,0}
scatter(f32[2,3,4,512]{3,2,1,0} %input_tensor, s64[2,3,1]{2,1,0} %scatter_indices, f32[2,3,5]{2,1,0} %updates),
update_window_dims={2}, inserted_window_dims={2}, scatter_dims_to_operand_dims={2}, input_batching_dims={0,1},
scatter_indices_batching_dims={0,1}, index_vector_dim=2, indices_are_sorted=true, to_apply=%add_F32.v3
})";
RunAndFilecheckHloRewrite(kModuleStr, BatchedGatherScatterNormalizer(), R"(
CHECK: %[[IOTA1:.*]] = s64[2,3,1]{{.*}} iota(), iota_dimension=0
CHECK: %[[IOTA2:.*]] = s64[2,3,1]{{.*}} iota(), iota_dimension=1
CHECK: %[[INDICES_CONCAT:.*]] = s64[2,3,3]{{.*}} concatenate(%[[IOTA1]], %[[IOTA2]], %scatter_indices)
CHECK: ROOT %[[SCATTER:.*]] = f32[2,3,4,512]{{.*}} scatter(
CHECK-SAME: %input_tensor, %[[INDICES_CONCAT]], %updates),
CHECK-SAME: update_window_dims={2},
CHECK-SAME: inserted_window_dims={0,1,2},
CHECK-SAME: scatter_dims_to_operand_dims={0,1,2},
CHECK-SAME: index_vector_dim=2,
CHECK-SAME: indices_are_sorted=true
CHECK-SAME: to_apply=%add_F32.v3
)");
}
TEST_F(BatchedGatherScatterNormalizerTest, NormalizeBatchScatterDimSizeZero) {
constexpr absl::string_view kModuleStr = R"(
HloModule StringifyScatter, entry_computation_layout={(f32[50,49,48,47,46,0]{5,4,3,2,1,0}, s64[10,9,8,7,5,0]{5,4,3,2,1,0}, f32[10,9,8,7,30,29,28,27,26,0]{9,8,7,6,5,4,3,2,1,0})->f32[50,49,48,47,46,0]{5,4,3,2,1,0}}
%add_F32.v3 (lhs: f32[], rhs: f32[]) -> f32[] {
%lhs = f32[] parameter(0)
%rhs = f32[] parameter(1)
ROOT %add = f32[] add(f32[] %lhs, f32[] %rhs)
}
ENTRY %Scatter (input_tensor: f32[50,49,48,47,46,0], scatter_indices: s64[10,9,8,7,5,0], updates: f32[10,9,8,7,30,29,28,27,26,0]) -> f32[50,49,48,47,46,0] {
%input_tensor = f32[50,49,48,47,46,0]{5,4,3,2,1,0} parameter(0)
%scatter_indices = s64[10,9,8,7,5,0]{5,4,3,2,1,0} parameter(1)
%updates = f32[10,9,8,7,30,29,28,27,26,0]{9,8,7,6,5,4,3,2,1,0} parameter(2)
ROOT %scatter = f32[50,49,48,47,46,0]{5,4,3,2,1,0} scatter(
f32[50,49,48,47,46,0]{5,4,3,2,1,0} %input_tensor,
s64[10,9,8,7,5,0]{5,4,3,2,1,0} %scatter_indices,
f32[10,9,8,7,30,29,28,27,26,0]{9,8,7,6,5,4,3,2,1,0} %updates),
update_window_dims={4,5,6,7,8}, inserted_window_dims={},
scatter_dims_to_operand_dims={0,1,2,3,4}, input_batching_dims={5},
scatter_indices_batching_dims={5}, index_vector_dim=4, to_apply=%add_F32.v3
})";
RunAndFilecheckHloRewrite(kModuleStr, BatchedGatherScatterNormalizer(), R"(
CHECK: %[[IOTA:.*]] = s64[10,9,8,7,1,0]{{.*}} iota(), iota_dimension=5
CHECK: %[[INDICES_CONCAT:.*]] = s64[10,9,8,7,6,0]{{.*}} concatenate(%[[IOTA]], %scatter_indices)
CHECK: ROOT %[[SCATTER:.*]] = f32[50,49,48,47,46,0]{{.*}} scatter(
CHECK-SAME: %input_tensor, %[[INDICES_CONCAT]], %updates),
CHECK-SAME: update_window_dims={4,5,6,7,8},
CHECK-SAME: inserted_window_dims={5},
CHECK-SAME: scatter_dims_to_operand_dims={5,0,1,2,3,4},
CHECK-SAME: index_vector_dim=4,
CHECK-SAME: to_apply=%add_F32.v3
)");
}
TEST_F(BatchedGatherScatterNormalizerTest, IndexVectorDimOnLastDim) {
constexpr absl::string_view kModuleStr = R"(
HloModule StringifyGather, entry_computation_layout={(f32[50,512,1024]{2,1,0}, s64[10,9,8,7,6,512,1024]{6,5,4,3,2,1,0})->f32[10,9,8,7,6,512,1024]{6,5,4,3,2,1,0}}
ENTRY %Gather (input_tensor: f32[50,512,1024], start_indices: s64[10,9,8,7,6,512,1024]) -> f32[10,9,8,7,6,512,1024] {
%input_tensor = f32[50,512,1024]{2,1,0} parameter(0)
%start_indices = s64[10,9,8,7,6,512,1024]{6,5,4,3,2,1,0} parameter(1)
ROOT %gather = f32[10,9,8,7,6,512,1024]{6,5,4,3,2,1,0}
gather(f32[50,512,1024]{2,1,0} %input_tensor, s64[10,9,8,7,6,512,1024]{6,5,4,3,2,1,0} %start_indices),
offset_dims={}, collapsed_slice_dims={0}, start_index_map={0}, operand_batching_dims={1,2},
start_indices_batching_dims={5,6}, index_vector_dim=7, slice_sizes={1,1,1}
})";
RunAndFilecheckHloRewrite(kModuleStr, BatchedGatherScatterNormalizer(), R"(
CHECK: %[[IOTA1:.*]] = s64[10,9,8,7,6,512,1024,1]{{.*}} iota(), iota_dimension=5
CHECK: %[[IOTA2:.*]] = s64[10,9,8,7,6,512,1024,1]{{.*}} iota(), iota_dimension=6
CHECK: %[[RESHAPE:.*]] = s64[10,9,8,7,6,512,1024,1]{{.*}} reshape(%start_indices)
CHECK: %[[INDICES_CONCAT:.*]] = s64[10,9,8,7,6,512,1024,3]{{.*}} concatenate(%[[IOTA1]], %[[IOTA2]], %[[RESHAPE]])
CHECK: ROOT %[[GATHER:.*]] = f32[10,9,8,7,6,512,1024]{{.*}} gather(
CHECK-SAME: %input_tensor, %[[INDICES_CONCAT]]),
CHECK-SAME: offset_dims={},
CHECK-SAME: collapsed_slice_dims={0,1,2},
CHECK-SAME: start_index_map={1,2,0},
CHECK-SAME: index_vector_dim=7,
CHECK-SAME: slice_sizes={1,1,1}
)");
}
TEST_F(BatchedGatherScatterNormalizerTest,
BatchingDimSizeDoesNotOverflowIndicesType) {
constexpr absl::string_view kModuleStr = R"(
HloModule StringifyGather, entry_computation_layout={(f32[2,127,512]{2,1,0}, s8[2,127,1]{2,1,0})->f32[2,127,5]{2,1,0}}
ENTRY %Gather (input_tensor: f32[2,127,512], start_indices: s8[2,127,1]) -> f32[2,127,5] {
%input_tensor = f32[2,127,512]{2,1,0} parameter(0)
%start_indices = s8[2,127,1]{2,1,0} parameter(1)
ROOT %gather = f32[2,127,5]{2,1,0}
gather(f32[2,127,512]{2,1,0} %input_tensor, s8[2,127,1]{2,1,0} %start_indices),
offset_dims={2}, collapsed_slice_dims={}, start_index_map={2}, operand_batching_dims={0,1},
start_indices_batching_dims={0,1}, index_vector_dim=2, slice_sizes={1,1,5}
})";
RunAndFilecheckHloRewrite(kModuleStr, BatchedGatherScatterNormalizer(), R"(
CHECK: %[[IOTA1:.*]] = s8[2,127,1]{{.*}} iota(), iota_dimension=0
CHECK: %[[IOTA2:.*]] = s8[2,127,1]{{.*}} iota(), iota_dimension=1
CHECK: %[[INDICES_CONCAT:.*]] = s8[2,127,3]{{.*}} concatenate(%[[IOTA1]], %[[IOTA2]], %start_indices)
CHECK: ROOT %[[GATHER:.*]] = f32[2,127,5]{{.*}} gather(
CHECK-SAME: %input_tensor, %[[INDICES_CONCAT]]),
CHECK-SAME: offset_dims={2},
CHECK-SAME: collapsed_slice_dims={0,1},
CHECK-SAME: start_index_map={0,1,2},
CHECK-SAME: index_vector_dim=2,
CHECK-SAME: slice_sizes={1,1,5}
)");
}
TEST_F(BatchedGatherScatterNormalizerTest,
BatchingDimSizeOverflowsIndicesType) {
constexpr absl::string_view kModuleStr = R"(
HloModule StringifyGather, entry_computation_layout={(f32[2,128,512]{2,1,0}, s8[2,128,1]{2,1,0})->f32[2,128,5]{2,1,0}}
ENTRY %Gather (input_tensor: f32[2,128,512], start_indices: s8[2,128,1]) -> f32[2,128,5] {
%input_tensor = f32[2,128,512]{2,1,0} parameter(0)
%start_indices = s8[2,128,1]{2,1,0} parameter(1)
ROOT %gather = f32[2,128,5]{2,1,0}
gather(f32[2,128,512]{2,1,0} %input_tensor, s8[2,128,1]{2,1,0} %start_indices),
offset_dims={2}, collapsed_slice_dims={}, start_index_map={2}, operand_batching_dims={0,1},
start_indices_batching_dims={0,1}, index_vector_dim=2, slice_sizes={1,1,5}
})";
RunAndFilecheckHloRewrite(kModuleStr, BatchedGatherScatterNormalizer(), R"(
CHECK: %[[IOTA1:.*]] = s32[2,128,1]{{.*}} iota(), iota_dimension=0
CHECK: %[[IOTA2:.*]] = s32[2,128,1]{{.*}} iota(), iota_dimension=1
CHECK: %[[CONVERT:.*]] = s32[2,128,1]{{.*}} convert(%start_indices)
CHECK: %[[INDICES_CONCAT:.*]] = s32[2,128,3]{{.*}} concatenate(%[[IOTA1]], %[[IOTA2]], %[[CONVERT]])
CHECK: ROOT %[[GATHER:.*]] = f32[2,128,5]{{.*}} gather(
CHECK-SAME: %input_tensor, %[[INDICES_CONCAT]]),
CHECK-SAME: offset_dims={2},
CHECK-SAME: collapsed_slice_dims={0,1},
CHECK-SAME: start_index_map={0,1,2},
CHECK-SAME: index_vector_dim=2,
CHECK-SAME: slice_sizes={1,1,5}
)");
}
TEST_F(BatchedGatherScatterNormalizerTest,
BatchingDimSizeOverflowsIndicesTypeAndS32) {
constexpr absl::string_view kModuleStr = R"(
HloModule StringifyGather, entry_computation_layout={(f32[2147483648,2,512]{2,1,0}, s8[2147483648,2,1]{2,1,0})->f32[2147483648,2,5]{2,1,0}}
ENTRY %Gather (input_tensor: f32[2147483648,2,512], start_indices: s8[2147483648,2,1]) -> f32[2147483648,2,5] {
%input_tensor = f32[2147483648,2,512]{2,1,0} parameter(0)
%start_indices = s8[2147483648,2,1]{2,1,0} parameter(1)
ROOT %gather = f32[2147483648,2,5]{2,1,0}
gather(f32[2147483648,2,512]{2,1,0} %input_tensor, s8[2147483648,2,1]{2,1,0} %start_indices),
offset_dims={2}, collapsed_slice_dims={}, start_index_map={2}, operand_batching_dims={0,1},
start_indices_batching_dims={0,1}, index_vector_dim=2, slice_sizes={1,1,5}
})";
RunAndFilecheckHloRewrite(kModuleStr, BatchedGatherScatterNormalizer(), R"(
CHECK: %[[IOTA1:.*]] = s64[2147483648,2,1]{{.*}} iota(), iota_dimension=0
CHECK: %[[IOTA2:.*]] = s64[2147483648,2,1]{{.*}} iota(), iota_dimension=1
CHECK: %[[CONVERT:.*]] = s64[2147483648,2,1]{{.*}} convert(%start_indices)
CHECK: %[[INDICES_CONCAT:.*]] = s64[2147483648,2,3]{{.*}} concatenate(%[[IOTA1]], %[[IOTA2]], %[[CONVERT]])
CHECK: ROOT %[[GATHER:.*]] = f32[2147483648,2,5]{{.*}} gather(
CHECK-SAME: %input_tensor, %[[INDICES_CONCAT]]),
CHECK-SAME: offset_dims={2},
CHECK-SAME: collapsed_slice_dims={0,1},
CHECK-SAME: start_index_map={0,1,2},
CHECK-SAME: index_vector_dim=2,
CHECK-SAME: slice_sizes={1,1,5}
)");
}
TEST_F(BatchedGatherScatterNormalizerTest,
BatchingDimSizeOverflowsAndIndexVectorDimOnLastDim) {
constexpr absl::string_view kModuleStr = R"(
HloModule StringifyGather, entry_computation_layout={(f32[2,128,512]{2,1,0}, s8[2,128]{1,0})->f32[2,128,5]{2,1,0}}
ENTRY %Gather (input_tensor: f32[2,128,512], start_indices: s8[2,128]) -> f32[2,128,5] {
%input_tensor = f32[2,128,512]{2,1,0} parameter(0)
%start_indices = s8[2,128]{1,0} parameter(1)
ROOT %gather = f32[2,128,5]{2,1,0}
gather(f32[2,128,512]{2,1,0} %input_tensor, s8[2,128]{1,0} %start_indices),
offset_dims={2}, collapsed_slice_dims={}, start_index_map={2}, operand_batching_dims={0,1},
start_indices_batching_dims={0,1}, index_vector_dim=2, slice_sizes={1,1,5}
})";
RunAndFilecheckHloRewrite(kModuleStr, BatchedGatherScatterNormalizer(), R"(
CHECK: %[[IOTA1:.*]] = s32[2,128,1]{{.*}} iota(), iota_dimension=0
CHECK: %[[IOTA2:.*]] = s32[2,128,1]{{.*}} iota(), iota_dimension=1
CHECK: %[[CONVERT:.*]] = s32[2,128]{{.*}} convert(%start_indices)
CHECK: %[[RESHAPE:.*]] = s32[2,128,1]{{.*}} reshape(%[[CONVERT]])
CHECK: %[[INDICES_CONCAT:.*]] = s32[2,128,3]{{.*}} concatenate(%[[IOTA1]], %[[IOTA2]], %[[RESHAPE]])
CHECK: ROOT %[[GATHER:.*]] = f32[2,128,5]{{.*}} gather(
CHECK-SAME: %input_tensor, %[[INDICES_CONCAT]]),
CHECK-SAME: offset_dims={2},
CHECK-SAME: collapsed_slice_dims={0,1},
CHECK-SAME: start_index_map={0,1,2},
CHECK-SAME: index_vector_dim=2,
CHECK-SAME: slice_sizes={1,1,5}
)");
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/batched_gather_scatter_normalizer.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/batched_gather_scatter_normalizer_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
6abba2e5-bea3-4193-8a00-66c00e7ee71b | cpp | tensorflow/tensorflow | subtract | tensorflow/lite/experimental/shlo/ops/subtract.cc | tensorflow/lite/experimental/shlo/ops/subtract_test.cc | #include "tensorflow/lite/experimental/shlo/ops/subtract.h"
#include <functional>
#include "absl/status/status.h"
#include "tensorflow/lite/experimental/shlo/dispatch.h"
#include "tensorflow/lite/experimental/shlo/ops/binary_elementwise.h"
#include "tensorflow/lite/experimental/shlo/ops/util.h"
#include "tensorflow/lite/experimental/shlo/tensor.h"
namespace shlo_ref {
struct Subtract : std::minus<void> {};
SubtractOp Create(SubtractOp::Attributes) { return {}; }
absl::Status Prepare(SubtractOp& op, const Tensor& lhs, const Tensor& rhs,
Tensor& output) {
SHLO_REF_RETURN_ON_ERROR(Propagate(lhs.shape(), rhs.shape(), output.shape()));
SHLO_REF_RETURN_ON_ERROR(CheckSupportedTypes(CheckCtx("subtract"), lhs,
IsIntTensor, IsFloatTensor,
IsQuantizedPerTensorTensor));
SHLO_REF_RETURN_ON_ERROR(
CheckSameBaselineType(CheckCtx("subtract"), lhs, output));
SHLO_REF_RETURN_ON_ERROR(
CheckSameBaselineType(CheckCtx("subtract"), rhs, output));
return absl::OkStatus();
}
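// Dispatches on element type: plain int/float tensors are evaluated directly,
// while per-tensor quantized tensors are dequantized, subtracted in the
// expressed type, and requantized. The DISPATCH_* macros are expected to
// return from this function on a matching type, so reaching the final return
// means the tensor type was unsupported.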
absl::Status Evaluate(SubtractOp& op, const Tensor& lhs, const Tensor& rhs,
Tensor& output) {
Subtract subtract;
if (IsIntTensor(lhs) || IsFloatTensor(lhs)) {
DISPATCH_INT_FLOAT(detail::EvaluateNoQuantization,
lhs.tensor_element_type(), subtract, lhs, rhs, output);
} else if (IsQuantizedPerTensorTensor(lhs)) {
DISPATCH_QUANTIZED(detail::DequantizeOpQuantizePerTensor,
lhs.quantized_per_tensor_element_type().StorageType(),
lhs.quantized_per_tensor_element_type().ExpressedType(),
subtract, lhs, rhs, output)
}
return absl::FailedPreconditionError(
"stablehlo.subtract: Unsupported tensor type.");
}
} | #include "tensorflow/lite/experimental/shlo/ops/subtract.h"
#include <functional>
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/algorithm/container.h"
#include "tensorflow/lite/experimental/shlo/ops/binary_elementwise_test_util.h"
#include "tensorflow/lite/experimental/shlo/ops/test_util.h"
#include "tensorflow/lite/experimental/shlo/quantize.h"
#include "tensorflow/lite/experimental/shlo/quantized_tensor_element_type.h"
#include "tensorflow/lite/experimental/shlo/shape.h"
#include "tensorflow/lite/experimental/shlo/status_matcher.h"
#include "tensorflow/lite/experimental/shlo/tensor.h"
using testing::FloatEq;
using testing::Pointwise;
namespace shlo_ref {
template <>
struct ParamName<SubtractOp> {
static std::string Get() { return "Subtract"; }
};
struct Subtract : std::minus<void> {};
namespace {
INSTANTIATE_TYPED_TEST_SUITE_P(Subtract,
BinaryElementwiseOpShapePropagationTest,
SubtractOp, TestParamNames);
using SubtractBaselineConstraintTypes = BinaryElementwiseBaselineConstraintTypes<
SubtractOp,
ConcatTypes<BaselineConstraintIntTypes, BaselineConstraintFloatTypes,
BaselineConstraintQuantizedPerTensorTypes>>;
INSTANTIATE_TYPED_TEST_SUITE_P(
Subtract, BinaryElementwiseSameBaselineElementTypeConstraintTest,
    SubtractBaselineConstraintTypes, TestParamNames);
using UnsupportedTypes =
WithOpTypes<SubtractOp,
ConcatTypes<BoolTestType, PerAxisQuantizedTestTypes>>;
INSTANTIATE_TYPED_TEST_SUITE_P(Subtract, BinaryElementwiseUnsupportedTypeTest,
UnsupportedTypes, TestParamNames);
using ArithmeticTypes = ConcatTypes<ArithmeticTestTypes>;
template <class T>
struct SubtractTest : ::testing::Test {};
TYPED_TEST_SUITE(SubtractTest, ArithmeticTypes, TestParamNames);
TYPED_TEST(SubtractTest, ArithmeticTestTypesTensorsWork) {
using StorageT = typename TypeParam::StorageT;
const Shape shape({2, 3, 4});
Vector<StorageT> lhs_data =
RandomBuffer<TypeParam::kStorage>(shape, -50, 50);
Vector<StorageT> rhs_data =
RandomBuffer<TypeParam::kStorage>(shape, -5, 5);
Vector<StorageT> output_data(shape.NumElements());
Tensor lhs_tensor{
.type = TensorType{.shape = shape, .element_type = TypeParam::kStorage},
.data = lhs_data.data()};
Tensor rhs_tensor{
.type = TensorType{.shape = shape, .element_type = TypeParam::kStorage},
.data = rhs_data.data()};
Tensor output_tensor{
.type = TensorType{.shape = shape, .element_type = TypeParam::kStorage},
.data = output_data.data()};
Vector<StorageT> expected_data(shape.NumElements());
absl::c_transform(lhs_data, rhs_data, expected_data.begin(), Subtract());
auto op = Create(SubtractOp::Attributes{});
ASSERT_OK(Prepare(op, lhs_tensor, rhs_tensor, output_tensor));
ASSERT_OK(Evaluate(op, lhs_tensor, rhs_tensor, output_tensor));
EXPECT_THAT(output_data, Pointwise(FloatEq(), expected_data));
}
template <class T>
struct QuantizedSubtractTest : ::testing::Test {};
TYPED_TEST_SUITE(QuantizedSubtractTest, QuantizedTestTypes, TestParamNames);
TYPED_TEST(QuantizedSubtractTest, PerTensorWorks) {
using StorageT = typename TypeParam::StorageT;
using ExpressedT = typename TypeParam::ExpressedT;
const Shape shape({2, 3, 4});
const ExpressedT scale = static_cast<ExpressedT>(1.5);
const StorageT zero_point = static_cast<StorageT>(2);
Vector<StorageT> lhs_data =
RandomBuffer<TypeParam::kStorage>(shape, -50, 50);
Vector<StorageT> rhs_data =
RandomBuffer<TypeParam::kStorage>(shape, -5, 5);
Vector<StorageT> output_data(shape.NumElements());
const QuantizedElementTypePerTensor tensor_type =
QuantizedElementTypePerTensor(TypeParam::kStorage, zero_point,
TypeParam::kExpressed, scale);
Tensor lhs_tensor{
.type = QuantizedPerTensorTensorType{.shape = shape,
.element_type = tensor_type},
.data = lhs_data.data()};
Tensor rhs_tensor{
.type = QuantizedPerTensorTensorType{.shape = shape,
.element_type = tensor_type},
.data = rhs_data.data()};
Tensor output_tensor{
.type = QuantizedPerTensorTensorType{.shape = shape,
.element_type = tensor_type},
.data = output_data.data()};
Vector<StorageT> expected_data(shape.NumElements());
absl::c_transform(
lhs_data, rhs_data, expected_data.begin(),
[zero_point, scale](auto lhs, auto rhs) {
const ExpressedT dequantized_lhs = Dequantize(lhs, zero_point, scale);
const ExpressedT dequantized_rhs = Dequantize(rhs, zero_point, scale);
const ExpressedT dequantized_res =
Subtract()(dequantized_lhs, dequantized_rhs);
return Quantize<TypeParam::kStorage, TypeParam::kExpressed>(
dequantized_res, zero_point, static_cast<ExpressedT>(1.) / scale);
});
auto op = Create(SubtractOp::Attributes{});
ASSERT_OK(Prepare(op, lhs_tensor, rhs_tensor, output_tensor));
ASSERT_OK(Evaluate(op, lhs_tensor, rhs_tensor, output_tensor));
EXPECT_THAT(output_data, Pointwise(FloatEq(), expected_data));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/shlo/ops/subtract.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/shlo/ops/subtract_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
439b36a0-6382-423f-8d8b-44e0b13c9471 | cpp | google/cel-cpp | string_functions | runtime/standard/string_functions.cc | runtime/standard/string_functions_test.cc | #include "runtime/standard/string_functions.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/match.h"
#include "absl/strings/string_view.h"
#include "base/builtins.h"
#include "base/function_adapter.h"
#include "common/value.h"
#include "common/value_manager.h"
#include "internal/status_macros.h"
#include "runtime/function_registry.h"
namespace cel {
namespace {
absl::StatusOr<StringValue> ConcatString(ValueManager& factory,
const StringValue& value1,
const StringValue& value2) {
return factory.CreateUncheckedStringValue(
absl::StrCat(value1.ToString(), value2.ToString()));
}
absl::StatusOr<BytesValue> ConcatBytes(ValueManager& factory,
const BytesValue& value1,
const BytesValue& value2) {
return factory.CreateBytesValue(
absl::StrCat(value1.ToString(), value2.ToString()));
}
bool StringContains(ValueManager&, const StringValue& value,
const StringValue& substr) {
return absl::StrContains(value.ToString(), substr.ToString());
}
bool StringEndsWith(ValueManager&, const StringValue& value,
const StringValue& suffix) {
return absl::EndsWith(value.ToString(), suffix.ToString());
}
bool StringStartsWith(ValueManager&, const StringValue& value,
const StringValue& prefix) {
return absl::StartsWith(value.ToString(), prefix.ToString());
}
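// Registers size(string) and size(bytes) in both the global (size(x)) and
// receiver (x.size()) call styles.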
absl::Status RegisterSizeFunctions(FunctionRegistry& registry) {
  auto size_func = [](ValueManager&, const StringValue& value) -> int64_t {
    return value.Size();
  };
using StrSizeFnAdapter = UnaryFunctionAdapter<int64_t, const StringValue&>;
CEL_RETURN_IF_ERROR(StrSizeFnAdapter::RegisterGlobalOverload(
cel::builtin::kSize, size_func, registry));
CEL_RETURN_IF_ERROR(StrSizeFnAdapter::RegisterMemberOverload(
cel::builtin::kSize, size_func, registry));
auto bytes_size_func = [](ValueManager&, const BytesValue& value) -> int64_t {
return value.Size();
};
using BytesSizeFnAdapter = UnaryFunctionAdapter<int64_t, const BytesValue&>;
CEL_RETURN_IF_ERROR(BytesSizeFnAdapter::RegisterGlobalOverload(
cel::builtin::kSize, bytes_size_func, registry));
return BytesSizeFnAdapter::RegisterMemberOverload(cel::builtin::kSize,
bytes_size_func, registry);
}
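// Registers operator + for string+string and bytes+bytes concatenation; only
// invoked when RuntimeOptions::enable_string_concat is set (see below).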
absl::Status RegisterConcatFunctions(FunctionRegistry& registry) {
using StrCatFnAdapter =
BinaryFunctionAdapter<absl::StatusOr<StringValue>, const StringValue&,
const StringValue&>;
CEL_RETURN_IF_ERROR(StrCatFnAdapter::RegisterGlobalOverload(
cel::builtin::kAdd, &ConcatString, registry));
using BytesCatFnAdapter =
BinaryFunctionAdapter<absl::StatusOr<BytesValue>, const BytesValue&,
const BytesValue&>;
return BytesCatFnAdapter::RegisterGlobalOverload(cel::builtin::kAdd,
&ConcatBytes, registry);
}
}
absl::Status RegisterStringFunctions(FunctionRegistry& registry,
const RuntimeOptions& options) {
for (bool receiver_style : {true, false}) {
auto status =
BinaryFunctionAdapter<bool, const StringValue&, const StringValue&>::
Register(cel::builtin::kStringContains, receiver_style,
StringContains, registry);
CEL_RETURN_IF_ERROR(status);
status =
BinaryFunctionAdapter<bool, const StringValue&, const StringValue&>::
Register(cel::builtin::kStringEndsWith, receiver_style,
StringEndsWith, registry);
CEL_RETURN_IF_ERROR(status);
status =
BinaryFunctionAdapter<bool, const StringValue&, const StringValue&>::
Register(cel::builtin::kStringStartsWith, receiver_style,
StringStartsWith, registry);
CEL_RETURN_IF_ERROR(status);
}
if (options.enable_string_concat) {
CEL_RETURN_IF_ERROR(RegisterConcatFunctions(registry));
}
return RegisterSizeFunctions(registry);
}
} | #include "runtime/standard/string_functions.h"
#include <vector>
#include "base/builtins.h"
#include "base/function_descriptor.h"
#include "internal/testing.h"
namespace cel {
namespace {
using ::testing::IsEmpty;
using ::testing::UnorderedElementsAre;
enum class CallStyle { kFree, kReceiver };
MATCHER_P3(MatchesDescriptor, name, call_style, expected_kinds, "") {
bool receiver_style;
switch (call_style) {
case CallStyle::kFree:
receiver_style = false;
break;
case CallStyle::kReceiver:
receiver_style = true;
break;
}
const FunctionDescriptor& descriptor = *arg;
const std::vector<Kind>& types = expected_kinds;
return descriptor.name() == name &&
descriptor.receiver_style() == receiver_style &&
descriptor.types() == types;
}
TEST(RegisterStringFunctions, FunctionsRegistered) {
FunctionRegistry registry;
RuntimeOptions options;
ASSERT_OK(RegisterStringFunctions(registry, options));
auto overloads = registry.ListFunctions();
EXPECT_THAT(
overloads[builtin::kAdd],
UnorderedElementsAre(
MatchesDescriptor(builtin::kAdd, CallStyle::kFree,
std::vector<Kind>{Kind::kString, Kind::kString}),
MatchesDescriptor(builtin::kAdd, CallStyle::kFree,
std::vector<Kind>{Kind::kBytes, Kind::kBytes})));
EXPECT_THAT(overloads[builtin::kSize],
UnorderedElementsAre(
MatchesDescriptor(builtin::kSize, CallStyle::kFree,
std::vector<Kind>{Kind::kString}),
MatchesDescriptor(builtin::kSize, CallStyle::kFree,
std::vector<Kind>{Kind::kBytes}),
MatchesDescriptor(builtin::kSize, CallStyle::kReceiver,
std::vector<Kind>{Kind::kString}),
MatchesDescriptor(builtin::kSize, CallStyle::kReceiver,
std::vector<Kind>{Kind::kBytes})));
EXPECT_THAT(
overloads[builtin::kStringContains],
UnorderedElementsAre(
MatchesDescriptor(builtin::kStringContains, CallStyle::kFree,
std::vector<Kind>{Kind::kString, Kind::kString}),
MatchesDescriptor(builtin::kStringContains, CallStyle::kReceiver,
std::vector<Kind>{Kind::kString, Kind::kString})));
EXPECT_THAT(
overloads[builtin::kStringStartsWith],
UnorderedElementsAre(
MatchesDescriptor(builtin::kStringStartsWith, CallStyle::kFree,
std::vector<Kind>{Kind::kString, Kind::kString}),
MatchesDescriptor(builtin::kStringStartsWith, CallStyle::kReceiver,
std::vector<Kind>{Kind::kString, Kind::kString})));
EXPECT_THAT(
overloads[builtin::kStringEndsWith],
UnorderedElementsAre(
MatchesDescriptor(builtin::kStringEndsWith, CallStyle::kFree,
std::vector<Kind>{Kind::kString, Kind::kString}),
MatchesDescriptor(builtin::kStringEndsWith, CallStyle::kReceiver,
std::vector<Kind>{Kind::kString, Kind::kString})));
}
TEST(RegisterStringFunctions, ConcatSkippedWhenDisabled) {
FunctionRegistry registry;
RuntimeOptions options;
options.enable_string_concat = false;
ASSERT_OK(RegisterStringFunctions(registry, options));
auto overloads = registry.ListFunctions();
EXPECT_THAT(overloads[builtin::kAdd], IsEmpty());
}
}
} | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/runtime/standard/string_functions.cc | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/runtime/standard/string_functions_test.cc | 4552db5798fb0853b131b783d8875794334fae7f |
e497c608-fa1c-48c5-bcd4-388e26ca95a6 | cpp | abseil/abseil-cpp | str_join | absl/strings/str_join.h | absl/strings/str_join_test.cc | #ifndef ABSL_STRINGS_STR_JOIN_H_
#define ABSL_STRINGS_STR_JOIN_H_
#include <cstdio>
#include <cstring>
#include <initializer_list>
#include <iterator>
#include <string>
#include <tuple>
#include <type_traits>
#include <utility>
#include "absl/base/macros.h"
#include "absl/strings/internal/str_join_internal.h"
#include "absl/strings/string_view.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
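// A Formatter here is any functor with the signature
// void(std::string* out, const T& value) that appends a formatted `value` to
// `out`; the factory functions below build the stock formatters consumed by
// the StrJoin() overloads.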
inline strings_internal::AlphaNumFormatterImpl AlphaNumFormatter() {
return strings_internal::AlphaNumFormatterImpl();
}
inline strings_internal::StreamFormatterImpl StreamFormatter() {
return strings_internal::StreamFormatterImpl();
}
template <typename FirstFormatter, typename SecondFormatter>
inline strings_internal::PairFormatterImpl<FirstFormatter, SecondFormatter>
PairFormatter(FirstFormatter f1, absl::string_view sep, SecondFormatter f2) {
return strings_internal::PairFormatterImpl<FirstFormatter, SecondFormatter>(
std::move(f1), sep, std::move(f2));
}
inline strings_internal::PairFormatterImpl<
strings_internal::AlphaNumFormatterImpl,
strings_internal::AlphaNumFormatterImpl>
PairFormatter(absl::string_view sep) {
return PairFormatter(AlphaNumFormatter(), sep, AlphaNumFormatter());
}
template <typename Formatter>
strings_internal::DereferenceFormatterImpl<Formatter> DereferenceFormatter(
Formatter&& f) {
return strings_internal::DereferenceFormatterImpl<Formatter>(
std::forward<Formatter>(f));
}
inline strings_internal::DereferenceFormatterImpl<
strings_internal::AlphaNumFormatterImpl>
DereferenceFormatter() {
return strings_internal::DereferenceFormatterImpl<
strings_internal::AlphaNumFormatterImpl>(AlphaNumFormatter());
}
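// StrJoin() overloads. A minimal usage sketch (illustration only, not part of
// the original header; the same calls are exercised in the unit test below):
//
//   std::vector<std::string> v = {"foo", "bar", "baz"};
//   std::string s = absl::StrJoin(v, "-");  // "foo-bar-baz"
//
//   std::map<std::string, int> m = {{"a", 1}, {"b", 2}};
//   std::string t = absl::StrJoin(m, ",", absl::PairFormatter("="));  // "a=1,b=2"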
template <typename Iterator, typename Formatter>
std::string StrJoin(Iterator start, Iterator end, absl::string_view sep,
Formatter&& fmt) {
return strings_internal::JoinAlgorithm(start, end, sep, fmt);
}
template <typename Range, typename Formatter>
std::string StrJoin(const Range& range, absl::string_view separator,
Formatter&& fmt) {
return strings_internal::JoinRange(range, separator, fmt);
}
template <typename T, typename Formatter,
typename = typename std::enable_if<
!std::is_convertible<T, absl::string_view>::value>::type>
std::string StrJoin(std::initializer_list<T> il, absl::string_view separator,
Formatter&& fmt) {
return strings_internal::JoinRange(il, separator, fmt);
}
template <typename Formatter>
inline std::string StrJoin(std::initializer_list<absl::string_view> il,
absl::string_view separator, Formatter&& fmt) {
return strings_internal::JoinRange(il, separator, fmt);
}
template <typename... T, typename Formatter>
std::string StrJoin(const std::tuple<T...>& value, absl::string_view separator,
Formatter&& fmt) {
return strings_internal::JoinAlgorithm(value, separator, fmt);
}
template <typename Iterator>
std::string StrJoin(Iterator start, Iterator end, absl::string_view separator) {
return strings_internal::JoinRange(start, end, separator);
}
template <typename Range>
std::string StrJoin(const Range& range, absl::string_view separator) {
return strings_internal::JoinRange(range, separator);
}
template <typename T, typename = typename std::enable_if<!std::is_convertible<
T, absl::string_view>::value>::type>
std::string StrJoin(std::initializer_list<T> il, absl::string_view separator) {
return strings_internal::JoinRange(il, separator);
}
inline std::string StrJoin(std::initializer_list<absl::string_view> il,
absl::string_view separator) {
return strings_internal::JoinRange(il, separator);
}
template <typename... T>
std::string StrJoin(const std::tuple<T...>& value,
absl::string_view separator) {
return strings_internal::JoinTuple(value, separator,
std::index_sequence_for<T...>{});
}
ABSL_NAMESPACE_END
}
#endif | #include "absl/strings/str_join.h"
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <functional>
#include <initializer_list>
#include <iterator>
#include <map>
#include <memory>
#include <ostream>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
#include "gtest/gtest.h"
#include "absl/base/macros.h"
#include "absl/memory/memory.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_split.h"
#include "absl/strings/string_view.h"
namespace {
TEST(StrJoin, APIExamples) {
{
std::vector<std::string> v = {"foo", "bar", "baz"};
EXPECT_EQ("foo-bar-baz", absl::StrJoin(v, "-"));
}
{
std::vector<absl::string_view> v = {"foo", "bar", "baz"};
EXPECT_EQ("foo-bar-baz", absl::StrJoin(v, "-"));
}
{
std::vector<const char*> v = {"foo", "bar", "baz"};
EXPECT_EQ("foo-bar-baz", absl::StrJoin(v, "-"));
}
{
std::string a = "foo", b = "bar", c = "baz";
std::vector<char*> v = {&a[0], &b[0], &c[0]};
EXPECT_EQ("foo-bar-baz", absl::StrJoin(v, "-"));
}
{
std::vector<int> v = {1, 2, 3, -4};
EXPECT_EQ("1-2-3--4", absl::StrJoin(v, "-"));
}
{
std::string s = absl::StrJoin({"a", "b", "c"}, "-");
EXPECT_EQ("a-b-c", s);
}
{
std::string s = absl::StrJoin(std::make_tuple(123, "abc", 0.456), "-");
EXPECT_EQ("123-abc-0.456", s);
}
{
std::vector<std::unique_ptr<int>> v;
v.emplace_back(new int(1));
v.emplace_back(new int(2));
v.emplace_back(new int(3));
EXPECT_EQ("1-2-3", absl::StrJoin(v, "-"));
}
{
const int a[] = {1, 2, 3, -4};
EXPECT_EQ("1-2-3--4", absl::StrJoin(a, a + ABSL_ARRAYSIZE(a), "-"));
}
{
int x = 1, y = 2, z = 3;
std::vector<int*> v = {&x, &y, &z};
EXPECT_EQ("1-2-3", absl::StrJoin(v, "-"));
}
{
int x = 1, y = 2, z = 3;
int *px = &x, *py = &y, *pz = &z;
std::vector<int**> v = {&px, &py, &pz};
EXPECT_EQ("1-2-3", absl::StrJoin(v, "-"));
}
{
std::string a("a"), b("b");
std::vector<std::string*> v = {&a, &b};
EXPECT_EQ("a-b", absl::StrJoin(v, "-"));
}
{
std::map<std::string, int> m = {{"a", 1}, {"b", 2}, {"c", 3}};
EXPECT_EQ("a=1,b=2,c=3", absl::StrJoin(m, ",", absl::PairFormatter("=")));
}
{
const std::string s = "a=b=c=d";
EXPECT_EQ("a-b-c-d", absl::StrJoin(absl::StrSplit(s, "="), "-"));
}
{
std::vector<std::string> v;
EXPECT_EQ("", absl::StrJoin(v, "-"));
}
{
std::vector<std::string> v = {"foo"};
EXPECT_EQ("foo", absl::StrJoin(v, "-"));
}
{
std::vector<std::string> v = {""};
EXPECT_EQ("", absl::StrJoin(v, "-"));
}
{
std::vector<std::string> v = {"a", ""};
EXPECT_EQ("a-", absl::StrJoin(v, "-"));
}
{
std::vector<std::string> v = {"", ""};
EXPECT_EQ("-", absl::StrJoin(v, "-"));
}
{
std::vector<bool> v = {true, false, true};
EXPECT_EQ("1-0-1", absl::StrJoin(v, "-"));
}
}
TEST(StrJoin, CustomFormatter) {
std::vector<std::string> v{"One", "Two", "Three"};
{
std::string joined =
absl::StrJoin(v, "", [](std::string* out, const std::string& in) {
absl::StrAppend(out, "(", in, ")");
});
EXPECT_EQ("(One)(Two)(Three)", joined);
}
{
class ImmovableFormatter {
public:
void operator()(std::string* out, const std::string& in) {
absl::StrAppend(out, "(", in, ")");
}
ImmovableFormatter() {}
ImmovableFormatter(const ImmovableFormatter&) = delete;
};
EXPECT_EQ("(One)(Two)(Three)", absl::StrJoin(v, "", ImmovableFormatter()));
}
{
class OverloadedFormatter {
public:
void operator()(std::string* out, const std::string& in) {
absl::StrAppend(out, "(", in, ")");
}
void operator()(std::string* out, const std::string& in) const {
absl::StrAppend(out, "[", in, "]");
}
};
EXPECT_EQ("(One)(Two)(Three)", absl::StrJoin(v, "", OverloadedFormatter()));
const OverloadedFormatter fmt = {};
EXPECT_EQ("[One][Two][Three]", absl::StrJoin(v, "", fmt));
}
}
TEST(AlphaNumFormatter, FormatterAPI) {
auto f = absl::AlphaNumFormatter();
std::string s;
f(&s, "Testing: ");
f(&s, static_cast<int>(1));
f(&s, static_cast<int16_t>(2));
f(&s, static_cast<int64_t>(3));
f(&s, static_cast<float>(4));
f(&s, static_cast<double>(5));
f(&s, static_cast<unsigned>(6));
f(&s, static_cast<size_t>(7));
f(&s, absl::string_view(" OK"));
EXPECT_EQ("Testing: 1234567 OK", s);
}
TEST(AlphaNumFormatter, VectorOfBool) {
auto f = absl::AlphaNumFormatter();
std::string s;
std::vector<bool> v = {true, false, true};
f(&s, *v.cbegin());
f(&s, *v.begin());
f(&s, v[1]);
EXPECT_EQ("110", s);
}
TEST(AlphaNumFormatter, AlphaNum) {
auto f = absl::AlphaNumFormatter();
std::string s;
f(&s, absl::AlphaNum("hello"));
EXPECT_EQ("hello", s);
}
struct StreamableType {
std::string contents;
};
inline std::ostream& operator<<(std::ostream& os, const StreamableType& t) {
os << "Streamable:" << t.contents;
return os;
}
TEST(StreamFormatter, FormatterAPI) {
auto f = absl::StreamFormatter();
std::string s;
f(&s, "Testing: ");
f(&s, static_cast<int>(1));
f(&s, static_cast<int16_t>(2));
f(&s, static_cast<int64_t>(3));
f(&s, static_cast<float>(4));
f(&s, static_cast<double>(5));
f(&s, static_cast<unsigned>(6));
f(&s, static_cast<size_t>(7));
f(&s, absl::string_view(" OK "));
StreamableType streamable = {"object"};
f(&s, streamable);
EXPECT_EQ("Testing: 1234567 OK Streamable:object", s);
}
struct TestingParenFormatter {
template <typename T>
void operator()(std::string* s, const T& t) {
absl::StrAppend(s, "(", t, ")");
}
};
TEST(PairFormatter, FormatterAPI) {
{
const auto f = absl::PairFormatter("=");
std::string s;
f(&s, std::make_pair("a", "b"));
f(&s, std::make_pair(1, 2));
EXPECT_EQ("a=b1=2", s);
}
{
auto f = absl::PairFormatter(TestingParenFormatter(), "=",
TestingParenFormatter());
std::string s;
f(&s, std::make_pair("a", "b"));
f(&s, std::make_pair(1, 2));
EXPECT_EQ("(a)=(b)(1)=(2)", s);
}
}
TEST(DereferenceFormatter, FormatterAPI) {
{
const absl::strings_internal::DereferenceFormatterImpl<
absl::strings_internal::AlphaNumFormatterImpl>
f;
int x = 1, y = 2, z = 3;
std::string s;
f(&s, &x);
f(&s, &y);
f(&s, &z);
EXPECT_EQ("123", s);
}
{
absl::strings_internal::DereferenceFormatterImpl<
absl::strings_internal::DefaultFormatter<std::string>::Type>
f;
std::string x = "x";
std::string y = "y";
std::string z = "z";
std::string s;
f(&s, &x);
f(&s, &y);
f(&s, &z);
EXPECT_EQ(s, "xyz");
}
{
auto f = absl::DereferenceFormatter(TestingParenFormatter());
int x = 1, y = 2, z = 3;
std::string s;
f(&s, &x);
f(&s, &y);
f(&s, &z);
EXPECT_EQ("(1)(2)(3)", s);
}
{
absl::strings_internal::DereferenceFormatterImpl<
absl::strings_internal::AlphaNumFormatterImpl>
f;
auto x = std::unique_ptr<int>(new int(1));
auto y = std::unique_ptr<int>(new int(2));
auto z = std::unique_ptr<int>(new int(3));
std::string s;
f(&s, x);
f(&s, y);
f(&s, z);
EXPECT_EQ("123", s);
}
}
TEST(StrJoin, PublicAPIOverloads) {
std::vector<std::string> v = {"a", "b", "c"};
EXPECT_EQ("a-b-c",
absl::StrJoin(v.begin(), v.end(), "-", absl::AlphaNumFormatter()));
EXPECT_EQ("a-b-c", absl::StrJoin(v, "-", absl::AlphaNumFormatter()));
EXPECT_EQ("a-b-c", absl::StrJoin(v.begin(), v.end(), "-"));
EXPECT_EQ("a-b-c", absl::StrJoin(v, "-"));
}
TEST(StrJoin, Array) {
const absl::string_view a[] = {"a", "b", "c"};
EXPECT_EQ("a-b-c", absl::StrJoin(a, "-"));
}
TEST(StrJoin, InitializerList) {
{ EXPECT_EQ("a-b-c", absl::StrJoin({"a", "b", "c"}, "-")); }
{
auto a = {"a", "b", "c"};
EXPECT_EQ("a-b-c", absl::StrJoin(a, "-"));
}
{
std::initializer_list<const char*> a = {"a", "b", "c"};
EXPECT_EQ("a-b-c", absl::StrJoin(a, "-"));
}
{
std::initializer_list<std::string> a = {"a", "b", "c"};
EXPECT_EQ("a-b-c", absl::StrJoin(a, "-"));
}
{
std::initializer_list<absl::string_view> a = {"a", "b", "c"};
EXPECT_EQ("a-b-c", absl::StrJoin(a, "-"));
}
{
auto a = {"a", "b", "c"};
TestingParenFormatter f;
EXPECT_EQ("(a)-(b)-(c)", absl::StrJoin(a, "-", f));
}
{
EXPECT_EQ("1-2-3", absl::StrJoin({1, 2, 3}, "-"));
}
{
auto a = {1, 2, 3};
TestingParenFormatter f;
EXPECT_EQ("(1)-(2)-(3)", absl::StrJoin(a, "-", f));
}
}
TEST(StrJoin, StringViewInitializerList) {
{
std::string b = "b";
EXPECT_EQ("a-b-c", absl::StrJoin({"a", b, "c"}, "-"));
}
{
TestingParenFormatter f;
std::string b = "b";
EXPECT_EQ("(a)-(b)-(c)", absl::StrJoin({"a", b, "c"}, "-", f));
}
class NoCopy {
public:
explicit NoCopy(absl::string_view view) : view_(view) {}
NoCopy(const NoCopy&) = delete;
operator absl::string_view() { return view_; }
private:
absl::string_view view_;
};
{
EXPECT_EQ("a-b-c",
absl::StrJoin({NoCopy("a"), NoCopy("b"), NoCopy("c")}, "-"));
}
{
TestingParenFormatter f;
EXPECT_EQ("(a)-(b)-(c)",
absl::StrJoin({NoCopy("a"), NoCopy("b"), NoCopy("c")}, "-", f));
}
}
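// StrJoin also accepts std::tuple, joining heterogeneous element types;
// std::cref lets a tuple element refer to an existing value without copying.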
TEST(StrJoin, Tuple) {
EXPECT_EQ("", absl::StrJoin(std::make_tuple(), "-"));
EXPECT_EQ("hello", absl::StrJoin(std::make_tuple("hello"), "-"));
int x(10);
std::string y("hello");
double z(3.14);
EXPECT_EQ("10-hello-3.14", absl::StrJoin(std::make_tuple(x, y, z), "-"));
EXPECT_EQ("10-hello-3.14",
absl::StrJoin(std::make_tuple(x, std::cref(y), z), "-"));
struct TestFormatter {
char buffer[128];
void operator()(std::string* out, int v) {
snprintf(buffer, sizeof(buffer), "%#.8x", v);
out->append(buffer);
}
void operator()(std::string* out, double v) {
snprintf(buffer, sizeof(buffer), "%#.0f", v);
out->append(buffer);
}
void operator()(std::string* out, const std::string& v) {
snprintf(buffer, sizeof(buffer), "%.4s", v.c_str());
out->append(buffer);
}
};
EXPECT_EQ("0x0000000a-hell-3.",
absl::StrJoin(std::make_tuple(x, y, z), "-", TestFormatter()));
EXPECT_EQ(
"0x0000000a-hell-3.",
absl::StrJoin(std::make_tuple(x, std::cref(y), z), "-", TestFormatter()));
EXPECT_EQ("0x0000000a-hell-3.",
absl::StrJoin(std::make_tuple(&x, &y, &z), "-",
absl::DereferenceFormatter(TestFormatter())));
EXPECT_EQ("0x0000000a-hell-3.",
absl::StrJoin(std::make_tuple(absl::make_unique<int>(x),
absl::make_unique<std::string>(y),
absl::make_unique<double>(z)),
"-", absl::DereferenceFormatter(TestFormatter())));
EXPECT_EQ("0x0000000a-hell-3.",
absl::StrJoin(std::make_tuple(absl::make_unique<int>(x), &y, &z),
"-", absl::DereferenceFormatter(TestFormatter())));
}
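// TestValue plus the TestIterator/TestIteratorRange templates below model a
// minimal forward-iterator interface, verifying that StrJoin imposes no
// requirements beyond ==, !=, ++, and a value-returning operator*.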
class TestValue {
public:
TestValue(const char* data, size_t size) : data_(data), size_(size) {}
const char* data() const { return data_; }
size_t size() const { return size_; }
private:
const char* data_;
size_t size_;
};
template <typename ValueT>
class TestIterator {
public:
using iterator_category = std::forward_iterator_tag;
using value_type = ValueT;
using pointer = void;
using reference = const value_type&;
using difference_type = int;
static TestIterator begin(const std::vector<absl::string_view>& data) {
return TestIterator(&data, 0);
}
static TestIterator end(const std::vector<absl::string_view>& data) {
return TestIterator(nullptr, data.size());
}
bool operator==(const TestIterator& other) const {
return pos_ == other.pos_;
}
bool operator!=(const TestIterator& other) const {
return pos_ != other.pos_;
}
value_type operator*() const {
return ValueT((*data_)[pos_].data(), (*data_)[pos_].size());
}
TestIterator& operator++() {
++pos_;
return *this;
}
TestIterator operator++(int) {
TestIterator result = *this;
++(*this);
return result;
}
TestIterator& operator--() {
--pos_;
return *this;
}
TestIterator operator--(int) {
TestIterator result = *this;
--(*this);
return result;
}
private:
TestIterator(const std::vector<absl::string_view>* data, size_t pos)
: data_(data), pos_(pos) {}
const std::vector<absl::string_view>* data_;
size_t pos_;
};
template <typename ValueT>
class TestIteratorRange {
public:
explicit TestIteratorRange(const std::vector<absl::string_view>& data)
: begin_(TestIterator<ValueT>::begin(data)),
end_(TestIterator<ValueT>::end(data)) {}
const TestIterator<ValueT>& begin() const { return begin_; }
const TestIterator<ValueT>& end() const { return end_; }
private:
TestIterator<ValueT> begin_;
TestIterator<ValueT> end_;
};
TEST(StrJoin, TestIteratorRequirementsNoFormatter) {
const std::vector<absl::string_view> a = {"a", "b", "c"};
EXPECT_EQ("a-b-c",
absl::StrJoin(TestIteratorRange<absl::string_view>(a), "-"));
}
TEST(StrJoin, TestIteratorRequirementsCustomFormatter) {
const std::vector<absl::string_view> a = {"a", "b", "c"};
EXPECT_EQ("a-b-c",
absl::StrJoin(TestIteratorRange<TestValue>(a), "-",
[](std::string* out, const TestValue& value) {
absl::StrAppend(
out,
absl::string_view(value.data(), value.size()));
}));
}
} | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/strings/str_join.h | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/strings/str_join_test.cc | 03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4 |
f5f7b9eb-13c0-4e83-beba-fbe9edad6cf3 | cpp | tensorflow/tensorflow | device_event_mgr | tensorflow/core/common_runtime/device/device_event_mgr.cc | tensorflow/core/common_runtime/device/device_event_mgr_test.cc | #include "tensorflow/core/common_runtime/device/device_event_mgr.h"
#include <functional>
#include <memory>
#include <utility>
#include "tensorflow/core/platform/stacktrace.h"
#include "tensorflow/core/platform/stream_executor.h"
#include "tensorflow/core/protobuf/config.pb.h"
#include "tsl/platform/thread_annotations.h"
namespace tensorflow {
namespace {
static const int kNumThreads = 2;
}
namespace device_event_mgr {
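// ThreadLabel tags EventMgr threadpool threads with a thread-local marker so
// WarnIfInCallback can detect re-entry from an EventMgr callback thread.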
class ThreadLabel {
public:
static const char* GetValue() { return value_; }
static void SetValue(const char* v) { value_ = v; }
private:
static thread_local const char* value_;
};
thread_local const char* ThreadLabel::value_ = "";
void WarnIfInCallback(std::function<void()> f) {
const char* label = ThreadLabel::GetValue();
if (label && !strcmp(label, "device_event_mgr")) {
if (f) {
f();
} else {
LOG(WARNING) << "Executing inside EventMgr callback thread: "
<< CurrentStackTrace();
}
}
}
void InitThreadpoolLabels(thread::ThreadPool* threadpool) {
static const char* label = "device_event_mgr";
mutex mu;
int init_count = 0;
condition_variable all_initialized;
int exit_count = 0;
condition_variable ready_to_exit;
const int num_threads = threadpool->NumThreads();
for (int i = 0; i < num_threads; ++i) {
threadpool->Schedule([num_threads, &mu, &init_count, &all_initialized,
&exit_count, &ready_to_exit]() {
device_event_mgr::ThreadLabel::SetValue(label);
mutex_lock l(mu);
++init_count;
if (init_count == num_threads) {
all_initialized.notify_all();
}
while (init_count < num_threads) {
all_initialized.wait(l);
}
if (++exit_count == num_threads) {
ready_to_exit.notify_all();
}
});
}
{
mutex_lock l(mu);
while (exit_count < num_threads) {
ready_to_exit.wait(l);
}
}
}
}
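// The constructor labels the threadpool (so WarnIfInCallback works) and then
// starts the poll loop; polling_active_delay_usecs_ falls back to 10 when the
// GPUOptions field is unset (zero), avoiding a busy spin.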
EventMgr::EventMgr(se::StreamExecutor* se, const GPUOptions& gpu_options)
: exec_(se),
polling_active_delay_usecs_(gpu_options.polling_active_delay_usecs()
? gpu_options.polling_active_delay_usecs()
: 10),
threadpool_(Env::Default(), "Device_Event_Manager", kNumThreads) {
device_event_mgr::InitThreadpoolLabels(&threadpool_);
StartPollingLoop();
}
EventMgr::~EventMgr() {
StopPollingLoop();
for (auto& [stream, stream_callbacks] : callbacks_) {
for (auto& [event, callback] : stream_callbacks) {
threadpool_.Schedule(std::move(callback));
}
}
}
void EventMgr::StartPollingLoop() {
CHECK(polling_stopped_ == nullptr);
{
mutex_lock l(mu_);
stop_polling_ = false;
}
polling_stopped_ = std::make_unique<Notification>();
threadpool_.Schedule([this]() { PollLoop(); });
}
void EventMgr::StopPollingLoop() {
if (polling_stopped_) {
{
mutex_lock l(mu_);
stop_polling_ = true;
events_pending_.notify_all();
}
polling_stopped_->WaitForNotification();
polling_stopped_.reset(nullptr);
}
}
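// PollLoop runs on the threadpool: it blocks on events_pending_ while no
// callbacks are queued, polls all streams under mu_, frees completed work
// outside the lock, and sleeps polling_active_delay_usecs_ between polls
// while events remain pending.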
void EventMgr::PollLoop() {
ToFreeVector to_free;
while (true) {
bool events_still_pending;
{
mutex_lock l(mu_);
if (stop_polling_) {
break;
}
if (callbacks_.empty()) {
events_pending_.wait(l);
}
PollEvents(nullptr, &to_free);
events_still_pending = !callbacks_.empty();
}
FreeMemory(to_free);
to_free.clear();
if (events_still_pending) {
Env::Default()->SleepForMicroseconds(polling_active_delay_usecs_);
}
}
polling_stopped_->Notify();
}
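// EnqueueCallback assumes mu_ is held: it records a recycled se::Event on the
// stream, queues the callback behind it, and wakes the poll loop when the
// callback map transitions from empty.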
void EventMgr::EnqueueCallback(se::Stream* stream, std::function<void()> func) {
VLOG(2) << "EnqueueCallback with one or more callbacks pending on "
<< callbacks_.size() << " streams and " << free_events_.size()
<< " unused event objects.";
if (free_events_.empty()) {
free_events_.emplace_back(exec_->CreateEvent().value());
}
std::unique_ptr<se::Event> e = std::move(free_events_.back());
free_events_.pop_back();
stream->RecordEvent(e.get()).IgnoreError();
bool was_empty = callbacks_.empty();
callbacks_[stream].push_back({std::move(e), std::move(func)});
if (was_empty) {
events_pending_.notify_all();
}
}
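// PollEvents also assumes mu_ is held. Events on a stream complete in the
// order they were recorded, so the scan of each stream's queue stops at the
// first still-pending event; completed se::Event objects are recycled through
// free_events_ and their callbacks handed back via *to_free for dispatch
// outside the lock.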
void EventMgr::PollEvents(se::Stream* stream,
absl::InlinedVector<InUse, 4UL>* to_free) {
VLOG(2) << "PollEvents with one or more callbacks pending on "
<< callbacks_.size() << " streams and " << free_events_.size()
<< " unused event objects.";
auto poll_events_for_stream_it =
[&](auto& stream_it) TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
auto& stream_callbacks = stream_it->second;
auto it = stream_callbacks.begin();
while (it != stream_callbacks.end()) {
auto& [event, callback] = *it;
se::Event::Status s = event->PollForStatus();
bool keep_looping = true;
switch (s) {
case se::Event::Status::kUnknown:
case se::Event::Status::kError:
LOG(FATAL) << "Unexpected Event status: " << static_cast<int>(s);
break;
case se::Event::Status::kPending:
keep_looping = false;
break;
case se::Event::Status::kComplete:
free_events_.push_back(std::move(event));
to_free->push_back({nullptr, std::move(callback)});
++it;
break;
}
if (!keep_looping) {
break;
}
}
stream_callbacks.erase(stream_callbacks.begin(), it);
if (stream_callbacks.empty()) {
callbacks_.erase(stream_it++);
} else {
stream_it++;
}
};
if (stream != nullptr) {
auto stream_it = callbacks_.find(stream);
if (stream_it != callbacks_.end()) {
poll_events_for_stream_it(stream_it);
}
} else {
for (auto stream_it = callbacks_.begin(); stream_it != callbacks_.end();) {
poll_events_for_stream_it(stream_it);
}
}
}
EventMgrFactory* EventMgrFactory::Singleton() {
static EventMgrFactory* instance = new EventMgrFactory;
return instance;
}
EventMgr* EventMgrFactory::GetEventMgr(se::StreamExecutor* se,
const GPUOptions& gpu_options) {
mutex_lock l(mu_);
auto itr = event_mgr_map_.find(se);
if (itr == event_mgr_map_.end()) {
auto event_mgr = new EventMgr(se, gpu_options);
event_mgr_map_[se] = event_mgr;
return event_mgr;
} else {
return itr->second;
}
}
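// Typical use (a sketch; ThenExecute is declared in the accompanying header):
//   EventMgr* em = EventMgrFactory::Singleton()->GetEventMgr(se, GPUOptions());
//   em->ThenExecute(stream, [] { /* runs after work queued on stream */ });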
} | #if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
#include "tensorflow/core/common_runtime/device/device_event_mgr.h"
#include <atomic>
#include "xla/stream_executor/gpu/gpu_init.h"
#include "xla/tsl/framework/device_id.h"
#include "tensorflow/core/common_runtime/dma_helper.h"
#include "tensorflow/core/common_runtime/gpu/gpu_device.h"
#include "tensorflow/core/common_runtime/gpu/gpu_process_state.h"
#include "tensorflow/core/framework/fake_input.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/lib/core/notification.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/stream_executor.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
#include "tensorflow/core/protobuf/config.pb.h"
#include "tensorflow/core/public/version.h"
namespace tensorflow {
class TEST_EventMgr : public EventMgr {
public:
TEST_EventMgr(se::StreamExecutor* se, const GPUOptions& gpu_options)
: EventMgr(se, gpu_options) {}
};
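// TEST_EventMgrHelper stops the background polling loop on construction so
// tests can pump PollEvents deterministically and inspect queue/free sizes.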
class TEST_EventMgrHelper {
public:
explicit TEST_EventMgrHelper(EventMgr* em) : em_(em) {
StopPollingLoop();
}
size_t queue_size() {
mutex_lock l(em_->mu_);
size_t n = 0;
for (const auto& [stream, events_and_callbacks] : em_->callbacks_) {
n += events_and_callbacks.size();
}
return n;
}
size_t free_size() {
mutex_lock l(em_->mu_);
return em_->free_events_.size();
}
void PollEvents() {
while (queue_size() > 0) {
EventMgr::ToFreeVector to_free;
{
mutex_lock l(em_->mu_);
em_->PollEvents(nullptr, &to_free);
}
em_->FreeMemory(to_free);
}
}
void StopPollingLoop() { return em_->StopPollingLoop(); }
void StartPollingLoop() { return em_->StartPollingLoop(); }
private:
EventMgr* em_;
};
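// live_tensor_bytes and TestTensorBuffer track outstanding test-buffer bytes,
// letting tests observe when deferred deallocations actually run.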
static std::atomic_int_fast64_t live_tensor_bytes(0);
class TestTensorBuffer : public TensorBuffer {
public:
explicit TestTensorBuffer(size_t bytes)
: TensorBuffer(nullptr), bytes_(bytes) {
live_tensor_bytes += bytes_;
}
~TestTensorBuffer() override { live_tensor_bytes -= bytes_; }
size_t size() const override { return bytes_; }
TensorBuffer* root_buffer() override { return nullptr; }
void FillAllocationDescription(AllocationDescription* arg) const override {}
private:
size_t bytes_;
};
namespace {
TEST(EventMgr, Empty) {
auto stream_exec = se::GPUMachineManager()->ExecutorForDevice(0).value();
TEST_EventMgr em(stream_exec, GPUOptions());
TEST_EventMgrHelper th(&em);
EXPECT_EQ(0, th.queue_size());
EXPECT_EQ(0, th.free_size());
}
TEST(EventMgr, WarnIfInCallback) {
auto stream_exec = se::GPUMachineManager()->ExecutorForDevice(0).value();
TEST_EventMgr em(stream_exec, GPUOptions());
TEST_EventMgrHelper th(&em);
TF_ASSERT_OK_AND_ASSIGN(auto stream, stream_exec->CreateStream());
bool hit = false;
th.StartPollingLoop();
device_event_mgr::WarnIfInCallback([&hit] { hit = true; });
EXPECT_FALSE(hit);
Notification note;
  em.ThenExecute(stream.get(), [&hit, &note]() {
    device_event_mgr::WarnIfInCallback([&hit, &note] {
hit = true;
note.Notify();
});
});
note.WaitForNotification();
EXPECT_TRUE(hit);
}
}
class GPUDeviceTestHelper {
public:
GPUDeviceTestHelper(size_t memory_limit, int pending_cap) {
SessionOptions sops;
device_ =
DeviceFactory::NewDevice(DEVICE_GPU, sops, "/job:a/replica:0/task:0");
gpu_.reset(reinterpret_cast<BaseGPUDevice*>(device_.release()));
gpu_allocator_ = GPUProcessState::singleton()->GetGPUAllocator(
GPUOptions(), tsl::TfDeviceId(0), memory_limit, {});
host_allocator_ = GPUProcessState::singleton()->GetGpuHostAllocator(
{}, 0);
}
BaseGPUDevice* gpu() { return gpu_.get(); }
Allocator* gpu_allocator() { return gpu_allocator_; }
Allocator* host_allocator() { return host_allocator_; }
se::Stream* compute_stream() { return gpu_->stream_->compute; }
se::Stream* h2d_stream() { return gpu_->stream_->host_to_device; }
se::Stream* d2h_stream() { return gpu_->stream_->device_to_host; }
se::Stream* d2d_stream() { return gpu_->stream_->device_to_device[0]; }
EventMgr* event_mgr() { return gpu_->em_; }
int pending_cap() { return gpu_->pending_cap_; }
private:
std::unique_ptr<Device> device_;
std::unique_ptr<BaseGPUDevice> gpu_;
Allocator* gpu_allocator_;
Allocator* host_allocator_;
};
namespace {
class EMBenchmarkHelper {
GPUDeviceTestHelper* gpu_helper_;
std::vector<std::unique_ptr<OpKernel>> add_kernels_;
std::vector<OpKernelContext::Params*> add_params_;
std::vector<std::unique_ptr<OpKernelContext>> add_contexts_;
NodeDef add_node_def_;
NodeDef id_node_def_;
gtl::InlinedVector<TensorValue, 4> add_inputs_;
std::vector<AllocatorAttributes> allocator_attrs_;
gtl::InlinedVector<Tensor, 4> gpu_inputs_;
gtl::InlinedVector<Tensor, 4> gpu_outputs_;
gtl::InlinedVector<Tensor, 4> host_inputs_;
gtl::InlinedVector<Tensor, 4> host_outputs_;
public:
static constexpr int kTDim = 1024;
int num_ops() const { return add_kernels_.size(); }
size_t tensor_size() const {
return add_inputs_.empty() ? 0 : add_inputs_[0]->NumElements();
}
Tensor& host_outputs(int i) { return host_outputs_[i]; }
Tensor& host_inputs(int i) { return host_inputs_[i]; }
EMBenchmarkHelper(GPUDeviceTestHelper* h) : gpu_helper_(h) {}
void ReInit(int num_ops, int tensor_size) {
gpu_inputs_.clear();
while (gpu_inputs_.size() < 2) {
gpu_inputs_.push_back(Tensor(gpu_helper_->gpu_allocator(), DT_FLOAT,
{tensor_size}, AllocationAttributes()));
}
gpu_outputs_.clear();
while (gpu_outputs_.empty()) {
gpu_outputs_.push_back(Tensor(gpu_helper_->gpu_allocator(), DT_FLOAT,
{tensor_size}, AllocationAttributes()));
}
host_inputs_.clear();
while (host_inputs_.size() < 2) {
int instance_index = host_inputs_.size();
host_inputs_.push_back(Tensor(gpu_helper_->host_allocator(), DT_FLOAT,
{tensor_size}, AllocationAttributes()));
for (int i = 0; i < tensor_size; ++i) {
host_inputs_.back().flat<float>()(i) =
i * (1.0 + (0.5 * instance_index));
}
}
host_outputs_.clear();
while (host_outputs_.empty()) {
host_outputs_.push_back(Tensor(gpu_helper_->host_allocator(), DT_FLOAT,
{tensor_size}, AllocationAttributes()));
for (int i = 0; i < tensor_size; ++i) {
host_outputs_.back().flat<float>()(i) = -1;
}
}
add_kernels_.clear();
add_params_.clear();
while (add_kernels_.size() < num_ops) {
MakeAddOp();
}
}
std::unique_ptr<OpKernel> GetOpKernel(const NodeDef& node_def,
Status* status) {
return CreateOpKernel("GPU", gpu_helper_->gpu(),
gpu_helper_->gpu_allocator(), node_def,
TF_GRAPH_DEF_VERSION, status);
}
void MakeAddOp() {
if (add_kernels_.empty()) {
TF_ASSERT_OK(NodeDefBuilder("add_op", "Add")
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Device("/job:a/replica:0/task:0/GPU:0")
.Finalize(&add_node_def_));
}
Status status;
add_kernels_.emplace_back(GetOpKernel(add_node_def_, &status));
TF_ASSERT_OK(status);
add_params_.push_back(new OpKernelContext::Params);
PrepOpKernel(add_params_.back(), add_kernels_.back().get());
}
void SetOutputAttrs(OpKernelContext::Params* params,
std::vector<AllocatorAttributes>* attrs) {
attrs->clear();
for (int index = 0; index < params->op_kernel->num_outputs(); index++) {
AllocatorAttributes attr;
const bool on_host =
(params->op_kernel->output_memory_types()[index] == HOST_MEMORY);
attr.set_on_host(on_host);
attrs->push_back(attr);
}
params->output_attr_array = attrs->data();
params->forward_from_array = {};
}
void PrepOpKernel(OpKernelContext::Params* params, OpKernel* kernel) {
params->step_id = 1;
params->device = gpu_helper_->gpu();
params->log_memory = false;
params->rendezvous = nullptr;
params->collective_executor = nullptr;
params->session_state = nullptr;
params->session_handle = "session_handle";
params->tensor_store = nullptr;
params->cancellation_manager = nullptr;
params->call_frame = nullptr;
params->function_library = nullptr;
params->runner = nullptr;
params->graph_collector = nullptr;
params->step_container = nullptr;
params->slice_reader_cache = nullptr;
params->resource_manager = gpu_helper_->gpu()->resource_manager();
params->stats_collector = nullptr;
params->inc_num_deferred_ops_function = nullptr;
params->dec_num_deferred_ops_function = nullptr;
params->op_device_context = nullptr;
params->track_allocations = false;
params->op_kernel = kernel;
params->frame_iter = FrameAndIter(0, 0);
params->is_input_dead = false;
if (add_inputs_.empty()) {
add_inputs_.resize(2);
add_inputs_[0] = TensorValue(&gpu_inputs_[0]);
add_inputs_[1] = TensorValue(&gpu_inputs_[1]);
}
params->inputs = add_inputs_;
SetOutputAttrs(params, &allocator_attrs_);
}
struct TimeSet {
int iter = 0;
int64_t start = 0;
int64_t copy_done = 0;
int64_t compute_done = 0;
int64_t final_copy = 0;
int64_t all_done = 0;
};
void DisplayTimes(std::vector<TimeSet>* times) {
LOG(INFO) << "Summarize set of " << times->size() << " iters";
for (auto& ts : *times) {
ts.final_copy = ts.all_done - ts.compute_done;
ts.compute_done = ts.compute_done - ts.copy_done;
ts.copy_done = ts.copy_done - ts.start;
ts.all_done = ts.all_done - ts.start;
}
struct TSSort {
bool operator()(const TimeSet& a, const TimeSet& b) {
return a.all_done < b.all_done;
}
};
std::sort(times->begin(), times->end(), TSSort());
int64_t last_time = 0;
for (int i = 0; i < times->size(); ++i) {
if (i == (times->size() - 1) ||
(times->at(i).all_done >= (1.05 * last_time))) {
LOG(INFO) << "rank " << i << " iter: " << times->at(i).iter
<< " copy: " << times->at(i).copy_done
<< " compute: " << times->at(i).compute_done
<< " copy back: " << times->at(i).final_copy
<< " sum: " << times->at(i).all_done;
last_time = times->at(i).all_done;
}
}
}
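// DoAddChain runs `rounds` iterations of: copy both host inputs to the GPU on
// the h2d stream, chain `adds_per_copy` Add ops on the compute stream, copy
// the result back on the d2h stream, and use EventMgr callbacks both to
// signal completion and (optionally) to timestamp each phase.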
void DoAddChain(int adds_per_copy, int rounds, bool event_after_add,
std::function<void()> callback, std::vector<TimeSet>* times) {
Tensor alias0(gpu_inputs_[0]);
Tensor alias1(gpu_inputs_[1]);
for (int r = 0; r < rounds; ++r) {
if (times) {
times->at(r).iter = r;
times->at(r).start = Env::Default()->NowMicros();
}
TF_ASSERT_OK(
gpu_helper_->h2d_stream()->WaitFor(gpu_helper_->compute_stream()));
const int64_t src_bytes = host_inputs_[0].TotalBytes();
se::DeviceMemoryBase gpu_dst_ptr0(DMAHelper::base(&gpu_inputs_[0]),
src_bytes);
TF_ASSERT_OK(gpu_helper_->h2d_stream()->Memcpy(
&gpu_dst_ptr0, DMAHelper::base(&host_inputs_[0]), src_bytes));
se::DeviceMemoryBase gpu_dst_ptr1(DMAHelper::base(&gpu_inputs_[1]),
src_bytes);
TF_ASSERT_OK(gpu_helper_->h2d_stream()->Memcpy(
&gpu_dst_ptr1, DMAHelper::base(&host_inputs_[1]), src_bytes));
TF_ASSERT_OK(
gpu_helper_->compute_stream()->WaitFor(gpu_helper_->h2d_stream()));
if (times) {
gpu_helper_->event_mgr()->ThenExecute(
gpu_helper_->compute_stream(), [times, r]() {
times->at(r).copy_done = Env::Default()->NowMicros();
});
}
std::unique_ptr<OpKernelContext> ctx;
for (int apc = 0; apc < adds_per_copy; ++apc) {
ctx.reset(new OpKernelContext(add_params_[apc], 1));
gpu_helper_->gpu()->Compute(add_kernels_[apc].get(), ctx.get());
TF_ASSERT_OK(ctx->status());
if (event_after_add) {
gpu_helper_->event_mgr()->ThenExecute(gpu_helper_->compute_stream(),
callback);
}
}
if (times) {
gpu_helper_->event_mgr()->ThenExecute(
gpu_helper_->compute_stream(), [times, r]() {
times->at(r).compute_done = Env::Default()->NowMicros();
});
}
TF_ASSERT_OK(
gpu_helper_->d2h_stream()->WaitFor(gpu_helper_->compute_stream()));
const int64_t return_bytes = ctx->mutable_output(0)->TotalBytes();
se::DeviceMemoryBase gpu_src_ptr(DMAHelper::base(ctx->mutable_output(0)),
return_bytes);
TF_ASSERT_OK(gpu_helper_->d2h_stream()->Memcpy(
DMAHelper::base(&host_outputs_[0]), gpu_src_ptr, return_bytes));
gpu_helper_->event_mgr()->ThenExecute(gpu_helper_->d2h_stream(),
callback);
if (times) {
gpu_helper_->event_mgr()->ThenExecute(
gpu_helper_->d2h_stream(), [times, r]() {
times->at(r).all_done = Env::Default()->NowMicros();
});
}
}
}
};
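// BM_no_ops measures raw callback throughput: several closures race to
// enqueue ThenExecute callbacks on one stream with no device work in flight,
// and the benchmark waits until every callback has fired.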
static void BM_no_ops(::testing::benchmark::State& state) {
const int threads = state.range(0);
const int iters = state.max_iterations;
auto stream_exec = se::GPUMachineManager()->ExecutorForDevice(0).value();
TF_ASSERT_OK_AND_ASSIGN(auto stream, stream_exec->CreateStream());
TEST_EventMgr em(stream_exec, GPUOptions());
auto benchmark_exec = [&]() {
std::atomic<int> counter;
counter.store(0, std::memory_order_seq_cst);
se::Stream* stream_ptr = stream.get();
auto runner = [&em, &counter, stream_ptr, iters]() {
auto callback = [&counter]() { counter.fetch_add(1); };
for (int i = 0; i < iters; ++i) {
em.ThenExecute(stream_ptr, callback);
}
};
for (int t = 0; t < threads; ++t) {
Env::Default()->SchedClosure(runner);
}
int expected = iters * threads;
while (counter < expected) {
Env::Default()->SleepForMicroseconds(1);
}
};
#ifdef PLATFORM_GOOGLE
while (state.KeepRunningBatch(state.max_iterations)) {
benchmark_exec();
}
#else
state.ResumeTiming();
benchmark_exec();
state.PauseTiming();
#endif
}
BENCHMARK(BM_no_ops)->UseRealTime()->Arg(4)->Arg(8)->Arg(32);
GPUDeviceTestHelper* gpu_helper = nullptr;
EMBenchmarkHelper* bm_helper = nullptr;
mutex helper_mu;
#ifdef PLATFORM_GOOGLE
static void BM_chain_ops(::testing::benchmark::State& state, int tensor_size,
int adds_per_round, bool event_after_add,
int pending_cap) {
#else
static void BM_chain_ops(::testing::benchmark::State& state, int tensor_size,
int adds_per_round, bool event_after_add,
int pending_cap, int threads) {
#endif
const int iters = state.max_iterations;
{
mutex_lock l(helper_mu);
if (gpu_helper && gpu_helper->pending_cap() != pending_cap) {
delete bm_helper;
bm_helper = nullptr;
delete gpu_helper;
gpu_helper = nullptr;
}
if (!gpu_helper) {
gpu_helper = new GPUDeviceTestHelper(1 << 24, pending_cap);
bm_helper = new EMBenchmarkHelper(gpu_helper);
}
if (bm_helper->num_ops() != adds_per_round ||
bm_helper->tensor_size() != tensor_size) {
bm_helper->ReInit(adds_per_round, tensor_size);
}
}
std::vector<EMBenchmarkHelper::TimeSet> times;
std::vector<EMBenchmarkHelper::TimeSet>* time_ptr = nullptr;
if (VLOG_IS_ON(1)) {
times.resize(iters);
    time_ptr = &times;
}
std::atomic<int> counter;
counter.store(0, std::memory_order_seq_cst);
auto callback = [&counter]() { counter.fetch_add(1); };
int expected = 1 + (event_after_add ? adds_per_round : 0);
bm_helper->DoAddChain(adds_per_round, 1, event_after_add, callback, nullptr);
while (counter < expected) {
Env::Default()->SleepForMicroseconds(1);
}
counter = 0;
#ifdef PLATFORM_GOOGLE
while (state.KeepRunningBatch(state.max_iterations)) {
expected = iters * (1 + (event_after_add ? adds_per_round : 0));
bm_helper->DoAddChain(adds_per_round, iters, event_after_add, callback,
time_ptr);
while (counter < expected) {
Env::Default()->SleepForMicroseconds(1);
}
}
#else
state.ResumeTiming();
expected = threads * iters * (1 + (event_after_add ? adds_per_round : 0));
for (int i = 0; i < threads; ++i) {
Env::Default()->SchedClosure(
[callback, iters, adds_per_round, event_after_add, time_ptr]() {
bm_helper->DoAddChain(adds_per_round, iters, event_after_add,
callback, time_ptr);
});
}
while (counter < expected) {
Env::Default()->SleepForMicroseconds(1);
}
state.PauseTiming();
#endif
VLOG(1) << "counter = " << counter << " post_execute Output: "
<< bm_helper->host_outputs(0).SummarizeValue(64);
if (time_ptr) bm_helper->DisplayTimes(time_ptr);
}
#ifdef PLATFORM_GOOGLE
static void BM_chain_1024_1_false(::testing::benchmark::State& state) {
BM_chain_ops(state, 1024, 1, false, 0);
}
static void BM_chain_1024_1_true(::testing::benchmark::State& state) {
BM_chain_ops(state, 1024, 1, true, 0);
}
static void BM_chain_1024_10_false(::testing::benchmark::State& state) {
BM_chain_ops(state, 1024, 10, false, 0);
}
static void BM_chain_1024_10_true(::testing::benchmark::State& state) {
BM_chain_ops(state, 1024, 10, true, 0);
}
static void BM_chain_1024_100_false(::testing::benchmark::State& state) {
BM_chain_ops(state, 1024, 100, false, 0);
}
static void BM_chain_1024_100_true(::testing::benchmark::State& state) {
BM_chain_ops(state, 1024, 100, true, 0);
}
static void BM_chain_1M_1_false(::testing::benchmark::State& state) {
BM_chain_ops(state, 1 << 20, 1, false, 0);
}
static void BM_chain_1M_1_true(::testing::benchmark::State& state) {
BM_chain_ops(state, 1 << 20, 1, true, 0);
}
static void BM_chain_1M_10_false(::testing::benchmark::State& state) {
BM_chain_ops(state, 1 << 20, 10, false, 0);
}
static void BM_chain_1M_10_true(::testing::benchmark::State& state) {
BM_chain_ops(state, 1 << 20, 10, true, 0);
}
static void BM_chain_1M_100_false(::testing::benchmark::State& state) {
BM_chain_ops(state, 1 << 20, 100, false, 0);
}
static void BM_chain_1M_100_true(::testing::benchmark::State& state) {
BM_chain_ops(state, 1 << 20, 100, true, 0);
}
BENCHMARK(BM_chain_1024_1_false)->UseRealTime()->Threads(1);
BENCHMARK(BM_chain_1024_1_true)->UseRealTime()->Threads(1);
BENCHMARK(BM_chain_1024_1_false)->UseRealTime()->Threads(2);
BENCHMARK(BM_chain_1024_1_true)->UseRealTime()->Threads(2);
BENCHMARK(BM_chain_1024_1_false)->UseRealTime()->Threads(8);
BENCHMARK(BM_chain_1024_1_true)->UseRealTime()->Threads(8);
BENCHMARK(BM_chain_1024_10_false)->UseRealTime()->Threads(1);
BENCHMARK(BM_chain_1024_10_true)->UseRealTime()->Threads(1);
BENCHMARK(BM_chain_1024_10_false)->UseRealTime()->Threads(8);
BENCHMARK(BM_chain_1024_10_true)->UseRealTime()->Threads(8);
BENCHMARK(BM_chain_1024_100_false)->UseRealTime()->Threads(1);
BENCHMARK(BM_chain_1024_100_true)->UseRealTime()->Threads(1);
BENCHMARK(BM_chain_1024_100_false)->UseRealTime()->Threads(2);
BENCHMARK(BM_chain_1024_100_true)->UseRealTime()->Threads(2);
BENCHMARK(BM_chain_1024_100_false)->UseRealTime()->Threads(8);
BENCHMARK(BM_chain_1024_100_true)->UseRealTime()->Threads(8);
BENCHMARK(BM_chain_1M_1_false)->UseRealTime()->Threads(1);
BENCHMARK(BM_chain_1M_1_true)->UseRealTime()->Threads(1);
BENCHMARK(BM_chain_1M_1_false)->UseRealTime()->Threads(2);
BENCHMARK(BM_chain_1M_1_true)->UseRealTime()->Threads(2);
BENCHMARK(BM_chain_1M_1_false)->UseRealTime()->Threads(8);
BENCHMARK(BM_chain_1M_1_true)->UseRealTime()->Threads(8);
BENCHMARK(BM_chain_1M_10_false)->UseRealTime()->Threads(1);
BENCHMARK(BM_chain_1M_10_true)->UseRealTime()->Threads(1);
BENCHMARK(BM_chain_1M_10_false)->UseRealTime()->Threads(8);
BENCHMARK(BM_chain_1M_10_true)->UseRealTime()->Threads(8);
BENCHMARK(BM_chain_1M_100_false)->UseRealTime()->Threads(1);
BENCHMARK(BM_chain_1M_100_true)->UseRealTime()->Threads(1);
BENCHMARK(BM_chain_1M_100_false)->UseRealTime()->Threads(2);
BENCHMARK(BM_chain_1M_100_true)->UseRealTime()->Threads(2);
BENCHMARK(BM_chain_1M_100_false)->UseRealTime()->Threads(8);
BENCHMARK(BM_chain_1M_100_true)->UseRealTime()->Threads(8);
#else
static void BM_chain_1024_1_false(::testing::benchmark::State& state) {
const int threads = state.range(0);
BM_chain_ops(state, 1024, 1, false, 0, threads);
}
static void BM_chain_1024_1_true(::testing::benchmark::State& state) {
const int threads = state.range(0);
BM_chain_ops(state, 1024, 1, true, 0, threads);
}
static void BM_chain_1024_10_false(::testing::benchmark::State& state) {
const int threads = state.range(0);
BM_chain_ops(state, 1024, 10, false, 0, threads);
}
static void BM_chain_1024_10_true(::testing::benchmark::State& state) {
const int threads = state.range(0);
BM_chain_ops(state, 1024, 10, true, 0, threads);
}
static void BM_chain_1024_100_false(::testing::benchmark::State& state) {
const int threads = state.range(0);
BM_chain_ops(state, 1024, 100, false, 0, threads);
}
static void BM_chain_1024_100_true(::testing::benchmark::State& state) {
const int threads = state.range(0);
BM_chain_ops(state, 1024, 100, true, 0, threads);
}
static void BM_chain_1M_1_false(::testing::benchmark::State& state) {
const int threads = state.range(0);
BM_chain_ops(state, 1 << 20, 1, false, 0, threads);
}
static void BM_chain_1M_1_true(::testing::benchmark::State& state) {
const int threads = state.range(0);
BM_chain_ops(state, 1 << 20, 1, true, 0, threads);
}
static void BM_chain_1M_10_false(::testing::benchmark::State& state) {
const int threads = state.range(0);
BM_chain_ops(state, 1 << 20, 10, false, 0, threads);
}
static void BM_chain_1M_10_true(::testing::benchmark::State& state) {
const int threads = state.range(0);
BM_chain_ops(state, 1 << 20, 10, true, 0, threads);
}
static void BM_chain_1M_100_false(::testing::benchmark::State& state) {
const int threads = state.range(0);
BM_chain_ops(state, 1 << 20, 100, false, 0, threads);
}
static void BM_chain_1M_100_true(::testing::benchmark::State& state) {
const int threads = state.range(0);
BM_chain_ops(state, 1 << 20, 100, true, 0, threads);
}
BENCHMARK(BM_chain_1024_1_false)->UseRealTime()->Arg(1);
BENCHMARK(BM_chain_1024_1_true)->UseRealTime()->Arg(1);
BENCHMARK(BM_chain_1024_1_false)->UseRealTime()->Arg(2);
BENCHMARK(BM_chain_1024_1_true)->UseRealTime()->Arg(2);
BENCHMARK(BM_chain_1024_1_false)->UseRealTime()->Arg(8);
BENCHMARK(BM_chain_1024_1_true)->UseRealTime()->Arg(8);
BENCHMARK(BM_chain_1024_10_false)->UseRealTime()->Arg(1);
BENCHMARK(BM_chain_1024_10_true)->UseRealTime()->Arg(1);
BENCHMARK(BM_chain_1024_10_false)->UseRealTime()->Arg(8);
BENCHMARK(BM_chain_1024_10_true)->UseRealTime()->Arg(8);
BENCHMARK(BM_chain_1024_100_false)->UseRealTime()->Arg(1);
BENCHMARK(BM_chain_1024_100_true)->UseRealTime()->Arg(1);
BENCHMARK(BM_chain_1024_100_false)->UseRealTime()->Arg(2);
BENCHMARK(BM_chain_1024_100_true)->UseRealTime()->Arg(2);
BENCHMARK(BM_chain_1024_100_false)->UseRealTime()->Arg(8);
BENCHMARK(BM_chain_1024_100_true)->UseRealTime()->Arg(8);
BENCHMARK(BM_chain_1M_1_false)->UseRealTime()->Arg(1);
BENCHMARK(BM_chain_1M_1_true)->UseRealTime()->Arg(1);
BENCHMARK(BM_chain_1M_1_false)->UseRealTime()->Arg(2);
BENCHMARK(BM_chain_1M_1_true)->UseRealTime()->Arg(2);
BENCHMARK(BM_chain_1M_1_false)->UseRealTime()->Arg(8);
BENCHMARK(BM_chain_1M_1_true)->UseRealTime()->Arg(8);
BENCHMARK(BM_chain_1M_10_false)->UseRealTime()->Arg(1);
BENCHMARK(BM_chain_1M_10_true)->UseRealTime()->Arg(1);
BENCHMARK(BM_chain_1M_10_false)->UseRealTime()->Arg(8);
BENCHMARK(BM_chain_1M_10_true)->UseRealTime()->Arg(8);
BENCHMARK(BM_chain_1M_100_false)->UseRealTime()->Arg(1);
BENCHMARK(BM_chain_1M_100_true)->UseRealTime()->Arg(1);
BENCHMARK(BM_chain_1M_100_false)->UseRealTime()->Arg(2);
BENCHMARK(BM_chain_1M_100_true)->UseRealTime()->Arg(2);
BENCHMARK(BM_chain_1M_100_false)->UseRealTime()->Arg(8);
BENCHMARK(BM_chain_1M_100_true)->UseRealTime()->Arg(8);
#endif
}
}
#endif | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/device/device_event_mgr.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/device/device_event_mgr_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
e2ab1c0d-c7b7-446e-9d85-8322e4227a2e | cpp | tensorflow/tensorflow | convert_tensor | tensorflow/compiler/mlir/tensorflow/utils/convert_tensor.cc | tensorflow/compiler/mlir/tensorflow/utils/convert_tensor_test.cc | #include "tensorflow/compiler/mlir/tensorflow/utils/convert_tensor.h"
#include <cstdint>
#include <limits>
#include <optional>
#include <string>
#include <vector>
#include "absl/base/casts.h"
#include "absl/container/inlined_vector.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallVector.h"
#include "mlir/IR/Attributes.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinTypeInterfaces.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/Types.h"
#include "mlir/Support/DebugStringHelper.h"
#include "mlir/Support/LLVM.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_attributes.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_types.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/convert_type.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/dynamic_shape_utils.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/mangling_util.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/framework/tensor_util.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/platform/bfloat16.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/tstring.h"
#include "tsl/platform/ml_dtypes.h"
namespace tensorflow {
using llvm::ArrayRef;
using llvm::SmallVector;
using mlir::Builder;
using mlir::DenseStringElementsAttr;
using mlir::ElementsAttr;
using mlir::RankedTensorType;
using mlir::ShapedType;
using mlir::Type;
using tensorflow::errors::InvalidArgument;
static TensorProto ConvertToProto(const Tensor& input_tensor,
bool use_tensor_content = true) {
TensorProto tensor_proto;
if (use_tensor_content)
input_tensor.AsProtoTensorContent(&tensor_proto);
else
input_tensor.AsProtoField(&tensor_proto);
return tensor_proto;
}
static std::string MangleTensor(const Tensor& tensor) {
return mangling_util::MangleTensor(ConvertToProto(tensor));
}
template <typename T>
absl::StatusOr<ElementsAttr> ConvertFlatTensor(const Tensor& input_tensor,
ShapedType type) {
auto arr = input_tensor.flat<T>();
return ElementsAttr(mlir::DenseElementsAttr::get(
type, llvm::ArrayRef(arr.data(), arr.size())));
}
ElementsAttr ConvertTensorOfCustomFloatType(const Tensor& tensor,
RankedTensorType type) {
auto buffer =
llvm::ArrayRef(static_cast<char*>(tensor.data()), tensor.TotalBytes());
return mlir::DenseElementsAttr::getFromRawBuffer(type, buffer);
}
absl::StatusOr<ElementsAttr> ConvertStringTensor(const Tensor& input_tensor,
ShapedType type) {
auto arr = input_tensor.flat<tstring>();
std::vector<mlir::StringRef> string_refs;
string_refs.reserve(arr.size());
for (int i = 0; i < arr.size(); i++) {
const auto& val = arr(i);
string_refs.push_back({val.data(), val.size()});
}
return ElementsAttr(DenseStringElementsAttr::get(type, string_refs));
}
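// ConvertTensor dispatches on dtype: types with a direct dense MLIR
// representation go through ConvertFlatTensor, low-precision floats are
// copied as raw bytes, strings become a DenseStringElementsAttr, and anything
// else is mangled into an opaque TensorProtoAttr.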
absl::StatusOr<ElementsAttr> ConvertTensor(const Tensor& input_tensor,
Builder* builder) {
const auto& input_dtype = input_tensor.dtype();
const auto& input_shape = input_tensor.shape();
Type elt_type;
TF_RETURN_IF_ERROR(ConvertDataType(input_dtype, *builder, &elt_type));
SmallVector<int64_t, 4> shape;
ConvertToMlirShape(input_shape, &shape);
auto type = RankedTensorType::get(shape, elt_type);
#define CONVERT_FLAT(DTYPE, CTYPE) \
case DTYPE: \
return ConvertFlatTensor<CTYPE>(input_tensor, type);
switch (input_dtype) {
CONVERT_FLAT(DT_BOOL, bool)
CONVERT_FLAT(DT_FLOAT, float)
CONVERT_FLAT(DT_DOUBLE, double)
CONVERT_FLAT(DT_INT8, int8)
CONVERT_FLAT(DT_INT16, int16)
CONVERT_FLAT(DT_INT32, int32)
CONVERT_FLAT(DT_INT64, int64_t)
CONVERT_FLAT(DT_UINT8, uint8)
CONVERT_FLAT(DT_UINT16, uint16)
CONVERT_FLAT(DT_UINT32, uint32)
CONVERT_FLAT(DT_UINT64, uint64)
CONVERT_FLAT(DT_COMPLEX64, std::complex<float>)
CONVERT_FLAT(DT_COMPLEX128, std::complex<double>)
case DT_BFLOAT16:
case DT_HALF:
case DT_FLOAT8_E5M2:
case DT_FLOAT8_E4M3FN:
return ConvertTensorOfCustomFloatType(input_tensor, type);
case DT_STRING:
return ConvertStringTensor(input_tensor, type);
default:
return ElementsAttr(
mlir::TF::TensorProtoAttr::get(type, MangleTensor(input_tensor)));
}
#undef CONVERT_FLAT
}
int NumberOfMaterializedElements(const TensorProto& tensor) {
if (!tensor.tensor_content().empty()) return -1;
#define MATCH(DTYPE, FIELD) \
case DTYPE: \
return tensor.FIELD##_val().size()
switch (tensor.dtype()) {
MATCH(DT_FLOAT, float);
MATCH(DT_DOUBLE, double);
MATCH(DT_INT8, int);
MATCH(DT_UINT8, int);
MATCH(DT_INT16, int);
MATCH(DT_UINT16, int);
MATCH(DT_INT32, int);
MATCH(DT_UINT32, uint32);
MATCH(DT_INT64, int64);
MATCH(DT_UINT64, uint64);
MATCH(DT_BOOL, bool);
MATCH(DT_HALF, half);
MATCH(DT_BFLOAT16, half);
MATCH(DT_STRING, string);
case DT_COMPLEX64:
case DT_COMPLEX128:
default:
return -1;
}
}
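// A proto that materializes a single value but declares more than one logical
// element is a splat: convert it as a one-element tensor, then rebuild the
// original shape around a SplatElementsAttr instead of expanding the data.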
absl::StatusOr<ElementsAttr> ConvertTensorProto(const TensorProto& input_tensor,
Builder* builder) {
TensorShape input_tensor_shape(input_tensor.tensor_shape());
if (NumberOfMaterializedElements(input_tensor) == 1 &&
input_tensor_shape.num_elements() > 1) {
TensorProto tensor_copy = input_tensor;
auto* shape = tensor_copy.mutable_tensor_shape();
shape->clear_dim();
shape->add_dim()->set_size(1);
TF_ASSIGN_OR_RETURN(ElementsAttr single_attr,
ConvertTensorProto(tensor_copy, builder));
llvm::SmallVector<int64_t> original_dimensions;
for (auto dim : input_tensor_shape) original_dimensions.push_back(dim.size);
return ElementsAttr(mlir::SplatElementsAttr::get(
single_attr.getShapedType().clone(original_dimensions),
single_attr.getValues<mlir::Attribute>()[0]));
}
Tensor t;
if (!t.FromProto(input_tensor))
return InvalidArgument("Failed to parse input_tensor.");
return ConvertTensor(t, builder);
}
void ConvertToTensorShapeProto(ArrayRef<int64_t> shape,
TensorShapeProto* output_shape) {
for (auto d : shape) {
output_shape->add_dim()->set_size(ShapedType::isDynamic(d) ? kTFDynamicSize
: d);
}
}
PartialTensorShape ConvertTypeToTensorShape(const mlir::Type& type) {
if (mlir::isa<mlir::UnrankedTensorType>(type)) {
return PartialTensorShape();
}
if (auto tensor_type = mlir::dyn_cast<mlir::RankedTensorType>(type)) {
TensorShapeProto tensor_shape_proto;
ConvertToTensorShapeProto(tensor_type.getShape(), &tensor_shape_proto);
return PartialTensorShape(tensor_shape_proto);
}
return TensorShape();
}
mlir::TF::ShapeAttr ConvertTypeToTensorShapeAttr(const mlir::Type& type) {
if (mlir::isa<mlir::UnrankedTensorType>(type)) {
return mlir::TF::ShapeAttr::get(type.getContext(), std::nullopt);
}
if (auto tensor_type = mlir::dyn_cast<mlir::RankedTensorType>(type)) {
return mlir::TF::ShapeAttr::get(type.getContext(), tensor_type.getShape());
}
return mlir::TF::ShapeAttr::get(type.getContext(), ArrayRef<int64_t>());
}
absl::StatusOr<TensorSpecProto> ConvertTypeToTensorSpecProto(
const mlir::Type& type) {
DataType dtype;
TF_RETURN_IF_ERROR(ConvertToDataType(type, &dtype));
TensorSpecProto tensor_spec;
tensor_spec.set_dtype(dtype);
*tensor_spec.mutable_shape() = ConvertTypeToTensorShape(type).AsProto();
return tensor_spec;
}
absl::StatusOr<mlir::Attribute> ConvertTensorShapeProto(
const TensorShapeProto& shape, mlir::MLIRContext* context) {
if (shape.unknown_rank())
return mlir::TF::ShapeAttr::get(context, std::nullopt);
llvm::SmallVector<int64_t, 4> dims;
dims.reserve(shape.dim().size());
for (const auto& dim : shape.dim()) {
dims.push_back(dim.size() == kTFDynamicSize ? ShapedType::kDynamic
: dim.size());
}
return mlir::TF::ShapeAttr::get(context, llvm::ArrayRef(dims));
}
void ConvertStringElementsAttr(
const DenseStringElementsAttr attr,
protobuf::RepeatedPtrField<std::string>* output) {
for (const auto& val : attr.getRawStringData())
output->Add({val.data(), val.size()});
}
template <typename T>
void ConvertComplexElementsAttr(const mlir::DenseElementsAttr attr,
protobuf::RepeatedField<T>* output) {
for (const auto& val : attr.getValues<std::complex<T>>()) {
output->Add(val.real());
output->Add(val.imag());
}
}
Status ConvertTensorProtoAttr(const mlir::TF::TensorProtoAttr attr,
TensorProto* output_tensor) {
auto mangled_tensor = attr.getValue();
absl::string_view tensor_view(mangled_tensor.data(), mangled_tensor.size());
return mangling_util::DemangleTensor(tensor_view, output_tensor);
}
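// The splat handling below relies on proto semantics: a repeated value field
// left empty decodes as all zeros, so a zero splat is encoded by writing
// nothing at all.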
template <typename T>
void ConvertElementsAttr(const mlir::DenseElementsAttr attr,
protobuf::RepeatedField<T>* output) {
if (attr.isSplat()) {
if (attr.getSplatValue<T>() != T(0)) output->Add(attr.getSplatValue<T>());
} else {
output->Reserve(attr.getNumElements());
for (auto value : attr.getValues<T>()) output->AddAlreadyReserved(value);
}
}
template <typename T, typename Cord>
void ConvertFloatElementsAttr(const mlir::DenseElementsAttr attr,
protobuf::RepeatedField<T>* output,
Cord* tensor_content) {
if (attr.isSplat()) {
if (attr.getSplatValue<T>() != T(0)) output->Add(attr.getSplatValue<T>());
} else {
port::CopyFromArray(tensor_content, attr.getRawData().data(),
attr.getRawData().size());
}
}
void ConvertHalfElementsAttr(const mlir::DenseElementsAttr attr,
protobuf::RepeatedField<int>* output) {
if (attr.isSplat()) {
if (attr.getSplatValue<Eigen::half>() != Eigen::half(0))
output->Add(
Eigen::numext::bit_cast<uint16_t>(attr.getSplatValue<Eigen::half>()));
} else {
output->Reserve(attr.getNumElements());
for (const Eigen::half value : attr.getValues<Eigen::half>())
output->AddAlreadyReserved(Eigen::numext::bit_cast<uint16_t>(value));
}
}
template <typename T, typename U = T, typename Cord>
void ConvertIntElementsAttr(const mlir::DenseElementsAttr attr,
protobuf::RepeatedField<T>* output,
Cord* tensor_content) {
if (attr.isSplat()) {
if (attr.getSplatValue<U>() != U(0))
output->Add(static_cast<T>(attr.getSplatValue<U>()));
} else {
port::CopyFromArray(tensor_content, attr.getRawData().data(),
attr.getRawData().size());
}
}
template <typename T, typename U = T, typename Cord>
void ConvertUIntElementsAttr(const mlir::DenseElementsAttr attr,
protobuf::RepeatedField<T>* output,
Cord* tensor_content) {
if (attr.isSplat()) {
if (attr.getSplatValue<U>() != U(0))
output->Add(static_cast<T>(attr.getSplatValue<U>()));
} else {
port::CopyFromArray(tensor_content, attr.getRawData().data(),
attr.getRawData().size());
}
}
void ConvertBfloat16ElementsAttr(const mlir::DenseElementsAttr attr,
protobuf::RepeatedField<int>* output) {
if (attr.isSplat()) {
if (attr.getSplatValue<bfloat16>() != bfloat16(0))
output->Add(
Eigen::numext::bit_cast<uint16_t>(attr.getSplatValue<bfloat16>()));
} else {
output->Reserve(attr.getNumElements());
for (const bfloat16 value : attr.getValues<bfloat16>())
output->AddAlreadyReserved(Eigen::numext::bit_cast<uint16_t>(value));
}
}
template <typename T>
void ConvertFloat8ElementsAttr(const mlir::DenseElementsAttr attr,
std::string* output) {
if (attr.isSplat()) {
if (attr.getSplatValue<T>() != T(0))
output->push_back(
Eigen::numext::bit_cast<uint8_t>(attr.getSplatValue<T>()));
} else {
output->reserve(attr.getNumElements());
for (const T value : attr.getValues<T>())
output->push_back(Eigen::numext::bit_cast<uint8_t>(value));
}
}
Status ConvertToTensorProto(const ElementsAttr attr, TensorProto* output) {
auto type = attr.getShapedType();
auto shape = type.getShape();
DataType output_dtype;
TF_RETURN_IF_ERROR(ConvertToDataType(type, &output_dtype));
output->set_dtype(output_dtype);
ConvertToTensorShapeProto(shape, output->mutable_tensor_shape());
if (auto tensor_attr = mlir::dyn_cast<mlir::TF::TensorProtoAttr>(attr))
return ConvertTensorProtoAttr(tensor_attr, output);
auto dense_attr = mlir::dyn_cast<mlir::DenseElementsAttr>(attr);
if (!dense_attr) return errors::InvalidArgument("Unsupported elements attr");
switch (output_dtype) {
case DT_BOOL:
ConvertElementsAttr(dense_attr, output->mutable_bool_val());
break;
case DT_BFLOAT16:
ConvertBfloat16ElementsAttr(dense_attr, output->mutable_half_val());
break;
case DT_COMPLEX64:
ConvertComplexElementsAttr(dense_attr, output->mutable_scomplex_val());
break;
case DT_COMPLEX128:
ConvertComplexElementsAttr(dense_attr, output->mutable_dcomplex_val());
break;
case DT_DOUBLE:
ConvertFloatElementsAttr(dense_attr, output->mutable_double_val(),
output->mutable_tensor_content());
break;
case DT_HALF:
ConvertHalfElementsAttr(dense_attr, output->mutable_half_val());
break;
case DT_FLOAT:
ConvertFloatElementsAttr(dense_attr, output->mutable_float_val(),
output->mutable_tensor_content());
break;
case DT_FLOAT8_E5M2:
ConvertFloat8ElementsAttr<tsl::float8_e5m2>(dense_attr,
output->mutable_float8_val());
break;
case DT_FLOAT8_E4M3FN:
ConvertFloat8ElementsAttr<tsl::float8_e4m3fn>(
dense_attr, output->mutable_float8_val());
break;
case tensorflow::DT_INT4:
ConvertIntElementsAttr<int, tsl::int4>(dense_attr,
output->mutable_int_val(),
output->mutable_tensor_content());
break;
case tensorflow::DT_UINT4:
ConvertUIntElementsAttr<int, tsl::uint4>(
dense_attr, output->mutable_int_val(),
output->mutable_tensor_content());
break;
case DT_QUINT8:
case DT_INT8:
ConvertUIntElementsAttr<int, int8_t>(dense_attr,
output->mutable_int_val(),
output->mutable_tensor_content());
break;
case DT_QUINT16:
case DT_INT16:
ConvertIntElementsAttr<int, int16_t>(dense_attr,
output->mutable_int_val(),
output->mutable_tensor_content());
break;
case DT_INT32:
ConvertIntElementsAttr(dense_attr, output->mutable_int_val(),
output->mutable_tensor_content());
break;
case DT_INT64:
ConvertIntElementsAttr(dense_attr, output->mutable_int64_val(),
output->mutable_tensor_content());
break;
case DT_STRING:
ConvertStringElementsAttr(mlir::cast<DenseStringElementsAttr>(dense_attr),
output->mutable_string_val());
break;
case DT_UINT8:
ConvertUIntElementsAttr<int, uint8_t>(dense_attr,
output->mutable_int_val(),
output->mutable_tensor_content());
break;
case DT_UINT16:
ConvertUIntElementsAttr<int, uint16_t>(dense_attr,
output->mutable_int_val(),
output->mutable_tensor_content());
break;
case DT_UINT32:
ConvertUIntElementsAttr(dense_attr, output->mutable_uint32_val(),
output->mutable_tensor_content());
break;
case DT_UINT64:
ConvertUIntElementsAttr(dense_attr, output->mutable_uint64_val(),
output->mutable_tensor_content());
break;
default:
return errors::Unimplemented(absl::StrCat("Unimplemented data type ",
DataTypeString(output_dtype)));
}
return absl::OkStatus();
}
Status ConvertToTensor(const mlir::ElementsAttr attr, Tensor* output_tensor) {
TensorProto tensor_proto;
TF_RETURN_IF_ERROR(ConvertToTensorProto(attr, &tensor_proto));
if (!output_tensor->FromProto(tensor_proto)) {
return InvalidArgument("Couldn't convert tensor proto to tensor.");
}
return absl::OkStatus();
}
} | #include "tensorflow/compiler/mlir/tensorflow/utils/convert_tensor.h"
#include <cstring>
#include <initializer_list>
#include "mlir/IR/Attributes.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/Dialect.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/Support/LLVM.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/dynamic_shape_utils.h"
#include "xla/test.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/tensor_util.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/types.h"
#include "tsl/platform/ml_dtypes.h"
namespace tensorflow {
namespace {
using ::testing::Eq;
using ::testing::IsFalse;
using ::testing::IsTrue;
static void RegisterDialects(mlir::MLIRContext &context) {
context.loadDialect<mlir::TF::TensorFlowDialect>();
}
TEST(ConvertTypeToTensorTypeTest, UnrankedTensorType) {
mlir::MLIRContext context;
RegisterDialects(context);
mlir::Builder b(&context);
PartialTensorShape output_shape =
ConvertTypeToTensorShape(mlir::UnrankedTensorType::get(b.getF32Type()));
EXPECT_TRUE(output_shape.IsIdenticalTo(PartialTensorShape()));
}
TEST(ConvertTypeToTensorTypeTest, NonFullyDefinedRankedTensorType) {
mlir::MLIRContext context;
RegisterDialects(context);
mlir::Builder b(&context);
PartialTensorShape output_shape = ConvertTypeToTensorShape(
GetTypeFromTFTensorShape({-1, 2, 3}, b.getF32Type()));
EXPECT_TRUE(output_shape.IsIdenticalTo(PartialTensorShape({-1, 2, 3})));
}
TEST(ConvertTypeToTensorTypeTest, FullyDefinedRankedTensorType) {
mlir::MLIRContext context;
RegisterDialects(context);
mlir::Builder b(&context);
PartialTensorShape output_shape = ConvertTypeToTensorShape(
mlir::RankedTensorType::get({1, 2, 3}, b.getF32Type()));
EXPECT_TRUE(output_shape.IsIdenticalTo(PartialTensorShape({1, 2, 3})));
}
TEST(ConvertTypeToTensorTypeTest, ScalarTensorType) {
mlir::MLIRContext context;
mlir::Builder b(&context);
PartialTensorShape output_shape = ConvertTypeToTensorShape(b.getF32Type());
EXPECT_TRUE(output_shape.IsIdenticalTo(TensorShape()));
}
TEST(ConvertTypeToTensorTypeTest, ConvertStringTensor) {
mlir::MLIRContext context;
RegisterDialects(context);
mlir::Builder b(&context);
Tensor tensor(DT_STRING, TensorShape({1, 2, 2, 1}));
EXPECT_EQ(4, tensor.NumElements());
auto Tt = tensor.flat<tstring>();
Tt.setValues({"one", "two", "three", "four"});
auto value_or_status = ConvertTensor(tensor, &b);
ASSERT_TRUE(value_or_status.ok());
auto attr = value_or_status.value();
EXPECT_TRUE(mlir::isa<mlir::DenseStringElementsAttr>(attr));
auto string_attr = mlir::cast<mlir::DenseStringElementsAttr>(attr);
auto string_values = string_attr.getRawStringData();
ASSERT_EQ(string_values.size(), 4);
EXPECT_EQ(string_values[0], mlir::StringRef("one"));
EXPECT_EQ(string_values[1], mlir::StringRef("two"));
EXPECT_EQ(string_values[2], mlir::StringRef("three"));
EXPECT_EQ(string_values[3], mlir::StringRef("four"));
}
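// VerifyConversion round-trips values: Tensor -> ElementsAttr via
// ConvertTensor, then back via ConvertToTensor, checking the element type and
// bit-exact equality along the way.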
class ConvertTensorTest : public ::testing::Test {
protected:
template <typename T>
void VerifyConversion(std::initializer_list<T> values, DataType dtype,
mlir::Type expected_ty) {
mlir::Builder b(expected_ty.getContext());
Tensor tensor(dtype, TensorShape({static_cast<int64_t>(values.size())}));
tensor.flat<T>().setValues(values);
auto value_or = ConvertTensor(tensor, &b);
TF_ASSERT_OK(value_or.status());
auto attr = value_or.value();
EXPECT_EQ(attr.getShapedType().getElementType(), expected_ty);
Tensor out;
TF_ASSERT_OK(ConvertToTensor(attr, &out));
test::ExpectTensorEqual<T>(tensor, out);
}
};
TEST_F(ConvertTensorTest, Simple) {
mlir::MLIRContext context;
RegisterDialects(context);
ASSERT_NO_FATAL_FAILURE(VerifyConversion<Eigen::half>(
{Eigen::half(1.0)}, DT_HALF, mlir::FloatType::getF16(&context)));
ASSERT_NO_FATAL_FAILURE(
VerifyConversion<bfloat16>({bfloat16(1.0), bfloat16(-1.0)}, DT_BFLOAT16,
mlir::FloatType::getBF16(&context)));
ASSERT_NO_FATAL_FAILURE(VerifyConversion<float>(
{1.0, -1.0}, DT_FLOAT, mlir::FloatType::getF32(&context)));
ASSERT_NO_FATAL_FAILURE(VerifyConversion<double>(
{1.0, -1.0}, DT_DOUBLE, mlir::FloatType::getF64(&context)));
ASSERT_NO_FATAL_FAILURE(VerifyConversion<tsl::float8_e5m2>(
{tsl::float8_e5m2{1.0}, tsl::float8_e5m2{-1.0}}, DT_FLOAT8_E5M2,
mlir::FloatType::getFloat8E5M2(&context)));
ASSERT_NO_FATAL_FAILURE(VerifyConversion<tsl::float8_e4m3fn>(
{tsl::float8_e4m3fn{1.0}, tsl::float8_e4m3fn{-1.0}}, DT_FLOAT8_E4M3FN,
mlir::FloatType::getFloat8E4M3FN(&context)));
ASSERT_NO_FATAL_FAILURE(VerifyConversion<int4>(
{static_cast<int4>(1), static_cast<int4>(-1)}, DT_INT4,
mlir::IntegerType::get(&context, 4,
mlir::IntegerType::SignednessSemantics::Signed)));
ASSERT_NO_FATAL_FAILURE(VerifyConversion<int8>(
{1, -1}, DT_INT8, mlir::IntegerType::get(&context, 8)));
ASSERT_NO_FATAL_FAILURE(VerifyConversion<int16>(
{1, -1}, DT_INT16, mlir::IntegerType::get(&context, 16)));
ASSERT_NO_FATAL_FAILURE(VerifyConversion<int32>(
{1, -1}, DT_INT32, mlir::IntegerType::get(&context, 32)));
ASSERT_NO_FATAL_FAILURE(VerifyConversion<int64_t>(
{1, -1}, DT_INT64, mlir::IntegerType::get(&context, 64)));
ASSERT_NO_FATAL_FAILURE(VerifyConversion<uint4>(
{static_cast<uint4>(1), static_cast<uint4>(2)}, DT_UINT4,
mlir::IntegerType::get(
&context, 4, mlir::IntegerType::SignednessSemantics::Unsigned)));
ASSERT_NO_FATAL_FAILURE(VerifyConversion<uint8>(
{1, 2}, DT_UINT8,
mlir::IntegerType::get(
&context, 8, mlir::IntegerType::SignednessSemantics::Unsigned)));
ASSERT_NO_FATAL_FAILURE(VerifyConversion<uint16>(
{1, 2}, DT_UINT16,
mlir::IntegerType::get(
&context, 16, mlir::IntegerType::SignednessSemantics::Unsigned)));
ASSERT_NO_FATAL_FAILURE(VerifyConversion<uint32>(
{1, 2}, DT_UINT32,
mlir::IntegerType::get(
&context, 32, mlir::IntegerType::SignednessSemantics::Unsigned)));
ASSERT_NO_FATAL_FAILURE(VerifyConversion<uint64>(
{1, 2}, DT_UINT64,
mlir::IntegerType::get(
&context, 64, mlir::IntegerType::SignednessSemantics::Unsigned)));
ASSERT_NO_FATAL_FAILURE(VerifyConversion<std::complex<float>>(
{{0.0, 1.0}, {1.0, 0.0}}, DT_COMPLEX64,
mlir::ComplexType::get(mlir::FloatType::getF32(&context))));
ASSERT_NO_FATAL_FAILURE(VerifyConversion<std::complex<double>>(
{{0.0, 1.0}, {1.0, 0.0}}, DT_COMPLEX128,
mlir::ComplexType::get(mlir::FloatType::getF64(&context))));
}
bool IsSplat(mlir::ElementsAttr attr) {
return mlir::cast<mlir::DenseElementsAttr>(attr).isSplat();
}
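// A splat proto (one stored value, 2^35 logical elements) must import as a
// SplatElementsAttr; materializing it densely would require 128 GiB.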
TEST(ConvertTensorProtoTest, SplatTensor) {
TensorProto tensor;
tensor.set_dtype(DT_FLOAT);
tensor.mutable_tensor_shape()->add_dim()->set_size(1ULL << 35);
tensor.add_float_val(42.0);
mlir::MLIRContext context;
mlir::Builder builder(&context);
TF_ASSERT_OK_AND_ASSIGN(mlir::ElementsAttr attribute,
ConvertTensorProto(tensor, &builder));
EXPECT_THAT(
attribute,
AllOf(Eq(mlir::DenseElementsAttr::get(
mlir::RankedTensorType::get({1ULL << 35}, builder.getF32Type()),
42.0f)),
ResultOf(IsSplat, IsTrue())));
}
TEST(ConvertTensorProtoTest, NonSplatTensor) {
TensorProto proto = tensor::CreateTensorProto<float>(
{1.0f, 2.0f, 3.0f, 4.0f}, {2, 2});
mlir::MLIRContext context;
mlir::Builder builder(&context);
TF_ASSERT_OK_AND_ASSIGN(mlir::ElementsAttr attribute,
ConvertTensorProto(proto, &builder));
EXPECT_THAT(
attribute,
AllOf(Eq(mlir::DenseElementsAttr::get(
mlir::RankedTensorType::get({2, 2}, builder.getF32Type()),
{1.0f, 2.0f, 3.0f, 4.0f})),
ResultOf(IsSplat, IsFalse())));
}
TEST(ConvertTypeToTensorSpecProtoTest, UnrankedTensorType) {
mlir::MLIRContext context;
mlir::Builder b(&context);
auto output_proto = ConvertTypeToTensorSpecProto(
mlir::UnrankedTensorType::get(b.getF32Type()));
TF_ASSERT_OK(output_proto.status());
EXPECT_EQ(output_proto->dtype(), DT_FLOAT);
EXPECT_TRUE(output_proto->shape().unknown_rank());
}
TEST(ConvertTypeToTensorSpecProtoTest, RankedTensorType) {
mlir::MLIRContext context;
mlir::Builder b(&context);
auto output_proto = ConvertTypeToTensorSpecProto(
mlir::RankedTensorType::get({1, 2, 3}, b.getF32Type()));
TF_ASSERT_OK(output_proto.status());
EXPECT_EQ(output_proto->dtype(), DT_FLOAT);
EXPECT_EQ(output_proto->shape().dim_size(), 3);
EXPECT_EQ(output_proto->shape().dim().at(0).size(), 1);
EXPECT_EQ(output_proto->shape().dim().at(1).size(), 2);
EXPECT_EQ(output_proto->shape().dim().at(2).size(), 3);
}
TEST(ConvertTypeToTensorSpecProtoTest, ScalarTensorType) {
mlir::MLIRContext context;
mlir::Builder b(&context);
auto output_proto = ConvertTypeToTensorSpecProto(b.getF32Type());
TF_ASSERT_OK(output_proto.status());
EXPECT_EQ(output_proto->dtype(), DT_FLOAT);
EXPECT_FALSE(output_proto->shape().unknown_rank());
EXPECT_EQ(output_proto->shape().dim_size(), 0);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tensorflow/utils/convert_tensor.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tensorflow/utils/convert_tensor_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
0a821a6c-44b2-46e5-928c-96e5c8548a77 | cpp | tensorflow/tensorflow | import | tensorflow/lite/toco/tflite/import.cc | tensorflow/lite/toco/tflite/import_test.cc | #include "tensorflow/lite/toco/tflite/import.h"
#include <memory>
#include <string>
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "flatbuffers/verifier.h"
#include "tensorflow/compiler/mlir/lite/schema/schema_utils.h"
#include "tensorflow/lite/core/tools/verifier.h"
#include "tensorflow/lite/schema/schema_generated.h"
#include "tensorflow/lite/stderr_reporter.h"
#include "tensorflow/lite/toco/model.h"
#include "tensorflow/lite/toco/model_flags.pb.h"
#include "tensorflow/lite/toco/tflite/operator.h"
#include "tensorflow/lite/toco/tflite/types.h"
#include "tensorflow/lite/toco/tooling_util.h"
namespace toco {
namespace tflite {
namespace details {
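// The tables built below map flatbuffer indices to names: tensors by their
// position in the first subgraph, and operators by builtin name (or by
// custom_code for custom operators). Operator inputs/outputs and opcode_index
// fields are resolved through these tables during import.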
void LoadTensorsTable(const ::tflite::Model& input_model,
TensorsTable* tensors_table) {
auto tensors = (*input_model.subgraphs())[0]->tensors();
if (!tensors) return;
for (const auto* tensor : *tensors) {
tensors_table->push_back(tensor->name()->c_str());
}
}
void LoadOperatorsTable(const ::tflite::Model& input_model,
OperatorsTable* operators_table) {
auto opcodes = input_model.operator_codes();
if (!opcodes) return;
for (const auto* opcode : *opcodes) {
auto builtin_code = GetBuiltinCode(opcode);
if (builtin_code != ::tflite::BuiltinOperator_CUSTOM) {
operators_table->push_back(EnumNameBuiltinOperator(builtin_code));
} else {
operators_table->push_back(opcode->custom_code()->c_str());
}
}
}
}
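// Creates an Array in `model` for every tensor in the first subgraph, copying
// its data type, constant buffer contents (when present), shape, and any
// min/max or scale/zero-point quantization metadata.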
void ImportTensors(const ::tflite::Model& input_model, Model* model) {
auto tensors = (*input_model.subgraphs())[0]->tensors();
auto* buffers = input_model.buffers();
if (!tensors) return;
for (const auto* input_tensor : *tensors) {
Array& array = model->GetOrCreateArray(input_tensor->name()->c_str());
array.data_type = DataType::Deserialize(input_tensor->type());
int buffer_index = input_tensor->buffer();
auto* buffer = buffers->Get(buffer_index);
DataBuffer::Deserialize(*input_tensor, *buffer, &array);
auto shape = input_tensor->shape();
if (shape) {
array.mutable_shape()->mutable_dims()->clear();
for (uint32_t i = 0; i < shape->Length(); ++i) {
auto d = shape->Get(i);
array.mutable_shape()->mutable_dims()->push_back(d);
}
}
auto quantization = input_tensor->quantization();
if (quantization) {
if (quantization->min() && quantization->max()) {
CHECK_EQ(1, quantization->min()->Length());
CHECK_EQ(1, quantization->max()->Length());
MinMax& minmax = array.GetOrCreateMinMax();
minmax.min = quantization->min()->Get(0);
minmax.max = quantization->max()->Get(0);
}
if (quantization->scale() && quantization->zero_point()) {
CHECK_EQ(1, quantization->scale()->Length());
CHECK_EQ(1, quantization->zero_point()->Length());
QuantizationParams& q = array.GetOrCreateQuantizationParams();
q.scale = quantization->scale()->Get(0);
q.zero_point = quantization->zero_point()->Get(0);
}
}
}
}
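// Creates a toco::Operator for each operator in the first subgraph. Opcodes
// without a registered importer are wrapped in a TENSORFLOW_UNSUPPORTED
// placeholder. An input index of -1 marks an optional input, for which a
// fresh optional array is created.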
void ImportOperators(
const ::tflite::Model& input_model,
const std::map<std::string, std::unique_ptr<BaseOperator>>& ops_by_name,
const details::TensorsTable& tensors_table,
const details::OperatorsTable& operators_table, Model* model) {
auto ops = (*input_model.subgraphs())[0]->operators();
if (!ops) return;
for (const auto* input_op : *ops) {
uint32_t index = input_op->opcode_index();
    // Use >= so that index == operators_table.size() is rejected here instead
    // of throwing from operators_table.at(index) below.
    if (index >= operators_table.size()) {
      LOG(FATAL) << "Index " << index << " must be less than "
                 << operators_table.size();
}
std::string opname = operators_table.at(index);
std::unique_ptr<Operator> new_op = nullptr;
if (ops_by_name.count(opname) == 0) {
std::string effective_opname = "TENSORFLOW_UNSUPPORTED";
if (ops_by_name.count(effective_opname) == 0) {
LOG(FATAL) << "Internal logic error: TENSORFLOW_UNSUPPORTED not found.";
}
new_op = ops_by_name.at(effective_opname)
->Deserialize(input_op->builtin_options(),
input_op->custom_options());
if (new_op->type == OperatorType::kUnsupported) {
auto* unsupported_op =
static_cast<TensorFlowUnsupportedOperator*>(new_op.get());
unsupported_op->tensorflow_op = opname;
unsupported_op->quantized = true;
} else {
LOG(FATAL) << "Expected a TensorFlowUnsupportedOperator";
}
} else {
new_op = ops_by_name.at(opname)->Deserialize(input_op->builtin_options(),
input_op->custom_options());
}
model->operators.emplace_back(new_op.release());
auto* op = model->operators.back().get();
auto inputs = input_op->inputs();
for (uint32_t i = 0; i < inputs->Length(); i++) {
auto input_index = inputs->Get(i);
if (input_index != -1) {
const std::string& input_name = tensors_table.at(input_index);
op->inputs.push_back(input_name);
} else {
const std::string& tensor_name =
toco::AvailableArrayName(*model, "OptionalTensor");
model->CreateOptionalArray(tensor_name);
op->inputs.push_back(tensor_name);
}
}
auto outputs = input_op->outputs();
for (int i = 0, end = outputs->Length(); i < end; i++) {
auto output_index = outputs->Get(i);
const std::string& output_name = tensors_table.at(output_index);
op->outputs.push_back(output_name);
}
}
}
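// Copies the subgraph's declared inputs/outputs into the model flags, but
// only where the caller did not already specify input_arrays/output_arrays.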
void ImportIOTensors(const ModelFlags& model_flags,
const ::tflite::Model& input_model,
const details::TensorsTable& tensors_table, Model* model) {
if (model_flags.input_arrays().empty()) {
auto inputs = (*input_model.subgraphs())[0]->inputs();
if (inputs) {
for (int input : *inputs) {
const std::string& input_name = tensors_table.at(input);
model->flags.add_input_arrays()->set_name(input_name);
}
}
}
if (model_flags.output_arrays().empty()) {
auto outputs = (*input_model.subgraphs())[0]->outputs();
if (outputs) {
for (int output : *outputs) {
const std::string& output_name = tensors_table.at(output);
model->flags.add_output_arrays(output_name);
}
}
}
}
namespace {
bool Verify(const void* buf, size_t len) {
::flatbuffers::Verifier verifier(static_cast<const uint8_t*>(buf), len);
return ::tflite::VerifyModelBuffer(verifier);
}
}
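// Import entry point: verifies the flatbuffer, builds the tensor and operator
// lookup tables, then imports tensors, operators, and I/O information into a
// freshly created Model. Exactly one subgraph is required.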
std::unique_ptr<Model> Import(const ModelFlags& model_flags,
const std::string& input_file_contents) {
::tflite::AlwaysTrueResolver r;
if (!::tflite::Verify(input_file_contents.data(), input_file_contents.size(),
r, ::tflite::DefaultErrorReporter())) {
LOG(FATAL) << "Invalid flatbuffer.";
}
const ::tflite::Model* input_model =
::tflite::GetModel(input_file_contents.data());
const auto ops_by_name = BuildOperatorByNameMap();
if (!input_model->subgraphs() || input_model->subgraphs()->size() != 1) {
LOG(FATAL) << "Number of subgraphs in tflite should be exactly 1.";
}
std::unique_ptr<Model> model;
model = std::make_unique<Model>();
details::TensorsTable tensors_table;
details::LoadTensorsTable(*input_model, &tensors_table);
details::OperatorsTable operators_table;
details::LoadOperatorsTable(*input_model, &operators_table);
ImportTensors(*input_model, model.get());
ImportOperators(*input_model, ops_by_name, tensors_table, operators_table,
model.get());
ImportIOTensors(model_flags, *input_model, tensors_table, model.get());
UndoWeightsShuffling(model.get());
return model;
}
}
} | #include "tensorflow/lite/toco/tflite/import.h"
#include <initializer_list>
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "flatbuffers/flatbuffer_builder.h"
#include "tensorflow/compiler/mlir/lite/schema/schema_conversion_utils.h"
#include "tensorflow/lite/schema/schema_generated.h"
#include "tensorflow/lite/toco/model.h"
#include "tensorflow/lite/toco/model_flags.pb.h"
#include "tensorflow/lite/toco/toco_types.h"
#include "tensorflow/lite/version.h"
namespace toco {
namespace tflite {
namespace {
using ::testing::ElementsAre;
using flatbuffers::Offset;
using flatbuffers::Vector;
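// Test fixture that assembles minimal TFLite flatbuffers in memory (buffers,
// tensors, opcodes, operators, subgraphs) so each test can exercise Import()
// without reading model files from disk.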
class ImportTest : public ::testing::Test {
protected:
template <typename T>
Offset<Vector<unsigned char>> CreateDataVector(const std::vector<T>& data) {
return builder_.CreateVector(reinterpret_cast<const uint8_t*>(data.data()),
sizeof(T) * data.size());
}
Offset<Vector<Offset<::tflite::Buffer>>> BuildBuffers() {
auto buf0 = ::tflite::CreateBuffer(builder_, CreateDataVector<float>({}));
auto buf1 = ::tflite::CreateBuffer(
builder_, CreateDataVector<float>({1.0f, 2.0f, 3.0f, 4.0f}));
auto buf2 =
::tflite::CreateBuffer(builder_, CreateDataVector<float>({3.0f, 4.0f}));
return builder_.CreateVector(
std::vector<Offset<::tflite::Buffer>>({buf0, buf1, buf2}));
}
Offset<Vector<Offset<::tflite::Tensor>>> BuildTensors() {
auto q = ::tflite::CreateQuantizationParameters(
builder_,
builder_.CreateVector<float>({0.1f}),
builder_.CreateVector<float>({0.2f}),
builder_.CreateVector<float>({0.3f}),
builder_.CreateVector<int64_t>({100LL}));
auto t1 =
::tflite::CreateTensor(builder_, builder_.CreateVector<int>({1, 2, 2}),
::tflite::TensorType_FLOAT32, 1,
builder_.CreateString("tensor_one"), q);
auto t2 =
::tflite::CreateTensor(builder_, builder_.CreateVector<int>({2, 1}),
::tflite::TensorType_FLOAT32, 0,
builder_.CreateString("tensor_two"), q);
return builder_.CreateVector(
std::vector<Offset<::tflite::Tensor>>({t1, t2}));
}
Offset<Vector<Offset<::tflite::OperatorCode>>> BuildOpCodes(
std::initializer_list<::tflite::BuiltinOperator> op_codes) {
std::vector<Offset<::tflite::OperatorCode>> op_codes_vector;
for (auto op : op_codes) {
op_codes_vector.push_back(::tflite::CreateOperatorCode(builder_, op, 0));
}
return builder_.CreateVector(op_codes_vector);
}
Offset<Vector<Offset<::tflite::OperatorCode>>> BuildOpCodes() {
return BuildOpCodes({::tflite::BuiltinOperator_MAX_POOL_2D,
::tflite::BuiltinOperator_CONV_2D});
}
Offset<Vector<Offset<::tflite::Operator>>> BuildOperators(
std::initializer_list<int> inputs, std::initializer_list<int> outputs) {
auto is = builder_.CreateVector<int>(inputs);
if (inputs.size() == 0) is = 0;
auto os = builder_.CreateVector<int>(outputs);
if (outputs.size() == 0) os = 0;
auto op = ::tflite::CreateOperator(
builder_, 0, is, os, ::tflite::BuiltinOptions_Conv2DOptions,
::tflite::CreateConv2DOptions(builder_, ::tflite::Padding_VALID, 1, 1,
::tflite::ActivationFunctionType_NONE)
.Union(),
0, ::tflite::CustomOptionsFormat_FLEXBUFFERS);
return builder_.CreateVector(std::vector<Offset<::tflite::Operator>>({op}));
}
Offset<Vector<Offset<::tflite::Operator>>> BuildOperators() {
return BuildOperators({0}, {1});
}
Offset<Vector<Offset<::tflite::SubGraph>>> BuildSubGraphs(
Offset<Vector<Offset<::tflite::Tensor>>> tensors,
Offset<Vector<Offset<::tflite::Operator>>> operators,
int num_sub_graphs = 1) {
std::vector<int32_t> inputs = {0};
std::vector<int32_t> outputs = {1};
std::vector<Offset<::tflite::SubGraph>> v;
for (int i = 0; i < num_sub_graphs; ++i) {
v.push_back(::tflite::CreateSubGraph(
builder_, tensors, builder_.CreateVector(inputs),
builder_.CreateVector(outputs), operators,
builder_.CreateString("subgraph")));
}
return builder_.CreateVector(v);
}
void BuildTestModel() {
auto buffers = BuildBuffers();
auto tensors = BuildTensors();
auto opcodes = BuildOpCodes();
auto operators = BuildOperators();
auto subgraphs = BuildSubGraphs(tensors, operators);
auto s = builder_.CreateString("");
::tflite::FinishModelBuffer(
builder_, ::tflite::CreateModel(builder_, TFLITE_SCHEMA_VERSION,
opcodes, subgraphs, s, buffers));
input_model_ = ::tflite::GetModel(builder_.GetBufferPointer());
}
std::string InputModelAsString() {
return std::string(reinterpret_cast<char*>(builder_.GetBufferPointer()),
builder_.GetSize());
}
flatbuffers::FlatBufferBuilder builder_;
const ::tflite::Model* input_model_ = nullptr;
};
TEST_F(ImportTest, LoadTensorsTable) {
BuildTestModel();
details::TensorsTable tensors;
details::LoadTensorsTable(*input_model_, &tensors);
EXPECT_THAT(tensors, ElementsAre("tensor_one", "tensor_two"));
}
TEST_F(ImportTest, LoadOperatorsTable) {
BuildTestModel();
details::OperatorsTable operators;
details::LoadOperatorsTable(*input_model_, &operators);
EXPECT_THAT(operators, ElementsAre("MAX_POOL_2D", "CONV_2D"));
}
TEST_F(ImportTest, Tensors) {
BuildTestModel();
auto model = Import(ModelFlags(), InputModelAsString());
  ASSERT_TRUE(model->HasArray("tensor_one"));
Array& a1 = model->GetArray("tensor_one");
EXPECT_EQ(ArrayDataType::kFloat, a1.data_type);
EXPECT_THAT(a1.GetBuffer<ArrayDataType::kFloat>().data,
ElementsAre(1.0f, 2.0f, 3.0f, 4.0f));
ASSERT_TRUE(a1.has_shape());
EXPECT_THAT(a1.shape().dims(), ElementsAre(1, 2, 2));
const auto& mm = a1.minmax;
ASSERT_TRUE(mm.get());
EXPECT_FLOAT_EQ(0.1, mm->min);
EXPECT_FLOAT_EQ(0.2, mm->max);
const auto& q = a1.quantization_params;
ASSERT_TRUE(q.get());
EXPECT_FLOAT_EQ(0.3, q->scale);
EXPECT_EQ(100, q->zero_point);
}
TEST_F(ImportTest, NoBuffers) {
auto buffers = 0;
auto tensors = BuildTensors();
auto opcodes = BuildOpCodes();
auto operators = BuildOperators();
auto subgraphs = BuildSubGraphs(tensors, operators);
auto comment = builder_.CreateString("");
::tflite::FinishModelBuffer(
builder_, ::tflite::CreateModel(builder_, TFLITE_SCHEMA_VERSION, opcodes,
subgraphs, comment, buffers));
EXPECT_DEATH(Import(ModelFlags(), InputModelAsString()),
"Missing 'buffers' section.");
}
TEST_F(ImportTest, NoInputs) {
auto buffers = BuildBuffers();
auto tensors = BuildTensors();
auto opcodes = BuildOpCodes();
auto operators = BuildOperators({}, {1});
auto subgraphs = BuildSubGraphs(tensors, operators);
auto comment = builder_.CreateString("");
::tflite::FinishModelBuffer(
builder_, ::tflite::CreateModel(builder_, TFLITE_SCHEMA_VERSION, opcodes,
subgraphs, comment, buffers));
EXPECT_DEATH(Import(ModelFlags(), InputModelAsString()),
"Missing 'inputs' for operator.");
}
TEST_F(ImportTest, NoOutputs) {
auto buffers = BuildBuffers();
auto tensors = BuildTensors();
auto opcodes = BuildOpCodes();
auto operators = BuildOperators({0}, {});
auto subgraphs = BuildSubGraphs(tensors, operators);
auto comment = builder_.CreateString("");
::tflite::FinishModelBuffer(
builder_, ::tflite::CreateModel(builder_, TFLITE_SCHEMA_VERSION, opcodes,
subgraphs, comment, buffers));
EXPECT_DEATH(Import(ModelFlags(), InputModelAsString()),
"Missing 'outputs' for operator.");
}
TEST_F(ImportTest, InvalidOpCode) {
auto buffers = BuildBuffers();
auto tensors = BuildTensors();
auto opcodes = BuildOpCodes({static_cast<::tflite::BuiltinOperator>(-1),
::tflite::BuiltinOperator_CONV_2D});
auto operators = BuildOperators();
auto subgraphs = BuildSubGraphs(tensors, operators);
auto comment = builder_.CreateString("");
::tflite::FinishModelBuffer(
builder_, ::tflite::CreateModel(builder_, TFLITE_SCHEMA_VERSION, opcodes,
subgraphs, comment, buffers));
EXPECT_DEATH(Import(ModelFlags(), InputModelAsString()),
"Operator id '-1' is out of range.");
}
TEST_F(ImportTest, MultipleSubGraphs) {
auto buffers = BuildBuffers();
auto tensors = BuildTensors();
auto opcodes = BuildOpCodes();
auto operators = BuildOperators();
auto subgraphs = BuildSubGraphs(tensors, operators, 2);
auto comment = builder_.CreateString("");
::tflite::FinishModelBuffer(
builder_, ::tflite::CreateModel(builder_, TFLITE_SCHEMA_VERSION, opcodes,
subgraphs, comment, buffers));
input_model_ = ::tflite::GetModel(builder_.GetBufferPointer());
EXPECT_DEATH(Import(ModelFlags(), InputModelAsString()),
"Number of subgraphs in tflite should be exactly 1.");
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/toco/tflite/import.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/toco/tflite/import_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
b204cf53-9ac5-4632-8b9c-991be7e258ce | cpp | tensorflow/tensorflow | tflite_settings_json_parser | tensorflow/lite/delegates/utils/experimental/stable_delegate/tflite_settings_json_parser.cc | tensorflow/lite/delegates/utils/experimental/stable_delegate/tflite_settings_json_parser_test.cc | #include "tensorflow/lite/delegates/utils/experimental/stable_delegate/tflite_settings_json_parser.h"
#include <string>
#include "flatbuffers/idl.h"
#include "tensorflow/lite/acceleration/configuration/configuration_fbs_contents-inl.h"
#include "tensorflow/lite/acceleration/configuration/configuration_generated.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
#include "tensorflow/lite/tools/logging.h"
namespace tflite {
namespace delegates {
namespace utils {
TfLiteSettingsJsonParser::TfLiteSettingsJsonParser() {
TFLITE_DCHECK(parser_.Parse(configuration_fbs_contents) &&
parser_.SetRootType("TFLiteSettings"));
}
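// Parses the JSON settings file into a TFLiteSettings flatbuffer. Returns
// nullptr on any load or parse failure. The returned pointer aliases the
// parser's internal buffer and is invalidated by the next call to Parse().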
const TFLiteSettings* TfLiteSettingsJsonParser::Parse(
const std::string& json_file_path) {
if (!LoadFromJsonFile(json_file_path) || buffer_pointer_ == nullptr) {
return nullptr;
}
return flatbuffers::GetRoot<TFLiteSettings>(buffer_pointer_);
}
const uint8_t* TfLiteSettingsJsonParser::GetBufferPointer() {
return buffer_pointer_;
}
flatbuffers::uoffset_t TfLiteSettingsJsonParser::GetBufferSize() {
return buffer_size_;
}
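// Loads and parses the JSON file against the TFLiteSettings schema installed
// in the constructor. On success the internal buffer pointer/size refer to
// the serialized flatbuffer; on any failure they are reset to nullptr/0.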
bool TfLiteSettingsJsonParser::LoadFromJsonFile(
const std::string& json_file_path) {
buffer_size_ = 0;
buffer_pointer_ = nullptr;
if (json_file_path.empty()) {
TFLITE_LOG(ERROR) << "Invalid JSON file path.";
return false;
}
std::string json_file;
if (!flatbuffers::LoadFile(json_file_path.c_str(), false, &json_file)) {
TFLITE_LOG(ERROR) << "Failed to load the delegate settings file ("
<< json_file_path << ").";
return false;
}
if (!parser_.Parse(json_file.c_str())) {
TFLITE_LOG(ERROR) << "Failed to parse the delegate settings file ("
<< json_file_path << ").";
return false;
}
buffer_size_ = parser_.builder_.GetSize();
buffer_pointer_ = parser_.builder_.GetBufferPointer();
return true;
}
}
}
} | #include "tensorflow/lite/delegates/utils/experimental/stable_delegate/tflite_settings_json_parser.h"
#include <gtest/gtest.h>
#include "flatbuffers/buffer.h"
#include "tensorflow/lite/acceleration/configuration/configuration_generated.h"
namespace {
using tflite::TFLiteSettings;
using tflite::delegates::utils::TfLiteSettingsJsonParser;
TEST(TfLiteSettingsJsonParserTest, SuccessWithValidXNNPackDelegateSettings) {
TfLiteSettingsJsonParser parser;
const TFLiteSettings* tflite_settings = parser.Parse(
"tensorflow/lite/delegates/utils/experimental/"
"stable_delegate/test_xnnpack_settings.json");
EXPECT_NE(parser.GetBufferPointer(), nullptr);
EXPECT_NE(parser.GetBufferSize(), 0);
ASSERT_NE(tflite_settings, nullptr);
EXPECT_EQ(tflite_settings->delegate(), tflite::Delegate_XNNPACK);
ASSERT_NE(tflite_settings->xnnpack_settings(), nullptr);
EXPECT_EQ(tflite_settings->xnnpack_settings()->num_threads(), 5);
}
TEST(TfLiteSettingsJsonParserTest, GetBufferPointerReturnsValidBufferPointers) {
TfLiteSettingsJsonParser parser;
parser.Parse(
"tensorflow/lite/delegates/utils/experimental/"
"stable_delegate/test_xnnpack_settings.json");
const uint8_t* buffer_pointer = parser.GetBufferPointer();
ASSERT_NE(buffer_pointer, nullptr);
ASSERT_NE(parser.GetBufferSize(), 0);
const TFLiteSettings* tflite_settings =
flatbuffers::GetRoot<TFLiteSettings>(buffer_pointer);
ASSERT_NE(tflite_settings, nullptr);
EXPECT_EQ(tflite_settings->delegate(), tflite::Delegate_XNNPACK);
ASSERT_NE(tflite_settings->xnnpack_settings(), nullptr);
EXPECT_EQ(tflite_settings->xnnpack_settings()->num_threads(), 5);
}
TEST(TfLiteSettingsJsonParserTest, FailedToParseInvalidSettings) {
TfLiteSettingsJsonParser parser;
EXPECT_EQ(
parser.Parse("tensorflow/lite/tools/delegates/experimental/"
"stable_delegate/test_invalid_settings.json"),
nullptr);
EXPECT_EQ(parser.GetBufferPointer(), nullptr);
EXPECT_EQ(parser.GetBufferSize(), 0);
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/utils/experimental/stable_delegate/tflite_settings_json_parser.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/utils/experimental/stable_delegate/tflite_settings_json_parser_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
570e99a0-b00f-43b7-841a-542291098e88 | cpp | tensorflow/tensorflow | batch_matmul | tensorflow/lite/kernels/batch_matmul.cc | tensorflow/lite/kernels/batch_matmul_test.cc | #include "tensorflow/lite/kernels/internal/reference/batch_matmul.h"
#include <stddef.h>
#include <algorithm>
#include <cstdint>
#include <cstring>  // memset, used by the hybrid path in EvalHybrid.
#include <limits>
#include "tensorflow/lite/core/c/builtin_op_data.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/cpu_backend_context.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
#include "tensorflow/lite/kernels/internal/optimized/batch_matmul.h"
#include "tensorflow/lite/kernels/internal/optimized/optimized_ops.h"
#include "tensorflow/lite/kernels/internal/tensor.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/internal/tensor_utils.h"
#include "tensorflow/lite/kernels/internal/types.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace batch_matmul {
static const int kInputLHSTensor = 0;
static const int kInputRHSTensor = 1;
static const int kOutputTensor = 0;
static const int kNumTempTensorsForAdjoints = 2;
static const int kNumTempTensorsForHybrid = 5;
enum KernelType {
kReference,
kGenericOptimized,
};
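// Per-node state: requantization parameters precomputed in Prepare for the
// fully quantized paths, the base index of the scratch tensors allocated in
// Init, and flags that allow a constant RHS transpose and the hybrid row sums
// to be computed only once.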
struct OpData {
int32_t output_multiplier;
int output_shift;
int32_t output_activation_min;
int32_t output_activation_max;
int scratch_tensor_index;
bool rhs_transposed;
bool compute_row_sums = false;
};
struct OpContext {
OpContext(TfLiteContext* context, TfLiteNode* node) {
params = reinterpret_cast<TfLiteBatchMatMulParams*>(node->builtin_data);
lhs = GetInput(context, node, kInputLHSTensor);
rhs = GetInput(context, node, kInputRHSTensor);
output = GetOutput(context, node, 0);
}
TfLiteBatchMatMulParams* params;
const TfLiteTensor* lhs;
const TfLiteTensor* rhs;
TfLiteTensor* output;
};
void* Init(TfLiteContext* context, const char* buffer, size_t length) {
auto* op_data = new OpData();
op_data->rhs_transposed = false;
context->AddTensors(context,
kNumTempTensorsForAdjoints + kNumTempTensorsForHybrid,
&op_data->scratch_tensor_index);
return op_data;
}
void Free(TfLiteContext* context, void* buffer) {
delete static_cast<OpData*>(buffer);
}
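// Computes the output shape from the rank-extended input shapes. Leading
// batch dimensions broadcast NumPy-style (a dimension of 1 stretches to match
// the other operand); the trailing two dimensions follow matmul semantics,
// with adj_x/adj_y selecting the transposed row/column counts. For example,
// lhs [2,1,3,4] x rhs [1,5,4,6] with adj_x = adj_y = false yields [2,5,3,6].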
TfLiteStatus ResizeOutputTensor(TfLiteContext* context,
const RuntimeShape& extended_lhs_shape,
const RuntimeShape& extended_rhs_shape,
bool adj_x, bool adj_y, int output_rank,
TfLiteTensor* output) {
TfLiteIntArray* output_shape = TfLiteIntArrayCreate(output_rank);
for (int i = 0; i < output_rank - 2; ++i) {
const int lhs_dim = extended_lhs_shape.Dims(i);
const int rhs_dim = extended_rhs_shape.Dims(i);
int broadcast_dim = lhs_dim;
if ((lhs_dim != rhs_dim) && (lhs_dim == 1)) {
broadcast_dim = rhs_dim;
}
output_shape->data[i] = broadcast_dim;
}
int lhs_rows_index = adj_x ? output_rank - 1 : output_rank - 2;
int rhs_cols_index = adj_y ? output_rank - 2 : output_rank - 1;
output_shape->data[output_rank - 2] = extended_lhs_shape.Dims(lhs_rows_index);
output_shape->data[output_rank - 1] = extended_rhs_shape.Dims(rhs_cols_index);
  return context->ResizeTensor(context, output, output_shape);
}
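// Allocates the temporaries this kernel may need: two scratch buffers for
// transposed copies of the LHS and RHS (always), plus five more for the
// hybrid float32-input/int8-weight path: the quantized input, per-batch
// scaling factors, an int32 accumulator, input offsets, and cached row sums.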
TfLiteStatus InitializeTemporaries(TfLiteContext* context, TfLiteNode* node,
OpContext* op_context) {
OpData* op_data = reinterpret_cast<OpData*>(node->user_data);
const TfLiteTensor* lhs = op_context->lhs;
const TfLiteTensor* rhs = op_context->rhs;
TfLiteIntArrayFree(node->temporaries);
bool is_hybrid =
(op_context->lhs->type == kTfLiteFloat32 && rhs->type == kTfLiteInt8);
if (is_hybrid) {
node->temporaries = TfLiteIntArrayCreate(kNumTempTensorsForAdjoints +
kNumTempTensorsForHybrid);
} else {
node->temporaries = TfLiteIntArrayCreate(kNumTempTensorsForAdjoints);
}
const int lhs_rank = NumDimensions(lhs);
const int rhs_rank = NumDimensions(rhs);
const int batch_size = op_context->params->adj_x
? lhs->dims->data[lhs_rank - 1]
: lhs->dims->data[lhs_rank - 2];
const int num_units = op_context->params->adj_y
? rhs->dims->data[rhs_rank - 2]
: rhs->dims->data[rhs_rank - 1];
{
node->temporaries->data[0] = op_data->scratch_tensor_index;
TfLiteTensor* scratch_buffer;
TF_LITE_ENSURE_OK(
context, GetTemporarySafe(context, node, 0, &scratch_buffer));
TfLiteIntArray* scratch_buffer_size = TfLiteIntArrayCreate(lhs_rank);
for (int i = 0; i < lhs_rank - 2; ++i) {
scratch_buffer_size->data[i] = lhs->dims->data[i];
}
scratch_buffer_size->data[lhs_rank - 2] = lhs->dims->data[lhs_rank - 1];
scratch_buffer_size->data[lhs_rank - 1] = lhs->dims->data[lhs_rank - 2];
scratch_buffer->type = op_context->lhs->type;
scratch_buffer->allocation_type = kTfLiteArenaRw;
TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, scratch_buffer,
scratch_buffer_size));
}
{
node->temporaries->data[1] = op_data->scratch_tensor_index + 1;
TfLiteTensor* scratch_buffer;
TF_LITE_ENSURE_OK(
context, GetTemporarySafe(context, node, 1, &scratch_buffer));
scratch_buffer->name = "BatchMatMul_scratch_buffer";
const TfLiteTensor* rhs = op_context->rhs;
int rhs_rank = NumDimensions(rhs);
TfLiteIntArray* scratch_buffer_size = TfLiteIntArrayCreate(rhs_rank);
for (int i = 0; i < rhs_rank - 2; ++i) {
scratch_buffer_size->data[i] = rhs->dims->data[i];
}
scratch_buffer_size->data[rhs_rank - 2] = rhs->dims->data[rhs_rank - 1];
scratch_buffer_size->data[rhs_rank - 1] = rhs->dims->data[rhs_rank - 2];
if (IsConstantTensor(op_context->rhs)) {
scratch_buffer->allocation_type = kTfLiteArenaRwPersistent;
} else {
scratch_buffer->allocation_type = kTfLiteArenaRw;
}
scratch_buffer->type = op_context->rhs->type;
TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, scratch_buffer,
scratch_buffer_size));
}
if (is_hybrid) {
int num_batches = 1;
for (int i = 0; i < lhs_rank - 2; ++i) {
num_batches *= lhs->dims->data[i];
}
int num_weights_matrices = 1;
for (int i = 0; i < rhs_rank - 2; ++i) {
num_weights_matrices *= rhs->dims->data[i];
}
op_data->compute_row_sums = true;
node->temporaries->data[2] = op_data->scratch_tensor_index + 2;
TfLiteTensor* input_quantized;
TF_LITE_ENSURE_OK(context, GetTemporarySafe(context, node, 2,
&input_quantized));
input_quantized->type = op_context->rhs->type;
input_quantized->allocation_type = kTfLiteArenaRw;
TfLiteIntArray* input_quantized_size =
TfLiteIntArrayCopy(op_context->lhs->dims);
TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, input_quantized,
input_quantized_size));
node->temporaries->data[3] = op_data->scratch_tensor_index + 3;
TfLiteTensor* scaling_factors;
TF_LITE_ENSURE_OK(context, GetTemporarySafe(context, node, 3,
&scaling_factors));
scaling_factors->type = kTfLiteFloat32;
scaling_factors->allocation_type = kTfLiteArenaRw;
int scaling_dims[1] = {num_batches * batch_size};
if (!TfLiteIntArrayEqualsArray(scaling_factors->dims, 1, scaling_dims)) {
TfLiteIntArray* scaling_factors_size = TfLiteIntArrayCreate(1);
scaling_factors_size->data[0] = scaling_dims[0];
TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, scaling_factors,
scaling_factors_size));
}
node->temporaries->data[4] = op_data->scratch_tensor_index + 4;
TfLiteTensor* accum_scratch;
TF_LITE_ENSURE_OK(
context, GetTemporarySafe(context, node, 4, &accum_scratch));
accum_scratch->type = kTfLiteInt32;
accum_scratch->allocation_type = kTfLiteArenaRw;
int accum_scratch_dims[2] = {num_units, batch_size};
if (!TfLiteIntArrayEqualsArray(accum_scratch->dims, 2,
accum_scratch_dims)) {
TfLiteIntArray* accum_size = TfLiteIntArrayCreate(2);
accum_size->data[0] = num_units;
accum_size->data[1] = batch_size;
TF_LITE_ENSURE_OK(
context, context->ResizeTensor(context, accum_scratch, accum_size));
}
node->temporaries->data[5] = op_data->scratch_tensor_index + 5;
TfLiteTensor* input_offsets;
TF_LITE_ENSURE_OK(
context, GetTemporarySafe(context, node, 5, &input_offsets));
input_offsets->type = kTfLiteInt32;
input_offsets->allocation_type = kTfLiteArenaRw;
if (!TfLiteIntArrayEqualsArray(input_offsets->dims, 1, scaling_dims)) {
TfLiteIntArray* input_offsets_size = TfLiteIntArrayCreate(1);
input_offsets_size->data[0] = num_batches * batch_size;
TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, input_offsets,
input_offsets_size));
}
node->temporaries->data[6] = op_data->scratch_tensor_index + 6;
TfLiteTensor* row_sums;
TF_LITE_ENSURE_OK(context,
GetTemporarySafe(context, node, 6, &row_sums));
row_sums->type = kTfLiteInt32;
row_sums->allocation_type = kTfLiteArenaRwPersistent;
int row_sums_dims[1] = {num_weights_matrices * num_units};
if (!TfLiteIntArrayEqualsArray(row_sums->dims, 1, row_sums_dims)) {
TfLiteIntArray* row_sums_size = TfLiteIntArrayCreate(1);
row_sums_size->data[0] = row_sums_dims[0];
TF_LITE_ENSURE_OK(
context, context->ResizeTensor(context, row_sums, row_sums_size));
}
}
return kTfLiteOk;
}
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
OpContext op_context(context, node);
TF_LITE_ENSURE_OK(context, InitializeTemporaries(context, node, &op_context));
OpData* op_data = reinterpret_cast<OpData*>(node->user_data);
bool adj_x = op_context.params->adj_x;
bool adj_y = op_context.params->adj_y;
const TfLiteTensor* lhs_data;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputLHSTensor, &lhs_data));
const TfLiteTensor* rhs_data;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputRHSTensor, &rhs_data));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
if ((lhs_data->type == kTfLiteInt8 || lhs_data->type == kTfLiteInt16) &&
output->type != kTfLiteInt32) {
double real_multiplier = 0.0;
TF_LITE_ENSURE_STATUS(GetQuantizedConvolutionMultipler(
context, lhs_data, rhs_data, output, &real_multiplier));
int exponent;
QuantizeMultiplier(real_multiplier, &op_data->output_multiplier, &exponent);
op_data->output_shift = exponent;
if (lhs_data->type == kTfLiteInt8) {
op_data->output_activation_min = std::numeric_limits<int8_t>::min();
op_data->output_activation_max = std::numeric_limits<int8_t>::max();
} else {
op_data->output_activation_min = std::numeric_limits<int16_t>::min();
op_data->output_activation_max = std::numeric_limits<int16_t>::max();
}
}
if (lhs_data->type == kTfLiteInt16) {
TF_LITE_ENSURE_EQ(context, lhs_data->params.zero_point, 0);
TF_LITE_ENSURE_EQ(context, rhs_data->params.zero_point, 0);
TF_LITE_ENSURE_EQ(context, output->params.zero_point, 0);
}
TF_LITE_ENSURE(context, lhs_data->type == kTfLiteFloat32 ||
lhs_data->type == kTfLiteInt8 ||
lhs_data->type == kTfLiteInt16);
TF_LITE_ENSURE(context, rhs_data->type == kTfLiteFloat32 ||
rhs_data->type == kTfLiteInt8 ||
rhs_data->type == kTfLiteInt16);
TF_LITE_ENSURE(context, (lhs_data->type == kTfLiteFloat32 &&
rhs_data->type == kTfLiteInt8) ||
lhs_data->type == rhs_data->type);
TF_LITE_ENSURE(context, NumDimensions(lhs_data) >= 2);
TF_LITE_ENSURE(context, NumDimensions(lhs_data) <= 5);
TF_LITE_ENSURE(context, NumDimensions(rhs_data) >= 2);
TF_LITE_ENSURE(context, NumDimensions(rhs_data) <= 5);
const int lhs_rank = NumDimensions(lhs_data);
const int rhs_rank = NumDimensions(rhs_data);
const int output_rank = std::max(lhs_rank, rhs_rank);
const RuntimeShape extended_lhs_shape =
RuntimeShape::ExtendedShape(output_rank, GetTensorShape(lhs_data));
const RuntimeShape extended_rhs_shape =
RuntimeShape::ExtendedShape(output_rank, GetTensorShape(rhs_data));
for (int i = 0; i < output_rank - 2; ++i) {
const int lhs_dim = extended_lhs_shape.Dims(i);
const int rhs_dim = extended_rhs_shape.Dims(i);
if (lhs_dim != rhs_dim) {
if (lhs_dim != 1) {
TF_LITE_ENSURE_EQ(context, rhs_dim, 1);
}
}
}
int accum_dim_lhs = adj_x ? extended_lhs_shape.Dims(output_rank - 2)
: extended_lhs_shape.Dims(output_rank - 1);
int accum_dim_rhs = adj_y ? extended_rhs_shape.Dims(output_rank - 1)
: extended_rhs_shape.Dims(output_rank - 2);
TF_LITE_ENSURE_EQ(context, accum_dim_lhs, accum_dim_rhs);
  return ResizeOutputTensor(context, extended_lhs_shape, extended_rhs_shape,
                            adj_x, adj_y, output_rank, output);
}
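// Transposes the two innermost dimensions of `tensor_in` into `output`,
// leaving all leading batch dimensions unchanged.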
template <typename scalar>
void TransposeRowsColumnsImpl(const TfLiteTensor* tensor_in,
const scalar* input, TfLiteTensor* tensor_out,
scalar* output) {
RuntimeShape transposed_shape(GetTensorShape(tensor_in));
RuntimeShape shape(GetTensorShape(tensor_in));
TransposeParams params;
int rank = NumDimensions(tensor_in);
params.perm_count = rank;
for (int i = 0; i < rank - 2; ++i) {
params.perm[i] = i;
}
params.perm[rank - 2] = rank - 1;
params.perm[rank - 1] = rank - 2;
transposed_shape.SetDim(rank - 1, shape.Dims(rank - 2));
transposed_shape.SetDim(rank - 2, shape.Dims(rank - 1));
optimized_ops::Transpose(params, shape, input, transposed_shape, output);
}
TfLiteStatus TransposeRowsColumns(TfLiteContext* context,
const TfLiteTensor* tensor_in,
TfLiteTensor* tensor_out) {
if (tensor_in->type == kTfLiteFloat32) {
TransposeRowsColumnsImpl<float>(tensor_in, GetTensorData<float>(tensor_in),
tensor_out,
GetTensorData<float>(tensor_out));
return kTfLiteOk;
} else if (tensor_in->type == kTfLiteInt8) {
TransposeRowsColumnsImpl<int8_t>(
tensor_in, GetTensorData<int8_t>(tensor_in), tensor_out,
GetTensorData<int8_t>(tensor_out));
return kTfLiteOk;
} else if (tensor_in->type == kTfLiteInt16) {
TransposeRowsColumnsImpl<int16_t>(
tensor_in, GetTensorData<int16_t>(tensor_in), tensor_out,
GetTensorData<int16_t>(tensor_out));
return kTfLiteOk;
} else {
TF_LITE_KERNEL_LOG(
context, "Can only transpose tensors with float, int8 or int16 type.");
return kTfLiteError;
}
}
RuntimeShape SwapRowColumnDims(const RuntimeShape& shape) {
RuntimeShape swapped_shape(shape);
const int32_t dims = shape.DimensionsCount();
swapped_shape.SetDim(dims - 2, shape.Dims(dims - 1));
swapped_shape.SetDim(dims - 1, shape.Dims(dims - 2));
return swapped_shape;
}
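// Hybrid path: quantizes the float LHS per batch (symmetrically or, if
// params->asymmetric_quantize_inputs is set, asymmetrically), multiplies it
// against the int8 RHS, and dequantizes the result back to float. Row sums of
// the filter are computed once and cached via data->compute_row_sums.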
TfLiteStatus EvalHybrid(TfLiteContext* context, TfLiteNode* node, OpData* data,
const RuntimeShape& input_shape,
const TfLiteTensor* input,
const RuntimeShape& filter_shape,
const TfLiteTensor* filter,
TfLiteTensor* input_quantized,
TfLiteTensor* scaling_factors,
TfLiteTensor* accum_scratch, TfLiteTensor* row_sums,
TfLiteTensor* input_offsets, TfLiteTensor* output) {
const auto* params =
reinterpret_cast<TfLiteBatchMatMulParams*>(node->builtin_data);
const int32_t num_input_dims = input_shape.DimensionsCount();
const int input_size = input_shape.Dims(num_input_dims - 2);
const int batch_size = input_shape.Dims(num_input_dims - 1);
int num_batches_to_quantize = batch_size;
for (int i = 0; i < input_shape.DimensionsCount() - 2; ++i) {
num_batches_to_quantize *= input_shape.Dims(i);
}
const int scaling_factor_size = GetTensorShape(scaling_factors).FlatSize();
TF_LITE_ENSURE(context, scaling_factor_size >= num_batches_to_quantize);
float* scaling_factors_ptr = GetTensorData<float>(scaling_factors);
int32_t* input_offset_ptr = nullptr;
int32_t* row_sums_ptr = nullptr;
input_offset_ptr = GetTensorData<int32_t>(input_offsets);
row_sums_ptr = GetTensorData<int32_t>(row_sums);
if (!params->asymmetric_quantize_inputs) {
memset(input_offset_ptr, 0, input_offsets->bytes);
}
int8_t* quant_data = GetTensorData<int8_t>(input_quantized);
const int8_t* filter_data = GetTensorData<int8_t>(filter);
const float* input_ptr = GetTensorData<float>(input);
tensor_utils::BatchQuantizeFloats(input_ptr, num_batches_to_quantize,
input_size, quant_data, scaling_factors_ptr,
input_offset_ptr,
params->asymmetric_quantize_inputs);
for (int b = 0; b < num_batches_to_quantize; ++b) {
scaling_factors_ptr[b] *= filter->params.scale;
}
RuntimeShape output_shape = GetTensorShape(output);
int output_size = 1;
for (int i = 0; i < output_shape.DimensionsCount(); ++i) {
output_size *= output_shape.Dims(i);
}
std::fill_n(GetTensorData<float>(output), output_size, 0.0f);
reference_ops::BatchMatMul(
filter_shape, filter_data, input_shape, quant_data, scaling_factors_ptr,
input_offset_ptr, row_sums_ptr, GetTensorShape(output),
GetTensorData<float>(output), &(data->compute_row_sums));
return kTfLiteOk;
}
template <KernelType kernel_type>
TfLiteStatus EvalInt8Int8(TfLiteContext* context, const OpData* data,
const RuntimeShape& lhs_shape,
const TfLiteTensor* lhs,
const RuntimeShape& rhs_shape,
const TfLiteTensor* rhs,
const RuntimeShape& output_shape,
TfLiteTensor* output, bool transpose_lhs) {
FullyConnectedParams op_params;
int32_t input_offset = -lhs->params.zero_point;
int32_t filter_offset = -rhs->params.zero_point;
int32_t output_offset = output->params.zero_point;
op_params.input_offset = input_offset;
op_params.weights_offset = filter_offset;
op_params.output_offset = output_offset;
op_params.output_multiplier = data->output_multiplier;
op_params.output_shift = data->output_shift;
op_params.quantized_activation_min = data->output_activation_min;
op_params.quantized_activation_max = data->output_activation_max;
op_params.lhs_cacheable = IsConstantTensor(lhs);
op_params.rhs_cacheable = IsConstantTensor(rhs);
if (kernel_type == kReference) {
reference_ops::BatchMatMul<int8_t, int32_t>(
op_params, rhs_shape, GetTensorData<int8_t>(rhs), lhs_shape,
GetTensorData<int8_t>(lhs), GetTensorShape(output),
GetTensorData<int8_t>(output));
} else {
optimized_ops::BatchMatMul(
op_params, rhs_shape, GetTensorData<int8_t>(rhs), lhs_shape,
GetTensorData<int8_t>(lhs), GetTensorShape(output),
GetTensorData<int8_t>(output),
CpuBackendContext::GetFromContext(context), transpose_lhs);
}
return kTfLiteOk;
}
template <KernelType kernel_type>
TfLiteStatus EvalInt8Int32(TfLiteContext* context, const OpData* data,
const RuntimeShape& lhs_shape,
const TfLiteTensor* lhs,
const RuntimeShape& rhs_shape,
const TfLiteTensor* rhs,
const RuntimeShape& output_shape,
TfLiteTensor* output, bool transpose_lhs) {
FullyConnectedParams op_params;
int32_t input_offset = -lhs->params.zero_point;
int32_t weights_offset = -rhs->params.zero_point;
int32_t output_offset = output->params.zero_point;
op_params.input_offset = input_offset;
op_params.weights_offset = weights_offset;
op_params.output_offset = output_offset;
op_params.output_multiplier = data->output_multiplier;
op_params.output_shift = data->output_shift;
op_params.quantized_activation_min = data->output_activation_min;
op_params.quantized_activation_max = data->output_activation_max;
op_params.lhs_cacheable = IsConstantTensor(lhs);
op_params.rhs_cacheable = IsConstantTensor(rhs);
if (kernel_type == kReference) {
reference_ops::BatchMatMul<int8, int8, int32>(
rhs_shape, GetTensorData<int8>(rhs), lhs_shape,
GetTensorData<int8>(lhs), GetTensorShape(output),
GetTensorData<int32>(output));
} else {
optimized_ops::BatchMatMul(
op_params, rhs_shape, GetTensorData<int8_t>(rhs), lhs_shape,
GetTensorData<int8_t>(lhs), GetTensorShape(output),
GetTensorData<int32_t>(output),
CpuBackendContext::GetFromContext(context), transpose_lhs);
}
return kTfLiteOk;
}
template <KernelType kernel_type>
TfLiteStatus EvalInt16(TfLiteContext* context, const OpData* data,
const RuntimeShape& lhs_shape, const TfLiteTensor* lhs,
const RuntimeShape& rhs_shape, const TfLiteTensor* rhs,
const RuntimeShape& output_shape, TfLiteTensor* output) {
FullyConnectedParams op_params;
int32_t input_offset = -lhs->params.zero_point;
int32_t filter_offset = -rhs->params.zero_point;
int32_t output_offset = output->params.zero_point;
op_params.input_offset = input_offset;
op_params.weights_offset = filter_offset;
op_params.output_offset = output_offset;
op_params.output_multiplier = data->output_multiplier;
op_params.output_shift = data->output_shift;
op_params.quantized_activation_min = data->output_activation_min;
op_params.quantized_activation_max = data->output_activation_max;
reference_ops::BatchMatMul<int16_t, int64_t>(
op_params, rhs_shape, GetTensorData<int16_t>(rhs), lhs_shape,
GetTensorData<int16_t>(lhs), GetTensorShape(output),
GetTensorData<int16_t>(output));
return kTfLiteOk;
}
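// Dispatches the quantized cases: hybrid (float32 LHS with int8 RHS), fully
// int8 (with either int8 or int32 output), and int16. Any other type
// combination is rejected with an error.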
template <KernelType kernel_type>
TfLiteStatus EvalQuantized(TfLiteContext* context, TfLiteNode* node,
OpData* data, const RuntimeShape& lhs_shape,
const TfLiteTensor* lhs,
const RuntimeShape& rhs_shape,
const TfLiteTensor* rhs, TfLiteTensor* output,
bool transpose_lhs) {
if (lhs->type == kTfLiteFloat32 && rhs->type == kTfLiteInt8) {
TfLiteTensor* input_quantized;
TF_LITE_ENSURE_OK(context, GetTemporarySafe(context, node, 2,
&input_quantized));
TfLiteTensor* scaling_factors;
TF_LITE_ENSURE_OK(context, GetTemporarySafe(context, node, 3,
&scaling_factors));
TfLiteTensor* accum_scratch;
TF_LITE_ENSURE_OK(
context, GetTemporarySafe(context, node, 4, &accum_scratch));
TfLiteTensor* input_offsets;
TF_LITE_ENSURE_OK(
context, GetTemporarySafe(context, node, 5, &input_offsets));
TfLiteTensor* row_sums;
TF_LITE_ENSURE_OK(context,
GetTemporarySafe(context, node, 6, &row_sums));
return EvalHybrid(context, node, data, lhs_shape, lhs, rhs_shape, rhs,
input_quantized, scaling_factors, accum_scratch, row_sums,
input_offsets, output);
} else if (lhs->type == kTfLiteInt8 && rhs->type == kTfLiteInt8) {
if (output->type == kTfLiteInt8) {
return EvalInt8Int8<kernel_type>(context, data, lhs_shape, lhs, rhs_shape,
rhs, GetTensorShape(output), output,
transpose_lhs);
} else {
return EvalInt8Int32<kernel_type>(context, data, lhs_shape, lhs,
rhs_shape, rhs, GetTensorShape(output),
output, transpose_lhs);
}
} else if (lhs->type == kTfLiteInt16 && rhs->type == kTfLiteInt16) {
return EvalInt16<kernel_type>(context, data, lhs_shape, lhs, rhs_shape, rhs,
GetTensorShape(output), output);
} else {
TF_LITE_KERNEL_LOG(
context,
"Currently only hybrid, int8 and int16 quantization are supported.\n");
return kTfLiteError;
}
return kTfLiteOk;
}
TfLiteTensor* GetTempRhs(TfLiteContext* context, TfLiteNode* node,
const TfLiteTensor* rhs) {
TfLiteTensor* transposed_rhs = GetTemporary(context, node, 1);
if (transposed_rhs == nullptr) {
return nullptr;
}
if (rhs->type == kTfLiteInt8 || rhs->type == kTfLiteInt16) {
transposed_rhs->params.scale = rhs->params.scale;
transposed_rhs->params.zero_point = rhs->params.zero_point;
}
return transposed_rhs;
}
TfLiteTensor* GetTempLhs(TfLiteContext* context, TfLiteNode* node,
const TfLiteTensor* lhs) {
TfLiteTensor* transposed_lhs = GetTemporary(context, node, 0);
if (transposed_lhs == nullptr) {
return nullptr;
}
if (lhs->type == kTfLiteInt8 || lhs->type == kTfLiteInt16) {
transposed_lhs->params.scale = lhs->params.scale;
transposed_lhs->params.zero_point = lhs->params.zero_point;
}
return transposed_lhs;
}
template <KernelType kernel_type>
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
OpContext op_context(context, node);
OpData* op_data = reinterpret_cast<OpData*>(node->user_data);
const TfLiteTensor* lhs;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputLHSTensor, &lhs));
const TfLiteTensor* rhs;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputRHSTensor, &rhs));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
RuntimeShape orig_lhs_shape = GetTensorShape(lhs);
RuntimeShape orig_rhs_shape = GetTensorShape(rhs);
bool adj_y = op_context.params->adj_y;
bool adj_x = op_context.params->adj_x;
int32_t rhs_dims_count = orig_rhs_shape.DimensionsCount();
int32_t lhs_dims_count = orig_lhs_shape.DimensionsCount();
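  // Fold a broadcast over the outermost batch dimension into the matmul
  // itself: when the RHS batch size is 1, the LHS batch and row dimensions
  // can be merged (e.g. lhs [3,2,4] x rhs [1,4,5] becomes lhs [6,4] x
  // rhs [4,5]), avoiding a per-batch loop over identical RHS data.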
if (rhs_dims_count > 2 && lhs_dims_count > 2) {
int rhs_one = orig_rhs_shape.DimsData()[rhs_dims_count - 3];
if (rhs_one == 1) {
int32_t* lhs_dims = orig_lhs_shape.DimsData();
int32_t* rhs_dims = orig_rhs_shape.DimsData();
RuntimeShape tmp_l(lhs_dims_count - 1, lhs_dims);
tmp_l.SetDim(lhs_dims_count - 3,
lhs_dims[lhs_dims_count - 3] * lhs_dims[lhs_dims_count - 2]);
tmp_l.SetDim(lhs_dims_count - 2, lhs_dims[lhs_dims_count - 1]);
orig_lhs_shape.ReplaceWith(tmp_l.DimensionsCount(), tmp_l.DimsData());
RuntimeShape tmp_r(rhs_dims_count - 1, orig_rhs_shape.DimsData());
tmp_r.SetDim(rhs_dims_count - 3, rhs_dims[rhs_dims_count - 2]);
tmp_r.SetDim(rhs_dims_count - 2, rhs_dims[rhs_dims_count - 1]);
orig_rhs_shape.ReplaceWith(tmp_r.DimensionsCount(), tmp_r.DimsData());
}
}
rhs_dims_count = orig_rhs_shape.DimensionsCount();
lhs_dims_count = orig_lhs_shape.DimensionsCount();
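  // The optimized float/int8 kernels can consume a row-major RHS directly
  // ("implicit transpose"), so when adj_y is false the explicit transpose into
  // the scratch buffer is only materialized for the reference kernel, the
  // hybrid path, and int16.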
const TfLiteTensor* rhs_tensor = rhs;
bool implicit_transpose_possible = true;
if ((lhs->type == kTfLiteFloat32 && rhs->type == kTfLiteInt8) ||
kernel_type == kReference || rhs->type == kTfLiteInt16) {
implicit_transpose_possible = false;
}
bool do_implicit_transpose = !adj_y && implicit_transpose_possible;
if (!adj_y && !implicit_transpose_possible) {
rhs_tensor = GetTempRhs(context, node, rhs);
}
const TfLiteTensor* lhs_tensor = adj_x ? GetTempLhs(context, node, lhs) : lhs;
if (!adj_y && !implicit_transpose_possible) {
if (!(IsConstantTensor(rhs) && op_data->rhs_transposed)) {
      TF_LITE_ENSURE_OK(
          context, TransposeRowsColumns(context, rhs,
                                        GetTemporary(context, node, 1)));
op_data->rhs_transposed = true;
}
}
if (adj_x) {
    TF_LITE_ENSURE_OK(
        context, TransposeRowsColumns(context, lhs,
                                      GetTemporary(context, node, 0)));
}
RuntimeShape rhs_shape = (adj_y && !do_implicit_transpose)
? orig_rhs_shape
: SwapRowColumnDims(orig_rhs_shape);
RuntimeShape lhs_shape =
adj_x ? orig_lhs_shape : SwapRowColumnDims(orig_lhs_shape);
switch (rhs->type) {
case kTfLiteFloat32:
if (kernel_type == kGenericOptimized) {
optimized_ops::BatchMatMul(
rhs_shape, GetTensorData<float>(rhs_tensor), lhs_shape,
GetTensorData<float>(lhs_tensor), GetTensorShape(output),
GetTensorData<float>(output),
CpuBackendContext::GetFromContext(context), do_implicit_transpose);
} else {
reference_ops::BatchMatMul(rhs_shape, GetTensorData<float>(rhs_tensor),
lhs_shape, GetTensorData<float>(lhs_tensor),
GetTensorShape(output),
GetTensorData<float>(output));
}
break;
case kTfLiteInt8:
case kTfLiteInt16:
      // Propagate failures (e.g. an unsupported type combination) instead of
      // silently returning kTfLiteOk.
      TF_LITE_ENSURE_OK(
          context,
          EvalQuantized<kernel_type>(context, node, op_data, lhs_shape,
                                     lhs_tensor, rhs_shape, rhs_tensor, output,
                                     do_implicit_transpose));
break;
default:
TF_LITE_KERNEL_LOG(context,
"Currently BatchMatMul doesn't support type: %s",
TfLiteTypeGetName(lhs->type));
return kTfLiteError;
}
return kTfLiteOk;
}
}
TfLiteRegistration* Register_BATCH_MATMUL_REF() {
static TfLiteRegistration r = {batch_matmul::Init, batch_matmul::Free,
batch_matmul::Prepare,
batch_matmul::Eval<batch_matmul::kReference>};
return &r;
}
TfLiteRegistration* Register_BATCH_MATMUL_GENERIC_OPTIMIZED() {
static TfLiteRegistration r = {
batch_matmul::Init, batch_matmul::Free, batch_matmul::Prepare,
batch_matmul::Eval<batch_matmul::kGenericOptimized>};
return &r;
}
TfLiteRegistration* Register_BATCH_MATMUL() {
return Register_BATCH_MATMUL_GENERIC_OPTIMIZED();
}
}
}
} | #include <stddef.h>
#include <stdint.h>
#include <initializer_list>
#include <limits>
#include <map>
#include <numeric>
#include <type_traits>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
#include "tensorflow/lite/string_type.h"
namespace tflite {
namespace ops {
namespace builtin {
TfLiteRegistration* Register_BATCH_MATMUL_REF();
TfLiteRegistration* Register_BATCH_MATMUL_GENERIC_OPTIMIZED();
}
}
namespace {
using ::testing::ElementsAre;
using ::testing::ElementsAreArray;
template <typename T>
tflite::TensorType GetTFLiteType() {
if (std::is_same<T, int8_t>::value) {
return TensorType_INT8;
}
if (std::is_same<T, int16_t>::value) {
return TensorType_INT16;
}
if (std::is_same<T, int32_t>::value) {
return TensorType_INT32;
}
return TensorType_FLOAT32;
}
template <typename T>
class BatchMatMulOpModel : public SingleOpModel {
public:
BatchMatMulOpModel(const TensorData& lhs, const TensorData& rhs,
bool adj_x = false, bool adj_y = false) {
lhs_id_ = AddInput(lhs);
rhs_id_ = AddInput(rhs);
output_id_ = AddOutput(GetTFLiteType<T>());
SetBuiltinOp(BuiltinOperator_BATCH_MATMUL,
BuiltinOptions_BatchMatMulOptions,
CreateBatchMatMulOptions(builder_, adj_x, adj_y).Union());
BuildInterpreter({GetShape(lhs_id_), GetShape(rhs_id_)});
}
int lhs() const { return lhs_id_; }
int rhs() const { return rhs_id_; }
std::vector<T> GetOutput() { return ExtractVector<T>(output_id_); }
std::vector<int32_t> GetOutputShape() { return GetTensorShape(output_id_); }
protected:
int lhs_id_;
int rhs_id_;
int output_id_;
};
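// Both kernel variants are exercised by the parameterized tests; the tag
// string selects which registration SingleOpTest instantiates.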
const auto kKernelMap = new std::map<string, TfLiteRegistration*>({
{"Reference", ops::builtin::Register_BATCH_MATMUL_REF()},
{"GenericOptimized",
ops::builtin::Register_BATCH_MATMUL_GENERIC_OPTIMIZED()},
});
class BatchMatMulOpTest : public SingleOpTest {
protected:
const std::map<string, TfLiteRegistration*>& GetKernelMap() override {
return *kKernelMap;
}
};
TEST_P(BatchMatMulOpTest, Float32Test_Ones) {
BatchMatMulOpModel<float> model({TensorType_FLOAT32, {3, 2, 1, 4}},
{TensorType_FLOAT32, {3, 1, 4, 1}});
std::vector<float> lhs(24);
std::iota(lhs.begin(), lhs.end(), 1);
std::vector<float> rhs(12);
std::iota(rhs.begin(), rhs.end(), 1);
std::vector<float> res{30, 70, 278, 382, 782, 950};
model.PopulateTensor<float>(model.lhs(), lhs);
model.PopulateTensor<float>(model.rhs(), rhs);
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput(), ElementsAreArray(res));
EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({3, 2, 1, 1}));
}
TEST_P(BatchMatMulOpTest, Float32Test_Flatten) {
BatchMatMulOpModel<float> model({TensorType_FLOAT32, {3, 2, 2, 4}},
{TensorType_FLOAT32, {3, 1, 4, 1}});
std::vector<float> lhs(48);
std::iota(lhs.begin(), lhs.end(), 1);
std::vector<float> rhs(12);
std::iota(rhs.begin(), rhs.end(), 1);
std::vector<float> res{30, 70, 110, 150, 486, 590,
694, 798, 1454, 1622, 1790, 1958};
model.PopulateTensor<float>(model.lhs(), lhs);
model.PopulateTensor<float>(model.rhs(), rhs);
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput(), ElementsAreArray(res));
EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({3, 2, 2, 1}));
}
TEST_P(BatchMatMulOpTest, Float32Test_Simple) {
BatchMatMulOpModel<float> model({TensorType_FLOAT32, {1, 2, 3}},
{TensorType_FLOAT32, {1, 3, 4}});
model.PopulateTensor<float>(model.lhs(), {1, 2, 3, 4, 5, 6});
model.PopulateTensor<float>(model.rhs(),
{7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput(),
Pointwise(FloatingPointEq(),
{74., 80., 86., 92., 173., 188., 203., 218.}));
EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({1, 2, 4}));
}
TEST_P(BatchMatMulOpTest, Int8Test_Simple) {
BatchMatMulOpModel<int32_t> model({TensorType_INT8, {1, 2, 3}},
{TensorType_INT8, {1, 3, 4}});
model.PopulateTensor<int8_t>(model.lhs(), {1, 2, 3, 4, 5, 6});
model.PopulateTensor<int8_t>(model.rhs(),
{7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput(),
ElementsAreArray({74, 80, 86, 92, 173, 188, 203, 218}));
EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({1, 2, 4}));
}
TEST_P(BatchMatMulOpTest, Int8Test_LargeElement) {
BatchMatMulOpModel<int32_t> model({TensorType_INT8, {1, 2, 3}},
{TensorType_INT8, {1, 3, 4}});
model.PopulateTensor<int8_t>(model.lhs(), {121, 122, 123, 124, 125, 126});
model.PopulateTensor<int8_t>(model.rhs(), {117, 118, 119, 110, 111, 112, 113,
114, 115, 116, 117, 118});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput(),
ElementsAreArray(
{41844, 42210, 42576, 41732, 42873, 43248, 43623, 42758}));
EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({1, 2, 4}));
}
TEST_P(BatchMatMulOpTest, Float32Test_SimpleRHSAdjoint) {
BatchMatMulOpModel<float> model({TensorType_FLOAT32, {1, 2, 3}},
{TensorType_FLOAT32, {1, 4, 3}}, false, true);
model.PopulateTensor<float>(model.lhs(), {1, 2, 3, 4, 5, 6});
model.PopulateTensor<float>(model.rhs(),
{7, 11, 15, 8, 12, 16, 9, 13, 17, 10, 14, 18});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput(),
Pointwise(FloatingPointEq(),
{74., 80., 86., 92., 173., 188., 203., 218.}));
EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({1, 2, 4}));
}
TEST_P(BatchMatMulOpTest, Float32Test_SimpleLHSAdjoint) {
BatchMatMulOpModel<float> model({TensorType_FLOAT32, {1, 3, 2}},
{TensorType_FLOAT32, {1, 3, 4}}, true, false);
model.PopulateTensor<float>(model.lhs(), {1, 4, 2, 5, 3, 6});
model.PopulateTensor<float>(model.rhs(),
{7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput(),
Pointwise(FloatingPointEq(),
{74., 80., 86., 92., 173., 188., 203., 218.}));
EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({1, 2, 4}));
}
TEST_P(BatchMatMulOpTest, Float32Test_BatchSizeTwo) {
BatchMatMulOpModel<float> model({TensorType_FLOAT32, {2, 2, 3}},
{TensorType_FLOAT32, {2, 3, 4}});
model.PopulateTensor<float>(model.lhs(),
{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12});
model.PopulateTensor<float>(model.rhs(),
{7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18,
19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput(),
Pointwise(FloatingPointEq(),
{74., 80., 86., 92., 173., 188., 203., 218., 560., 584.,
608., 632., 767., 800., 833., 866.}));
EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({2, 2, 4}));
}
TEST_P(BatchMatMulOpTest, Float32Test_Broadcast) {
BatchMatMulOpModel<float> model({TensorType_FLOAT32, {2, 2, 3}},
{TensorType_FLOAT32, {3, 4}});
model.PopulateTensor<float>(model.lhs(),
{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12});
model.PopulateTensor<float>(model.rhs(),
{7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput(),
Pointwise(FloatingPointEq(),
{74., 80., 86., 92., 173., 188., 203., 218., 272., 296.,
320., 344., 371., 404., 437., 470.}));
EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({2, 2, 4}));
}
TEST_P(BatchMatMulOpTest, Float32Test_BroadcastLHSAdjoint) {
BatchMatMulOpModel<float> model({TensorType_FLOAT32, {2, 3, 2}},
{TensorType_FLOAT32, {3, 4}}, true, false);
model.PopulateTensor<float>(model.lhs(),
{1, 4, 2, 5, 3, 6, 7, 10, 8, 11, 9, 12});
model.PopulateTensor<float>(model.rhs(),
{7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput(),
Pointwise(FloatingPointEq(),
{74., 80., 86., 92., 173., 188., 203., 218., 272., 296.,
320., 344., 371., 404., 437., 470.}));
EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({2, 2, 4}));
}
TEST_P(BatchMatMulOpTest, Float32Test_Broadcast2) {
BatchMatMulOpModel<float> model({TensorType_FLOAT32, {2, 1, 3, 2}},
{TensorType_FLOAT32, {3, 2, 4}});
model.PopulateTensor<float>(model.lhs(),
{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12});
model.PopulateTensor<float>(model.rhs(),
{7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18,
19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(
model.GetOutput(),
Pointwise(
FloatingPointEq(),
{29., 32., 35., 38., 65., 72., 79., 86., 101., 112., 123.,
134., 53., 56., 59., 62., 121., 128., 135., 142., 189., 200.,
211., 222., 77., 80., 83., 86., 177., 184., 191., 198., 277.,
288., 299., 310., 137., 152., 167., 182., 173., 192., 211., 230.,
209., 232., 255., 278., 257., 272., 287., 302., 325., 344., 363.,
382., 393., 416., 439., 462., 377., 392., 407., 422., 477., 496.,
515., 534., 577., 600., 623., 646.}));
EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({2, 3, 3, 4}));
}
TEST_P(BatchMatMulOpTest, Float32Test_Broadcast2LHSAdjoint) {
BatchMatMulOpModel<float> model({TensorType_FLOAT32, {2, 1, 2, 3}},
{TensorType_FLOAT32, {3, 2, 4}}, true, false);
model.PopulateTensor<float>(model.lhs(),
{1, 3, 5, 2, 4, 6, 7, 9, 11, 8, 10, 12});
model.PopulateTensor<float>(model.rhs(),
{7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18,
19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(
model.GetOutput(),
Pointwise(
FloatingPointEq(),
{29., 32., 35., 38., 65., 72., 79., 86., 101., 112., 123.,
134., 53., 56., 59., 62., 121., 128., 135., 142., 189., 200.,
211., 222., 77., 80., 83., 86., 177., 184., 191., 198., 277.,
288., 299., 310., 137., 152., 167., 182., 173., 192., 211., 230.,
209., 232., 255., 278., 257., 272., 287., 302., 325., 344., 363.,
382., 393., 416., 439., 462., 377., 392., 407., 422., 477., 496.,
515., 534., 577., 600., 623., 646.}));
EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({2, 3, 3, 4}));
}
TEST_P(BatchMatMulOpTest, Float32Test_Broadcast2RHSAdjoint) {
BatchMatMulOpModel<float> model({TensorType_FLOAT32, {2, 1, 3, 2}},
{TensorType_FLOAT32, {3, 4, 2}}, false, true);
model.PopulateTensor<float>(model.lhs(),
{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12});
model.PopulateTensor<float>(model.rhs(),
{7, 11, 8, 12, 9, 13, 10, 14, 15, 19, 16, 20,
17, 21, 18, 22, 23, 27, 24, 28, 25, 29, 26, 30});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(
model.GetOutput(),
Pointwise(
FloatingPointEq(),
{29., 32., 35., 38., 65., 72., 79., 86., 101., 112., 123.,
134., 53., 56., 59., 62., 121., 128., 135., 142., 189., 200.,
211., 222., 77., 80., 83., 86., 177., 184., 191., 198., 277.,
288., 299., 310., 137., 152., 167., 182., 173., 192., 211., 230.,
209., 232., 255., 278., 257., 272., 287., 302., 325., 344., 363.,
382., 393., 416., 439., 462., 377., 392., 407., 422., 477., 496.,
515., 534., 577., 600., 623., 646.}));
EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({2, 3, 3, 4}));
}
TEST_P(BatchMatMulOpTest, Float32Test_Broadcast2BothAdjoint) {
BatchMatMulOpModel<float> model({TensorType_FLOAT32, {2, 1, 2, 3}},
{TensorType_FLOAT32, {3, 4, 2}}, true, true);
model.PopulateTensor<float>(model.lhs(),
{1, 3, 5, 2, 4, 6, 7, 9, 11, 8, 10, 12});
model.PopulateTensor<float>(model.rhs(),
{7, 11, 8, 12, 9, 13, 10, 14, 15, 19, 16, 20,
17, 21, 18, 22, 23, 27, 24, 28, 25, 29, 26, 30});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(
model.GetOutput(),
Pointwise(
FloatingPointEq(),
{29., 32., 35., 38., 65., 72., 79., 86., 101., 112., 123.,
134., 53., 56., 59., 62., 121., 128., 135., 142., 189., 200.,
211., 222., 77., 80., 83., 86., 177., 184., 191., 198., 277.,
288., 299., 310., 137., 152., 167., 182., 173., 192., 211., 230.,
209., 232., 255., 278., 257., 272., 287., 302., 325., 344., 363.,
382., 393., 416., 439., 462., 377., 392., 407., 422., 477., 496.,
515., 534., 577., 600., 623., 646.}));
EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({2, 3, 3, 4}));
}
TEST_P(BatchMatMulOpTest, Float32Test_BroadcastFromRHS) {
BatchMatMulOpModel<float> model({TensorType_FLOAT32, {4, 5}},
{TensorType_FLOAT32, {3, 1, 5, 2}});
model.PopulateTensor<float>(
model.lhs(),
{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20});
model.PopulateTensor<float>(
model.rhs(),
{7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(
model.GetOutput(),
Pointwise(FloatingPointEq(),
{185., 200., 460., 500., 735., 800., 1010., 1100.,
335., 350., 860., 900., 1385., 1450., 1910., 2000.,
485., 500., 1260., 1300., 2035., 2100., 2810., 2900.}));
EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({3, 1, 4, 2}));
}
INSTANTIATE_TEST_SUITE_P(
BatchMatMulOpTest, BatchMatMulOpTest,
::testing::ValuesIn(SingleOpTest::GetKernelTags(*kKernelMap)));
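// Multi-op model used below: BATCH_MATMUL with a constant RHS feeding into a
// NEG op. The RHSNotAdjoint test invokes it twice to check that any state the
// kernel caches for the constant RHS stays valid across invocations.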
class ConstRHSBatchMatMulOpModel : public MultiOpModel {
public:
ConstRHSBatchMatMulOpModel(const TensorData& lhs,
std::initializer_list<int> rhs_shape,
std::initializer_list<float> rhs_data,
bool adj_x = false, bool adj_y = false) {
lhs_id_ = AddInput(lhs);
rhs_id_ = AddConstInput<float>(TensorType_FLOAT32, rhs_data, rhs_shape);
matmul_output_id_ = AddOutput(lhs.type);
std::vector<int> matmul_inputs{lhs_id_, rhs_id_};
std::vector<int> matmul_outputs{matmul_output_id_};
AddBuiltinOp(BuiltinOperator_BATCH_MATMUL,
BuiltinOptions_BatchMatMulOptions,
CreateBatchMatMulOptions(builder_, adj_x, adj_y).Union(),
matmul_inputs, matmul_outputs);
neg_output_id_ = AddOutput(lhs.type);
std::vector<int> neg_inputs{matmul_output_id_};
std::vector<int> neg_outputs{neg_output_id_};
AddBuiltinOp(BuiltinOperator_NEG, BuiltinOptions_NegOptions,
CreateNegOptions(builder_).Union(), neg_inputs, neg_outputs);
BuildInterpreter({GetShape(lhs_id_), GetShape(rhs_id_)});
}
int lhs() const { return lhs_id_; }
std::vector<float> GetOutput() {
return ExtractVector<float>(neg_output_id_);
}
std::vector<int32_t> GetOutputShape() {
return GetTensorShape(neg_output_id_);
}
protected:
int lhs_id_;
int rhs_id_;
int matmul_output_id_;
int neg_output_id_;
};
TEST(ConstRHSBatchMatMulOpModel, RHSNotAdjoint) {
ConstRHSBatchMatMulOpModel model({TensorType_FLOAT32, {1, 6, 2}}, {2, 3},
{6, 3, 7, 4, 6, 9});
model.PopulateTensor<float>(model.lhs(),
{6, 3, 7, 4, 6, 9, 2, 6, 7, 4, 3, 7});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput(),
ElementsAreArray({-48, -36, -69, -58, -45, -85, -72, -72, -123,
-36, -42, -68, -58, -45, -85, -46, -51, -84}));
EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({1, 6, 3}));
model.PopulateTensor<float>(model.lhs(),
{6, 3, 7, 4, 6, 9, 2, 6, 7, 4, 3, 7});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput(),
ElementsAreArray({-48, -36, -69, -58, -45, -85, -72, -72, -123,
-36, -42, -68, -58, -45, -85, -46, -51, -84}));
EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({1, 6, 3}));
}
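// Hybrid model: float LHS activations multiplied against symmetrically
// quantized int8 RHS weights. `asymmetric_quantize_inputs` selects whether the
// float inputs are quantized asymmetrically at runtime before the matmul.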
class HybridBatchMatMulOpModel : public SingleOpModel {
public:
HybridBatchMatMulOpModel(int units, int batches, const TensorData& lhs,
const TensorData& rhs,
const TensorData& output = {TensorType_FLOAT32},
bool asymmetric_quantize_inputs = true,
bool adj_x = false, bool adj_y = false)
: units_(units), batches_(batches) {
int total_input_size = 1;
for (size_t i = 0; i < lhs.shape.size(); ++i) {
total_input_size *= lhs.shape[i];
}
input_size_ = total_input_size / batches_;
lhs_id_ = AddInput(lhs);
rhs_id_ = AddInput(rhs);
output_id_ = AddOutput(output);
SetBuiltinOp(BuiltinOperator_BATCH_MATMUL,
BuiltinOptions_BatchMatMulOptions,
CreateBatchMatMulOptions(builder_, adj_x, adj_y,
asymmetric_quantize_inputs)
.Union());
BuildInterpreter({GetShape(lhs_id_), GetShape(rhs_id_)},
-1,
false,
false);
}
void SetWeights(const std::vector<float>& data) {
SymmetricQuantizeAndPopulate(rhs_id_, data);
AllocateAndDelegate(true);
}
void SetSignedWeights(std::initializer_list<float> f) {
SignedSymmetricQuantizeAndPopulate(rhs_id_, f);
AllocateAndDelegate(true);
}
void SetInput(const std::vector<float>& f) { PopulateTensor(lhs_id_, f); }
std::vector<float> GetOutput() { return ExtractVector<float>(output_id_); }
std::vector<int> GetOutputShape() { return GetTensorShape(output_id_); }
int input_size() { return input_size_; }
int num_units() { return units_; }
int num_batches() { return batches_; }
int lhs() const { return lhs_id_; }
int rhs() const { return rhs_id_; }
protected:
int lhs_id_;
int rhs_id_;
int output_id_;
int units_;
int batches_;
int input_size_;
};
class HybridAsymmetricBatchMatMulOpTest : public SingleOpTest {
protected:
const std::map<string, TfLiteRegistration*>& GetKernelMap() override {
return *kKernelMap;
}
};
TEST_P(HybridAsymmetricBatchMatMulOpTest, SimpleTestQuantizedInt8) {
HybridBatchMatMulOpModel m(
3, 2,
{TensorType_FLOAT32, {2, 10}},
{TensorType_INT8, {10, 3}, 0, 0, 10.0 / 127.0, 0});
m.SetSignedWeights({
1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5,
6, 6, 6, 7, 7, 7, 8, 8, 8, 9, 9, 9, 10, 10, 10,
});
m.SetInput({
11, 12, 13, 14, 15, 16, 17, 18, -19, -20,
11, 12, 13, 14, 15, 16, 17, -18, 19, -20,
});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAreArray(ArrayFloatNear(
{
193,
193,
193,
247,
247,
247,
},
3.f)));
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({2, 3}));
}
TEST_P(HybridAsymmetricBatchMatMulOpTest, MultipleNumBatchQuantizedInt8) {
HybridBatchMatMulOpModel m(
10, 4,
{TensorType_FLOAT32, {1, 2, 2, 3}},
{TensorType_INT8, {3, 10}, 0, 0, 10.0 / 127.0, 0});
m.SetSignedWeights({
1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3,
1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3,
});
m.SetInput({
11, 12, 13,
11, 12, 13,
11, 12, 13,
11, 12, 13,
});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(),
ElementsAreArray(ArrayFloatNear(
{
73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73,
73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73,
73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73,
},
0.64f)));
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({1, 2, 2, 10}));
}
TEST_P(HybridAsymmetricBatchMatMulOpTest, RegressionTestQuantizedInt8) {
HybridBatchMatMulOpModel m(
10, 2,
{TensorType_FLOAT32, {2, 3}},
{TensorType_INT8, {3, 10}, 0, 0, 10.0 / 127.0, 0});
m.SetSignedWeights({
1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3,
1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3,
});
m.SetInput({
11, 12, 13,
11, 12, 13,
});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAreArray(ArrayFloatNear(
{
73, 73, 73, 73, 73, 73, 73, 73, 73, 73,
73, 73, 73, 73, 73, 73, 73, 73, 73, 73,
},
0.64f)));
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({2, 10}));
}
TEST_P(HybridAsymmetricBatchMatMulOpTest,
TestQuantizedInt8BatchesAndUnitsGreaterThanAccumDimSize) {
HybridBatchMatMulOpModel m(
8, 6,
{TensorType_FLOAT32, {6, 3}},
{TensorType_INT8, {3, 8}, 0, 0, 10.0 / 127.0, 0});
m.SetSignedWeights(
{1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3});
m.SetInput({
11, 12, 13,
11, 12, 13,
11, 12, 13,
11, 12, 13,
11, 12, 13,
11, 12, 13,
});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(
m.GetOutput(),
ElementsAreArray(ArrayFloatNear(
{74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74,
74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74,
74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74},
0.15f)));
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({6, 8}));
}
TEST_P(HybridAsymmetricBatchMatMulOpTest,
TestQuantizedInt8BatchesAndUnitsGreaterThanAccumDimSizeAdjX) {
HybridBatchMatMulOpModel m(
8, 6,
{TensorType_FLOAT32, {3, 6}},
{TensorType_INT8, {3, 8}, 0, 0, 10.0 / 127.0, 0},
{TensorType_FLOAT32},
      /*asymmetric_quantize_inputs=*/true,
      /*adj_x=*/true);
m.SetSignedWeights(
{1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3});
m.SetInput(
{11, 11, 11, 11, 11, 11, 12, 12, 12, 12, 12, 12, 13, 13, 13, 13, 13, 13});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(
m.GetOutput(),
ElementsAreArray(ArrayFloatNear(
{74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74,
74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74,
74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74},
0.15f)));
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({6, 8}));
}
TEST_P(HybridAsymmetricBatchMatMulOpTest,
TestQuantizedInt8BatchesAndUnitsGreaterThanAccumDimSizeAdjY) {
HybridBatchMatMulOpModel m(
8, 6,
{TensorType_FLOAT32, {6, 3}},
{TensorType_INT8, {8, 3}, 0, 0, 10.0 / 127.0, 0},
{TensorType_FLOAT32},
      /*asymmetric_quantize_inputs=*/true,
      /*adj_x=*/false,
      /*adj_y=*/true);
m.SetSignedWeights(
{1, 2, 3, 1, 2, 3, 1, 2, 3, 1, 2, 3, 1, 2, 3, 1, 2, 3, 1, 2, 3, 1, 2, 3});
m.SetInput({
11, 12, 13,
11, 12, 13,
11, 12, 13,
11, 12, 13,
11, 12, 13,
11, 12, 13,
});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(
m.GetOutput(),
ElementsAreArray(ArrayFloatNear(
{74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74,
74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74,
74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74},
0.15f)));
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({6, 8}));
}
TEST_P(HybridAsymmetricBatchMatMulOpTest,
TestQuantizedInt8BatchesAndUnitsGreaterThanAccumDimSizeAdjXAdjY) {
HybridBatchMatMulOpModel m(
8, 6,
{TensorType_FLOAT32, {3, 6}},
{TensorType_INT8, {8, 3}, 0, 0, 10.0 / 127.0, 0},
{TensorType_FLOAT32},
      /*asymmetric_quantize_inputs=*/true,
      /*adj_x=*/true,
      /*adj_y=*/true);
m.SetSignedWeights(
{1, 2, 3, 1, 2, 3, 1, 2, 3, 1, 2, 3, 1, 2, 3, 1, 2, 3, 1, 2, 3, 1, 2, 3});
m.SetInput(
{11, 11, 11, 11, 11, 11, 12, 12, 12, 12, 12, 12, 13, 13, 13, 13, 13, 13});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(
m.GetOutput(),
ElementsAreArray(ArrayFloatNear(
{74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74,
74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74,
74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74},
0.15f)));
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({6, 8}));
}
TEST_P(HybridAsymmetricBatchMatMulOpTest, QuantizedInt8BroadcastWeights) {
HybridBatchMatMulOpModel m(
3, 2,
{TensorType_FLOAT32, {2, 2, 10}},
{TensorType_INT8, {10, 3}, 0, 0, 10.0 / 127.0, 0});
m.SetSignedWeights({
1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5,
6, 6, 6, 7, 7, 7, 8, 8, 8, 9, 9, 9, 10, 10, 10,
});
m.SetInput({
1, 2, 3, 4, 5, 6, 7, 8, -9, -10,
1, 2, 3, 4, 5, 6, 7, -8, 9, -10,
11, 12, 13, 14, 15, 16, 17, 18, -19, -20,
11, 12, 13, 14, 15, 16, 17, -18, 19, -20,
});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAreArray(ArrayFloatNear(
{
23, 23, 23,
57, 57, 57,
193, 193, 193,
247, 247, 247,
},
3.f)));
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({2, 2, 3}));
}
TEST_P(HybridAsymmetricBatchMatMulOpTest, QuantizedInt8BroadcastBigWeights) {
HybridBatchMatMulOpModel m(
9, 2,
{TensorType_FLOAT32, {2, 2, 10}},
{TensorType_INT8, {10, 9}, 0, 0, 10.0 / 127.0, 0});
m.SetSignedWeights({
1, 1, 1, 17, 17, 17, 26, 26, 26,
2, 2, 2, 18, 18, 18, 27, 27, 27,
3, 3, 3, 19, 19, 19, 28, 28, 28,
4, 4, 4, 20, 20, 20, 29, 29, 29,
5, 5, 5, 21, 21, 21, 30, 30, 30,
6, 6, 6, 22, 22, 22, 31, 31, 31,
7, 7, 7, 23, 23, 23, 32, 32, 32,
8, 8, 8, 24, 24, 24, 33, 33, 33,
9, 9, 9, 25, 25, 25, 34, 34, 34,
10, 10, 10, 26, 26, 26, 35, 35, 35,
});
m.SetInput({
1, 2, 3, 4, 5, 6, 7, 8, -9, -10,
1, 2, 3, 4, 5, 6, 7, -8, 9, -10,
11, 12, 13, 14, 15, 16, 17, 18, -19, -20,
11, 12, 13, 14, 15, 16, 17, -18, 19, -20,
});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(),
ElementsAreArray(ArrayFloatNear(
{
23, 23, 23, 295, 295, 295, 448, 448, 448,
57, 57, 57, 361, 361, 361, 532, 532, 532,
193, 193, 193, 1425, 1425, 1425, 2118, 2118, 2118,
247, 247, 247, 1511, 1511, 1511, 2222, 2222, 2222
},
10.0f)));
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({2, 2, 9}));
}
TEST_P(HybridAsymmetricBatchMatMulOpTest, QuantizedInt8BroadcastInputs) {
HybridBatchMatMulOpModel m(
3, 2,
{TensorType_FLOAT32, {2, 10}},
{TensorType_INT8, {2, 10, 3}, 0, 0, 10.0 / 127.0, 0});
m.SetSignedWeights({
1, -3, 1,
2, -2, 2,
3, -1, 3,
4, 0, 4,
5, 1, 5,
6, 2, 6,
7, 3, 7,
8, 4, 8,
9, 5, 9,
10, 6, 10,
1, 1, 1,
2, 2, 2,
3, 3, 3,
4, 4, 4,
5, 5, 5,
6, 6, 6,
7, 7, 7,
8, 8, 8,
9, 9, 9,
10, 10, 10,
});
m.SetInput({
1, 2, 3, 4, 5, 6, 7, 8, -9, -10,
1, 2, 3, 4, 5, 6, 7, -8, 9, -10,
});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAreArray(ArrayFloatNear(
{
23, -45, 23,
57, -19, 57,
23, 23, 23,
57, 57, 57,
},
1.5f)));
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({2, 2, 3}));
}
INSTANTIATE_TEST_SUITE_P(
HybridAsymmetricBatchMatMulOpTest, HybridAsymmetricBatchMatMulOpTest,
::testing::ValuesIn(SingleOpTest::GetKernelTags(*kKernelMap)));
class HybridSymmetricBatchMatMulOpTest : public SingleOpTest {
protected:
const std::map<string, TfLiteRegistration*>& GetKernelMap() override {
return *kKernelMap;
}
};
TEST_P(HybridSymmetricBatchMatMulOpTest, SimpleTestQuantizedInt8) {
HybridBatchMatMulOpModel m(
3, 2,
{TensorType_FLOAT32, {2, 10}},
{TensorType_INT8, {10, 3}, 0, 0, 10.0 / 127.0, 0},
      {TensorType_FLOAT32}, /*asymmetric_quantize_inputs=*/false);
m.SetSignedWeights({
1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5,
6, 6, 6, 7, 7, 7, 8, 8, 8, 9, 9, 9, 10, 10, 10,
});
m.SetInput({
11, 12, 13, 14, 15, 16, 17, 18, -19, -20,
11, 12, 13, 14, 15, 16, 17, -18, 19, -20,
});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAreArray(ArrayFloatNear(
{
193,
193,
193,
247,
247,
247,
},
1.5f)));
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({2, 3}));
}
TEST_P(HybridSymmetricBatchMatMulOpTest, QuantizedInt8BroadcastWeights) {
HybridBatchMatMulOpModel m(
3, 2,
{TensorType_FLOAT32, {2, 2, 10}},
{TensorType_INT8, {10, 3}, 0, 0, 10.0 / 127.0, 0},
      {TensorType_FLOAT32}, /*asymmetric_quantize_inputs=*/false);
m.SetSignedWeights({
1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5,
6, 6, 6, 7, 7, 7, 8, 8, 8, 9, 9, 9, 10, 10, 10,
});
m.SetInput({
1, 2, 3, 4, 5, 6, 7, 8, -9, -10,
1, 2, 3, 4, 5, 6, 7, -8, 9, -10,
11, 12, 13, 14, 15, 16, 17, 18, -19, -20,
11, 12, 13, 14, 15, 16, 17, -18, 19, -20,
});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAreArray(ArrayFloatNear(
{
23, 23, 23,
57, 57, 57,
193, 193, 193,
247, 247, 247,
},
1.5f)));
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({2, 2, 3}));
}
TEST_P(HybridSymmetricBatchMatMulOpTest, QuantizedInt8BroadcastBigWeights) {
HybridBatchMatMulOpModel m(
9, 2,
{TensorType_FLOAT32, {2, 2, 10}},
{TensorType_INT8, {10, 9}, 0, 0, 10.0 / 127.0, 0},
      {TensorType_FLOAT32}, /*asymmetric_quantize_inputs=*/false);
m.SetSignedWeights({
1, 1, 1, 17, 17, 17, 26, 26, 26,
2, 2, 2, 18, 18, 18, 27, 27, 27,
3, 3, 3, 19, 19, 19, 28, 28, 28,
4, 4, 4, 20, 20, 20, 29, 29, 29,
5, 5, 5, 21, 21, 21, 30, 30, 30,
6, 6, 6, 22, 22, 22, 31, 31, 31,
7, 7, 7, 23, 23, 23, 32, 32, 32,
8, 8, 8, 24, 24, 24, 33, 33, 33,
9, 9, 9, 25, 25, 25, 34, 34, 34,
10, 10, 10, 26, 26, 26, 35, 35, 35,
});
m.SetInput({
1, 2, 3, 4, 5, 6, 7, 8, -9, -10,
1, 2, 3, 4, 5, 6, 7, -8, 9, -10,
11, 12, 13, 14, 15, 16, 17, 18, -19, -20,
11, 12, 13, 14, 15, 16, 17, -18, 19, -20,
});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(),
ElementsAreArray(ArrayFloatNear(
{
23, 23, 23, 295, 295, 295, 448, 448, 448,
57, 57, 57, 361, 361, 361, 532, 532, 532,
193, 193, 193, 1425, 1425, 1425, 2118, 2118, 2118,
247, 247, 247, 1511, 1511, 1511, 2222, 2222, 2222
},
10.0f)));
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({2, 2, 9}));
}
TEST_P(HybridSymmetricBatchMatMulOpTest, QuantizedInt8BroadcastInputs) {
HybridBatchMatMulOpModel m(
3, 2,
{TensorType_FLOAT32, {2, 10}},
{TensorType_INT8, {2, 10, 3}, 0, 0, 10.0 / 127.0, 0},
      {TensorType_FLOAT32}, /*asymmetric_quantize_inputs=*/false);
m.SetSignedWeights({
1, -3, 1,
2, -2, 2,
3, -1, 3,
4, 0, 4,
5, 1, 5,
6, 2, 6,
7, 3, 7,
8, 4, 8,
9, 5, 9,
10, 6, 10,
1, 1, 1,
2, 2, 2,
3, 3, 3,
4, 4, 4,
5, 5, 5,
6, 6, 6,
7, 7, 7,
8, 8, 8,
9, 9, 9,
10, 10, 10,
});
m.SetInput({
1, 2, 3, 4, 5, 6, 7, 8, -9, -10,
1, 2, 3, 4, 5, 6, 7, -8, 9, -10,
});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAreArray(ArrayFloatNear(
{
23, -45, 23,
57, -19, 57,
23, 23, 23,
57, 57, 57,
},
1.5f)));
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({2, 2, 3}));
}
INSTANTIATE_TEST_SUITE_P(
HybridSymmetricBatchMatMulOpTest, HybridSymmetricBatchMatMulOpTest,
::testing::ValuesIn(SingleOpTest::GetKernelTags(*kKernelMap)));
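// Fully quantized model: the RHS reuses the LHS quantization parameters and
// the output is an integer tensor. The RHS shape is derived from `adj_y` by
// swapping the accumulation and output dimensions.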
class QuantizedBatchMatMulOpModel : public SingleOpModel {
public:
QuantizedBatchMatMulOpModel(int units, int batches, const TensorData& lhs,
const TensorData& output = {TensorType_INT8},
bool adj_x = false, bool adj_y = false)
: units_(units), batches_(batches) {
int total_input_size = 1;
for (size_t i = 0; i < lhs.shape.size(); ++i) {
total_input_size *= lhs.shape[i];
}
input_size_ = total_input_size / batches_;
int rhs_batch_size = adj_y ? units_ : input_size_;
int rhs_channels = adj_y ? input_size_ : units_;
lhs_id_ = AddInput(lhs);
rhs_id_ = AddInput({lhs.type,
{rhs_batch_size, rhs_channels},
0,
0,
GetScale(lhs_id_),
GetZeroPoint(lhs_id_)});
output_id_ = AddOutput(output);
SetBuiltinOp(BuiltinOperator_BATCH_MATMUL,
BuiltinOptions_BatchMatMulOptions,
CreateBatchMatMulOptions(builder_, adj_x, adj_y).Union());
BuildInterpreter({GetShape(lhs_id_), GetShape(rhs_id_)});
}
template <typename T>
void SetWeights(const std::vector<float>& data) {
QuantizeAndPopulate<T>(rhs_id_, data);
}
template <typename T>
void SetInput(const std::vector<float>& data) {
QuantizeAndPopulate<T>(lhs_id_, data);
}
template <typename T>
std::vector<T> GetOutput() {
return ExtractVector<T>(output_id_);
}
template <typename T>
std::vector<float> GetDequantizedOutput() {
return Dequantize<T>(ExtractVector<T>(output_id_), GetScale(output_id_),
GetZeroPoint(output_id_));
}
protected:
int lhs_id_;
int rhs_id_;
int output_id_;
int units_;
int batches_;
int input_size_;
};
class QuantizedBatchMatMulOpTest : public SingleOpTest {
protected:
const std::map<string, TfLiteRegistration*>& GetKernelMap() override {
return *kKernelMap;
}
};
TEST_P(QuantizedBatchMatMulOpTest, SimpleTestQuantizedInt8) {
QuantizedBatchMatMulOpModel m(
3, 2,
{TensorType_INT8, {2, 10}, -63.5, 64},
{TensorType_INT8, {}, -127, 128});
m.SetWeights<int8_t>({
1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5,
6, 6, 6, 7, 7, 7, 8, 8, 8, 9, 9, 9, 10, 10, 10,
});
m.SetInput<int8_t>({
1, 2, 3, 4, 5, 6, 7, 8, -9, -10,
1, 2, 3, 4, 5, 6, 7, -8, 9, -10,
});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetDequantizedOutput<int8_t>(),
ElementsAreArray(ArrayFloatNear({23, 23, 23, 57, 57, 57})));
EXPECT_THAT(m.GetOutput<int8_t>(), ElementsAre(22, 22, 22, 56, 56, 56));
}
TEST_P(QuantizedBatchMatMulOpTest, SimpleTestQuantizedInt8AdjRHS) {
QuantizedBatchMatMulOpModel m(
3, 2,
{TensorType_INT8, {2, 10}, -63.5, 64},
      {TensorType_INT8, {}, -127, 128}, /*adj_x=*/false, /*adj_y=*/true);
m.SetWeights<int8_t>({
1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5,
6, 6, 6, 7, 7, 7, 8, 8, 8, 9, 9, 9, 10, 10, 10,
});
m.SetInput<int8_t>({
1, 2, 3, 4, 5, 6, 7, 8, -9, -10,
1, 2, 3, 4, 5, 6, 7, -8, 9, -10,
});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetDequantizedOutput<int8_t>(),
ElementsAreArray(ArrayFloatNear({14, 65, 128, 20, 95, 128})));
EXPECT_THAT(m.GetOutput<int8_t>(), ElementsAre(13, 64, 127, 19, 94, 127));
}
TEST_P(QuantizedBatchMatMulOpTest, SimpleTestQuantizedInt16) {
const float inputs_scale = 10.0 / std::numeric_limits<int16_t>::max();
const float output_scale = 1.0;
const int32_t zero_point = 0;
QuantizedBatchMatMulOpModel m(
3, 2,
{TensorType_INT16, {2, 10}, 0, 0, inputs_scale, zero_point},
{TensorType_INT16, {}, 0, 0, output_scale, zero_point});
m.SetWeights<int16_t>({
1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5,
6, 6, 6, 7, 7, 7, 8, 8, 8, 9, 9, 9, 10, 10, 10,
});
m.SetInput<int16_t>({
1, 2, 3, 4, 5, 6, 7, 8, -9, -10,
1, 2, 3, 4, 5, 6, 7, -8, 9, -10,
});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetDequantizedOutput<int16_t>(),
ElementsAreArray(ArrayFloatNear({23, 23, 23, 57, 57, 57})));
EXPECT_THAT(m.GetOutput<int16_t>(), ElementsAre(23, 23, 23, 57, 57, 57));
}
INSTANTIATE_TEST_SUITE_P(
QuantizedBatchMatMulOpTest, QuantizedBatchMatMulOpTest,
::testing::ValuesIn(SingleOpTest::GetKernelTags(*kKernelMap)));
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/batch_matmul.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/batch_matmul_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
31560376-54b1-4b76-9afc-50948117b640 | cpp | tensorflow/tensorflow | devicedb | tensorflow/lite/experimental/acceleration/compatibility/devicedb.cc | tensorflow/lite/experimental/acceleration/compatibility/devicedb_test.cc | #include "tensorflow/lite/experimental/acceleration/compatibility/devicedb.h"
#include <map>
#include <string>
#include <vector>
#include "tensorflow/lite/experimental/acceleration/compatibility/database_generated.h"
namespace tflite {
namespace acceleration {
namespace {
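// Returns the edges under `root` matching `value`. For EQUAL nodes this is a
// single exact lookup; for MINIMUM (resp. MAXIMUM) nodes, every edge whose
// value is a lower (resp. upper) bound of `value` in string order matches.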
std::vector<const DeviceDecisionTreeEdge*> Find(
const DeviceDecisionTreeNode* root, const std::string& value) {
std::vector<const DeviceDecisionTreeEdge*> found;
if (root->comparison() == Comparison_EQUAL) {
const DeviceDecisionTreeEdge* possible =
root->items()->LookupByKey(value.c_str());
if (possible) {
found.push_back(possible);
}
} else {
for (const DeviceDecisionTreeEdge* item : *(root->items())) {
if ((root->comparison() == Comparison_MINIMUM)
? value >= item->value()->str()
: value <= item->value()->str()) {
found.push_back(item);
}
}
}
return found;
}
void UpdateVariablesFromDeviceDecisionTreeEdges(
std::map<std::string, std::string>* variable_values,
const DeviceDecisionTreeEdge& item) {
if (item.derived_properties()) {
for (const DerivedProperty* p : *(item.derived_properties())) {
(*variable_values)[p->variable()->str()] = p->value()->str();
}
}
}
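// Recursively walks the decision tree: if the node's variable has a known
// value, follows every matching edge, applying its derived properties and
// descending into its children.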
void Follow(const DeviceDecisionTreeNode* root,
std::map<std::string, std::string>* variable_values) {
if (!root->variable()) {
return;
}
auto possible_value = variable_values->find(root->variable()->str());
if (possible_value == variable_values->end()) {
return;
}
std::vector<const DeviceDecisionTreeEdge*> edges =
Find(root, possible_value->second);
for (const DeviceDecisionTreeEdge* edge : edges) {
UpdateVariablesFromDeviceDecisionTreeEdges(variable_values, *edge);
if (edge->children()) {
for (const DeviceDecisionTreeNode* root : *(edge->children())) {
Follow(root, variable_values);
}
}
}
}
}
void UpdateVariablesFromDatabase(
std::map<std::string, std::string>* variable_values,
const DeviceDatabase& database) {
if (!database.root()) return;
for (const DeviceDecisionTreeNode* root : *(database.root())) {
Follow(root, variable_values);
}
}
}
} | #include "tensorflow/lite/experimental/acceleration/compatibility/devicedb.h"
#include <memory>
#include <string>
#include <gtest/gtest.h>
#include "flatbuffers/flatbuffers.h"
#include "tensorflow/lite/experimental/acceleration/compatibility/devicedb-sample.h"
#include "tensorflow/lite/experimental/acceleration/compatibility/variables.h"
#include "tensorflow/lite/testing/util.h"
namespace tflite {
namespace acceleration {
namespace {
class DeviceDbTest : public ::testing::Test {
protected:
void LoadSample() {
device_db_ = flatbuffers::GetRoot<DeviceDatabase>(
g_tflite_acceleration_devicedb_sample_binary);
}
const DeviceDatabase* device_db_ = nullptr;
};
TEST_F(DeviceDbTest, Load) {
LoadSample();
ASSERT_TRUE(device_db_);
ASSERT_TRUE(device_db_->root());
EXPECT_EQ(device_db_->root()->size(), 4);
}
TEST_F(DeviceDbTest, SocLookup) {
LoadSample();
ASSERT_TRUE(device_db_);
std::map<std::string, std::string> variables;
variables[kDeviceModel] = "m712c";
UpdateVariablesFromDatabase(&variables, *device_db_);
EXPECT_EQ(variables[kSoCModel], "exynos_7872");
variables.clear();
variables[kDeviceModel] = "sc_02l";
UpdateVariablesFromDatabase(&variables, *device_db_);
EXPECT_EQ(variables[kSoCModel], "exynos_7885");
variables.clear();
variables[kDeviceModel] = "nosuch";
UpdateVariablesFromDatabase(&variables, *device_db_);
EXPECT_EQ(variables.find(kSoCModel), variables.end());
}
TEST_F(DeviceDbTest, StatusLookupWithSoC) {
LoadSample();
ASSERT_TRUE(device_db_);
std::map<std::string, std::string> variables;
variables[kOpenGLESVersion] = "3.1";
variables[kSoCModel] = "exynos_7872";
variables[kAndroidSdkVersion] = "24";
UpdateVariablesFromDatabase(&variables, *device_db_);
EXPECT_EQ(variables[gpu::kStatus], gpu::kStatusSupported);
variables[kOpenGLESVersion] = "3.0";
variables.erase(variables.find(gpu::kStatus));
UpdateVariablesFromDatabase(&variables, *device_db_);
EXPECT_EQ(variables.find(gpu::kStatus), variables.end());
variables.clear();
variables[kOpenGLESVersion] = "3.1";
variables[kSoCModel] = "exynos_7883";
variables[kAndroidSdkVersion] = "24";
UpdateVariablesFromDatabase(&variables, *device_db_);
EXPECT_EQ(variables.find(gpu::kStatus), variables.end());
variables[kAndroidSdkVersion] = "29";
UpdateVariablesFromDatabase(&variables, *device_db_);
EXPECT_EQ(variables[gpu::kStatus], gpu::kStatusSupported);
}
TEST_F(DeviceDbTest, StatusLookupWithDevice) {
LoadSample();
ASSERT_TRUE(device_db_);
std::map<std::string, std::string> variables;
variables[kAndroidSdkVersion] = "24";
variables[kDeviceModel] = "sm_j810f";
variables[kDeviceName] = "j8y18lte";
UpdateVariablesFromDatabase(&variables, *device_db_);
EXPECT_EQ(variables[gpu::kStatus], gpu::kStatusUnsupported);
variables.clear();
variables[kAndroidSdkVersion] = "24";
variables[kDeviceModel] = "sm_j810m";
variables[kDeviceName] = "j8y18lte";
UpdateVariablesFromDatabase(&variables, *device_db_);
EXPECT_EQ(variables[gpu::kStatus], gpu::kStatusSupported);
}
TEST_F(DeviceDbTest, StatusLookupBasedOnDerivedProperties) {
LoadSample();
ASSERT_TRUE(device_db_);
std::map<std::string, std::string> variables;
variables[kOpenGLESVersion] = "3.1";
variables[kAndroidSdkVersion] = "24";
variables[kDeviceModel] = "m712c";
UpdateVariablesFromDatabase(&variables, *device_db_);
EXPECT_EQ(variables[gpu::kStatus], gpu::kStatusSupported);
}
TEST_F(DeviceDbTest, StatusLookupWithMaximumComparison) {
LoadSample();
ASSERT_TRUE(device_db_);
std::map<std::string, std::string> variables;
variables[kDeviceModel] = "shiraz_ag_2011";
variables[kAndroidSdkVersion] = "28";
UpdateVariablesFromDatabase(&variables, *device_db_);
EXPECT_EQ(variables[gpu::kStatus], gpu::kStatusUnsupported);
variables[kAndroidSdkVersion] = "27";
variables.erase(variables.find(gpu::kStatus));
UpdateVariablesFromDatabase(&variables, *device_db_);
EXPECT_EQ(variables[gpu::kStatus], gpu::kStatusUnsupported);
variables[kAndroidSdkVersion] = "29";
variables.erase(variables.find(gpu::kStatus));
UpdateVariablesFromDatabase(&variables, *device_db_);
EXPECT_EQ(variables.find(gpu::kStatus), variables.end());
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/acceleration/compatibility/devicedb.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/acceleration/compatibility/devicedb_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
86b85201-3336-4062-a4b9-5007b589da3e | cpp | tensorflow/tensorflow | index_domain | third_party/xla/xla/python/ifrt/index_domain.cc | third_party/xla/xla/python/ifrt/index_domain_test.cc | #include "xla/python/ifrt/index_domain.h"
#include <ostream>
#include <string>
#include "absl/strings/str_cat.h"
namespace xla {
namespace ifrt {
std::string IndexDomain::DebugString() const {
return absl::StrCat("IndexDomain(origin=", origin_.DebugString(),
",shape=", shape_.DebugString(), ")");
}
std::ostream& operator<<(std::ostream& os, const IndexDomain& index_domain) {
return os << index_domain.DebugString();
}
}
} | #include "xla/python/ifrt/index_domain.h"
#include <memory>
#include <utility>
#include <vector>
#include <gtest/gtest.h>
#include "absl/hash/hash_testing.h"
#include "xla/python/ifrt/index.h"
#include "xla/python/ifrt/shape.h"
namespace xla {
namespace ifrt {
namespace {
TEST(IndexDomainTest, Construction) {
IndexDomain a(Index({1, 2}), Shape({3, 4}));
EXPECT_EQ(a.origin(), Index({1, 2}));
EXPECT_EQ(a.shape(), Shape({3, 4}));
IndexDomain b(Shape({3, 4}));
EXPECT_EQ(b.origin(), Index({0, 0}));
EXPECT_EQ(b.shape(), Shape({3, 4}));
}
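// Adding or subtracting an Index shifts the origin element-wise; the shape is
// left unchanged.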
TEST(IndexDomainTest, Operations) {
IndexDomain a(Index({1, 2}), Shape({3, 4}));
Index b({1, 2});
EXPECT_EQ(a + b, IndexDomain(Index({2, 4}), Shape({3, 4})));
{
IndexDomain c = a;
EXPECT_EQ(c += b, IndexDomain(Index({2, 4}), Shape({3, 4})));
}
EXPECT_EQ(a - b, IndexDomain(Index({0, 0}), Shape({3, 4})));
{
IndexDomain c = a;
EXPECT_EQ(c -= b, IndexDomain(Index({0, 0}), Shape({3, 4})));
}
}
TEST(IndexDomainTest, Hash) {
EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly(
{IndexDomain(Index({1, 2}), Shape({3, 4})),
IndexDomain(Index({1, 2}), Shape({4, 3})),
IndexDomain(Index({2, 1}), Shape({3, 4})),
IndexDomain(Index({2, 1}), Shape({4, 3}))}));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/python/ifrt/index_domain.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/python/ifrt/index_domain_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
c26e3140-8b23-45b0-b07c-7702938d3de1 | cpp | tensorflow/tensorflow | rewrite_dataset_op | tensorflow/core/kernels/data/rewrite_dataset_op.cc | tensorflow/core/kernels/data/rewrite_dataset_op_test.cc | #include "tensorflow/core/kernels/data/rewrite_dataset_op.h"
#if !defined(IS_MOBILE_PLATFORM)
#include <map>
#include <string>
#include "tensorflow/core/data/rewrite_utils.h"
#include "tensorflow/core/protobuf/rewriter_config.pb.h"
namespace tensorflow {
namespace data {
constexpr const char* const RewriteDatasetOp::kDatasetType;
constexpr const char* const RewriteDatasetOp::kInputDataset;
constexpr const char* const RewriteDatasetOp::kRewriteName;
constexpr const char* const RewriteDatasetOp::kOutputTypes;
constexpr const char* const RewriteDatasetOp::kOutputShapes;
RewriteDatasetOp::RewriteDatasetOp(OpKernelConstruction* ctx)
: UnaryDatasetOpKernel(ctx) {}
void RewriteDatasetOp::MakeDataset(OpKernelContext* ctx, DatasetBase* input,
DatasetBase** output) {
tstring rewrite_name;
OP_REQUIRES_OK(ctx, ParseScalarArgument(ctx, kRewriteName, &rewrite_name));
auto config_factory = [rewrite_name]() {
RewriterConfig rewriter_config;
rewriter_config.add_optimizers(std::string(rewrite_name));
rewriter_config.set_meta_optimizer_iterations(RewriterConfig::ONE);
rewriter_config.set_fail_on_optimizer_errors(true);
return rewriter_config;
};
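  // The factory above configures Grappler to run only the requested rewrite,
  // with a single meta-optimizer iteration, and to fail hard instead of
  // silently skipping the rewrite on error.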
core::RefCountPtr<DatasetBase> rewritten;
OP_REQUIRES_OK(ctx, RewriteDataset(ctx, input, std::move(config_factory),
                                     /*record_fingerprint=*/false, &rewritten));
*output = rewritten.release();
}
namespace {
REGISTER_KERNEL_BUILDER(Name("RewriteDataset").Device(DEVICE_CPU),
RewriteDatasetOp);
}
}
}
#else
namespace tensorflow {
namespace data {
RewriteDatasetOp::RewriteDatasetOp(OpKernelConstruction* ctx)
: UnaryDatasetOpKernel(ctx) {}
void RewriteDatasetOp::MakeDataset(OpKernelContext* ctx, DatasetBase* input,
DatasetBase** output) {
input->Ref();
*output = input;
}
namespace {
REGISTER_KERNEL_BUILDER(Name("RewriteDataset").Device(DEVICE_CPU),
RewriteDatasetOp);
}
}
}
#endif | #include "tensorflow/core/kernels/data/rewrite_dataset_op.h"
#include <utility>
#include "tensorflow/core/data/dataset_test_base.h"
namespace tensorflow {
namespace data {
namespace {
constexpr char kNodeName[] = "rewrite_dataset";
constexpr char kReplicateOnSplit[] = "replicate_on_split";
class RewriteDatasetParams : public DatasetParams {
public:
template <typename T>
RewriteDatasetParams(T input_dataset_params, string rewrite_name,
DataTypeVector output_dtypes,
std::vector<PartialTensorShape> output_shapes,
string node_name)
: DatasetParams(std::move(output_dtypes), std::move(output_shapes),
std::move(node_name)),
rewrite_name_(rewrite_name) {
input_dataset_params_.push_back(std::make_unique<T>(input_dataset_params));
iterator_prefix_ =
name_utils::IteratorPrefix(input_dataset_params.dataset_type(),
input_dataset_params.iterator_prefix());
}
std::vector<Tensor> GetInputTensors() const override {
return {CreateTensor<tstring>(TensorShape({}), {rewrite_name_})};
}
Status GetInputNames(std::vector<string>* input_names) const override {
*input_names = {RewriteDatasetOp::kInputDataset,
RewriteDatasetOp::kRewriteName};
return absl::OkStatus();
}
Status GetAttributes(AttributeVector* attr_vector) const override {
attr_vector->emplace_back("output_types", output_dtypes_);
attr_vector->emplace_back("output_shapes", output_shapes_);
return absl::OkStatus();
}
string dataset_type() const override {
return RewriteDatasetOp::kDatasetType;
}
private:
string rewrite_name_;
};
class RewriteDatasetOpTest : public DatasetOpsTestBase {};
TEST_F(RewriteDatasetOpTest, ReplicateOnSplit) {
auto range_dataset_params = RangeDatasetParams(0, 5, 1);
auto rewrite_dataset_params =
RewriteDatasetParams(std::move(range_dataset_params),
                           /*rewrite_name=*/kReplicateOnSplit,
                           /*output_dtypes=*/{DT_INT64},
                           /*output_shapes=*/{PartialTensorShape({})},
                           /*node_name=*/kNodeName);
std::vector<Tensor> expected_outputs =
CreateTensors<int64_t>(TensorShape({}), {{0}, {1}, {2}, {3}, {4}});
TF_ASSERT_OK(Initialize(rewrite_dataset_params));
TF_EXPECT_OK(CheckIteratorGetNext(expected_outputs, true));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/data/rewrite_dataset_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/data/rewrite_dataset_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
f5e142f2-7a29-4a6e-950a-b8e0e50b01db | cpp | tensorflow/tensorflow | self_adjoint_eig | third_party/xla/xla/hlo/builder/lib/self_adjoint_eig.cc | third_party/xla/xla/hlo/builder/lib/self_adjoint_eig_test.cc | #include "xla/hlo/builder/lib/self_adjoint_eig.h"
#include <memory>
#include <string>
#include <vector>
#include "absl/status/statusor.h"
#include "absl/strings/str_format.h"
#include "xla/hlo/builder/lib/slicing.h"
#include "xla/hlo/builder/xla_builder.h"
#include "xla/primitive_util.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/statusor.h"
namespace xla {
SelfAdjointEigResult SelfAdjointEig(XlaOp a, bool lower, int64_t max_iter,
float tol, bool sort_eigenvalues) {
XlaBuilder* builder = a.builder();
XlaOp result = builder->ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(Shape a_shape, builder->GetShape(a));
const int64_t num_dims = a_shape.rank();
if (num_dims < 2) {
return InvalidArgument(
"Arguments to Eigen decomposition must have rank >= 2: got shape %s.",
a_shape.ToString());
}
PrimitiveType type = a_shape.element_type();
if (!primitive_util::IsFloatingPointType(type) &&
!primitive_util::IsComplexType(type)) {
return InvalidArgument(
"Type of the input matrix must be floating point "
"or complex: got %s.",
a_shape.ToString());
}
const int64_t m = ShapeUtil::GetDimension(a_shape, -2);
const int64_t n = ShapeUtil::GetDimension(a_shape, -1);
if (m != n) {
return InvalidArgument(
"Arguments to symmetric eigendecomposition must be square matrices: "
"got shape (%d, %d).",
m, n);
}
const int num_batch_dims = a_shape.dimensions().size() - 2;
const std::vector<int64_t> batch_dims(
a_shape.dimensions().begin(),
a_shape.dimensions().begin() + num_batch_dims);
PrimitiveType eigvals_type =
primitive_util::IsComplexType(type)
? primitive_util::ComplexComponentType(type)
: type;
std::vector<int64_t> eigvals_dims = batch_dims;
eigvals_dims.push_back(m);
Shape eigh_shape = ShapeUtil::MakeTupleShape(
{a_shape, ShapeUtil::MakeShape(eigvals_type, eigvals_dims)});
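    // The backend-specific "Eigh" custom call receives its options serialized
    // as a comma-separated string: lower, sort_eigenvalues, max_iter, tol.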
std::string opaque =
absl::StrFormat("%d,%d,%d,%f", lower, sort_eigenvalues, max_iter, tol);
return CustomCall(a.builder(), "Eigh", {a}, eigh_shape, opaque);
});
return SelfAdjointEigResult{GetTupleElement(result, 0),
GetTupleElement(result, 1)};
}
} | #include "xla/hlo/builder/lib/self_adjoint_eig.h"
#include <algorithm>
#include <numeric>
#include <vector>
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/types/span.h"
#include "xla/array.h"
#include "xla/array2d.h"
#include "xla/array3d.h"
#include "xla/error_spec.h"
#include "xla/hlo/builder/lib/arithmetic.h"
#include "xla/hlo/builder/lib/constants.h"
#include "xla/hlo/builder/lib/math.h"
#include "xla/hlo/builder/lib/matrix.h"
#include "xla/hlo/builder/xla_builder.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/test.h"
#include "xla/tests/client_library_test_base.h"
#include "xla/tests/test_macros.h"
#include "xla/types.h"
#include "xla/xla_data.pb.h"
namespace xla {
class SelfAdjointEigTest : public ClientLibraryTestBase {
protected:
void SetUp() override {
ClientLibraryTestBase::SetUp();
batch_3d_4x4_ = Array3D<float>{
{
{4, 6, 8, 10},
{6, 45, 54, 63},
{8, 54, 146, 166},
{10, 63, 166, 310},
},
{
{16, 24, 8, 12},
{24, 61, 82, 48},
{8, 82, 100, 6},
{12, 48, 6, 62},
},
};
matrix2d_8x8_ = Array2D<float>{
{14., 123., 49., 112., 115., 173., 182., 125.},
{123., 14., 60., 118., 150., 130., 91., 72.},
{49., 60., 138., 111., 106., 101., 115., 142.},
{112., 118., 111., 142., 91., 130., 25., 61.},
{115., 150., 106., 91., 116., 121., 128., 85.},
{173., 130., 101., 130., 121., 70., 151., 132.},
{182., 91., 115., 25., 128., 151., 66., 92.},
{125., 72., 142., 61., 85., 132., 92., 156.},
};
low_rank_4x4_ = Array2D<float>{
{2, 1, 4, 3},
{1, 5, 5, 9},
{4, 5, 10, 11},
{3, 9, 11, 17},
};
}
void TearDown() override { ClientLibraryTestBase::TearDown(); }
Array3D<float> GetUnitMatrix3D(const Array3D<float>& matrix) {
Array3D<float> result(matrix.n1(), matrix.n2(), matrix.n3(), 0.0);
for (int i = 0; i < matrix.n1(); ++i) {
for (int j = 0; j < matrix.n2(); ++j) {
result({i, j, j}) = 1.0;
}
}
return result;
}
Array3D<float> ExtractTriangularMatrix(const Array3D<float>& matrix,
bool lower) {
Array3D<float> result(matrix);
for (int i = 0; i < result.n1(); ++i) {
for (int j = 0; j < result.n2(); ++j) {
if (lower) {
for (int k = j + 1; k < result.n3(); ++k) {
result({i, j, k}) = 0.0;
}
} else {
for (int k = 0; k < j; ++k) {
result({i, j, k}) = 0.0;
}
}
}
}
return result;
}
Array3D<float> batch_3d_4x4_;
Array2D<float> matrix2d_8x8_;
Array2D<float> low_rank_4x4_;
Array2D<int> wrong_type_4x4_;
};
XlaOp GetAverageAbsoluteError(XlaOp m1, XlaOp m2, XlaBuilder* builder) {
Shape shape = builder->GetShape(m1).value();
int64_t size = ShapeUtil::ElementsIn(shape);
return ReduceAll(Abs(m1 - m2), ConstantR0WithType(builder, F32, 0),
CreateScalarAddComputation(F32, builder)) /
ConstantR0WithType(builder, F32, std::max<int64_t>(1, size));
}
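// Reconstructs V * diag(w) * V^H from an eigendecomposition result; for a
// correct decomposition of a self-adjoint matrix this reproduces the input.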
XlaOp ComputeMatmulVWVt(SelfAdjointEigResult result, XlaBuilder* builder) {
Shape shape = builder->GetShape(result.v).value();
absl::Span<const int64_t> out_dims = shape.dimensions();
std::vector<int64_t> broadcast_dims(shape.rank() - 1);
std::iota(broadcast_dims.begin(), broadcast_dims.end(), 0);
broadcast_dims[shape.rank() - 2] = shape.rank() - 1;
auto vw =
Mul(result.v,
BroadcastInDim(ConvertElementType(result.w, shape.element_type()),
out_dims, broadcast_dims));
return BatchDot(vw, MaybeConjugate(TransposeInMinorDims(result.v), true),
PrecisionConfig::HIGHEST);
}
XLA_TEST_F(SelfAdjointEigTest, Test_VWVt_EQ_A_2x4x4) {
for (bool sort_eigenvalues : {false, true}) {
XlaBuilder builder(TestName());
XlaOp a;
auto a_data = CreateR3Parameter<float>(batch_3d_4x4_, 0, "a", &builder, &a);
    auto result = SelfAdjointEig(a, /*lower=*/true, /*max_iter=*/15,
                                 /*tol=*/1e-5, sort_eigenvalues);
ComputeMatmulVWVt(result, &builder);
ComputeAndCompareR3<float>(&builder, batch_3d_4x4_, {a_data.get()},
ErrorSpec(1e-3, 1e-3));
}
}
XLA_TEST_F(SelfAdjointEigTest, Test_VWVt_EQ_A_3x3_Complex) {
XlaBuilder builder(TestName());
Array<complex64> input = {
{1, complex64{2, -7}, complex64{4, -8}},
{complex64{2, 7}, 3, complex64{5, -9}},
{complex64{4, 8}, complex64{5, 9}, 6},
};
XlaOp a;
auto a_data = CreateParameter<complex64>(input, 0, "a", &builder, &a);
auto result = SelfAdjointEig(a);
ComputeMatmulVWVt(result, &builder);
ComputeAndCompare<complex64>(&builder, input, {a_data.get()},
ErrorSpec(1e-3, 1e-3));
}
XLA_TEST_F(SelfAdjointEigTest, Test_VWVt_EQ_A_Lower_2x4x4) {
XlaBuilder builder(TestName());
XlaOp a;
auto a_data = CreateR3Parameter<float>(
      ExtractTriangularMatrix(batch_3d_4x4_, /*lower=*/true), 0, "a", &builder,
      &a);
auto result = SelfAdjointEig(a);
ComputeMatmulVWVt(result, &builder);
ComputeAndCompareR3<float>(&builder, batch_3d_4x4_, {a_data.get()},
ErrorSpec(1e-3, 1e-3));
}
XLA_TEST_F(SelfAdjointEigTest, Test_VWVt_EQ_A_Upper_2x4x4) {
XlaBuilder builder(TestName());
XlaOp a;
auto a_data = CreateR3Parameter<float>(
      ExtractTriangularMatrix(batch_3d_4x4_, /*lower=*/false), 0, "a", &builder,
      &a);
  auto result = SelfAdjointEig(a, /*lower=*/false);
ComputeMatmulVWVt(result, &builder);
ComputeAndCompareR3<float>(&builder, batch_3d_4x4_, {a_data.get()},
ErrorSpec(1e-3, 1e-3));
}
XLA_TEST_F(SelfAdjointEigTest, Test_Orthogonality_2x4x4) {
XlaBuilder builder(TestName());
XlaOp a;
auto a_data = CreateR3Parameter<float>(batch_3d_4x4_, 0, "a", &builder, &a);
auto result = SelfAdjointEig(a);
BatchDot(result.v, TransposeInMinorDims(result.v), PrecisionConfig::HIGHEST);
ComputeAndCompareR3<float>(&builder, GetUnitMatrix3D(batch_3d_4x4_),
{a_data.get()}, ErrorSpec(1e-3, 1e-3));
}
XLA_TEST_F(SelfAdjointEigTest, Test_VtWV_EQ_A_Rank_Deficient_4x4) {
XlaBuilder builder(TestName());
XlaOp a;
auto a_data = CreateR2Parameter<float>(low_rank_4x4_, 0, "a", &builder, &a);
auto result = SelfAdjointEig(a);
ComputeMatmulVWVt(result, &builder);
ComputeAndCompareR2<float>(&builder, low_rank_4x4_, {a_data.get()},
ErrorSpec(1e-3, 1e-3));
}
XLA_TEST_F(SelfAdjointEigTest, Test_Eigen_8x8) {
XlaBuilder builder(TestName());
std::vector<float> expected{-182.69205, -116.86245, -105.74489, -9.545369,
37.81711, 104.732285, 120.29153, 868.00385};
XlaOp a;
auto a_data = CreateR2Parameter<float>(matrix2d_8x8_, 0, "a", &builder, &a);
auto result = SelfAdjointEig(a);
Add(result.w, ZerosLike(result.w));
ComputeAndCompareR1<float>(&builder, expected, {a_data.get()},
ErrorSpec(1e-3, 1e-3));
}
XLA_TEST_F(SelfAdjointEigTest, Test_Orthogonality_8x8) {
XlaBuilder builder(TestName());
float expected_vals = 1e-3;
XlaOp a;
auto a_data = CreateR2Parameter<float>(matrix2d_8x8_, 0, "a", &builder, &a);
auto result = SelfAdjointEig(a);
GetAverageAbsoluteError(IdentityMatrix(&builder, F32, 8, 8),
BatchDot(TransposeInMinorDims(result.v), result.v),
&builder);
ComputeAndCompareR0<float>(&builder, expected_vals, {a_data.get()},
ErrorSpec(1e-3, 1e-3));
}
XLA_TEST_F(SelfAdjointEigTest, Wrong_Type_Int) {
XlaBuilder builder(TestName());
XlaOp a;
auto a_data = CreateR2Parameter<int>(wrong_type_4x4_, 0, "a", &builder, &a);
auto result = SelfAdjointEig(a);
EXPECT_FALSE(result.v.valid());
EXPECT_FALSE(result.w.valid());
}
Array2D<float> GenerateRandomSymmetricMatrix(int size) {
Array2D<float> result{size, size, 0.0};
  result.FillRandom(/*stddev=*/10, /*mean=*/2, /*seed=*/12346);
for (int i = 0; i < size; ++i) {
for (int j = 0; j < i; ++j) {
result({j, i}) = result({i, j});
}
}
return result;
}
using EighTestCase = int64_t;
class RandomEighTest : public ClientLibraryTestBase,
public ::testing::WithParamInterface<EighTestCase> {};
XLA_TEST_P(RandomEighTest, Random) {
XlaBuilder builder(TestName());
int64_t size = GetParam();
Array2D<float> a_val = GenerateRandomSymmetricMatrix(size);
XlaOp a;
auto a_data = CreateR2Parameter<float>(a_val, 0, "a", &builder, &a);
auto result = SelfAdjointEig(a);
GetAverageAbsoluteError(ComputeMatmulVWVt(result, &builder), a, &builder);
double kExpected = 0.00300000003;
ComputeAndCompareR0<float>(&builder, kExpected, {a_data.get()},
ErrorSpec(kExpected, 0));
}
#ifndef XLA_TEST_BACKEND_CPU
INSTANTIATE_TEST_SUITE_P(
RandomEighTestInstantiation, RandomEighTest,
::testing::Values(0, 1, 2, 3, 8, 16, 32, 77, 129, 203, 256, 257, 493, 511,
512,
513, 1000),
[](const ::testing::TestParamInfo<EighTestCase>& info) {
const int64_t size = info.param;
return absl::StrCat(size);
});
#else
INSTANTIATE_TEST_SUITE_P(
RandomEighTestInstantiation, RandomEighTest,
::testing::Values(0, 1, 2, 3, 8, 16, 32, 77, 129, 203, 256, 257, 493, 511,
512),
[](const ::testing::TestParamInfo<EighTestCase>& info) {
const int64_t size = info.param;
return absl::StrCat(size);
});
#endif
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/hlo/builder/lib/self_adjoint_eig.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/hlo/builder/lib/self_adjoint_eig_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
ac09648e-4986-4aff-bf5f-113cdfb15936 | cpp | google/tensorstore | int4 | tensorstore/util/int4.h | tensorstore/util/int4_test.cc | #ifndef TENSORSTORE_UTIL_INT4_H_
#define TENSORSTORE_UTIL_INT4_H_
#include <cmath>
#include <cstdint>
#include <limits>
#include <type_traits>
#include <nlohmann/json_fwd.hpp>
namespace tensorstore {
class Int4Padded;
}
namespace std {
template <>
struct numeric_limits<::tensorstore::Int4Padded>;
}
namespace tensorstore {
namespace internal {
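// Sign-extends the low nibble of `x`: shifting the nibble into the high bits
// and arithmetic-shifting back replicates bit 3 into bits 4..7.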
constexpr int8_t SignedTrunc4(int8_t x) {
return static_cast<int8_t>(static_cast<uint8_t>(x) << 4) >> 4;
}
}
class Int4Padded {
public:
constexpr Int4Padded() : rep_(0) {}
template <typename T,
typename = std::enable_if_t<std::is_convertible_v<T, int8_t>>>
constexpr explicit Int4Padded(T x)
: rep_(internal::SignedTrunc4(static_cast<int8_t>(x))) {}
constexpr operator int8_t() const {
return internal::SignedTrunc4(rep_);
}
Int4Padded& operator=(bool v) { return *this = static_cast<Int4Padded>(v); }
template <typename T>
std::enable_if_t<std::numeric_limits<T>::is_integer, Int4Padded&> operator=(
T v) {
return *this = static_cast<Int4Padded>(v);
}
#define TENSORSTORE_INTERNAL_INT4_PADDED_ARITHMETIC_OP(OP) \
friend Int4Padded operator OP(Int4Padded a, Int4Padded b) { \
return Int4Padded(a.rep_ OP b.rep_); \
} \
template <typename T> \
friend std::enable_if_t<std::numeric_limits<T>::is_integer, Int4Padded> \
operator OP(Int4Padded a, T b) { \
return Int4Padded(a.rep_ OP b); \
} \
template <typename T> \
friend std::enable_if_t<std::numeric_limits<T>::is_integer, Int4Padded> \
operator OP(T a, Int4Padded b) { \
return Int4Padded(a OP b.rep_); \
  }
#define TENSORSTORE_INTERNAL_INT4_PADDED_ARITHMETIC_ASSIGN_OP(OP) \
friend Int4Padded& operator OP##=(Int4Padded& a, Int4Padded b) { \
return a = Int4Padded(a.rep_ OP b.rep_); \
} \
template <typename T> \
friend std::enable_if_t<std::numeric_limits<T>::is_integer, Int4Padded&> \
operator OP##=(Int4Padded& a, T b) { \
return a = Int4Padded(a.rep_ OP b); \
  }
TENSORSTORE_INTERNAL_INT4_PADDED_ARITHMETIC_OP(+)
TENSORSTORE_INTERNAL_INT4_PADDED_ARITHMETIC_ASSIGN_OP(+)
TENSORSTORE_INTERNAL_INT4_PADDED_ARITHMETIC_OP(-)
TENSORSTORE_INTERNAL_INT4_PADDED_ARITHMETIC_ASSIGN_OP(-)
TENSORSTORE_INTERNAL_INT4_PADDED_ARITHMETIC_OP(*)
TENSORSTORE_INTERNAL_INT4_PADDED_ARITHMETIC_ASSIGN_OP(*)
TENSORSTORE_INTERNAL_INT4_PADDED_ARITHMETIC_OP(/)
TENSORSTORE_INTERNAL_INT4_PADDED_ARITHMETIC_ASSIGN_OP(/)
TENSORSTORE_INTERNAL_INT4_PADDED_ARITHMETIC_OP(%)
TENSORSTORE_INTERNAL_INT4_PADDED_ARITHMETIC_ASSIGN_OP(%)
TENSORSTORE_INTERNAL_INT4_PADDED_ARITHMETIC_OP(&)
TENSORSTORE_INTERNAL_INT4_PADDED_ARITHMETIC_ASSIGN_OP(&)
TENSORSTORE_INTERNAL_INT4_PADDED_ARITHMETIC_OP(|)
TENSORSTORE_INTERNAL_INT4_PADDED_ARITHMETIC_ASSIGN_OP(|)
TENSORSTORE_INTERNAL_INT4_PADDED_ARITHMETIC_OP(^)
TENSORSTORE_INTERNAL_INT4_PADDED_ARITHMETIC_ASSIGN_OP(^)
TENSORSTORE_INTERNAL_INT4_PADDED_ARITHMETIC_OP(<<)
TENSORSTORE_INTERNAL_INT4_PADDED_ARITHMETIC_ASSIGN_OP(<<)
TENSORSTORE_INTERNAL_INT4_PADDED_ARITHMETIC_OP(>>)
TENSORSTORE_INTERNAL_INT4_PADDED_ARITHMETIC_ASSIGN_OP(>>)
#undef TENSORSTORE_INTERNAL_INT4_PADDED_ARITHMETIC_OP
#undef TENSORSTORE_INTERNAL_INT4_PADDED_ARITHMETIC_ASSIGN_OP
friend Int4Padded operator~(Int4Padded a) {
Int4Padded result;
result.rep_ = internal::SignedTrunc4(~a.rep_);
return result;
}
friend Int4Padded operator-(Int4Padded a) {
Int4Padded result;
result.rep_ = internal::SignedTrunc4(-a.rep_);
return result;
}
friend Int4Padded operator+(Int4Padded a) { return a; }
friend Int4Padded operator++(Int4Padded& a) {
a += Int4Padded(1);
return a;
}
friend Int4Padded operator--(Int4Padded& a) {
a -= Int4Padded(1);
return a;
}
friend Int4Padded operator++(Int4Padded& a, int) {
Int4Padded original_value = a;
++a;
return original_value;
}
friend Int4Padded operator--(Int4Padded& a, int) {
Int4Padded original_value = a;
--a;
return original_value;
}
template <template <typename U, typename V, typename... Args>
            class ObjectType,
            template <typename U, typename... Args>
            class ArrayType,
            class StringType, class BooleanType,
            class NumberIntegerType,
            class NumberUnsignedType,
            class NumberFloatType,
            template <typename U> class AllocatorType,
            template <typename T, typename SFINAE = void>
            class JSONSerializer,
            class BinaryType>
friend void to_json(
::nlohmann::basic_json<ObjectType, ArrayType, StringType, BooleanType,
NumberIntegerType, NumberUnsignedType,
NumberFloatType, AllocatorType, JSONSerializer,
BinaryType>& j,
Int4Padded v) {
j = static_cast<NumberIntegerType>(v);
}
constexpr friend bool operator==(const Int4Padded& a, const Int4Padded& b) {
return internal::SignedTrunc4(a.rep_) == internal::SignedTrunc4(b.rep_);
}
constexpr friend bool operator!=(const Int4Padded& a, const Int4Padded& b) {
return !(a == b);
}
struct bitcast_construct_t {};
explicit constexpr Int4Padded(bitcast_construct_t, int8_t rep) : rep_(rep) {}
int8_t rep_;
};
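// Illustrative usage (not part of the original header): arithmetic wraps
// modulo 16 into the range [-8, 7], e.g.
//   Int4Padded x(9);                   // wraps to -7
//   Int4Padded y = x + Int4Padded(1);  // -6
//   int8_t z = y;                      // implicit conversion, canonicalized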
inline Int4Padded abs(Int4Padded x) {
x.rep_ = internal::SignedTrunc4(::std::abs(x.rep_));
return x;
}
inline Int4Padded pow(Int4Padded x, Int4Padded y) {
return Int4Padded(std::pow(static_cast<int8_t>(x), static_cast<int8_t>(y)));
}
}
namespace std {
template <>
struct numeric_limits<tensorstore::Int4Padded> {
static constexpr bool is_specialized = true;
static constexpr bool is_signed = true;
static constexpr bool is_integer = true;
static constexpr bool is_exact = true;
static constexpr bool has_infinity = false;
static constexpr bool has_quiet_NaN = false;
static constexpr bool has_signaling_NaN = false;
static constexpr bool is_bounded = true;
static constexpr bool is_modulo = true;
static constexpr int digits = 3;
static constexpr int digits10 = 0;
static constexpr int max_digits10 = 0;
static constexpr int radix = 2;
static constexpr tensorstore::Int4Padded min() {
return tensorstore::Int4Padded(
tensorstore::Int4Padded::bitcast_construct_t{}, int8_t{-8});
}
static constexpr tensorstore::Int4Padded lowest() {
return tensorstore::Int4Padded(
tensorstore::Int4Padded::bitcast_construct_t{}, int8_t{-8});
}
static constexpr tensorstore::Int4Padded max() {
return tensorstore::Int4Padded(
tensorstore::Int4Padded::bitcast_construct_t{}, int8_t{7});
}
};
}
#endif | #include "tensorstore/util/int4.h"
#include <cmath>
#include <cstdint>
#include <cstring>
#include <limits>
#include <utility>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/base/casts.h"
#include <nlohmann/json_fwd.hpp>
#include "tensorstore/internal/json_gtest.h"
namespace {
using Int4 = tensorstore::Int4Padded;
Int4 Bitcast(int8_t x) { return absl::bit_cast<Int4>(x); }
constexpr std::pair<int8_t, Int4> kInt8ToInt4[] = {
{-10, Int4(6)}, {-9, Int4(7)}, {-8, Int4(-8)}, {-7, Int4(-7)},
{-6, Int4(-6)}, {-5, Int4(-5)}, {-4, Int4(-4)}, {-3, Int4(-3)},
{-2, Int4(-2)}, {-1, Int4(-1)}, {0, Int4(0)}, {1, Int4(1)},
{2, Int4(2)}, {3, Int4(3)}, {4, Int4(4)}, {5, Int4(5)},
{6, Int4(6)}, {7, Int4(7)}, {8, Int4(-8)}, {9, Int4(-7)},
{10, Int4(-6)},
};
constexpr std::pair<Int4, int8_t> kInt4ToInt8[] = {
{Int4(-8), -8}, {Int4(-7), -7}, {Int4(-6), -6}, {Int4(-5), -5},
{Int4(-4), -4}, {Int4(-3), -3}, {Int4(-2), -2}, {Int4(-1), -1},
{Int4(0), 0}, {Int4(1), 1}, {Int4(2), 2}, {Int4(3), 3},
{Int4(4), 4}, {Int4(5), 5}, {Int4(6), 6}, {Int4(7), 7},
};
TEST(Int4Test, Int8ToInt4) {
for (const auto& [i8, i4] : kInt8ToInt4) {
EXPECT_EQ(static_cast<Int4>(i8), i4);
}
}
TEST(Int4Test, Int4ToInt8) {
for (const auto& [i4, i8] : kInt4ToInt8) {
EXPECT_EQ(static_cast<int8_t>(i4), i8);
}
}
template <typename X>
void TestInt4ToXToInt4() {
for (const auto& [i4, i8] : kInt4ToInt8) {
EXPECT_EQ(static_cast<Int4>(static_cast<X>(i4)), i4);
}
}
TEST(Int4Test, Int4ToInt32ToInt4) { TestInt4ToXToInt4<int32_t>(); }
TEST(Int4Test, Int4ToFloatToInt4) { TestInt4ToXToInt4<float>(); }
TEST(Int4Test, Int4ToDoubleToInt4) { TestInt4ToXToInt4<double>(); }
TEST(Int4Test, Arithmetic) {
EXPECT_EQ(Int4(1) + Int4(2), Int4(3));
EXPECT_EQ(Int4(7) + Int4(2), Int4(-7));
EXPECT_EQ(Int4(3) - Int4(5), Int4(-2));
EXPECT_EQ(Int4(5) * Int4(-7), Int4(-3));
EXPECT_EQ(Int4(-8) / Int4(3), Int4(-2));
EXPECT_EQ(Int4(-7) % Int4(3), Int4(-1));
}
TEST(Int4Test, BitwiseBinary) {
EXPECT_EQ(Int4(0b0110) & Int4(0b1011), Int4(0b0010));
EXPECT_EQ(Int4(0b0110) | Int4(0b1011), Int4(0b1111));
EXPECT_EQ(Int4(0b0110) ^ Int4(0b1011), Int4(0b1101));
}
TEST(Int4Test, BitwiseUnaryInverse) {
EXPECT_EQ(~Int4(0b1011), Int4(0b0100));
EXPECT_EQ(~Int4(0b0110), Int4(0b1001));
}
TEST(Int4Test, BitwiseShift) {
EXPECT_EQ(Int4(0b0011) << Int4(0), Int4(0b0011));
EXPECT_EQ(Int4(0b0011) << Int4(1), Int4(0b0110));
EXPECT_EQ(Int4(0b0011) << Int4(2), Int4(0b1100));
EXPECT_EQ(Int4(0b0011) << Int4(3), Int4(0b1000));
EXPECT_EQ(Int4(0b0011) << Int4(4), Int4(0b0000));
EXPECT_EQ(Int4(0b0011) << Int4(5), Int4(0b0000));
EXPECT_EQ(Int4(0b0011) << int8_t{0}, Int4(0b0011));
EXPECT_EQ(Int4(0b0011) << int8_t{1}, Int4(0b0110));
EXPECT_EQ(Int4(0b0011) << int8_t{2}, Int4(0b1100));
EXPECT_EQ(Int4(0b0011) << int8_t{3}, Int4(0b1000));
EXPECT_EQ(Int4(0b0011) << int8_t{4}, Int4(0b0000));
EXPECT_EQ(Int4(0b0011) << int8_t{5}, Int4(0b0000));
EXPECT_EQ(Int4(0b0100) >> Int4(0), Int4(0b0100));
EXPECT_EQ(Int4(0b0100) >> Int4(1), Int4(0b0010));
EXPECT_EQ(Int4(0b0100) >> Int4(2), Int4(0b0001));
EXPECT_EQ(Int4(0b0100) >> Int4(3), Int4(0b0000));
EXPECT_EQ(Int4(0b0100) >> Int4(4), Int4(0b0000));
EXPECT_EQ(Int4(0b0100) >> Int4(5), Int4(0b0000));
EXPECT_EQ(Int4(0b0100) >> int8_t{0}, Int4(0b0100));
EXPECT_EQ(Int4(0b0100) >> int8_t{1}, Int4(0b0010));
EXPECT_EQ(Int4(0b0100) >> int8_t{2}, Int4(0b0001));
EXPECT_EQ(Int4(0b0100) >> int8_t{3}, Int4(0b0000));
EXPECT_EQ(Int4(0b0100) >> int8_t{4}, Int4(0b0000));
EXPECT_EQ(Int4(0b0100) >> int8_t{5}, Int4(0b0000));
EXPECT_EQ(Int4(0b1010) >> Int4(0), Int4(0b1010));
EXPECT_EQ(Int4(0b1010) >> Int4(1), Int4(0b1101));
EXPECT_EQ(Int4(0b1010) >> Int4(2), Int4(0b1110));
EXPECT_EQ(Int4(0b1010) >> Int4(3), Int4(0b1111));
EXPECT_EQ(Int4(0b1010) >> Int4(4), Int4(0b1111));
EXPECT_EQ(Int4(0b1010) >> Int4(5), Int4(0b1111));
EXPECT_EQ(Int4(0b1010) >> int8_t{0}, Int4(0b1010));
EXPECT_EQ(Int4(0b1010) >> int8_t{1}, Int4(0b1101));
EXPECT_EQ(Int4(0b1010) >> int8_t{2}, Int4(0b1110));
EXPECT_EQ(Int4(0b1010) >> int8_t{3}, Int4(0b1111));
EXPECT_EQ(Int4(0b1010) >> int8_t{4}, Int4(0b1111));
EXPECT_EQ(Int4(0b1010) >> int8_t{5}, Int4(0b1111));
}
TEST(Int4Test, Abs) {
EXPECT_EQ(abs(Int4(7)), Int4(7));
EXPECT_EQ(abs(Int4(0)), Int4(0));
EXPECT_EQ(abs(Int4(-7)), Int4(7));
EXPECT_EQ(abs(Int4(-8)), Int4(-8));
}
TEST(Int4Test, Pow) {
EXPECT_EQ(pow(Int4(2), Int4(0)), Int4(1));
EXPECT_EQ(pow(Int4(2), Int4(1)), Int4(2));
EXPECT_EQ(pow(Int4(2), Int4(2)), Int4(4));
}
TEST(Int4Test, Comparison) {
for (int i = 0; i <= 15; i++) {
const Int4 a = kInt4ToInt8[i].first;
EXPECT_EQ(a, a);
EXPECT_LE(a, a);
EXPECT_GE(a, a);
for (int j = i + 1; j <= 15; j++) {
const Int4 b = kInt4ToInt8[j].first;
EXPECT_NE(a, b);
EXPECT_LT(a, b);
EXPECT_LE(a, b);
EXPECT_GT(b, a);
EXPECT_GE(b, a);
}
}
}
TEST(Int4Test, EquivalentRepresentationsCompareEqual) {
for (int low_nibble = 0; low_nibble <= 15; low_nibble++) {
const Int4 answer = Int4(low_nibble);
for (int high_nibble_a = 0; high_nibble_a <= 15; high_nibble_a++) {
for (int high_nibble_b = 0; high_nibble_b <= 15; high_nibble_b++) {
const int8_t a = low_nibble | (high_nibble_a << 4);
const int8_t b = low_nibble | (high_nibble_b << 4);
const Int4 a4 = Bitcast(a);
const Int4 b4 = Bitcast(b);
EXPECT_EQ(a4, answer);
EXPECT_EQ(b4, answer);
EXPECT_EQ(a4, b4);
}
}
}
}
TEST(Int4Test, NonCanonicalRepresentationsCompareCorrectly) {
EXPECT_LT(Bitcast(0xD3), Bitcast(0xE5));
EXPECT_LE(Bitcast(0xD3), Bitcast(0xE5));
EXPECT_GT(Bitcast(0x33), Bitcast(0x4A));
EXPECT_GE(Bitcast(0x33), Bitcast(0x4A));
}
TEST(Int4Test, JsonConversion) {
for (const auto& [i4, i8] : kInt4ToInt8) {
EXPECT_THAT(::nlohmann::json(i4), tensorstore::MatchesJson(i8));
}
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/util/int4.h | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/util/int4_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
b1b7d91e-5a2a-4393-9417-f758eb6e6396 | cpp | tensorflow/tensorflow | rematerializer | tensorflow/compiler/mlir/lite/experimental/remat/rematerializer.cc | tensorflow/compiler/mlir/lite/experimental/remat/rematerializer_test.cc | #include "tensorflow/compiler/mlir/lite/experimental/remat/rematerializer.h"
#include <algorithm>
#include <map>
#include <tuple>
#include <utility>
#include <vector>
namespace mlir {
namespace TFL {
namespace {
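// Helpers that treat a sorted std::vector<int> as an ordered set: Find
// returns the insertion position for `item` together with a flag indicating
// whether it is already present; Insert and Erase keep the vector sorted.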
std::tuple<std::vector<int>::iterator, bool> Find(const int item,
std::vector<int>& items) {
const auto iter = std::lower_bound(items.begin(), items.end(), item);
return std::make_tuple(iter, iter != items.end() && *iter == item);
}
void Insert(const int item, std::vector<int>& items) {
const auto [iter, found] = Find(item, items);
if (!found) items.insert(iter, item);
}
void Erase(const int item, std::vector<int>& items) {
const auto [iter, found] = Find(item, items);
if (found) items.erase(iter);
}
}
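// Appends a new operation to the end of the schedule and returns its index.
// Stateful operations are never considered for rematerialization.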
int Rematerializer::AddOperation(const bool is_stateful) {
operations_.emplace_back();
operations_.back().is_stateful = is_stateful;
return operations_.size() - 1;
}
int Rematerializer::AddTensor(const SizeT size) {
tensors_.emplace_back();
tensors_.back().size = size;
return tensors_.size() - 1;
}
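// Removes the use of tensor `itensor` by operation `ioperation`. If this was
// the tensor's first (resp. last) use, the allocation (resp. deallocation)
// bookkeeping is moved to the tensor's new first (resp. last) use.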
void Rematerializer::DelUse(const int ioperation, const int itensor) {
auto& tensor = tensors_[itensor];
auto& operation = operations_[ioperation];
const auto& size = tensor.size;
const bool was_first_use =
(!tensor.operations.empty() && ioperation == tensor.first_use());
const bool was_last_use =
(!tensor.operations.empty() && ioperation == tensor.last_use());
Erase(ioperation, tensor.operations);
Erase(itensor, operation.tensors);
if (was_first_use) {
operation.alloc -= size;
if (!was_last_use) {
operations_[tensor.first_use()].alloc += size;
}
}
if (was_last_use) {
operation.dealloc -= size;
if (!was_first_use) {
operations_[tensor.last_use()].dealloc += size;
}
}
}
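// Records that operation `ioperation` uses tensor `itensor`, attributing the
// tensor's size as an allocation at its first use and a deallocation after
// its last use.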
void Rematerializer::AddUse(const int ioperation, const int itensor) {
auto& tensor = tensors_[itensor];
auto& operation = operations_[ioperation];
const auto& size = tensor.size;
const bool will_be_first_use =
tensor.operations.empty() || ioperation < tensor.first_use();
const bool will_be_last_use =
tensor.operations.empty() || ioperation > tensor.last_use();
if (will_be_first_use) {
operation.alloc += size;
if (!will_be_last_use) {
operations_[tensor.first_use()].alloc -= size;
}
}
if (will_be_last_use) {
operation.dealloc += size;
if (!will_be_first_use) {
operations_[tensor.last_use()].dealloc -= size;
}
}
Insert(ioperation, tensor.operations);
Insert(itensor, operation.tensors);
}
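// Upper bound on the memory savings at `peak_loc` obtainable by
// rematerializing the operations in [begin, end): the total size of tensors
// first defined in that range that remain live past the peak.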
Rematerializer::SizeT Rematerializer::MaxSavings(const int begin, const int end,
const int peak_loc) const {
SizeT max_savings = 0;
for (int ioperation = begin; ioperation != end; ++ioperation) {
for (const int itensor : operations_[ioperation].tensors) {
if (const Tensor& tensor = tensors_[itensor];
tensor.first_use() == ioperation &&
          tensor.last_use() > peak_loc) {
max_savings += tensor.size;
}
}
}
return max_savings;
}
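// Searches block lengths in [begin_len, end_len) for the rematerialization
// that minimizes peak memory: candidate windows are pre-screened with
// MaxSavings (skipping stateful operations), sorted, and then scored exactly
// by simulating the new peak. Returns the best (peak size, remat spec) pair.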
std::tuple<Rematerializer::SizeT, Rematerializer::RematSpec>
Rematerializer::FindBestRemat(const SizeT min_savings, const int begin_len,
const int end_len) const {
const auto peak = GetPeakMemory();
SizeT best_peak_mem = peak.size;
RematSpec best_remat = {};
for (int len = begin_len; len < end_len; ++len) {
std::vector<std::tuple<SizeT, int, int>> pre_screen;
for (int begin = 0, end = begin + len; end <= peak.op_index;
++begin, ++end) {
if (!std::any_of(operations_.begin() + begin, operations_.begin() + end,
[](const Operation& s) { return s.is_stateful; })) {
if (const auto max_savings = MaxSavings(begin, end, peak.op_index);
max_savings >= min_savings) {
pre_screen.emplace_back(max_savings, begin, end);
}
}
}
std::sort(pre_screen.begin(), pre_screen.end());
for (; !pre_screen.empty(); pre_screen.pop_back()) {
const auto& [max_savings, begin, end] = pre_screen.back();
const auto insert_before = FindBestRematPoint(begin, end, peak.op_index);
if (insert_before == operations_.size()) {
continue;
}
const RematSpec this_remat = {begin, end, insert_before};
if (const auto new_peak = GetPeakMemory(this_remat);
new_peak.size < best_peak_mem &&
peak.size >= new_peak.size + min_savings) {
best_peak_mem = new_peak.size;
best_remat = this_remat;
}
if (peak.size >= max_savings + best_peak_mem) {
break;
}
}
if (peak.size >= min_savings + best_peak_mem) {
break;
}
}
return std::make_tuple(best_peak_mem, best_remat);
}
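// Computes the (operation index, size delta) events that applying `remat`
// would add to the memory profile, without mutating the schedule. This is
// what allows GetMemProfile/GetPeakMemory to evaluate a candidate cheaply.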
std::vector<Rematerializer::MemSpec> Rematerializer::GetDeltas(
const RematSpec& remat) const {
std::vector<MemSpec> deltas;
if (remat.begin == remat.end) {
return deltas;
}
const auto source_to_target = [&](int i) {
return i + (remat.insert - remat.begin);
};
struct TensorUse {
int first_use;
int last_use;
};
std::map<int, TensorUse> source_uses;
for (int ioperation = remat.begin; ioperation < remat.end; ++ioperation) {
const auto& operation = operations_[ioperation];
for (const int itensor : operation.tensors) {
const auto [iter, inserted] = source_uses.emplace(
itensor,
TensorUse{ioperation, ioperation});
if (!inserted) {
iter->second.last_use = ioperation;
}
}
}
deltas.reserve(2 * source_uses.size());
for (const auto& [itensor, source] : source_uses) {
auto& tensor = tensors_[itensor];
const TensorUse global = {tensor.first_use(), tensor.last_use()};
auto add_alloc = [&](int pos) { deltas.emplace_back(pos, tensor.size); };
auto add_dealloc = [&](int pos) {
deltas.emplace_back(pos + 1, -tensor.size);
};
auto del_dealloc = [&](int pos) {
deltas.emplace_back(pos + 1, tensor.size);
};
if (global.first_use < remat.begin) {
if (global.last_use < remat.insert) {
del_dealloc(global.last_use);
add_dealloc(source_to_target(source.last_use));
}
} else {
add_alloc(source_to_target(source.first_use));
if (global.last_use < remat.insert) {
add_dealloc(source_to_target(source.last_use));
} else {
add_dealloc(*std::partition_point(
tensor.operations.rbegin(), tensor.operations.rend(),
[&](int i) { return i >= remat.insert; }));
}
}
}
std::sort(deltas.begin(), deltas.end(), ByOpIndex);
return deltas;
}
Rematerializer::MemProfile Rematerializer::GetMemProfile(
const RematSpec& remat) const {
const auto num_inserted = remat.end - remat.begin;
std::vector<SizeT> profile(operations_.size() + num_inserted);
MapMem([&](const MemSpec& m) { profile[m.op_index] = m.size; }, remat);
return profile;
}
Rematerializer::MemSpec Rematerializer::GetPeakMemory(
const RematSpec& remat) const {
MemSpec peak;
MapMem([&](const MemSpec& m) { peak = std::max(m, peak, BySize); }, remat);
return peak;
}
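// Returns the earliest operation after `peak_loc` that consumes a tensor
// defined in [begin, end); inserting the rematerialized block just before it
// keeps the program correct. Returns operations_.size() if no such consumer
// exists.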
int Rematerializer::FindBestRematPoint(const int begin, const int end,
const int peak_loc) const {
int best = operations_.size();
for (int ioperation = begin; ioperation < end; ++ioperation) {
for (const int itensor : operations_[ioperation].tensors) {
if (const auto& tensor = tensors_[itensor];
tensor.first_use() >= begin && tensor.first_use() < end &&
tensor.last_use() > peak_loc) {
for (const int ioperation : tensor.operations) {
if (ioperation > peak_loc && ioperation < best) {
best = ioperation;
break;
}
}
}
}
}
return best;
}
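// Applies `remat`: shifts all uses at or after the insertion point, clones
// the operations of [begin, end) at `insert`, creates fresh tensors for the
// values defined inside the block, and redirects all downstream uses of the
// originals to the new copies.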
void Rematerializer::Remat(const RematSpec& remat) {
const int num_inserted = remat.end - remat.begin;
for (auto& tensor : tensors_) {
std::for_each(std::lower_bound(tensor.operations.begin(),
tensor.operations.end(), remat.insert),
tensor.operations.end(),
[&](int& iop) { iop += num_inserted; });
}
operations_.insert(operations_.begin() + remat.insert, num_inserted, {});
std::vector<std::pair<int, int>> new_tensors;
for (int iop_old = remat.begin, iop_new = remat.insert; iop_old < remat.end;
++iop_old, ++iop_new) {
for (const auto itensor : operations_[iop_old].tensors) {
if (tensors_[itensor].first_use() == iop_old) {
new_tensors.emplace_back(itensor, AddTensor(tensors_[itensor].size));
}
AddUse(iop_new, itensor);
}
}
std::sort(new_tensors.begin(), new_tensors.end());
for (int iop = remat.insert; iop < operations_.size(); ++iop) {
for (const int old_tensor : std::vector<int>(operations_[iop].tensors)) {
const auto new_tensor =
std::lower_bound(new_tensors.begin(), new_tensors.end(),
std::make_pair(old_tensor, 0));
if (new_tensor != new_tensors.end() && new_tensor->first == old_tensor) {
DelUse(iop, old_tensor);
AddUse(iop, new_tensor->second);
}
}
}
}
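// Greedy driver: repeatedly applies the best rematerialization that saves at
// least `min_savings` bytes, doubling the minimum block length between
// rounds, until the cost budget `max_cost` (counted in inserted operations)
// is exhausted; a negative `max_cost` means unlimited.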
void Rematerializer::RunGreedyAlgorithm(const int max_cost,
const int max_block_length,
const SizeT min_savings) {
const bool unlimited_cost = (max_cost < 0);
for (int min_block_length = 1, cost = 0;
min_block_length <= max_block_length &&
(unlimited_cost || cost <= max_cost);
min_block_length *= 2) {
while (unlimited_cost || cost <= max_cost) {
const auto [peak, remat] = FindBestRemat(
min_savings,
min_block_length,
std::min(1 + (unlimited_cost
? max_block_length
: std::min(max_block_length, max_cost - cost)),
2 * min_block_length));
if (remat.begin == remat.end) break;
Remat(remat);
ApplyRemat(remat);
cost += (remat.end - remat.begin);
}
}
}
}
} | #include "tensorflow/compiler/mlir/lite/experimental/remat/rematerializer.h"
#include <algorithm>
#include <array>
#include <cstdlib>
#include <initializer_list>
#include <random>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
namespace mlir {
namespace TFL {
namespace {
using ::testing::ElementsAre;
using ::testing::ElementsAreArray;
using ::testing::Eq;
using ::testing::FieldsAre;
using ::testing::StrictMock;
class RematTest : public ::testing::Test {
protected:
class TestableRematerializer : public Rematerializer {
public:
using Rematerializer::AddOperation;
using Rematerializer::AddTensor;
using Rematerializer::AddUse;
using Rematerializer::DelUse;
using Rematerializer::Remat;
};
TestableRematerializer r_;
};
TEST_F(RematTest, TensorUseSimple) {
for (int i = 0; i < 6; ++i) {
r_.AddOperation(false);
r_.AddTensor(1 << i);
}
r_.AddUse(2, 2);
EXPECT_THAT(r_.GetMemProfile(), ElementsAre(0, 0, 4, 0, 0, 0));
EXPECT_THAT(r_.GetPeakMemory(), FieldsAre(Eq(2), Eq(4)));
r_.AddUse(2, 2);
EXPECT_THAT(r_.GetMemProfile(), ElementsAre(0, 0, 4, 0, 0, 0));
EXPECT_THAT(r_.GetPeakMemory(), FieldsAre(Eq(2), Eq(4)));
r_.AddUse(4, 2);
EXPECT_THAT(r_.GetMemProfile(), ElementsAre(0, 0, 4, 4, 4, 0));
EXPECT_THAT(r_.GetPeakMemory(), FieldsAre(Eq(4), Eq(4)));
r_.DelUse(2, 2);
EXPECT_THAT(r_.GetMemProfile(), ElementsAre(0, 0, 0, 0, 4, 0));
EXPECT_THAT(r_.GetPeakMemory(), FieldsAre(Eq(4), Eq(4)));
r_.DelUse(2, 2);
EXPECT_THAT(r_.GetMemProfile(), ElementsAre(0, 0, 0, 0, 4, 0));
EXPECT_THAT(r_.GetPeakMemory(), FieldsAre(Eq(4), Eq(4)));
r_.DelUse(4, 2);
EXPECT_THAT(r_.GetMemProfile(), ElementsAre(0, 0, 0, 0, 0, 0));
EXPECT_THAT(r_.GetPeakMemory(), FieldsAre(Eq(5), Eq(0)));
}
TEST_F(RematTest, TensorUseMany) {
constexpr int n = 6;
for (int i = 0; i < n; ++i) {
r_.AddUse(r_.AddOperation(false),
r_.AddTensor(1 << (n - i - 1)));
}
for (int i = 0; i < n; ++i) {
r_.AddUse(r_.AddOperation(false),
n - 1 - i);
}
EXPECT_THAT(r_.GetMemProfile(), ElementsAreArray({32, 48, 56, 60, 62, 63, 63,
62, 60, 56, 48, 32}));
EXPECT_THAT(r_.GetPeakMemory(), FieldsAre(Eq(6), Eq(63)));
}
TEST_F(RematTest, PeakTiesAreBrokenInFavorOfLaterOperations) {
r_.AddUse(r_.AddOperation(false),
r_.AddTensor(100));
r_.AddUse(r_.AddOperation(false),
r_.AddTensor(1));
r_.AddUse(r_.AddOperation(false),
r_.AddTensor(100));
ASSERT_THAT(r_.GetMemProfile(), ElementsAreArray({100, 1, 100}));
EXPECT_THAT(r_.GetPeakMemory(), FieldsAre(Eq(2), Eq(100)));
}
TEST_F(RematTest, RematRecreatesOutput) {
r_.AddUse(r_.AddOperation(false), r_.AddTensor(100));
r_.AddOperation(false);
ASSERT_THAT(r_.GetMemProfile(), ElementsAre(100, 0));
EXPECT_THAT(r_.GetMemProfile({0, 1, 2}),
ElementsAre(100, 0, 100));
r_.Remat({0, 1, 2});
EXPECT_THAT(r_.GetMemProfile(), ElementsAre(100, 0, 100));
EXPECT_THAT(r_.AddTensor(0), 2);
}
TEST_F(RematTest, RematExtendsInputAndRecreatesOutput) {
r_.AddUse(r_.AddOperation(false),
r_.AddTensor(1));
r_.AddUse(r_.AddOperation(false),
r_.AddTensor(100));
r_.AddUse(1, 0);
r_.AddOperation(false);
r_.AddOperation(false);
ASSERT_THAT(r_.GetMemProfile(), ElementsAre(1, 101, 0, 0));
EXPECT_THAT(r_.GetMemProfile({1, 2, 3}),
ElementsAre(1, 101, 1, 101, 0));
r_.Remat({1, 2, 3});
EXPECT_THAT(r_.GetMemProfile(), ElementsAre(1, 101, 1, 101, 0));
EXPECT_THAT(r_.AddTensor(0), 3);
}
TEST_F(RematTest, BlockRematDuplicatesIntraBlockValues) {
r_.AddUse(r_.AddOperation(false),
r_.AddTensor(1));
r_.AddUse(r_.AddOperation(false),
r_.AddTensor(10));
r_.AddUse(r_.AddOperation(false),
r_.AddTensor(100));
r_.AddUse(r_.AddOperation(false),
r_.AddTensor(1000));
r_.AddOperation(false);
r_.AddUse(1, 0);
r_.AddUse(2, 0);
r_.AddUse(2, 1);
r_.AddUse(3, 0);
r_.AddUse(3, 1);
r_.AddUse(3, 2);
ASSERT_THAT(r_.GetMemProfile(), ElementsAre(1, 11, 111, 1111, 0));
EXPECT_THAT(r_.GetMemProfile({1, 4, 5}),
ElementsAre(1, 11, 111, 1111, 1, 11, 111, 1111));
r_.Remat({1, 4, 5});
EXPECT_THAT(r_.GetMemProfile(),
ElementsAre(1, 11, 111, 1111, 1, 11, 111, 1111));
EXPECT_THAT(r_.AddTensor(0), 7);
}
class RematSimulationTest : public testing::Test {
protected:
class RandomRemat : public Rematerializer {
public:
using Rematerializer::Remat;
RandomRemat(const int num_operations, const int num_tensors,
const int num_uses, std::mt19937& rng) {
std::uniform_int_distribution<int> some_size_log(0, 16);
std::uniform_int_distribution<int> some_tensor(0, num_tensors - 1);
std::uniform_int_distribution<int> some_operation(0, num_operations - 1);
for (int i = 0; i < num_tensors; ++i) {
AddTensor(SizeT{1} << some_size_log(rng));
}
for (int i = 0; i < num_operations; ++i) {
AddOperation(false);
}
for (int i = 0; i < num_uses; ++i) {
AddUse(some_operation(rng), some_tensor(rng));
}
}
};
};
TEST_F(RematSimulationTest, SimulationAgreesWithReality) {
constexpr int kNumOperations = 128;
constexpr int kNumTensors = 32;
constexpr int kNumUses = kNumOperations * kNumTensors / 4;
std::mt19937 rng;
for (int i = 0; i < 1024; ++i) {
RandomRemat remat(kNumOperations, kNumTensors, kNumUses, rng);
std::array<int, 3> randos;
const auto& [begin, end, insert] = randos;
for (int i = 0, num_operations = kNumOperations; i < 4;
++i, num_operations += end - begin) {
std::uniform_int_distribution<int> some_op(0, num_operations - 1);
for (auto& rando : randos) {
rando = some_op(rng);
}
std::sort(randos.begin(), randos.end());
const Rematerializer::RematSpec spec{begin, end, insert};
const auto simulated_profile = remat.GetMemProfile(spec);
remat.Remat(spec);
const auto actual_profile = remat.GetMemProfile();
EXPECT_THAT(simulated_profile, ElementsAreArray(actual_profile));
}
}
}
class GreedyRematTest : public testing::Test {
protected:
class RainbowRemat : public Rematerializer {
public:
explicit RainbowRemat(const std::vector<std::vector<int>>& sizes,
int extra_ops = 0, SizeT extra_size = 0) {
for (const auto& rainbow : sizes) {
int tensor = 0;
int op = 0;
for (const auto& size : rainbow) {
for (int i = 0; i < extra_ops; ++i) {
op = AddOperation(false);
if (i != 0) {
AddUse(op, tensor);
}
tensor = AddTensor(extra_size);
AddUse(op, tensor);
}
op = AddOperation(size < 0);
if (extra_ops > 0) {
AddUse(op, tensor);
}
tensor = AddTensor(std::abs(size));
AddUse(op, tensor);
}
for (int i = 0; i < rainbow.size(); ++i) {
op = AddOperation(false);
AddUse(op, tensor - i);
}
}
}
};
class MlpRemat : public Rematerializer {
public:
explicit MlpRemat(const std::vector<int>& sizes) {
int forward_tensor = -1;
int backward_tensor = -1;
int op = -1;
for (const int size : sizes) {
op = AddOperation(false);
if (forward_tensor >= 0) AddUse(op, forward_tensor);
forward_tensor = AddTensor(size);
AddUse(op, forward_tensor);
}
for (; forward_tensor >= 0; --forward_tensor) {
op = AddOperation(false);
AddUse(op, forward_tensor);
if (backward_tensor >= 0) AddUse(op, backward_tensor);
backward_tensor = AddTensor(sizes[forward_tensor]);
AddUse(op, backward_tensor);
}
}
MOCK_METHOD(void, ApplyRemat, (const RematSpec&));
};
};
TEST_F(GreedyRematTest, MlpBasic) {
StrictMock<MlpRemat> remat(std::vector<int>({1, 1, 1}));
ASSERT_THAT(remat.GetMemProfile(), ElementsAreArray({1, 2, 3, 4, 4, 3}));
  EXPECT_CALL(remat, ApplyRemat(FieldsAre(/*begin=*/0,
                                          /*end=*/1,
                                          /*insert=*/5)));
  remat.RunGreedyAlgorithm(/*max_cost=*/-1, /*max_block_length=*/1,
                           /*min_savings=*/1);
EXPECT_THAT(remat.GetMemProfile(), ElementsAreArray({1, 2, 2, 3, 3, 2, 3}));
}
TEST_F(GreedyRematTest, MlpBinary) {
StrictMock<MlpRemat> remat(std::vector<int>({1, 2, 4, 8}));
ASSERT_THAT(remat.GetMemProfile(),
ElementsAreArray({1, 3, 7, 15, 23, 19, 9, 4}));
  EXPECT_CALL(remat, ApplyRemat(FieldsAre(/*begin=*/2,
                                          /*end=*/3,
                                          /*insert=*/5)));
  EXPECT_CALL(remat, ApplyRemat(FieldsAre(/*begin=*/0,
                                          /*end=*/1,
                                          /*insert=*/8)));
  remat.RunGreedyAlgorithm(/*max_cost=*/-1, /*max_block_length=*/4,
                           /*min_savings=*/1);
EXPECT_THAT(remat.GetMemProfile(),
ElementsAreArray({1, 3, 6, 14, 18, 14, 18, 8, 3, 4}));
}
TEST_F(GreedyRematTest, SimpleMax) {
RainbowRemat remat({{1, 2, 4, 8, 16}});
ASSERT_THAT(remat.GetMemProfile(),
ElementsAreArray({1, 3, 7, 15, 31, 31, 15, 7, 3, 1}));
  remat.RunGreedyAlgorithm(/*max_cost=*/-1, /*max_block_length=*/1,
                           /*min_savings=*/1);
EXPECT_THAT(remat.GetMemProfile(),
ElementsAreArray({1, 2, 4, 8, 16, 16, 8, 8, 4, 4, 2, 2, 1, 1}));
}
TEST_F(GreedyRematTest, SimpleMaxLongWindow) {
RainbowRemat remat({{1, 2, 4, 8, 16}});
ASSERT_THAT(remat.GetMemProfile(),
ElementsAreArray({1, 3, 7, 15, 31, 31, 15, 7, 3, 1}));
  remat.RunGreedyAlgorithm(/*max_cost=*/-1, /*max_block_length=*/4,
                           /*min_savings=*/1);
EXPECT_THAT(remat.GetMemProfile(),
ElementsAreArray({1, 2, 4, 8, 16, 16, 8, 8, 4, 4, 2, 2, 1, 1}));
}
TEST_F(GreedyRematTest, SimpleSizeThreshold) {
RainbowRemat remat({{1, 2, 4, 8, 16}});
ASSERT_THAT(remat.GetMemProfile(),
ElementsAreArray({1, 3, 7, 15, 31, 31, 15, 7, 3, 1}));
  remat.RunGreedyAlgorithm(/*max_cost=*/-1, /*max_block_length=*/1,
                           /*min_savings=*/4);
EXPECT_THAT(remat.GetMemProfile(),
ElementsAreArray({1, 3, 7, 11, 19, 19, 11, 11, 7, 7, 3, 1}));
}
TEST_F(GreedyRematTest, SimpleCostThreshold) {
RainbowRemat remat({{1, 2, 4, 8, 16}});
ASSERT_THAT(remat.GetMemProfile(),
ElementsAreArray({1, 3, 7, 15, 31, 31, 15, 7, 3, 1}));
  remat.RunGreedyAlgorithm(/*max_cost=*/1, /*max_block_length=*/1,
                           /*min_savings=*/1);
EXPECT_THAT(remat.GetMemProfile(),
ElementsAreArray({1, 3, 7, 15, 23, 23, 15, 15, 7, 3, 1}));
}
TEST_F(GreedyRematTest, SimpleForbiddenOps) {
RainbowRemat remat({{1, 2, -4, 8, 16}});
ASSERT_THAT(remat.GetMemProfile(),
ElementsAreArray({1, 3, 7, 15, 31, 31, 15, 7, 3, 1}));
  remat.RunGreedyAlgorithm(/*max_cost=*/-1, /*max_block_length=*/1,
                           /*min_savings=*/1);
EXPECT_THAT(remat.GetMemProfile(),
ElementsAreArray({1, 2, 4, 12, 20, 20, 12, 12, 4, 2, 2, 1, 1}));
}
TEST_F(GreedyRematTest, DoubleMax) {
RainbowRemat remat({{1, 2, 4, 8, 16}, {4, 8, 16}});
ASSERT_THAT(remat.GetMemProfile(),
ElementsAreArray(
{1, 3, 7, 15, 31, 31, 15, 7, 3, 1, 4, 12, 28, 28, 12, 4}));
  remat.RunGreedyAlgorithm(/*max_cost=*/-1, /*max_block_length=*/1,
                           /*min_savings=*/1);
EXPECT_THAT(remat.GetMemProfile(),
ElementsAreArray({1, 2, 4, 8, 16, 16, 8, 8, 4, 4, 2,
2, 1, 1, 4, 8, 16, 16, 8, 8, 4, 4}));
}
TEST_F(GreedyRematTest, DoubleCostThreshold) {
RainbowRemat remat({{1, 2, 4, 8, 16}, {4, 8, 16}});
ASSERT_THAT(remat.GetMemProfile(),
ElementsAreArray(
{1, 3, 7, 15, 31, 31, 15, 7, 3, 1, 4, 12, 28, 28, 12, 4}));
  remat.RunGreedyAlgorithm(/*max_cost=*/2, /*max_block_length=*/1,
                           /*min_savings=*/1);
EXPECT_THAT(remat.GetMemProfile(),
ElementsAreArray({1, 3, 7, 15, 23, 23, 15, 15, 7, 3, 1, 4, 12, 20,
20, 12, 12, 4}));
}
TEST_F(GreedyRematTest, SingleLongerBlocksByWindowSize) {
std::vector<Rematerializer::SizeT> best_for_window_size;
for (int window_size : {0, 1, 2, 3, 4, 5}) {
RainbowRemat remat({{1, 2, 4, 8}}, 2, 16);
    remat.RunGreedyAlgorithm(/*max_cost=*/-1, window_size,
                             /*min_savings=*/1);
best_for_window_size.push_back(remat.GetPeakMemory().size);
}
EXPECT_THAT(best_for_window_size, ElementsAreArray({44, 36, 36, 32, 32, 32}));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/lite/experimental/remat/rematerializer.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/lite/experimental/remat/rematerializer_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
715a0fe5-3ad4-439c-9e62-b63e79c3c1be | cpp | tensorflow/tensorflow | logical_id_thunk | third_party/xla/xla/backends/cpu/runtime/logical_id_thunk.cc | third_party/xla/xla/backends/cpu/runtime/logical_id_thunk_test.cc | #include "xla/backends/cpu/runtime/logical_id_thunk.h"
#include <cstdint>
#include <cstring>
#include <memory>
#include <utility>
#include "absl/memory/memory.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_format.h"
#include "xla/backends/cpu/runtime/thunk.h"
#include "xla/runtime/buffer_use.h"
#include "xla/service/buffer_assignment.h"
#include "xla/service/computation_placer.h"
#include "xla/service/global_device_id.h"
#include "xla/status_macros.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/tsl/concurrency/async_value_ref.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/statusor.h"
#include "tsl/profiler/lib/traceme.h"
namespace xla::cpu::internal {
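// Maps the compile-time LogicalIdKind template parameter to the matching
// runtime Thunk::Kind tag.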
static Thunk::Kind ToThunkKind(LogicalIdKind logical_id_kind) {
switch (logical_id_kind) {
case LogicalIdKind::kPartitionId:
return Thunk::Kind::kPartitionId;
case LogicalIdKind::kReplicaId:
return Thunk::Kind::kReplicaId;
}
}
template <LogicalIdKind logical_id_kind>
absl::StatusOr<std::unique_ptr<LogicalIdThunk<logical_id_kind>>>
LogicalIdThunk<logical_id_kind>::Create(
Info info, BufferAllocation::Slice logical_id_buffer) {
return absl::WrapUnique(
new LogicalIdThunk(std::move(info), logical_id_buffer));
}
template <LogicalIdKind logical_id_kind>
LogicalIdThunk<logical_id_kind>::LogicalIdThunk(
Info info, BufferAllocation::Slice logical_id_buffer)
: Thunk(ToThunkKind(logical_id_kind), info),
logical_id_buffer_(logical_id_buffer) {}
template <LogicalIdKind logical_id_kind>
static constexpr auto ToString() {
if constexpr (logical_id_kind == LogicalIdKind::kPartitionId) {
return "Partition";
} else if constexpr (logical_id_kind == LogicalIdKind::kReplicaId) {
return "Replica";
}
}
template <LogicalIdKind logical_id_kind>
absl::StatusOr<int32_t> LogicalIdThunk<logical_id_kind>::GetIdForDevice(
const DeviceAssignment* device_assignment, GlobalDeviceId device_id) const {
if constexpr (logical_id_kind == LogicalIdKind::kPartitionId) {
return device_assignment->PartitionIdForDevice(device_id);
} else if constexpr (logical_id_kind == LogicalIdKind::kReplicaId) {
return device_assignment->ReplicaIdForDevice(device_id);
}
}
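// Resolves this device's replica or partition id from the device assignment
// in the collective execute params and copies it into the thunk's 4-byte
// output buffer.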
template <LogicalIdKind logical_id_kind>
tsl::AsyncValueRef<typename LogicalIdThunk<logical_id_kind>::ExecuteEvent>
LogicalIdThunk<logical_id_kind>::Execute(const ExecuteParams& params) {
tsl::profiler::TraceMe trace([&] { return TraceMeEncode(); });
TF_ASSIGN_OR_RETURN(
se::DeviceMemoryBase logical_id_data,
params.buffer_allocations->GetDeviceAddress(logical_id_buffer_));
TF_RET_CHECK(logical_id_data.size() == sizeof(int32_t))
<< "Logical id buffer must be able to fit logical id value";
TF_RET_CHECK(params.collective_params)
<< ToString<logical_id_kind>() << " id requires collective params";
TF_ASSIGN_OR_RETURN(
int32_t logical_id,
GetIdForDevice(params.collective_params->device_assignment,
params.collective_params->global_device_id));
VLOG(3) << absl::StreamFormat("%s id: %d", ToString<logical_id_kind>(),
logical_id);
VLOG(3) << absl::StreamFormat(" logical_id: slice %s (%p)",
logical_id_buffer_.ToString(),
logical_id_data.opaque());
std::memcpy(logical_id_data.opaque(), &logical_id, sizeof(int32_t));
return OkExecuteEvent();
}
template <LogicalIdKind logical_id_kind>
Thunk::BufferUses LogicalIdThunk<logical_id_kind>::buffer_uses() const {
return {BufferUse::Write(logical_id_buffer_)};
}
template class LogicalIdThunk<LogicalIdKind::kReplicaId>;
template class LogicalIdThunk<LogicalIdKind::kPartitionId>;
} | #include "xla/backends/cpu/runtime/logical_id_thunk.h"
#include <cstdint>
#include <limits>
#include <string>
#include <vector>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "xla/backends/cpu/runtime/buffer_allocations.h"
#include "xla/backends/cpu/runtime/thunk.h"
#include "xla/executable_run_options.h"
#include "xla/service/buffer_assignment.h"
#include "xla/service/maybe_owning_device_memory.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/tsl/concurrency/async_value_ref.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace xla::cpu {
namespace {
absl::StatusOr<DeviceAssignment> CreateDeviceAssignment(
std::vector<std::vector<int64_t>> devices) {
const auto computation_count = devices.size();
if (devices.empty()) {
return absl::InternalError("Devices must not be empty.");
}
const auto replica_count = devices[0].size();
DeviceAssignment device_assignment(replica_count, computation_count);
for (int64_t partition = 0; partition < computation_count; ++partition) {
for (int64_t replica = 0; replica < replica_count; ++replica) {
device_assignment(replica, partition) = devices[partition][replica];
}
}
return device_assignment;
}
TEST(LogicalIdThunkTest, GetReplicaId) {
std::vector<int32_t> dst(1, std::numeric_limits<int32_t>::min());
std::vector<MaybeOwningDeviceMemory> buffers;
buffers.emplace_back(se::DeviceMemoryBase(dst.data(), sizeof(int32_t)));
BufferAllocation alloc(0, sizeof(int32_t), 0);
  BufferAllocation::Slice id_slice(&alloc, /*offset=*/0,
                                   /*size=*/sizeof(int32_t));
std::string name(Thunk::KindToString(Thunk::Kind::kReplicaId));
TF_ASSERT_OK_AND_ASSIGN(auto thunk, ReplicaIdThunk::Create({name}, id_slice));
BufferAllocations allocations(buffers);
TF_ASSERT_OK_AND_ASSIGN(DeviceAssignment device_assn,
CreateDeviceAssignment({{0, 1}}));
ExecutableRunOptions run_options;
run_options.set_device_ordinal(0);
run_options.set_device_assignment(&device_assn);
TF_ASSERT_OK_AND_ASSIGN(Thunk::CollectiveExecuteParams collective_params,
Thunk::CollectiveExecuteParams::Create(&run_options));
Thunk::ExecuteParams params;
params.buffer_allocations = &allocations;
params.collective_params = &collective_params;
auto execute_event = thunk->Execute(params);
tsl::BlockUntilReady(execute_event);
ASSERT_FALSE(execute_event.IsError());
EXPECT_EQ(dst[0], 0);
}
TEST(LogicalIdThunkTest, GetPartitionId) {
std::vector<int32_t> dst(2, std::numeric_limits<int32_t>::min());
std::vector<MaybeOwningDeviceMemory> buffers;
static constexpr auto kDataSize = 2 * sizeof(int32_t);
buffers.emplace_back(se::DeviceMemoryBase(dst.data(), kDataSize));
BufferAllocation alloc(0, kDataSize, 0);
  BufferAllocation::Slice id_slice(&alloc, /*offset=*/sizeof(int32_t),
                                   /*size=*/sizeof(int32_t));
std::string name(Thunk::KindToString(Thunk::Kind::kPartitionId));
TF_ASSERT_OK_AND_ASSIGN(auto thunk,
PartitionIdThunk::Create({name}, id_slice));
BufferAllocations allocations(buffers);
TF_ASSERT_OK_AND_ASSIGN(DeviceAssignment device_assn,
CreateDeviceAssignment({{0}, {1}}));
ExecutableRunOptions run_options;
run_options.set_device_ordinal(0);
run_options.set_device_assignment(&device_assn);
TF_ASSERT_OK_AND_ASSIGN(Thunk::CollectiveExecuteParams collective_params,
Thunk::CollectiveExecuteParams::Create(&run_options));
Thunk::ExecuteParams params;
params.buffer_allocations = &allocations;
params.collective_params = &collective_params;
auto execute_event = thunk->Execute(params);
tsl::BlockUntilReady(execute_event);
ASSERT_FALSE(execute_event.IsError());
EXPECT_EQ(dst[0], std::numeric_limits<int32_t>::min());
EXPECT_EQ(dst[1], 0);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/backends/cpu/runtime/logical_id_thunk.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/backends/cpu/runtime/logical_id_thunk_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
4838c2d9-38ad-4e47-8b5e-fc837fc96c8d | cpp | tensorflow/tensorflow | op_requires | tensorflow/core/framework/op_requires.h | tensorflow/core/framework/op_requires_test.cc | #ifndef TENSORFLOW_CORE_FRAMEWORK_OP_REQUIRES_H_
#define TENSORFLOW_CORE_FRAMEWORK_OP_REQUIRES_H_
#include <utility>
#include "tensorflow/core/platform/macros.h"
namespace tensorflow {
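// Convenience macros for failing out of an OpKernel's Compute() method, e.g.
//
//   OP_REQUIRES(ctx, input.dims() == 2,
//               errors::InvalidArgument("input must be a matrix"));
//   OP_REQUIRES_OK(ctx, SomeStatusReturningCall());
//
// On failure they record the error on the context and `return`, so they must
// be invoked directly from the body of a function returning void.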
#define OP_REQUIRES(CTX, EXP, STATUS) \
do { \
if (!TF_PREDICT_TRUE(EXP)) { \
CheckNotInComputeAsync((CTX), "OP_REQUIRES_ASYNC"); \
(CTX)->CtxFailure(__FILE__, __LINE__, (STATUS)); \
return; \
} \
} while (0)
#define OP_REQUIRES_OK(CTX, ...) \
do { \
if (!TF_PREDICT_TRUE( \
::tensorflow::op_requires_internal::OkImpl<::absl::Status>( \
(CTX), __FILE__, __LINE__, \
static_cast<const ::absl::Status&>(__VA_ARGS__)))) { \
return; \
} \
} while (0)
#define OP_REQUIRES_OK_OR_SET_PAYLOAD(CTX, PAYLOAD_KEY, PAYLOAD_VALUE, STATUS) \
do { \
if (!TF_PREDICT_TRUE(STATUS.ok())) { \
CheckNotInComputeAsync((CTX), "OP_REQUIRES_OK_ASYNC"); \
if (!PAYLOAD_VALUE.empty()) { \
STATUS.SetPayload(PAYLOAD_KEY, absl::Cord(PAYLOAD_VALUE)); \
} \
(CTX)->CtxFailureWithWarning(__FILE__, __LINE__, STATUS); \
return; \
} \
} while (0)
#define OP_REQUIRES_ASYNC(CTX, EXP, STATUS, CALLBACK) \
do { \
if (!TF_PREDICT_TRUE(EXP)) { \
(CTX)->CtxFailure(__FILE__, __LINE__, (STATUS)); \
(CALLBACK)(); \
return; \
} \
} while (0)
#define OP_REQUIRES_OK_ASYNC(CTX, STATUS, CALLBACK) \
do { \
if (!TF_PREDICT_TRUE( \
::tensorflow::op_requires_internal::OkAsyncImpl<::absl::Status>( \
(CTX), __FILE__, __LINE__, (STATUS)))) { \
(CALLBACK)(); \
return; \
} \
} while (0)
#define OP_REQUIRES_VALUE(lhs, ctx, rexpr) \
OP_REQUIRES_VALUE_IMPL( \
TF_STATUS_MACROS_CONCAT_NAME(_status_or_value, __COUNTER__), lhs, ctx, \
rexpr)
#define OP_REQUIRES_VALUE_IMPL(statusor, lhs, ctx, rexpr) \
auto statusor = (rexpr); \
OP_REQUIRES_OK(ctx, statusor.status()); \
lhs = std::move(statusor.value())
namespace op_requires_internal {
template <typename S, typename Ctx>
bool OkImpl(Ctx&& ctx, const char* file, int line, const S& s) {
if (!TF_PREDICT_TRUE(s.ok())) {
CheckNotInComputeAsync(ctx, "OP_REQUIRES_OK_ASYNC");
ctx->CtxFailureWithWarning(file, line, s);
return false;
} else {
return true;
}
}
template <typename S, typename Ctx>
bool OkAsyncImpl(Ctx&& ctx, const char* file, int line, const S& s) {
if (!TF_PREDICT_TRUE(s.ok())) {
ctx->CtxFailureWithWarning(file, line, s);
return false;
} else {
return true;
}
}
}
}
#endif  // TENSORFLOW_CORE_FRAMEWORK_OP_REQUIRES_H_
#include <optional>
#include <utility>
#include "absl/status/status.h"
#include "tensorflow/core/lib/gtl/cleanup.h"
#include "tensorflow/core/platform/status_matchers.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
using ::tensorflow::testing::StatusIs;
using ::testing::Optional;
class Holder {
public:
explicit Holder()
: fine_(absl::OkStatus()), foul_(absl::InternalError("test")) {}
const absl::Status& Fine() const { return fine_; }
const absl::Status& Foul() const { return foul_; }
private:
absl::Status fine_;
absl::Status foul_;
};
struct TestContext {
public:
void CtxFailureWithWarning(const char* file, int line, absl::Status status) {
stored_status.emplace(std::move(status));
}
friend void CheckNotInComputeAsync(TestContext* ctx, const char* msg) {}
std::optional<absl::Status> stored_status = std::nullopt;
};
void TestFunction(TestContext& ctx, bool success, bool& reached) {
if (success) {
OP_REQUIRES_OK(&ctx, Holder().Fine());
} else {
OP_REQUIRES_OK(&ctx, Holder().Foul());
}
reached = true;
}
TEST(OpRequires, RequiresOkWithOkStatus) {
TestContext ctx;
bool reached = false;
TestFunction(ctx, true, reached);
EXPECT_FALSE(ctx.stored_status.has_value());
EXPECT_TRUE(reached);
}
TEST(OpRequires, RequiresOkWithFailedStatus) {
TestContext ctx;
bool reached = false;
TestFunction(ctx, false, reached);
EXPECT_THAT(ctx.stored_status,
Optional(StatusIs(absl::StatusCode::kInternal)));
EXPECT_FALSE(reached);
}
void TestFunctionAsync(TestContext& ctx, bool success, bool& reached,
bool& handled) {
auto done = gtl::MakeCleanup([&handled]() { handled = true; });
if (success) {
OP_REQUIRES_OK_ASYNC(&ctx, Holder().Fine(), done.release());
} else {
OP_REQUIRES_OK_ASYNC(&ctx, Holder().Foul(), done.release());
}
reached = true;
}
TEST(OpRequires, RequiresOkAsyncWithOkStatus) {
TestContext ctx;
bool reached = false;
bool handled = false;
TestFunctionAsync(ctx, true, reached, handled);
EXPECT_FALSE(ctx.stored_status.has_value());
EXPECT_TRUE(reached);
EXPECT_TRUE(handled);
}
TEST(OpRequires, RequiresOkAsyncWithFailedStatus) {
TestContext ctx;
bool reached = false;
bool handled = false;
TestFunctionAsync(ctx, false, reached, handled);
EXPECT_THAT(ctx.stored_status,
Optional(StatusIs(absl::StatusCode::kInternal)));
EXPECT_FALSE(reached);
EXPECT_TRUE(handled);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/framework/op_requires.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/framework/op_requires_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
0cade028-d640-4047-8bf8-abf849d918d7 | cpp | tensorflow/tensorflow | grpc_service_impl | third_party/xla/xla/python/ifrt_proxy/server/grpc_service_impl.cc | third_party/xla/xla/python/ifrt_proxy/server/grpc_service_impl_test.cc | #include "xla/python/ifrt_proxy/server/grpc_service_impl.h"
#include <atomic>
#include <cstdint>
#include <memory>
#include <string>
#include <utility>
#include "absl/cleanup/cleanup.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/synchronization/mutex.h"
#include "grpcpp/server_context.h"
#include "grpcpp/support/status.h"
#include "grpcpp/support/sync_stream.h"
#include "xla/pjrt/distributed/util.h"
#include "xla/python/ifrt_proxy/common/grpc_ifrt_service.pb.h"
#include "xla/python/ifrt_proxy/common/proto_util.h"
#include "xla/python/ifrt_proxy/server/host_buffer.h"
#include "xla/python/ifrt_proxy/server/version.h"
namespace xla {
namespace ifrt {
namespace proxy {
::grpc::Status GrpcServiceImpl::GetVersion(::grpc::ServerContext* context,
const GrpcGetVersionRequest* request,
GrpcGetVersionResponse* response) {
auto protocol_version =
ChooseVersion(request->min_version().protocol_version(),
request->max_version().protocol_version());
if (!protocol_version.ok()) {
return xla::ToGrpcStatus(protocol_version.status());
}
response->mutable_version()->set_protocol_version(*protocol_version);
return ::grpc::Status::OK;
}
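// Bidirectional-streaming handler for one IFRT session: parses the session
// metadata, registers a per-session HostBufferStore, constructs a backend,
// and then forwards each request to it, writing responses back to the stream
// (possibly out of request order) under `writer_mu` as they become ready.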
::grpc::Status GrpcServiceImpl::IfrtSession(
::grpc::ServerContext* context,
::grpc::ServerReaderWriter<IfrtResponse, IfrtRequest>* stream) {
GrpcIfrtSessionMetadata metadata;
{
const auto it = context->client_metadata().find(
"ifrt-proxy-grpc-ifrt-session-metadata-bin");
if (it == context->client_metadata().end()) {
return ::grpc::Status(::grpc::StatusCode::INVALID_ARGUMENT,
"Missing metadata for GrpcIfrtService.IfrtSession: "
"ifrt-proxy-grpc-ifrt-session-metadata-bin");
}
if (!metadata.ParseFromString(AsProtoStringData(
absl::string_view(it->second.data(), it->second.size())))) {
return ::grpc::Status(::grpc::StatusCode::INVALID_ARGUMENT,
"Unable to parse GrpcIfrtSessionMetadata");
}
}
const uint64_t session_id =
next_session_id_.fetch_add(1, std::memory_order_relaxed);
VLOG(0) << "Starting a new IFRT session with session_id=" << session_id;
auto host_buffer_store =
std::make_shared<xla::ifrt::proxy::HostBufferStore>();
{
absl::MutexLock l(&host_buffer_store_mu_);
CHECK(host_buffer_stores_.insert({session_id, host_buffer_store}).second);
}
absl::Cleanup cleanup = [&] {
absl::MutexLock l(&host_buffer_store_mu_);
CHECK_GT(host_buffer_stores_.erase(session_id), 0);
};
auto backend = backend_factory_(metadata.version(), session_id,
std::move(host_buffer_store));
if (!backend.ok()) {
LOG(INFO) << "Creating IFRT backend " << session_id
<< " failed: " << backend.status();
return xla::ToGrpcStatus(backend.status());
}
absl::Mutex writer_mu;
bool first_request_read = false;
while (true) {
auto request = std::make_unique<IfrtRequest>();
if (!stream->Read(request.get())) {
break;
}
if (!first_request_read) {
VLOG(0) << "First request read for session " << session_id;
first_request_read = true;
}
const uint64_t op_id = request->request_metadata().op_id();
auto response = (*backend)->Process(std::move(request));
response.OnReady(
[op_id, stream,
&writer_mu](absl::StatusOr<std::shared_ptr<IfrtResponse>> response) {
absl::MutexLock l(&writer_mu);
if (response.ok()) {
stream->Write(**response);
} else {
stream->Write(*NewIfrtResponse(op_id, response.status()));
}
});
}
backend->reset();
VLOG(0) << "Finishing IFRT session " << session_id;
return ::grpc::Status::OK;
}
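// Client-streaming upload of a host buffer: concatenates the received chunks
// and stores them under (session_id, handle), failing with DATA_LOSS if the
// byte count does not match the size announced in the request metadata.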
::grpc::Status GrpcServiceImpl::HostBufferStore(
::grpc::ServerContext* context,
::grpc::ServerReader<GrpcHostBufferStoreRequest>* stream,
GrpcHostBufferStoreResponse* response) {
const auto it = context->client_metadata().find(
"ifrt-proxy-grpc-host-buffer-store-metadata-bin");
if (it == context->client_metadata().end()) {
return ::grpc::Status(
::grpc::StatusCode::INTERNAL,
"Missing gRPC metadata for GrpcHostBufferService.Store");
}
GrpcHostBufferStoreMetadata metadata;
if (!metadata.ParseFromString(AsProtoStringData(
absl::string_view(it->second.data(), it->second.size())))) {
return ::grpc::Status(::grpc::StatusCode::DATA_LOSS,
"Unable to parse GrpcHostBufferStoreMetadata");
}
std::string data;
data.reserve(metadata.buffer_size());
GrpcHostBufferStoreRequest request;
while (stream->Read(&request)) {
data.append(request.data());
}
if (data.size() != metadata.buffer_size()) {
return ::grpc::Status(
::grpc::StatusCode::DATA_LOSS,
absl::StrCat("Potential data loss for host buffers: expected ",
metadata.buffer_size(), " bytes but got ", data.size(),
" bytes"));
}
auto store = GetHostBufferStore(metadata.session_id());
if (!store.ok()) {
return xla::ToGrpcStatus(store.status());
}
return xla::ToGrpcStatus((*store)->Store(metadata.handle(), std::move(data)));
}
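// Server-streaming download of a host buffer in 1 MiB chunks; an empty
// buffer still yields a single empty response. On google-internal builds the
// chunks alias the stored string instead of copying it.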
::grpc::Status GrpcServiceImpl::HostBufferLookup(
::grpc::ServerContext* context, const GrpcHostBufferLookupRequest* request,
::grpc::ServerWriter<GrpcHostBufferLookupResponse>* stream) {
static constexpr int64_t kChunkSize = 1024 * 1024;
auto store = GetHostBufferStore(request->session_id());
if (!store.ok()) {
return xla::ToGrpcStatus(store.status());
}
auto data = (*store)->Lookup(request->handle());
if (!data.ok()) {
return xla::ToGrpcStatus(data.status());
}
GrpcHostBufferLookupResponse response;
if (!(*data)->empty()) {
for (int64_t offset = 0; offset < (*data)->size(); offset += kChunkSize) {
#if defined(PLATFORM_GOOGLE)
response.set_alias_data(
absl::string_view(**data).substr(offset, kChunkSize));
#else
response.set_data((*data)->substr(offset, kChunkSize));
#endif
stream->Write(response);
response.Clear();
}
} else {
stream->Write(response);
}
return ::grpc::Status::OK;
}
::grpc::Status GrpcServiceImpl::HostBufferDelete(
::grpc::ServerContext* context, const GrpcHostBufferDeleteRequest* request,
GrpcHostBufferDeleteResponse* response) {
auto store = GetHostBufferStore(request->session_id());
if (!store.ok()) {
return xla::ToGrpcStatus(store.status());
}
return xla::ToGrpcStatus((*store)->Delete(request->handle()));
}
bool GrpcServiceImpl::Test_InsertHostBufferStore(
uint64_t session_id,
std::shared_ptr<xla::ifrt::proxy::HostBufferStore> store) {
absl::MutexLock l(&host_buffer_store_mu_);
return host_buffer_stores_.insert({session_id, std::move(store)}).second;
}
bool GrpcServiceImpl::Test_DeleteHostBufferStore(uint64_t session_id) {
absl::MutexLock l(&host_buffer_store_mu_);
return host_buffer_stores_.erase(session_id) > 0;
}
absl::StatusOr<std::shared_ptr<xla::ifrt::proxy::HostBufferStore>>
GrpcServiceImpl::GetHostBufferStore(uint64_t session_id) {
absl::MutexLock l(&host_buffer_store_mu_);
const auto it = host_buffer_stores_.find(session_id);
if (it == host_buffer_stores_.end()) {
return absl::NotFoundError(
absl::StrCat("Session id ", session_id, " does not exist"));
}
return it->second;
}
}
}
} | #include "xla/python/ifrt_proxy/server/grpc_service_impl.h"
#include <cstdint>
#include <memory>
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/cord.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "grpcpp/server.h"
#include "grpcpp/server_builder.h"
#include "grpcpp/support/channel_arguments.h"
#include "grpcpp/support/status.h"
#include "xla/python/ifrt_proxy/client/grpc_host_buffer.h"
#include "xla/python/ifrt_proxy/common/grpc_ifrt_service.grpc.pb.h"
#include "xla/python/ifrt_proxy/common/ifrt_service.pb.h"
#include "xla/python/ifrt_proxy/server/grpc_server.h"
#include "xla/python/ifrt_proxy/server/host_buffer.h"
#include "xla/python/ifrt_proxy/server/version.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/test.h"
namespace xla {
namespace ifrt {
namespace proxy {
namespace {
using ::tsl::testing::IsOk;
using ::tsl::testing::IsOkAndHolds;
using ::tsl::testing::StatusIs;
IfrtProxyVersion Version() {
IfrtProxyVersion version;
version.set_protocol_version(kServerMaxVersion);
return version;
}
absl::StatusOr<std::unique_ptr<GrpcServer>> MakeGrpcServer() {
auto addr = absl::StrCat("[::1]:", tsl::testing::PickUnusedPortOrDie());
return GrpcServer::CreateFromIfrtClientFactory(addr, []() {
return absl::UnimplementedError(
"IFRT client creation fails. This test is not expected to "
"instantiate any IFRT client");
});
}
TEST(GrpcServiceImplTest, CanBeUsedToSetupAnGrpcServer) {
ASSERT_THAT(MakeGrpcServer(), IsOk());
}
class GrpcIfrtServiceImplHostBufferTest
: public testing::TestWithParam<int64_t> {
protected:
GrpcIfrtServiceImplHostBufferTest()
: impl_([](IfrtProxyVersion version, uint64_t session_id,
std::shared_ptr<HostBufferStore> host_buffer_store) {
return absl::UnimplementedError(
"IFRT backend creation is not implemented");
}) {
::grpc::ServerBuilder builder;
builder.RegisterService(&impl_);
server_ = builder.BuildAndStart();
stub_ = grpc::GrpcIfrtService::NewStub(
server_->InProcessChannel(::grpc::ChannelArguments()));
}
std::string GetTestData() const {
std::string data;
for (int i = 0; i < GetParam(); ++i) {
data.push_back(i % 7);
}
return data;
}
GrpcServiceImpl impl_;
std::unique_ptr<::grpc::Server> server_;
std::shared_ptr<grpc::GrpcIfrtService::Stub> stub_;
};
TEST_P(GrpcIfrtServiceImplHostBufferTest, StoreAndLookupStringView) {
static constexpr uint64_t kSessionId = 1;
auto store = std::make_shared<HostBufferStore>();
ASSERT_TRUE(impl_.Test_InsertHostBufferStore(kSessionId, store));
GrpcClientHostBufferStore client(stub_, Version(), kSessionId);
constexpr uint64_t kHandle = 2;
const std::string data = GetTestData();
absl::string_view source(data);
ASSERT_THAT(client.Store(kHandle, source).Await(), IsOk());
EXPECT_THAT(client.Lookup(kHandle).Await(), IsOkAndHolds(data));
EXPECT_TRUE(impl_.Test_DeleteHostBufferStore(kSessionId));
}
TEST_P(GrpcIfrtServiceImplHostBufferTest, StoreAndLookupCord) {
static constexpr uint64_t kSessionId = 1;
auto store = std::make_shared<HostBufferStore>();
ASSERT_TRUE(impl_.Test_InsertHostBufferStore(kSessionId, store));
GrpcClientHostBufferStore client(stub_, Version(), kSessionId);
constexpr uint64_t kHandle = 2;
const std::string data = GetTestData();
absl::Cord source(data);
ASSERT_THAT(client.Store(kHandle, source).Await(), IsOk());
EXPECT_THAT(client.Lookup(kHandle).Await(), IsOkAndHolds(data));
EXPECT_TRUE(impl_.Test_DeleteHostBufferStore(kSessionId));
}
TEST_P(GrpcIfrtServiceImplHostBufferTest, Lookup) {
static constexpr uint64_t kSessionId = 1;
auto store = std::make_shared<HostBufferStore>();
ASSERT_TRUE(impl_.Test_InsertHostBufferStore(kSessionId, store));
GrpcClientHostBufferStore client(stub_, Version(), kSessionId);
constexpr uint64_t kHandle = 2;
const std::string data = GetTestData();
ASSERT_THAT(store->Store(kHandle, data), IsOk());
EXPECT_THAT(client.Lookup(kHandle).Await(), IsOkAndHolds(data));
EXPECT_TRUE(impl_.Test_DeleteHostBufferStore(kSessionId));
}
TEST_P(GrpcIfrtServiceImplHostBufferTest, Delete) {
static constexpr uint64_t kSessionId = 1;
auto store = std::make_shared<HostBufferStore>();
ASSERT_TRUE(impl_.Test_InsertHostBufferStore(kSessionId, store));
GrpcClientHostBufferStore client(stub_, Version(), kSessionId);
constexpr uint64_t kHandle = 2;
const std::string data = GetTestData();
ASSERT_THAT(store->Store(kHandle, data), IsOk());
ASSERT_THAT(client.Delete(kHandle).Await(), IsOk());
EXPECT_THAT(client.Lookup(kHandle).Await(),
StatusIs(absl::StatusCode::kNotFound));
EXPECT_TRUE(impl_.Test_DeleteHostBufferStore(kSessionId));
}
INSTANTIATE_TEST_SUITE_P(
DataSize, GrpcIfrtServiceImplHostBufferTest,
testing::Values(0,
16,
3 * 1024 * 1024));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/python/ifrt_proxy/server/grpc_service_impl.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/python/ifrt_proxy/server/grpc_service_impl_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
8260e578-71f7-49bc-94f4-3c2cd85c6dbc | cpp | google/tensorstore | async_write_array | tensorstore/internal/async_write_array.cc | tensorstore/internal/async_write_array_test.cc | #include "tensorstore/internal/async_write_array.h"
#include <stddef.h>
#include <algorithm>
#include <cassert>
#include <utility>
#include "absl/base/optimization.h"
#include "absl/functional/function_ref.h"
#include "absl/status/status.h"
#include "tensorstore/array.h"
#include "tensorstore/box.h"
#include "tensorstore/contiguous_layout.h"
#include "tensorstore/data_type.h"
#include "tensorstore/index.h"
#include "tensorstore/index_interval.h"
#include "tensorstore/index_space/index_transform.h"
#include "tensorstore/index_space/output_index_method.h"
#include "tensorstore/index_space/transformed_array.h"
#include "tensorstore/internal/arena.h"
#include "tensorstore/internal/integer_overflow.h"
#include "tensorstore/internal/masked_array.h"
#include "tensorstore/internal/memory.h"
#include "tensorstore/internal/nditerable.h"
#include "tensorstore/internal/nditerable_transformed_array.h"
#include "tensorstore/kvstore/generation.h"
#include "tensorstore/rank.h"
#include "tensorstore/strided_layout.h"
#include "tensorstore/util/element_pointer.h"
#include "tensorstore/util/extents.h"
#include "tensorstore/util/iterate.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status.h"
namespace tensorstore {
namespace internal {
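// Number of elements of `domain` that lie within the spec's valid data
// bounds, i.e. the product of the per-dimension intersection sizes.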
Index AsyncWriteArray::Spec::GetNumInBoundsElements(BoxView<> domain) const {
const DimensionIndex rank = this->rank();
assert(domain.rank() == rank);
Index product = 1;
const BoxView<> bounds = this->valid_data_bounds;
for (DimensionIndex i = 0; i < rank; ++i) {
product *= Intersect(bounds[i], domain[i]).size();
}
return product;
}
SharedArrayView<const void> AsyncWriteArray::Spec::GetFillValueForDomain(
BoxView<> domain) const {
const DimensionIndex rank = domain.rank();
assert(Contains(overall_fill_value.domain(), domain));
return SharedArrayView<const void>(
AddByteOffset(
overall_fill_value.element_pointer(),
IndexInnerProduct(rank, overall_fill_value.byte_strides().data(),
domain.origin().data())),
StridedLayoutView<>(rank, domain.shape().data(),
overall_fill_value.byte_strides().data()));
}
Result<NDIterable::Ptr> AsyncWriteArray::Spec::GetReadNDIterable(
SharedArrayView<const void> array, BoxView<> domain,
IndexTransform<> chunk_transform, Arena* arena) const {
if (!array.valid()) array = GetFillValueForDomain(domain);
assert(internal::RangesEqual(array.shape(), domain.shape()));
StridedLayoutView<dynamic_rank, offset_origin> data_layout(
domain, array.byte_strides());
TENSORSTORE_ASSIGN_OR_RETURN(
chunk_transform,
ComposeLayoutAndTransform(data_layout, std::move(chunk_transform)));
return GetTransformedArrayNDIterable(
{AddByteOffset(std::move(array.element_pointer()),
-data_layout.origin_byte_offset()),
std::move(chunk_transform)},
arena);
}
AsyncWriteArray::MaskedArray::MaskedArray(DimensionIndex rank) : mask(rank) {}
void AsyncWriteArray::MaskedArray::WriteFillValue(const Spec& spec,
BoxView<> domain) {
array = {};
mask.Reset();
mask.num_masked_elements = domain.num_elements();
mask.region = domain;
}
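// Produces the array to write back for this chunk. Depending on whether the
// chunk is unmodified, fully overwritten, or partially masked, this returns
// the fill value, the read array, or the write array rebased onto the read
// state, and records whether the result actually needs to be stored (arrays
// equal to the fill value are elided unless store_if_equal_to_fill_value).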
AsyncWriteArray::WritebackData
AsyncWriteArray::MaskedArray::GetArrayForWriteback(
const Spec& spec, BoxView<> domain,
const SharedArrayView<const void>& read_array,
bool read_state_already_integrated) {
assert(domain.rank() == spec.rank());
const auto must_store = [&](ArrayView<const void> array) {
if (spec.store_if_equal_to_fill_value) return true;
return !AreArraysEqual(array, spec.GetFillValueForDomain(domain),
spec.fill_value_comparison_kind);
};
const auto get_writeback_from_array = [&] {
WritebackData writeback;
writeback.array = array;
writeback.must_store = must_store(writeback.array);
if (!writeback.must_store) {
array = {};
writeback.array = spec.GetFillValueForDomain(domain);
writeback.may_retain_reference_to_array_indefinitely = true;
} else {
writeback.may_retain_reference_to_array_indefinitely =
(array_capabilities <= kImmutableAndCanRetainIndefinitely);
}
return writeback;
};
if (!array.valid()) {
if (IsFullyOverwritten(spec, domain)) {
WritebackData writeback;
writeback.array = spec.GetFillValueForDomain(domain);
writeback.must_store = false;
writeback.may_retain_reference_to_array_indefinitely = true;
return writeback;
}
if (IsUnmodified()) {
WritebackData writeback;
writeback.must_store = read_array.valid() && must_store(read_array);
if (writeback.must_store) {
writeback.array = read_array;
} else {
writeback.array = spec.GetFillValueForDomain(domain);
}
writeback.may_retain_reference_to_array_indefinitely = true;
return writeback;
}
if (!read_state_already_integrated && read_array.valid()) {
array_capabilities = kMutableArray;
array = tensorstore::MakeCopy(spec.GetFillValueForDomain(domain),
{c_order, include_repeated_elements});
RebaseMaskedArray(domain, ArrayView<const void>(read_array), array, mask);
return get_writeback_from_array();
}
WritebackData writeback;
writeback.array = spec.GetFillValueForDomain(domain);
writeback.must_store = false;
writeback.may_retain_reference_to_array_indefinitely = true;
return writeback;
}
if (!read_state_already_integrated &&
mask.num_masked_elements != domain.num_elements()) {
EnsureWritable(spec);
RebaseMaskedArray(
domain,
read_array.valid()
? ArrayView<const void>(read_array)
: ArrayView<const void>(spec.GetFillValueForDomain(domain)),
array, mask);
}
return get_writeback_from_array();
}
size_t AsyncWriteArray::MaskedArray::EstimateSizeInBytes(
const Spec& spec, tensorstore::span<const Index> shape) const {
size_t total = 0;
if (array.valid()) {
total += GetByteExtent(array);
}
if (mask.mask_array) {
const Index num_elements = ProductOfExtents(shape);
total += num_elements * sizeof(bool);
}
return total;
}
void AsyncWriteArray::MaskedArray::EnsureWritable(const Spec& spec) {
assert(array.valid());
auto new_array =
tensorstore::AllocateArray(array.shape(), tensorstore::c_order,
tensorstore::default_init, spec.dtype());
CopyArray(array, new_array);
array = std::move(new_array);
array_capabilities = kMutableArray;
}
Result<TransformedSharedArray<void>>
AsyncWriteArray::MaskedArray::GetWritableTransformedArray(
const Spec& spec, BoxView<> domain, IndexTransform<> chunk_transform) {
if (!array.valid()) {
this->array =
tensorstore::AllocateArray(domain.shape(), tensorstore::c_order,
tensorstore::default_init, spec.dtype());
array_capabilities = kMutableArray;
if (IsFullyOverwritten(spec, domain)) {
CopyArray(spec.GetFillValueForDomain(domain), this->array);
} else {
assert(IsUnmodified());
}
} else if (array_capabilities != kMutableArray) {
EnsureWritable(spec);
}
StridedLayoutView<dynamic_rank, offset_origin> data_layout{
domain, this->array.byte_strides()};
TENSORSTORE_ASSIGN_OR_RETURN(
chunk_transform,
ComposeLayoutAndTransform(data_layout, std::move(chunk_transform)));
return {std::in_place,
UnownedToShared(
AddByteOffset(ElementPointer<void>(this->array.element_pointer()),
-data_layout.origin_byte_offset())),
std::move(chunk_transform)};
}
Result<NDIterable::Ptr> AsyncWriteArray::MaskedArray::BeginWrite(
const Spec& spec, BoxView<> domain, IndexTransform<> chunk_transform,
Arena* arena) {
TENSORSTORE_ASSIGN_OR_RETURN(
auto transformed_array,
GetWritableTransformedArray(spec, domain, std::move(chunk_transform)));
return GetTransformedArrayNDIterable(std::move(transformed_array), arena);
}
void AsyncWriteArray::MaskedArray::EndWrite(
const Spec& spec, BoxView<> domain, IndexTransformView<> chunk_transform,
Arena* arena) {
WriteToMask(&mask, domain, chunk_transform, arena);
}
void AsyncWriteArray::MaskedArray::Clear() {
mask.Reset();
array = {};
}
AsyncWriteArray::AsyncWriteArray(DimensionIndex rank) : write_state(rank) {}
AsyncWriteArray::WritebackData AsyncWriteArray::GetArrayForWriteback(
const Spec& spec, BoxView<> domain,
const SharedArrayView<const void>& read_array,
const StorageGeneration& read_generation) {
auto writeback_data = write_state.GetArrayForWriteback(
spec, domain, read_array, read_generation == this->read_generation);
if (write_state.array.valid()) this->read_generation = read_generation;
return writeback_data;
}
Result<NDIterable::Ptr> AsyncWriteArray::GetReadNDIterable(
const Spec& spec, BoxView<> domain, SharedArrayView<const void> read_array,
const StorageGeneration& read_generation, IndexTransform<> chunk_transform,
Arena* arena) {
if (!read_array.valid()) read_array = spec.GetFillValueForDomain(domain);
if (!write_state.IsUnmodified()) {
if (write_state.IsFullyOverwritten(spec, domain)) {
if (!write_state.array.valid()) {
read_array = spec.GetFillValueForDomain(domain);
}
} else if (this->read_generation != read_generation) {
assert(write_state.array.valid());
if (write_state.array_capabilities != MaskedArray::kMutableArray) {
write_state.EnsureWritable(spec);
}
RebaseMaskedArray(domain, read_array, write_state.array,
write_state.mask);
this->read_generation = read_generation;
}
if (write_state.array.valid()) {
read_array = write_state.array;
}
}
return spec.GetReadNDIterable(std::move(read_array), domain,
std::move(chunk_transform), arena);
}
namespace {
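// Attempts to adopt `source_array` directly as the chunk's write array by
// deriving byte strides and an element offset through the chunk transform,
// avoiding a copy. Returns false if the source transform uses index arrays,
// in which case the caller falls back to an element-wise copy.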
bool ZeroCopyToWriteArray(
const AsyncWriteArray::Spec& spec, BoxView<> domain,
IndexTransformView<> chunk_transform,
TransformedSharedArray<const void> source_array,
AsyncWriteArray::WriteArraySourceCapabilities source_capabilities,
AsyncWriteArray::MaskedArray& write_state) {
assert(source_capabilities !=
AsyncWriteArray::WriteArraySourceCapabilities::kCannotRetain);
const DimensionIndex dest_rank = domain.rank();
assert(spec.rank() == dest_rank);
assert(chunk_transform.output_rank() == dest_rank);
IndexTransformView<> source_transform = source_array.transform();
const DimensionIndex input_rank = chunk_transform.input_rank();
assert(source_transform.input_rank() == input_rank);
assert(source_transform.domain().box() == chunk_transform.domain().box());
Index new_byte_strides[kMaxRank];
DimensionIndex dest_dim_for_input_dim[kMaxRank];
std::fill_n(dest_dim_for_input_dim, input_rank, DimensionIndex(-1));
std::fill_n(new_byte_strides, dest_rank, Index(0));
for (DimensionIndex dest_dim = 0; dest_dim < dest_rank; ++dest_dim) {
if (domain.shape()[dest_dim] == 1) continue;
auto map = chunk_transform.output_index_map(dest_dim);
if (map.method() != OutputIndexMethod::single_input_dimension) {
continue;
}
[[maybe_unused]] DimensionIndex prev_dest_dim =
std::exchange(dest_dim_for_input_dim[map.input_dimension()], dest_dim);
assert(prev_dest_dim == -1);
}
const DimensionIndex source_output_rank = source_transform.output_rank();
Index source_offset = 0;
for (DimensionIndex source_output_dim = 0;
source_output_dim < source_output_rank; ++source_output_dim) {
auto map = source_transform.output_index_map(source_output_dim);
source_offset =
internal::wrap_on_overflow::Add(source_offset, map.offset());
switch (map.method()) {
case OutputIndexMethod::constant:
break;
case OutputIndexMethod::single_input_dimension: {
const DimensionIndex input_dim = map.input_dimension();
const DimensionIndex dest_dim = dest_dim_for_input_dim[input_dim];
const Index source_stride = map.stride();
if (dest_dim == -1) {
assert(source_transform.input_shape()[input_dim] == 1);
const Index source_origin =
source_transform.input_origin()[input_dim];
source_offset = internal::wrap_on_overflow::Add(
source_offset, internal::wrap_on_overflow::Multiply(
source_origin, source_stride));
break;
}
const auto dest_map = chunk_transform.output_index_map(dest_dim);
const Index dest_stride = dest_map.stride();
assert(dest_stride == 1 || dest_stride == -1);
new_byte_strides[dest_dim] = internal::wrap_on_overflow::Add(
new_byte_strides[dest_dim],
internal::wrap_on_overflow::Multiply(source_stride, dest_stride));
break;
}
case OutputIndexMethod::array:
return false;
}
}
for (DimensionIndex dest_dim = 0; dest_dim < dest_rank; ++dest_dim) {
auto map = chunk_transform.output_index_map(dest_dim);
source_offset = internal::wrap_on_overflow::Subtract(
source_offset, internal::wrap_on_overflow::Multiply(
new_byte_strides[dest_dim], map.offset()));
}
auto& new_array = write_state.array;
new_array.layout() =
StridedLayoutView<>(dest_rank, domain.shape().data(), new_byte_strides);
source_offset = internal::wrap_on_overflow::Add(
source_offset,
IndexInnerProduct(dest_rank, domain.origin().data(), new_byte_strides));
new_array.element_pointer() = AddByteOffset(
SharedElementPointer<void>(internal::const_pointer_cast<void>(std::move(
source_array.element_pointer().pointer())),
spec.dtype()),
source_offset);
using WriteArraySourceCapabilities =
AsyncWriteArray::WriteArraySourceCapabilities;
using MaskedArray = AsyncWriteArray::MaskedArray;
switch (source_capabilities) {
case WriteArraySourceCapabilities::kCannotRetain:
ABSL_UNREACHABLE();
case WriteArraySourceCapabilities::kMutable:
write_state.array_capabilities = MaskedArray::kMutableArray;
break;
case WriteArraySourceCapabilities::kImmutableAndCanRetainIndefinitely:
write_state.array_capabilities =
MaskedArray::kImmutableAndCanRetainIndefinitely;
break;
case WriteArraySourceCapabilities::kImmutableAndCanRetainUntilCommit:
write_state.array_capabilities =
MaskedArray::kImmutableAndCanRetainUntilCommit;
break;
}
return true;
}
}
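// Editorial note (not in the original source): ZeroCopyToWriteArray above
// avoids copying by aliasing the caller's buffer.  For each destination
// dimension it derives a byte stride from the composed source/chunk
// transforms, folds every constant contribution into a single byte offset,
// and then points `write_state.array` at the shared source pointer through a
// StridedLayoutView over `domain`.  An `OutputIndexMethod::array` map cannot
// be expressed with plain strides, so the function returns false and the
// caller falls back to an element-wise copy.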
absl::Status AsyncWriteArray::WriteArray(
const Spec& spec, BoxView<> domain, IndexTransformView<> chunk_transform,
absl::FunctionRef<Result<std::pair<TransformedSharedArray<const void>,
WriteArraySourceCapabilities>>()>
get_source_array) {
[[maybe_unused]] const DimensionIndex dest_rank = spec.rank();
assert(domain.rank() == dest_rank);
assert(chunk_transform.output_rank() == dest_rank);
Box<dynamic_rank(kMaxRank)> output_range(spec.rank());
TENSORSTORE_ASSIGN_OR_RETURN(
bool output_range_exact,
tensorstore::GetOutputRange(chunk_transform, output_range));
if (!output_range_exact || output_range != domain) {
return absl::CancelledError();
}
TENSORSTORE_ASSIGN_OR_RETURN(auto source_array_info, get_source_array());
auto source_capabilities = std::get<1>(source_array_info);
if (source_capabilities == WriteArraySourceCapabilities::kCannotRetain) {
TENSORSTORE_ASSIGN_OR_RETURN(
auto dest_transformed_array,
write_state.GetWritableTransformedArray(spec, domain, chunk_transform));
TENSORSTORE_RETURN_IF_ERROR(CopyTransformedArray(
std::get<0>(source_array_info), dest_transformed_array));
} else {
if (!ZeroCopyToWriteArray(spec, domain, chunk_transform,
std::get<0>(source_array_info),
source_capabilities, write_state)) {
return absl::CancelledError();
}
}
write_state.mask.Reset();
write_state.mask.num_masked_elements = domain.num_elements();
write_state.mask.region = domain;
return absl::OkStatus();
}
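// Editorial note (not in the original source): WriteArray commits the source
// buffer only when the chunk transform covers `domain` exactly; otherwise it
// returns CancelledError so the caller can use the generic BeginWrite path.
// A kCannotRetain source is copied element-wise, anything else attempts the
// zero-copy aliasing above, and on success the mask is set to cover the whole
// domain.  A sketch of a caller (mirroring the unit test below; `src` is
// assumed to be a TransformedSharedArray<const void> matching
// `chunk_transform`):
//
//   absl::Status status = async_write_array.WriteArray(
//       spec, domain, chunk_transform, [&] {
//         return std::pair{
//             src, AsyncWriteArray::WriteArraySourceCapabilities::kMutable};
//       });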
Result<NDIterable::Ptr> AsyncWriteArray::BeginWrite(
const Spec& spec, BoxView<> domain, IndexTransform<> chunk_transform,
Arena* arena) {
return write_state.BeginWrite(spec, domain, std::move(chunk_transform),
arena);
}
void AsyncWriteArray::EndWrite(const Spec& spec, BoxView<> domain,
IndexTransformView<> chunk_transform,
bool success, Arena* arena) {
if (!success) {
InvalidateReadState();
return;
}
write_state.EndWrite(spec, domain, chunk_transform, arena);
}
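// Editorial note (not in the original source): on failure EndWrite does not
// commit the partially written region; it invalidates the cached read state
// so the next read re-fetches instead of observing torn data.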
}
} | #include "tensorstore/internal/async_write_array.h"
#include <stddef.h>
#include <stdint.h>
#include <algorithm>
#include <limits>
#include <random>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "tensorstore/array.h"
#include "tensorstore/array_testutil.h"
#include "tensorstore/box.h"
#include "tensorstore/contiguous_layout.h"
#include "tensorstore/data_type.h"
#include "tensorstore/index.h"
#include "tensorstore/index_space/dim_expression.h"
#include "tensorstore/index_space/index_domain.h"
#include "tensorstore/index_space/index_transform.h"
#include "tensorstore/index_space/index_transform_testutil.h"
#include "tensorstore/index_space/transformed_array.h"
#include "tensorstore/internal/arena.h"
#include "tensorstore/internal/nditerable.h"
#include "tensorstore/internal/nditerable_array.h"
#include "tensorstore/internal/nditerable_copy.h"
#include "tensorstore/internal/testing/random_seed.h"
#include "tensorstore/kvstore/generation.h"
#include "tensorstore/strided_layout.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status_testutil.h"
#include "tensorstore/util/str_cat.h"
namespace {
using ::tensorstore::Box;
using ::tensorstore::BoxView;
using ::tensorstore::Index;
using ::tensorstore::kInfIndex;
using ::tensorstore::kInfSize;
using ::tensorstore::MakeArray;
using ::tensorstore::MakeOffsetArray;
using ::tensorstore::MakeScalarArray;
using ::tensorstore::ReferencesSameDataAs;
using ::tensorstore::StorageGeneration;
using ::tensorstore::internal::Arena;
using ::tensorstore::internal::AsyncWriteArray;
using MaskedArray = AsyncWriteArray::MaskedArray;
using Spec = AsyncWriteArray::Spec;
tensorstore::SharedArray<void> CopyNDIterable(
tensorstore::internal::NDIterable::Ptr source_iterable,
tensorstore::span<const Index> shape, Arena* arena) {
auto dest_array = tensorstore::AllocateArray(shape, tensorstore::c_order,
tensorstore::default_init,
source_iterable->dtype());
auto dest_iterable =
tensorstore::internal::GetArrayNDIterable(dest_array, arena);
tensorstore::internal::NDIterableCopier copier(*source_iterable,
*dest_iterable, shape, arena);
TENSORSTORE_EXPECT_OK(copier.Copy());
return dest_array;
}
template <typename Target>
void TestWrite(Target* target, const Spec& spec, BoxView<> domain,
tensorstore::SharedOffsetArrayView<const void> source_array) {
Arena arena;
auto transform = tensorstore::IdentityTransform(source_array.domain());
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto dest_iterable, target->BeginWrite(spec, domain, transform, &arena));
auto source_iterable =
tensorstore::internal::GetArrayNDIterable(source_array, &arena);
tensorstore::internal::NDIterableCopier copier(
*source_iterable, *dest_iterable, source_array.shape(), &arena);
TENSORSTORE_EXPECT_OK(copier.Copy());
if constexpr (std::is_same_v<Target, AsyncWriteArray>) {
target->EndWrite(spec, domain, transform, true, &arena);
} else {
target->EndWrite(spec, domain, transform, &arena);
}
}
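// Editorial note (not in the original test): TestWrite drives a write through
// the public iteration API -- BeginWrite yields the destination NDIterable,
// NDIterableCopier moves the elements over from the source array, and
// EndWrite commits.  Only AsyncWriteArray::EndWrite takes the extra `success`
// flag, hence the `if constexpr` dispatch.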
TEST(SpecTest, Basic) {
auto overall_fill_value = MakeOffsetArray<int32_t>(
{-2, 0}, {
{1, 2, 3, 4, 5, 6, 7, 8, 9},
{11, 12, 13, 14, 15, 16, 17, 18, 19},
{21, 22, 23, 24, 25, 26, 27, 28, 29},
{31, 32, 33, 34, 35, 36, 37, 38, 39},
});
tensorstore::Box<> component_bounds({-1, -kInfIndex}, {3, kInfSize});
Spec spec{overall_fill_value, component_bounds};
EXPECT_EQ(6, spec.GetNumInBoundsElements(BoxView<>({0, 0}, {2, 3})));
EXPECT_EQ(3, spec.GetNumInBoundsElements(BoxView<>({-2, 0}, {2, 3})));
EXPECT_EQ(2, spec.rank());
EXPECT_EQ(tensorstore::dtype_v<int32_t>, spec.dtype());
EXPECT_EQ(0, spec.EstimateReadStateSizeInBytes(
false, tensorstore::span<const Index>({2, 3})));
EXPECT_EQ(2 * 3 * sizeof(int32_t),
spec.EstimateReadStateSizeInBytes(
true, tensorstore::span<const Index>({2, 3})));
{
auto read_array = MakeArray<int32_t>({{7, 8, 9}, {10, 11, 12}});
Arena arena;
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto iterable,
spec.GetReadNDIterable(
read_array, BoxView<>({2, 6}, {2, 3}),
tensorstore::IdentityTransform(tensorstore::Box<>({2, 6}, {2, 2})),
&arena));
EXPECT_EQ(MakeArray<int32_t>({{7, 8}, {10, 11}}),
CopyNDIterable(std::move(iterable),
tensorstore::span<const Index>({2, 2}), &arena));
}
}
TEST(MaskedArrayTest, Basic) {
auto overall_fill_value = MakeOffsetArray<int32_t>(
{-2, 0}, {
{1, 2, 3, 4, 5, 6, 7, 8, 9},
{11, 12, 13, 14, 15, 16, 17, 18, 19},
{21, 22, 23, 24, 25, 26, 27, 28, 29},
{31, 32, 33, 34, 35, 36, 37, 38, 39},
});
auto fill_value_copy = MakeArray<int32_t>({{21, 22, 23}, {31, 32, 33}});
tensorstore::Box<> component_bounds({-1, -kInfIndex}, {3, kInfSize});
Spec spec{overall_fill_value, component_bounds};
MaskedArray write_state(2);
Box<> domain({0, 0}, {2, 3});
EXPECT_EQ(0, write_state.EstimateSizeInBytes(spec, domain.shape()));
EXPECT_TRUE(write_state.IsUnmodified());
EXPECT_FALSE(write_state.array.valid());
auto read_array = MakeArray<int32_t>({{11, 12, 13}, {14, 15, 16}});
{
auto writeback_data = write_state.GetArrayForWriteback(
spec, domain, {},
false);
EXPECT_EQ(spec.GetFillValueForDomain(domain), writeback_data.array);
EXPECT_FALSE(writeback_data.must_store);
}
{
auto writeback_data = write_state.GetArrayForWriteback(
spec, domain, fill_value_copy,
false);
EXPECT_EQ(spec.GetFillValueForDomain(domain), writeback_data.array);
EXPECT_FALSE(writeback_data.must_store);
}
{
auto writeback_data = write_state.GetArrayForWriteback(
spec, domain, read_array,
false);
EXPECT_EQ(read_array, writeback_data.array);
EXPECT_TRUE(writeback_data.must_store);
}
TestWrite(&write_state, spec, domain,
tensorstore::AllocateArray<int32_t>(
tensorstore::BoxView<>({1, 1}, {0, 0})));
EXPECT_TRUE(write_state.array.valid());
EXPECT_TRUE(write_state.IsUnmodified());
EXPECT_FALSE(write_state.IsFullyOverwritten(spec, domain));
EXPECT_EQ(2 * 3 * sizeof(int32_t),
write_state.EstimateSizeInBytes(spec, domain.shape()));
std::fill_n(static_cast<int32_t*>(write_state.array.data()),
domain.num_elements(), 0);
TestWrite(&write_state, spec, domain,
tensorstore::MakeOffsetArray<int32_t>({1, 1}, {{7, 8}}));
EXPECT_EQ(MakeArray<int32_t>({{0, 0, 0}, {0, 7, 8}}),
write_state.shared_array_view(spec));
EXPECT_FALSE(write_state.IsUnmodified());
EXPECT_FALSE(write_state.IsFullyOverwritten(spec, domain));
TestWrite(&write_state, spec, domain,
tensorstore::MakeOffsetArray<int32_t>({0, 0}, {{9}}));
EXPECT_EQ(MakeArray<int32_t>({{9, 0, 0}, {0, 7, 8}}),
write_state.shared_array_view(spec));
EXPECT_EQ(MakeArray<bool>({{1, 0, 0}, {0, 1, 1}}),
tensorstore::Array(write_state.mask.mask_array.get(), {2, 3}));
EXPECT_FALSE(write_state.IsUnmodified());
EXPECT_FALSE(write_state.IsFullyOverwritten(spec, domain));
EXPECT_EQ(2 * 3 * (sizeof(int32_t) + sizeof(bool)),
write_state.EstimateSizeInBytes(spec, domain.shape()));
{
auto writeback_data = write_state.GetArrayForWriteback(
spec, domain, {},
false);
EXPECT_TRUE(writeback_data.must_store);
EXPECT_EQ(MakeArray<int32_t>({{9, 22, 23}, {31, 7, 8}}),
writeback_data.array);
}
{
auto writeback_data = write_state.GetArrayForWriteback(
spec, domain, read_array,
true);
EXPECT_TRUE(writeback_data.must_store);
EXPECT_EQ(MakeArray<int32_t>({{9, 22, 23}, {31, 7, 8}}),
writeback_data.array);
}
{
auto writeback_data = write_state.GetArrayForWriteback(
spec, domain, read_array,
false);
EXPECT_TRUE(writeback_data.must_store);
EXPECT_EQ(MakeArray<int32_t>({{9, 12, 13}, {14, 7, 8}}),
writeback_data.array);
}
TestWrite(&write_state, spec, domain,
tensorstore::MakeOffsetArray<int32_t>({0, 0}, {{9}, {9}}));
TestWrite(&write_state, spec, domain,
tensorstore::MakeOffsetArray<int32_t>({0, 0}, {{10, 10, 10}}));
EXPECT_FALSE(write_state.IsUnmodified());
EXPECT_TRUE(write_state.IsFullyOverwritten(spec, domain));
{
auto writeback_data = write_state.GetArrayForWriteback(
spec, domain, read_array,
false);
EXPECT_TRUE(writeback_data.must_store);
EXPECT_EQ(MakeArray<int32_t>({{10, 10, 10}, {9, 7, 8}}),
writeback_data.array);
}
TestWrite(&write_state, spec, domain, fill_value_copy);
EXPECT_TRUE(write_state.array.valid());
{
auto writeback_data = write_state.GetArrayForWriteback(
spec, domain, read_array,
false);
EXPECT_FALSE(writeback_data.must_store);
EXPECT_EQ(spec.GetFillValueForDomain(domain), writeback_data.array);
EXPECT_FALSE(write_state.array.valid());
}
write_state.Clear();
EXPECT_TRUE(write_state.IsUnmodified());
TestWrite(&write_state, spec, domain,
tensorstore::MakeOffsetArray<int32_t>({1, 1}, {{7, 8}}));
write_state.WriteFillValue(spec, domain);
EXPECT_FALSE(write_state.IsUnmodified());
EXPECT_FALSE(write_state.array.valid());
EXPECT_EQ(0, write_state.EstimateSizeInBytes(spec, domain.shape()));
EXPECT_TRUE(write_state.IsFullyOverwritten(spec, domain));
{
auto writeback_data = write_state.GetArrayForWriteback(
spec, domain, read_array,
false);
EXPECT_FALSE(writeback_data.must_store);
EXPECT_EQ(spec.GetFillValueForDomain(domain), writeback_data.array);
EXPECT_FALSE(write_state.array.valid());
}
TestWrite(&write_state, spec, domain,
tensorstore::MakeOffsetArray<int32_t>({1, 1}, {{7, 8}}));
EXPECT_EQ(MakeArray<int32_t>({{21, 22, 23}, {31, 7, 8}}),
write_state.shared_array_view(spec));
}
TEST(MaskedArrayTest, PartialChunk) {
auto overall_fill_value = MakeOffsetArray<int32_t>(
{-2, 0}, {
{1, 2, 3, 4, 5, 6, 7, 8, 9},
{11, 12, 13, 14, 15, 16, 17, 18, 19},
{21, 22, 23, 24, 25, 26, 27, 28, 29},
{31, 32, 33, 34, 35, 36, 37, 38, 39},
});
tensorstore::Box<> component_bounds({-1, -kInfIndex}, {3, kInfSize});
Spec spec{overall_fill_value, component_bounds};
Box<> domain{{-2, 0}, {2, 3}};
MaskedArray write_state(2);
TestWrite(&write_state, spec, domain,
tensorstore::MakeOffsetArray<int32_t>({-1, 0}, {{7, 8, 9}}));
EXPECT_TRUE(write_state.IsFullyOverwritten(spec, domain));
}
TEST(MaskedArrayTest, StoreIfEqualToFillValue) {
auto overall_fill_value = MakeScalarArray<int32_t>(42);
tensorstore::Box<> component_bounds;
Spec spec{overall_fill_value, component_bounds};
spec.store_if_equal_to_fill_value = true;
MaskedArray write_state(0);
TestWrite(&write_state, spec, {}, tensorstore::MakeScalarArray<int32_t>(42));
{
auto writeback_data = write_state.GetArrayForWriteback(
spec, {}, {},
false);
EXPECT_EQ(overall_fill_value, writeback_data.array);
EXPECT_TRUE(writeback_data.must_store);
}
auto read_array = MakeScalarArray<int32_t>(50);
{
auto writeback_data = write_state.GetArrayForWriteback(
spec, {}, read_array,
false);
EXPECT_EQ(overall_fill_value, writeback_data.array);
EXPECT_TRUE(writeback_data.must_store);
}
}
TEST(MaskedArrayTest, CompareFillValueIdenticallyEqual) {
auto fill_value =
MakeScalarArray<float>(std::numeric_limits<float>::quiet_NaN());
tensorstore::Box<> component_bounds;
Spec spec{fill_value, component_bounds};
spec.fill_value_comparison_kind =
tensorstore::EqualityComparisonKind::identical;
MaskedArray write_state(0);
TestWrite(&write_state, spec, {},
tensorstore::MakeScalarArray<float>(
std::numeric_limits<float>::signaling_NaN()));
{
auto writeback_data = write_state.GetArrayForWriteback(
spec, {}, {},
false);
EXPECT_TRUE(AreArraysIdenticallyEqual(
tensorstore::MakeScalarArray<float>(
std::numeric_limits<float>::signaling_NaN()),
writeback_data.array));
EXPECT_TRUE(writeback_data.must_store);
}
TestWrite(&write_state, spec, {},
tensorstore::MakeScalarArray<float>(
std::numeric_limits<float>::quiet_NaN()));
{
auto writeback_data = write_state.GetArrayForWriteback(
spec, {}, {},
false);
EXPECT_TRUE(
AreArraysIdenticallyEqual(tensorstore::MakeScalarArray<float>(
std::numeric_limits<float>::quiet_NaN()),
writeback_data.array));
EXPECT_FALSE(writeback_data.must_store);
EXPECT_EQ(fill_value.data(), writeback_data.array.data());
}
}
TEST(AsyncWriteArrayTest, Basic) {
AsyncWriteArray async_write_array(2);
auto overall_fill_value = MakeOffsetArray<int32_t>(
{-2, 0}, {
{1, 2, 3, 4, 5, 6, 7, 8, 9},
{11, 12, 13, 14, 15, 16, 17, 18, 19},
{21, 22, 23, 24, 25, 26, 27, 28, 29},
{31, 32, 33, 34, 35, 36, 37, 38, 39},
});
tensorstore::Box<> component_bounds({-1, -kInfIndex}, {3, kInfSize});
Spec spec{overall_fill_value, component_bounds};
Box<> domain{{0, 0}, {2, 3}};
auto fill_value_copy = MakeArray<int32_t>({{21, 22, 23}, {31, 32, 33}});
{
Arena arena;
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto iterable,
async_write_array.GetReadNDIterable(
spec, domain, {},
StorageGeneration::FromString("a"),
tensorstore::IdentityTransform(tensorstore::Box<>({0, 1}, {2, 2})),
&arena));
EXPECT_EQ(MakeArray<int32_t>({{22, 23}, {32, 33}}),
CopyNDIterable(std::move(iterable),
tensorstore::span<const Index>({2, 2}), &arena));
}
{
auto read_array = MakeArray<int32_t>({{21, 22, 23}, {24, 25, 26}});
Arena arena;
auto writeback_data = async_write_array.GetArrayForWriteback(
spec, domain, read_array,
StorageGeneration::FromString("b"));
EXPECT_TRUE(writeback_data.must_store);
EXPECT_EQ(read_array, writeback_data.array);
EXPECT_EQ(StorageGeneration::Invalid(), async_write_array.read_generation);
}
TestWrite(&async_write_array, spec, domain,
tensorstore::MakeOffsetArray<int32_t>({0, 0}, {{8}}));
{
auto* data_ptr = async_write_array.write_state.array.data();
TestWrite(&async_write_array, spec, domain,
tensorstore::MakeOffsetArray<int32_t>({0, 0}, {{7}}));
EXPECT_EQ(data_ptr, async_write_array.write_state.array.data());
}
{
Arena arena;
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto iterable,
async_write_array.GetReadNDIterable(
spec, domain, {},
StorageGeneration::FromString("a"),
tensorstore::IdentityTransform(tensorstore::Box<>({0, 0}, {2, 3})),
&arena));
EXPECT_EQ(MakeArray<int32_t>({{7, 22, 23}, {31, 32, 33}}),
CopyNDIterable(std::move(iterable),
tensorstore::span<const Index>({2, 3}), &arena));
}
{
auto read_array = MakeArray<int32_t>({{11, 12, 13}, {14, 15, 16}});
Arena arena;
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto iterable,
async_write_array.GetReadNDIterable(
spec, domain, read_array,
StorageGeneration::FromString("a"),
tensorstore::IdentityTransform(tensorstore::Box<>({0, 0}, {2, 3})),
&arena));
EXPECT_EQ(MakeArray<int32_t>({{7, 22, 23}, {31, 32, 33}}),
CopyNDIterable(std::move(iterable),
tensorstore::span<const Index>({2, 3}), &arena));
}
tensorstore::SharedArray<const void> prev_writeback_array;
{
auto read_array = MakeArray<int32_t>({{11, 12, 13}, {14, 15, 16}});
Arena arena;
auto writeback_data = async_write_array.GetArrayForWriteback(
spec, domain, read_array,
StorageGeneration::FromString("a"));
EXPECT_TRUE(writeback_data.must_store);
prev_writeback_array = writeback_data.array;
EXPECT_EQ(MakeArray<int32_t>({{7, 22, 23}, {31, 32, 33}}),
writeback_data.array);
}
{
auto read_array = MakeArray<int32_t>({{11, 12, 13}, {14, 15, 16}});
Arena arena;
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto iterable,
async_write_array.GetReadNDIterable(
spec, domain, read_array,
StorageGeneration::FromString("b"),
tensorstore::IdentityTransform(tensorstore::Box<>({0, 0}, {2, 3})),
&arena));
EXPECT_EQ(MakeArray<int32_t>({{7, 12, 13}, {14, 15, 16}}),
CopyNDIterable(std::move(iterable),
tensorstore::span<const Index>({2, 3}), &arena));
}
{
auto read_array = MakeArray<int32_t>({{21, 22, 23}, {24, 25, 26}});
Arena arena;
auto writeback_data = async_write_array.GetArrayForWriteback(
spec, domain, read_array,
StorageGeneration::FromString("c"));
EXPECT_TRUE(writeback_data.must_store);
EXPECT_EQ(MakeArray<int32_t>({{7, 22, 23}, {24, 25, 26}}),
writeback_data.array);
EXPECT_EQ(StorageGeneration::FromString("c"),
async_write_array.read_generation);
EXPECT_NE(prev_writeback_array, writeback_data.array);
}
async_write_array.write_state.WriteFillValue(spec, domain);
{
auto read_array = MakeArray<int32_t>({{11, 12, 13}, {14, 15, 16}});
Arena arena;
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto iterable,
async_write_array.GetReadNDIterable(
spec, domain, read_array,
StorageGeneration::FromString("b"),
tensorstore::IdentityTransform(tensorstore::Box<>({0, 0}, {2, 3})),
&arena));
EXPECT_EQ(fill_value_copy,
CopyNDIterable(std::move(iterable),
tensorstore::span<const Index>({2, 3}), &arena));
}
TestWrite(&async_write_array, spec, domain,
tensorstore::MakeOffsetArray<int32_t>({0, 0}, {{9}}));
{
auto read_array = MakeArray<int32_t>({{11, 12, 13}, {14, 15, 16}});
Arena arena;
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto iterable,
async_write_array.GetReadNDIterable(
spec, domain, read_array,
StorageGeneration::FromString("b"),
tensorstore::IdentityTransform(tensorstore::Box<>({0, 0}, {2, 3})),
&arena));
EXPECT_EQ(MakeArray<int32_t>({{9, 22, 23}, {31, 32, 33}}),
CopyNDIterable(std::move(iterable),
tensorstore::span<const Index>({2, 3}), &arena));
}
}
TEST(AsyncWriteArrayTest, Issue144) {
AsyncWriteArray async_write_array(1);
auto overall_fill_value = MakeArray<int32_t>({0, 0});
tensorstore::Box<> component_bounds(1);
Spec spec{overall_fill_value, component_bounds};
Box<> domain{{0}, {2}};
TestWrite(&async_write_array, spec, domain,
tensorstore::MakeOffsetArray<int32_t>({1}, {0}));
{
auto writeback_data = async_write_array.GetArrayForWriteback(
spec, domain, {},
StorageGeneration::FromString("c"));
EXPECT_EQ(spec.GetFillValueForDomain(domain), writeback_data.array);
EXPECT_FALSE(writeback_data.must_store);
}
EXPECT_EQ(1, async_write_array.write_state.mask.num_masked_elements);
EXPECT_FALSE(async_write_array.write_state.array.data());
for (int i = 0; i < 2; ++i) {
auto writeback_data = async_write_array.GetArrayForWriteback(
spec, domain, {},
StorageGeneration::FromString("d"));
EXPECT_EQ(spec.GetFillValueForDomain(domain), writeback_data.array);
EXPECT_FALSE(writeback_data.must_store);
EXPECT_EQ(1, async_write_array.write_state.mask.num_masked_elements);
EXPECT_FALSE(async_write_array.write_state.array.data());
}
{
auto writeback_data = async_write_array.GetArrayForWriteback(
spec, domain, MakeArray<int32_t>({2, 2}),
StorageGeneration::FromString("e"));
EXPECT_EQ(MakeArray<int32_t>({2, 0}), writeback_data.array);
EXPECT_TRUE(writeback_data.must_store);
EXPECT_EQ(1, async_write_array.write_state.mask.num_masked_elements);
EXPECT_TRUE(async_write_array.write_state.array.data());
}
{
auto writeback_data = async_write_array.GetArrayForWriteback(
spec, domain, MakeArray<int32_t>({0, 2}),
StorageGeneration::FromString("f"));
EXPECT_EQ(spec.GetFillValueForDomain(domain), writeback_data.array);
EXPECT_FALSE(writeback_data.must_store);
EXPECT_EQ(1, async_write_array.write_state.mask.num_masked_elements);
EXPECT_FALSE(async_write_array.write_state.array.data());
}
}
using WriteArraySourceCapabilities =
AsyncWriteArray::WriteArraySourceCapabilities;
using ArrayCapabilities = AsyncWriteArray::MaskedArray::ArrayCapabilities;
void TestWriteArraySuccess(
WriteArraySourceCapabilities source_capabilities,
ArrayCapabilities expected_array_capabilities, bool may_retain_writeback,
bool zero_copy, tensorstore::IndexTransformView<> chunk_transform,
tensorstore::TransformedSharedArray<const void> source_array) {
SCOPED_TRACE(tensorstore::StrCat("chunk_transform=", chunk_transform));
AsyncWriteArray async_write_array(chunk_transform.output_rank());
tensorstore::Box<> output_range(chunk_transform.output_rank());
ASSERT_THAT(tensorstore::GetOutputRange(chunk_transform, output_range),
::testing::Optional(true));
auto origin = output_range.origin();
SCOPED_TRACE(tensorstore::StrCat("origin=", origin));
auto fill_value =
tensorstore::AllocateArray(output_range, tensorstore::c_order,
tensorstore::value_init, source_array.dtype());
tensorstore::Box<> component_bounds(chunk_transform.output_rank());
Spec spec{fill_value, component_bounds};
size_t orig_use_count = source_array.element_pointer().pointer().use_count();
TENSORSTORE_ASSERT_OK(async_write_array.WriteArray(
spec, output_range, chunk_transform,
[&] { return std::pair{source_array, source_capabilities}; }));
auto validate_zero_copy = [&](const auto& target_array,
size_t orig_use_count) {
EXPECT_EQ((zero_copy ? orig_use_count + 1 : orig_use_count),
source_array.element_pointer().pointer().use_count());
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto materialized_target_array,
target_array |
tensorstore::AllDims().TranslateTo(output_range.origin()) |
chunk_transform | tensorstore::TryConvertToArray());
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto materialized_source_array,
source_array | tensorstore::TryConvertToArray());
EXPECT_THAT(
materialized_target_array,
::testing::Conditional(
zero_copy, ReferencesSameDataAs(materialized_source_array),
::testing::Not(ReferencesSameDataAs(materialized_source_array))));
};
{
SCOPED_TRACE(
"Checking async_write_array.write_state.array before calling "
"GetArrayForWriteback");
validate_zero_copy(async_write_array.write_state.array, orig_use_count);
}
EXPECT_EQ(expected_array_capabilities,
async_write_array.write_state.array_capabilities);
{
SCOPED_TRACE("Checking writeback_data");
orig_use_count = source_array.element_pointer().pointer().use_count();
auto writeback_data = async_write_array.GetArrayForWriteback(
spec, output_range, {},
StorageGeneration::Invalid());
validate_zero_copy(writeback_data.array, orig_use_count);
EXPECT_EQ(may_retain_writeback,
writeback_data.may_retain_reference_to_array_indefinitely);
EXPECT_EQ(expected_array_capabilities,
async_write_array.write_state.array_capabilities);
}
}
absl::Status TestWriteArrayError(
WriteArraySourceCapabilities source_capabilities, tensorstore::Box<> box,
tensorstore::IndexTransformView<> chunk_transform,
tensorstore::TransformedSharedArray<const void> source_array) {
AsyncWriteArray async_write_array(chunk_transform.output_rank());
auto fill_value = tensorstore::AllocateArray(
box, tensorstore::c_order, tensorstore::value_init, source_array.dtype());
tensorstore::Box<> component_bounds(chunk_transform.output_rank());
Spec spec{fill_value, component_bounds};
return async_write_array.WriteArray(spec, box, chunk_transform, [&] {
return std::pair{source_array, source_capabilities};
});
}
void TestWriteArrayIdentityTransformSuccess(
WriteArraySourceCapabilities source_capabilities,
ArrayCapabilities expected_array_capabilities, bool may_retain_writeback,
bool zero_copy) {
auto source_array = MakeArray<int32_t>({{7, 8, 9}, {10, 11, 12}});
auto chunk_transform = tensorstore::IdentityTransform(source_array.shape());
TestWriteArraySuccess(source_capabilities, expected_array_capabilities,
may_retain_writeback, zero_copy, chunk_transform,
source_array);
}
TEST(WriteArrayIdentityTransformSuccessTest, kCannotRetain) {
TestWriteArrayIdentityTransformSuccess(
WriteArraySourceCapabilities::kCannotRetain,
AsyncWriteArray::MaskedArray::kMutableArray,
true,
false);
}
TEST(WriteArrayIdentityTransformSuccessTest,
kImmutableAndCanRetainIndefinitely) {
TestWriteArrayIdentityTransformSuccess(
WriteArraySourceCapabilities::kImmutableAndCanRetainIndefinitely,
AsyncWriteArray::MaskedArray::kImmutableAndCanRetainIndefinitely,
true,
true);
}
TEST(WriteArrayIdentityTransformSuccessTest,
kImmutableAndCanRetainUntilCommit) {
TestWriteArrayIdentityTransformSuccess(
WriteArraySourceCapabilities::kImmutableAndCanRetainUntilCommit,
AsyncWriteArray::MaskedArray::kImmutableAndCanRetainUntilCommit,
false,
true);
}
TEST(WriteArrayIdentityTransformSuccessTest, kMutable) {
TestWriteArrayIdentityTransformSuccess(
WriteArraySourceCapabilities::kMutable,
AsyncWriteArray::MaskedArray::kMutableArray,
true,
true);
}
TEST(WriteArrayNonIdentityTransformSuccess, kMutable) {
std::minstd_rand gen{tensorstore::internal_testing::GetRandomSeedForTest(
"TENSORSTORE_INTERNAL_ASYNC_WRITE_ARRAY")};
tensorstore::SharedArray<const void> base_source_array =
MakeArray<int32_t>({{7, 8, 9}, {10, 11, 12}});
constexpr size_t kNumIterations = 10;
for (size_t iter_i = 0; iter_i < kNumIterations; ++iter_i) {
tensorstore::IndexTransform<> source_transform;
{
tensorstore::internal::MakeStridedIndexTransformForOutputSpaceParameters
p;
p.max_stride = 2;
source_transform =
tensorstore::internal::MakeRandomStridedIndexTransformForOutputSpace(
gen, tensorstore::IndexDomain<>(base_source_array.domain()), p);
}
SCOPED_TRACE(tensorstore::StrCat("source_transform=", source_transform));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto source_array,
base_source_array | source_transform);
auto chunk_transform =
tensorstore::internal::MakeRandomStridedIndexTransformForInputSpace(
gen, source_array.domain());
TestWriteArraySuccess(WriteArraySourceCapabilities::kMutable,
AsyncWriteArray::MaskedArray::kMutableArray,
true,
true, chunk_transform, source_array);
}
}
TEST(WriteArrayErrorTest, SourceArrayIndexArrayMap) {
tensorstore::SharedArray<const void> base_source_array =
MakeArray<int32_t>({{7, 8, 9}, {10, 11, 12}});
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto source_array,
base_source_array | tensorstore::Dims(1).OuterIndexArraySlice(
tensorstore::MakeArray<Index>({1, 0, 1, 1, 2})));
auto chunk_transform = tensorstore::IdentityTransform(source_array.domain());
EXPECT_THAT(TestWriteArrayError(WriteArraySourceCapabilities::kMutable,
tensorstore::Box<>({2, 5}), chunk_transform,
source_array),
tensorstore::MatchesStatus(absl::StatusCode::kCancelled));
}
TEST(WriteArrayErrorTest, ChunkTransformIndexArrayMap) {
tensorstore::SharedArray<const void> base_source_array =
MakeArray<int32_t>({{7, 8, 9}, {10, 11, 12}});
tensorstore::TransformedSharedArray<const void> source_array =
base_source_array;
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto chunk_transform,
tensorstore::IdentityTransform(source_array.domain()) |
tensorstore::Dims(1).OuterIndexArraySlice(
tensorstore::MakeArray<Index>({0, 1, 2})));
EXPECT_THAT(TestWriteArrayError(WriteArraySourceCapabilities::kMutable,
tensorstore::Box<>({2, 3}), chunk_transform,
source_array),
tensorstore::MatchesStatus(absl::StatusCode::kCancelled));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/async_write_array.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/async_write_array_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
7c566eab-5a0b-48f5-8314-6b7a47a159d1 | cpp | google/cel-cpp | optional_value | common/values/optional_value.cc | common/values/optional_value_test.cc | #include <string>
#include <utility>
#include "absl/base/no_destructor.h"
#include "absl/log/absl_check.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "common/casting.h"
#include "common/memory.h"
#include "common/native_type.h"
#include "common/value.h"
#include "common/value_kind.h"
namespace cel {
namespace {
class EmptyOptionalValue final : public OptionalValueInterface {
public:
EmptyOptionalValue() = default;
bool HasValue() const override { return false; }
void Value(cel::Value& result) const override {
result = ErrorValue(
absl::FailedPreconditionError("optional.none() dereference"));
}
};
class FullOptionalValue final : public OptionalValueInterface {
public:
explicit FullOptionalValue(cel::Value value) : value_(std::move(value)) {}
bool HasValue() const override { return true; }
void Value(cel::Value& result) const override { result = value_; }
private:
friend struct NativeTypeTraits<FullOptionalValue>;
const cel::Value value_;
};
}
template <>
struct NativeTypeTraits<FullOptionalValue> {
static bool SkipDestructor(const FullOptionalValue& value) {
return NativeType::SkipDestructor(value.value_);
}
};
std::string OptionalValueInterface::DebugString() const {
if (HasValue()) {
return absl::StrCat("optional(", Value().DebugString(), ")");
}
return "optional.none()";
}
OptionalValue OptionalValue::Of(MemoryManagerRef memory_manager,
cel::Value value) {
ABSL_DCHECK(value.kind() != ValueKind::kError &&
value.kind() != ValueKind::kUnknown);
return OptionalValue(
memory_manager.MakeShared<FullOptionalValue>(std::move(value)));
}
OptionalValue OptionalValue::None() {
static const absl::NoDestructor<EmptyOptionalValue> empty;
return OptionalValue(common_internal::MakeShared(&*empty, nullptr));
}
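// Editorial note (not in the original source): None() returns a view of a
// single process-wide EmptyOptionalValue.  absl::NoDestructor keeps it alive
// for the program's lifetime, and passing a null refcount to MakeShared
// appears to wrap it without claiming ownership, so every call aliases the
// same object with no allocation.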
absl::Status OptionalValueInterface::Equal(ValueManager& value_manager,
const cel::Value& other,
cel::Value& result) const {
if (auto other_value = As<OptionalValue>(other); other_value.has_value()) {
if (HasValue() != other_value->HasValue()) {
result = BoolValue{false};
return absl::OkStatus();
}
if (!HasValue()) {
result = BoolValue{true};
return absl::OkStatus();
}
return Value().Equal(value_manager, other_value->Value(), result);
}
result = BoolValue{false};
return absl::OkStatus();
}
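// Editorial note (not in the original source): equality above is decided in
// three steps -- mismatched presence is false, two empty optionals are true,
// and two engaged optionals defer to the wrapped values' own Equal.  A
// minimal sketch (hypothetical handles lhs/rhs of type OptionalValue):
//
//   cel::Value out;
//   absl::Status status = lhs.Equal(value_manager, rhs, out);
//   // On success, out is BoolValue{true} iff both sides agree on presence
//   // and on the wrapped value.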
} | #include <sstream>
#include <utility>
#include "absl/status/status.h"
#include "absl/types/optional.h"
#include "common/casting.h"
#include "common/memory.h"
#include "common/type.h"
#include "common/value.h"
#include "common/value_testing.h"
#include "internal/testing.h"
namespace cel {
namespace {
using ::absl_testing::StatusIs;
using ::testing::An;
using ::testing::Ne;
using ::testing::TestParamInfo;
class OptionalValueTest : public common_internal::ThreadCompatibleValueTest<> {
public:
OptionalValue OptionalNone() { return OptionalValue::None(); }
OptionalValue OptionalOf(Value value) {
return OptionalValue::Of(memory_manager(), std::move(value));
}
};
TEST_P(OptionalValueTest, Kind) {
auto value = OptionalNone();
EXPECT_EQ(value.kind(), OptionalValue::kKind);
EXPECT_EQ(OpaqueValue(value).kind(), OptionalValue::kKind);
EXPECT_EQ(Value(value).kind(), OptionalValue::kKind);
}
TEST_P(OptionalValueTest, Type) {
auto value = OptionalNone();
EXPECT_EQ(value.GetRuntimeType(), OptionalType());
}
TEST_P(OptionalValueTest, DebugString) {
auto value = OptionalNone();
{
std::ostringstream out;
out << value;
EXPECT_EQ(out.str(), "optional.none()");
}
{
std::ostringstream out;
out << OpaqueValue(value);
EXPECT_EQ(out.str(), "optional.none()");
}
{
std::ostringstream out;
out << Value(value);
EXPECT_EQ(out.str(), "optional.none()");
}
{
std::ostringstream out;
out << OptionalOf(IntValue());
EXPECT_EQ(out.str(), "optional(0)");
}
}
TEST_P(OptionalValueTest, SerializeTo) {
absl::Cord value;
EXPECT_THAT(OptionalValue().SerializeTo(value_manager(), value),
StatusIs(absl::StatusCode::kFailedPrecondition));
}
TEST_P(OptionalValueTest, ConvertToJson) {
EXPECT_THAT(OptionalValue().ConvertToJson(value_manager()),
StatusIs(absl::StatusCode::kFailedPrecondition));
}
TEST_P(OptionalValueTest, InstanceOf) {
auto value = OptionalNone();
EXPECT_TRUE(InstanceOf<OptionalValue>(value));
EXPECT_TRUE(InstanceOf<OptionalValue>(OpaqueValue(value)));
EXPECT_TRUE(InstanceOf<OptionalValue>(Value(value)));
}
TEST_P(OptionalValueTest, Cast) {
auto value = OptionalNone();
EXPECT_THAT(Cast<OptionalValue>(value), An<OptionalValue>());
EXPECT_THAT(Cast<OptionalValue>(OpaqueValue(value)), An<OptionalValue>());
EXPECT_THAT(Cast<OptionalValue>(Value(value)), An<OptionalValue>());
}
TEST_P(OptionalValueTest, As) {
auto value = OptionalNone();
EXPECT_THAT(As<OptionalValue>(OpaqueValue(value)), Ne(absl::nullopt));
EXPECT_THAT(As<OptionalValue>(Value(value)), Ne(absl::nullopt));
}
TEST_P(OptionalValueTest, HasValue) {
auto value = OptionalNone();
EXPECT_FALSE(value.HasValue());
value = OptionalOf(IntValue());
EXPECT_TRUE(value.HasValue());
}
TEST_P(OptionalValueTest, Value) {
auto value = OptionalNone();
auto element = value.Value();
ASSERT_TRUE(InstanceOf<ErrorValue>(element));
EXPECT_THAT(Cast<ErrorValue>(element).NativeValue(),
StatusIs(absl::StatusCode::kFailedPrecondition));
value = OptionalOf(IntValue());
element = value.Value();
ASSERT_TRUE(InstanceOf<IntValue>(element));
EXPECT_EQ(Cast<IntValue>(element), IntValue());
}
INSTANTIATE_TEST_SUITE_P(
OptionalValueTest, OptionalValueTest,
::testing::Values(MemoryManagement::kPooling,
MemoryManagement::kReferenceCounting),
OptionalValueTest::ToString);
}
} | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/common/values/optional_value.cc | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/common/values/optional_value_test.cc | 4552db5798fb0853b131b783d8875794334fae7f |
d5e0aae3-f28b-414d-a342-b691170f8ef7 | cpp | google/tensorstore | zip_key_value_store | tensorstore/kvstore/zip/zip_key_value_store.cc | tensorstore/kvstore/zip/zip_key_value_store_test.cc | #include <stddef.h>
#include <algorithm>
#include <cassert>
#include <memory>
#include <string>
#include <string_view>
#include <utility>
#include "absl/log/absl_log.h"
#include "absl/status/status.h"
#include "absl/strings/cord.h"
#include "absl/strings/str_format.h"
#include "riegeli/bytes/cord_reader.h"
#include "tensorstore/context.h"
#include "tensorstore/internal/cache/async_cache.h"
#include "tensorstore/internal/cache/cache.h"
#include "tensorstore/internal/cache/cache_pool_resource.h"
#include "tensorstore/internal/cache_key/cache_key.h"
#include "tensorstore/internal/compression/zip_details.h"
#include "tensorstore/internal/data_copy_concurrency_resource.h"
#include "tensorstore/internal/estimate_heap_usage/estimate_heap_usage.h"
#include "tensorstore/internal/intrusive_ptr.h"
#include "tensorstore/internal/json_binding/json_binding.h"
#include "tensorstore/internal/log/verbose_flag.h"
#include "tensorstore/kvstore/byte_range.h"
#include "tensorstore/kvstore/common_metrics.h"
#include "tensorstore/kvstore/driver.h"
#include "tensorstore/kvstore/generation.h"
#include "tensorstore/kvstore/key_range.h"
#include "tensorstore/kvstore/kvstore.h"
#include "tensorstore/kvstore/operations.h"
#include "tensorstore/kvstore/read_result.h"
#include "tensorstore/kvstore/registry.h"
#include "tensorstore/kvstore/spec.h"
#include "tensorstore/kvstore/supported_features.h"
#include "tensorstore/kvstore/zip/zip_dir_cache.h"
#include "tensorstore/transaction.h"
#include "tensorstore/util/execution/execution.h"
#include "tensorstore/util/executor.h"
#include "tensorstore/util/future.h"
#include "tensorstore/util/garbage_collection/fwd.h"
#include "tensorstore/util/quote_string.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/str_cat.h"
#include "absl/base/attributes.h"
#include "tensorstore/internal/cache_key/std_vector.h"
#include "tensorstore/serialization/absl_time.h"
#include "tensorstore/serialization/serialization.h"
#include "tensorstore/serialization/std_vector.h"
#include "tensorstore/util/garbage_collection/std_vector.h"
using ::tensorstore::internal_zip_kvstore::Directory;
using ::tensorstore::internal_zip_kvstore::ZipDirectoryCache;
using ::tensorstore::kvstore::ListEntry;
using ::tensorstore::kvstore::ListReceiver;
namespace tensorstore {
namespace {
namespace jb = tensorstore::internal_json_binding;
ABSL_CONST_INIT internal_log::VerboseFlag zip_logging("zip");
auto zip_metrics = TENSORSTORE_KVSTORE_COMMON_READ_METRICS(zip);
struct ZipKvStoreSpecData {
kvstore::Spec base;
Context::Resource<internal::CachePoolResource> cache_pool;
Context::Resource<internal::DataCopyConcurrencyResource>
data_copy_concurrency;
constexpr static auto ApplyMembers = [](auto&& x, auto f) {
return f(x.base, x.cache_pool, x.data_copy_concurrency);
};
constexpr static auto default_json_binder = jb::Object(
jb::Member("base", jb::Projection<&ZipKvStoreSpecData::base>()),
jb::Member(internal::CachePoolResource::id,
jb::Projection<&ZipKvStoreSpecData::cache_pool>()),
jb::Member(
internal::DataCopyConcurrencyResource::id,
jb::Projection<&ZipKvStoreSpecData::data_copy_concurrency>())
);
};
class ZipKvStoreSpec
: public internal_kvstore::RegisteredDriverSpec<ZipKvStoreSpec,
ZipKvStoreSpecData> {
public:
static constexpr char id[] = "zip";
Future<kvstore::DriverPtr> DoOpen() const override;
absl::Status ApplyOptions(kvstore::DriverSpecOptions&& options) override {
return data_.base.driver.Set(std::move(options));
}
Result<kvstore::Spec> GetBase(std::string_view path) const override {
return data_.base;
}
};
class ZipKvStore
: public internal_kvstore::RegisteredDriver<ZipKvStore, ZipKvStoreSpec> {
public:
Future<ReadResult> Read(Key key, ReadOptions options) override;
std::string DescribeKey(std::string_view key) override {
return tensorstore::StrCat(QuoteString(key), " in ",
base_.driver->DescribeKey(base_.path));
}
void ListImpl(ListOptions options, ListReceiver receiver) override;
absl::Status GetBoundSpecData(ZipKvStoreSpecData& spec) const {
spec = spec_data_;
return absl::OkStatus();
}
kvstore::SupportedFeatures GetSupportedFeatures(
const KeyRange& key_range) const final {
return base_.driver->GetSupportedFeatures(KeyRange::Singleton(base_.path));
}
Result<KvStore> GetBase(std::string_view path,
const Transaction& transaction) const override {
return KvStore(base_.driver, base_.path, transaction);
}
const Executor& executor() const {
return spec_data_.data_copy_concurrency->executor;
}
ZipKvStoreSpecData spec_data_;
kvstore::KvStore base_;
internal::PinnedCacheEntry<ZipDirectoryCache> cache_entry_;
};
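// Editorial note (not in the original source): the adapter is read-oriented
// -- only Read and ListImpl are overridden in this file -- and it delegates
// feature queries to the base driver for the single underlying zip file, so
// the reported capabilities are those of reading `base_.path`.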
Future<kvstore::DriverPtr> ZipKvStoreSpec::DoOpen() const {
return MapFutureValue(
InlineExecutor{},
[spec = internal::IntrusivePtr<const ZipKvStoreSpec>(this)](
kvstore::KvStore& base_kvstore) mutable
-> Result<kvstore::DriverPtr> {
std::string cache_key;
internal::EncodeCacheKey(&cache_key, base_kvstore.driver,
base_kvstore.path,
spec->data_.data_copy_concurrency);
auto& cache_pool = *spec->data_.cache_pool;
auto directory_cache = internal::GetCache<ZipDirectoryCache>(
cache_pool.get(), cache_key, [&] {
return std::make_unique<ZipDirectoryCache>(
base_kvstore.driver,
spec->data_.data_copy_concurrency->executor);
});
auto driver = internal::MakeIntrusivePtr<ZipKvStore>();
driver->base_ = std::move(base_kvstore);
driver->spec_data_ = std::move(spec->data_);
driver->cache_entry_ =
GetCacheEntry(directory_cache, driver->base_.path);
return driver;
},
kvstore::Open(data_.base));
}
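// Editorial note (not in the original source): DoOpen first opens the base
// kvstore, then keys the ZipDirectoryCache on the base driver, its path, and
// the data-copy concurrency resource.  Repeated opens of the same spec
// therefore share one parsed central directory, while distinct zip files or
// executors get distinct cache entries.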
struct ReadState : public internal::AtomicReferenceCount<ReadState> {
internal::IntrusivePtr<ZipKvStore> owner_;
kvstore::Key key_;
kvstore::ReadOptions options_;
void OnDirectoryReady(Promise<kvstore::ReadResult> promise) {
TimestampedStorageGeneration stamp;
kvstore::ReadOptions options;
options.staleness_bound = options_.staleness_bound;
options.byte_range = OptionalByteRangeRequest{};
size_t seek_pos = 0;
{
ZipDirectoryCache::ReadLock<ZipDirectoryCache::ReadData> lock(
*(owner_->cache_entry_));
stamp = lock.stamp();
assert(lock.data());
const ZipDirectoryCache::ReadData& dir = *lock.data();
ABSL_LOG_IF(INFO, zip_logging) << dir;
auto it = std::lower_bound(
dir.entries.begin(), dir.entries.end(), key_,
[](const auto& e, const std::string& k) { return e.filename < k; });
if (it == dir.entries.end() || it->filename != key_) {
promise.SetResult(kvstore::ReadResult::Missing(std::move(stamp)));
return;
}
if (!options_.generation_conditions.Matches(stamp.generation)) {
promise.SetResult(kvstore::ReadResult::Unspecified(std::move(stamp)));
return;
}
if (dir.full_read) {
seek_pos = it->local_header_offset;
} else {
seek_pos = 0;
options.byte_range = OptionalByteRangeRequest::Range(
it->local_header_offset,
it->local_header_offset + it->estimated_size);
}
}
options.generation_conditions.if_equal = stamp.generation;
Link(WithExecutor(owner_->executor(),
[self = internal::IntrusivePtr<ReadState>(this),
seek_pos](Promise<kvstore::ReadResult> promise,
ReadyFuture<kvstore::ReadResult> ready) {
self->OnValueRead(std::move(promise), std::move(ready),
seek_pos);
}),
std::move(promise),
kvstore::Read(owner_->base_, {}, std::move(options)));
}
void OnValueRead(Promise<kvstore::ReadResult> promise,
ReadyFuture<kvstore::ReadResult> ready, size_t seek_pos) {
if (!promise.result_needed()) return;
if (!ready.status().ok()) {
promise.SetResult(ready.status());
return;
}
internal_zip::ZipEntry local_header{};
auto result = [&]() -> Result<kvstore::ReadResult> {
kvstore::ReadResult read_result = std::move(ready.value());
if (!read_result.has_value()) {
return read_result;
}
absl::Cord source = std::move(read_result.value);
riegeli::CordReader reader(&source);
reader.Seek(seek_pos);
TENSORSTORE_RETURN_IF_ERROR(ReadLocalEntry(reader, local_header));
TENSORSTORE_RETURN_IF_ERROR(ValidateEntryIsSupported(local_header));
TENSORSTORE_ASSIGN_OR_RETURN(
auto byte_range,
options_.byte_range.Validate(local_header.uncompressed_size));
TENSORSTORE_ASSIGN_OR_RETURN(
auto entry_reader, internal_zip::GetReader(&reader, local_header));
if (byte_range.inclusive_min > 0) {
entry_reader->Skip(byte_range.inclusive_min);
}
if (!entry_reader->Read(byte_range.size(), read_result.value)) {
if (entry_reader->status().ok()) {
return absl::OutOfRangeError("Failed to read range");
}
return entry_reader->status();
}
return read_result;
}();
ABSL_LOG_IF(INFO, zip_logging && !result.ok()) << result.status() << "\n"
<< local_header;
promise.SetResult(std::move(result));
}
};
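// Editorial note (not in the original source): reads are two-phase.
// OnDirectoryReady binary-searches the cached central directory (entries are
// sorted by filename) and issues a read of the base store conditioned on the
// same storage generation the directory was read at -- the whole file if it
// was already fetched in full, otherwise just the range covering the entry's
// local header and an estimate of its payload.  OnValueRead then parses the
// local header, checks that the entry is supported, and decompresses only the
// requested byte_range of the uncompressed stream.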
Future<kvstore::ReadResult> ZipKvStore::Read(Key key, ReadOptions options) {
auto state = internal::MakeIntrusivePtr<ReadState>();
state->owner_ = internal::IntrusivePtr<ZipKvStore>(this);
state->key_ = std::move(key);
state->options_ = options;
zip_metrics.read.Increment();
return PromiseFuturePair<kvstore::ReadResult>::LinkValue(
WithExecutor(
executor(),
[state = std::move(state)](Promise<ReadResult> promise,
ReadyFuture<const void>) {
if (!promise.result_needed()) return;
state->OnDirectoryReady(std::move(promise));
}),
cache_entry_->Read({options.staleness_bound}))
.future;
}
struct ListState : public internal::AtomicReferenceCount<ListState> {
internal::IntrusivePtr<ZipKvStore> owner_;
kvstore::ListOptions options_;
ListReceiver receiver_;
Promise<void> promise_;
Future<void> future_;
ListState(internal::IntrusivePtr<ZipKvStore>&& owner,
kvstore::ListOptions&& options, ListReceiver&& receiver)
: owner_(std::move(owner)),
options_(std::move(options)),
receiver_(std::move(receiver)) {
auto [promise, future] = PromiseFuturePair<void>::Make(MakeResult());
this->promise_ = std::move(promise);
this->future_ = std::move(future);
future_.Force();
execution::set_starting(receiver_, [promise = promise_] {
promise.SetResult(absl::CancelledError(""));
});
}
~ListState() {
auto& r = promise_.raw_result();
if (r.ok()) {
execution::set_done(receiver_);
} else {
execution::set_error(receiver_, r.status());
}
execution::set_stopping(receiver_);
}
void OnDirectoryReady() {
auto dir = ZipDirectoryCache::ReadLock<ZipDirectoryCache::ReadData>(
*(owner_->cache_entry_))
.shared_data();
assert(dir);
auto it = std::lower_bound(
dir->entries.begin(), dir->entries.end(), options_.range.inclusive_min,
[](const auto& e, const std::string& k) { return e.filename < k; });
for (; it != dir->entries.end(); ++it) {
if (KeyRange::CompareKeyAndExclusiveMax(
it->filename, options_.range.exclusive_max) >= 0) {
break;
}
if (it->filename.size() >= options_.strip_prefix_length) {
execution::set_value(
receiver_,
ListEntry{it->filename.substr(options_.strip_prefix_length),
ListEntry::checked_size(it->uncompressed_size)});
}
}
}
};
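// Editorial note (not in the original source): listing never touches the zip
// payload, only the cached directory.  It lower-bounds to the first entry at
// or after range.inclusive_min, walks forward until exclusive_max, and emits
// each filename (minus the requested prefix) with its uncompressed size.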
void ZipKvStore::ListImpl(ListOptions options, ListReceiver receiver) {
auto state = internal::MakeIntrusivePtr<ListState>(
internal::IntrusivePtr<ZipKvStore>(this), std::move(options),
std::move(receiver));
auto* state_ptr = state.get();
zip_metrics.list.Increment();
LinkValue(WithExecutor(executor(),
[state = std::move(state)](Promise<void> promise,
ReadyFuture<const void>) {
state->OnDirectoryReady();
}),
state_ptr->promise_,
cache_entry_->Read({state_ptr->options_.staleness_bound}));
}
}
}
TENSORSTORE_DECLARE_GARBAGE_COLLECTION_NOT_REQUIRED(tensorstore::ZipKvStore)
namespace {
const tensorstore::internal_kvstore::DriverRegistration<
tensorstore::ZipKvStoreSpec>
registration;
} | #include <string>
#include <string_view>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/flags/flag.h"
#include "absl/log/absl_check.h"
#include "absl/status/status.h"
#include "absl/strings/cord.h"
#include "absl/synchronization/notification.h"
#include <nlohmann/json.hpp>
#include "riegeli/bytes/fd_reader.h"
#include "riegeli/bytes/read_all.h"
#include "tensorstore/context.h"
#include "tensorstore/kvstore/generation.h"
#include "tensorstore/kvstore/kvstore.h"
#include "tensorstore/kvstore/operations.h"
#include "tensorstore/kvstore/read_result.h"
#include "tensorstore/kvstore/test_matchers.h"
#include "tensorstore/kvstore/test_util.h"
#include "tensorstore/util/execution/execution.h"
#include "tensorstore/util/execution/sender_testutil.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/status_testutil.h"
ABSL_FLAG(std::string, tensorstore_test_data, "",
"Path to internal/compression/testdata/data.zip");
namespace {
namespace kvstore = tensorstore::kvstore;
using ::tensorstore::Context;
using ::tensorstore::KvStore;
using ::tensorstore::MatchesStatus;
using ::tensorstore::internal::MatchesKvsReadResult;
using ::tensorstore::internal::MatchesKvsReadResultNotFound;
static constexpr unsigned char kReadOpZip[] = {
0x50, 0x4b, 0x03, 0x04, 0x0a, 0x00, 0x00, 0x00, 0x00, 0x00, 0xb8, 0x5b,
0x19, 0x57, 0x93, 0xc0, 0x3a, 0x94, 0x10, 0x00, 0x00, 0x00, 0x10, 0x00,
0x00, 0x00, 0x03, 0x00, 0x1c, 0x00, 0x6b, 0x65, 0x79, 0x55, 0x54, 0x09,
0x00, 0x03, 0x1b, 0xf3, 0xe8, 0x64, 0x1c, 0xf3, 0xe8, 0x64, 0x75, 0x78,
0x0b, 0x00, 0x01, 0x04, 0x6c, 0x35, 0x00, 0x00, 0x04, 0x53, 0x5f, 0x01,
0x00, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b,
0x6c, 0x6d, 0x6e, 0x6f, 0x70, 0x50, 0x4b, 0x01, 0x02, 0x1e, 0x03, 0x0a,
0x00, 0x00, 0x00, 0x00, 0x00, 0xb8, 0x5b, 0x19, 0x57, 0x93, 0xc0, 0x3a,
0x94, 0x10, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x03, 0x00, 0x18,
0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0xa4, 0x81, 0x00,
0x00, 0x00, 0x00, 0x6b, 0x65, 0x79, 0x55, 0x54, 0x05, 0x00, 0x03, 0x1b,
0xf3, 0xe8, 0x64, 0x75, 0x78, 0x0b, 0x00, 0x01, 0x04, 0x6c, 0x35, 0x00,
0x00, 0x04, 0x53, 0x5f, 0x01, 0x00, 0x50, 0x4b, 0x05, 0x06, 0x00, 0x00,
0x00, 0x00, 0x01, 0x00, 0x01, 0x00, 0x49, 0x00, 0x00, 0x00, 0x4d, 0x00,
0x00, 0x00, 0x00, 0x00,
};
absl::Cord GetReadOpZip() {
return absl::MakeCordFromExternal(
std::string_view(reinterpret_cast<const char*>(kReadOpZip),
sizeof(kReadOpZip)),
[](auto) {});
}
absl::Cord GetTestZipFileData() {
ABSL_CHECK(!absl::GetFlag(FLAGS_tensorstore_test_data).empty());
absl::Cord filedata;
TENSORSTORE_CHECK_OK(riegeli::ReadAll(
riegeli::FdReader(absl::GetFlag(FLAGS_tensorstore_test_data)), filedata));
ABSL_CHECK_EQ(filedata.size(), 319482);
return filedata;
}
class ZipKeyValueStoreTest : public ::testing::Test {
public:
ZipKeyValueStoreTest() : context_(Context::Default()) {}
void PrepareMemoryKvstore(absl::Cord value) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
tensorstore::KvStore memory,
tensorstore::kvstore::Open({{"driver", "memory"}}, context_).result());
TENSORSTORE_CHECK_OK(
tensorstore::kvstore::Write(memory, "data.zip", value).result());
}
tensorstore::Context context_;
};
TEST_F(ZipKeyValueStoreTest, Simple) {
PrepareMemoryKvstore(GetTestZipFileData());
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto store,
kvstore::Open({{"driver", "zip"},
{"base", {{"driver", "memory"}, {"path", "data.zip"}}}},
context_)
.result());
for (int i = 0; i < 2; ++i) {
absl::Notification notification;
std::vector<std::string> log;
tensorstore::execution::submit(
kvstore::List(store, {}),
tensorstore::CompletionNotifyingReceiver{
&notification, tensorstore::LoggingReceiver{&log}});
notification.WaitForNotification();
EXPECT_THAT(log, ::testing::UnorderedElementsAre(
"set_starting", "set_value: data/a.png",
"set_value: data/bb.png", "set_value: data/c.png",
"set_done", "set_stopping"))
<< i;
}
{
kvstore::ListOptions options;
options.range = options.range.Prefix("data/b");
options.strip_prefix_length = 5;
absl::Notification notification;
std::vector<std::string> log;
tensorstore::execution::submit(
kvstore::List(store, options),
tensorstore::CompletionNotifyingReceiver{
&notification, tensorstore::LoggingReceiver{&log}});
notification.WaitForNotification();
EXPECT_THAT(log, ::testing::UnorderedElementsAre(
"set_starting", "set_value: bb.png", "set_done",
"set_stopping"));
}
{
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto read_result, kvstore::Read(store, "data/bb.png").result());
EXPECT_THAT(read_result,
MatchesKvsReadResult(
::testing::_,
::testing::Not(tensorstore::StorageGeneration::Unknown())));
EXPECT_THAT(read_result.value.size(), 106351);
}
{
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto read_result, kvstore::Read(store, "data/zz.png").result());
EXPECT_THAT(read_result, MatchesKvsReadResultNotFound());
}
}
TEST_F(ZipKeyValueStoreTest, ReadOps) {
PrepareMemoryKvstore(GetReadOpZip());
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto store,
kvstore::Open({{"driver", "zip"},
{"base", {{"driver", "memory"}, {"path", "data.zip"}}}},
context_)
.result());
::tensorstore::internal::TestKeyValueStoreReadOps(
store, "key", absl::Cord("abcdefghijklmnop"), "missing_key");
}
TEST_F(ZipKeyValueStoreTest, InvalidSpec) {
auto context = tensorstore::Context::Default();
EXPECT_THAT(
kvstore::Open({{"driver", "zip"}, {"extra", "key"}}, context).result(),
MatchesStatus(absl::StatusCode::kInvalidArgument));
}
TEST_F(ZipKeyValueStoreTest, SpecRoundtrip) {
tensorstore::internal::KeyValueStoreSpecRoundtripOptions options;
options.check_data_persists = false;
options.check_write_read = false;
options.check_data_after_serialization = false;
options.check_store_serialization = true;
options.full_spec = {{"driver", "zip"},
{"base", {{"driver", "memory"}, {"path", "abc.zip"}}}};
options.full_base_spec = {{"driver", "memory"}, {"path", "abc.zip"}};
tensorstore::internal::TestKeyValueStoreSpecRoundtrip(options);
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/kvstore/zip/zip_key_value_store.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/kvstore/zip/zip_key_value_store_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
96b1761e-033d-4e9d-a447-5c2336991e90 | cpp | tensorflow/tensorflow | saved_tensor_slice_util | tensorflow/core/util/saved_tensor_slice_util.cc | tensorflow/core/util/saved_tensor_slice_util_test.cc | #include "tensorflow/core/util/saved_tensor_slice_util.h"
#include <vector>
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/strings/ordered_code.h"
#include "tensorflow/core/lib/strings/str_util.h"
namespace tensorflow {
namespace checkpoint {
const char kSavedTensorSlicesKey[] = "";
string EncodeTensorNameSlice(const string& name, const TensorSlice& slice) {
string buffer;
tensorflow::strings::OrderedCode::WriteNumIncreasing(&buffer, 0);
tensorflow::strings::OrderedCode::WriteString(&buffer, name);
tensorflow::strings::OrderedCode::WriteNumIncreasing(&buffer, slice.dims());
for (int d = 0; d < slice.dims(); ++d) {
tensorflow::strings::OrderedCode::WriteSignedNumIncreasing(&buffer,
slice.start(d));
tensorflow::strings::OrderedCode::WriteSignedNumIncreasing(&buffer,
slice.length(d));
}
return buffer;
}
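// Editorial note (not in the original source): the encoded key is OrderedCode
// end to end, so lexicographic comparison of encoded bytes matches logical
// ordering.  The layout is:
//
//   NumIncreasing(0) ++ String(name) ++ NumIncreasing(rank)
//     ++ per dimension: SignedNumIncreasing(start), SignedNumIncreasing(length)
//
// A full-extent dimension carries a negative length, which the decoder below
// uses to leave that dimension as "full".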
Status DecodeTensorNameSlice(const string& code, string* name,
tensorflow::TensorSlice* slice) {
StringPiece src(code);
uint64 x;
if (!tensorflow::strings::OrderedCode::ReadNumIncreasing(&src, &x)) {
return errors::Internal("Failed to parse the leading number: src = ", src);
}
if (x != 0) {
return errors::Internal(
"The leading number should always be 0 for any valid key: src = ", src);
}
if (!tensorflow::strings::OrderedCode::ReadString(&src, name)) {
return errors::Internal("Failed to parse the tensor name: src = ", src);
}
if (!tensorflow::strings::OrderedCode::ReadNumIncreasing(&src, &x)) {
return errors::Internal("Failed to parse the tensor rank: src = ", src);
}
if (x == 0) {
return errors::Internal("Expecting positive rank of the tensor, got ", x,
", src = ", src);
}
if (x >= kint32max) {
return errors::Internal("Too many elements ", x);
}
slice->SetFullSlice(x);
for (int d = 0; d < static_cast<int32>(x); ++d) {
int64_t start, length;
if (!tensorflow::strings::OrderedCode::ReadSignedNumIncreasing(&src,
&start)) {
return errors::Internal("Failed to parse start: src = ", src);
}
if (!tensorflow::strings::OrderedCode::ReadSignedNumIncreasing(&src,
&length)) {
return errors::Internal("Failed to parse length: src = ", src);
}
if (length >= 0) {
slice->set_start(d, start);
slice->set_length(d, length);
}
}
return absl::OkStatus();
}
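// Editorial note (not in the original source): a round-trip sketch, mirroring
// the unit test for this pair of functions:
//
//   TensorSlice s = TensorSlice::ParseOrDie("-:-:1,3:4,5");
//   string key = EncodeTensorNameSlice("foo", s);
//   string name;
//   TensorSlice out;
//   TF_CHECK_OK(DecodeTensorNameSlice(key, &name, &out));
//   // name == "foo" and out.DebugString() == "-:-:1,3:4,5"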
Status ParseShapeAndSlice(const string& shape_and_slice, TensorShape* shape,
TensorSlice* slice, TensorShape* shape_slice) {
CHECK(!shape_and_slice.empty());
std::vector<string> splits = str_util::Split(shape_and_slice, ' ');
if (splits.size() < 2) {
return errors::InvalidArgument(
"Need least two elements in shape_and_slice specification: ",
shape_and_slice);
}
slice->Clear();
auto status = slice->Parse(splits.back(), slice);
if (!status.ok()) return status;
splits.pop_back();
shape->Clear();
for (const auto& s : splits) {
int64_t dim;
if (!strings::safe_strto64(s, &dim)) {
return errors::InvalidArgument(
"Non numerical dimension in shape_and_slice: ", shape_and_slice);
}
shape->AddDim(dim);
}
return slice->SliceTensorShape(*shape, shape_slice);
}
}
} | #include "tensorflow/core/util/saved_tensor_slice_util.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace checkpoint {
namespace {
TEST(TensorShapeUtilTest, TensorNameSliceToOrderedCode) {
{
TensorSlice s = TensorSlice::ParseOrDie("-:-:1,3:4,5");
string buffer = EncodeTensorNameSlice("foo", s);
string name;
s.Clear();
TF_CHECK_OK(DecodeTensorNameSlice(buffer, &name, &s));
EXPECT_EQ("foo", name);
EXPECT_EQ("-:-:1,3:4,5", s.DebugString());
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/util/saved_tensor_slice_util.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/util/saved_tensor_slice_util_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
ef2e10a4-b4e8-4a54-96cd-ad5096524559 | cpp | tensorflow/tensorflow | ptrvec | third_party/xla/xla/hlo/ir/ptrvec.h | third_party/xla/xla/hlo/ir/ptrvec_test.cc | #ifndef XLA_HLO_IR_PTRVEC_H_
#define XLA_HLO_IR_PTRVEC_H_
#include <cstddef>
#include <cstdint>
#include <cstdlib>
#include <limits>
#include <type_traits>
#include <vector>
#include "absl/log/check.h"
#include "tsl/platform/logging.h"
namespace xla {
template <typename T>
class PtrVec {
public:
static_assert(std::is_pointer<T>::value);
PtrVec();
~PtrVec();
PtrVec(const PtrVec& x);
PtrVec& operator=(const PtrVec& x);
PtrVec(PtrVec&& x);
PtrVec& operator=(PtrVec&& x);
using difference_type = std::ptrdiff_t;
using value_type = T;
using pointer = T*;
using reference = T&;
using const_reference = T const&;
using const_iterator = T const*;
const_iterator begin() const;
const_iterator end() const;
size_t size() const;
bool empty() const;
T* data();
T const* data() const;
T& operator[](size_t i);
T operator[](size_t i) const;
T at(size_t i) const;
T front() const;
T back() const;
void clear();
void pop_back();
void push_back(T x);
void erase(const_iterator iter);
operator std::vector<T>() const;
private:
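  // `rep_` is a tagged word. The two low bits distinguish three states:
  // kEmptyTag (0b01) means empty, kBigTag (0b11) means `rep_` holds a pointer
  // to a heap-allocated Big block, and a clear low bit means `rep_` is itself
  // the single inlined element (only possible when the pointee's alignment
  // guarantees the pointer's low bit is zero; see can_inline()).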
static constexpr uintptr_t kEmptyTag = 0x1;
static constexpr uintptr_t kBigTag = 0x3;
static constexpr uintptr_t kTagMask = 0x3;
struct Big {
size_t size;
size_t capacity;
T data[];
};
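  // A pointer can be stored inline only if its low bit is guaranteed to be
  // zero, since a set low bit is reserved for the kEmptyTag/kBigTag encodings.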
inline static bool can_inline(T ptr) {
if constexpr (alignof(decltype(*ptr)) >= 2) {
DCHECK_EQ(reinterpret_cast<uintptr_t>(ptr) & 0x1, 0);
return true;
}
return ((reinterpret_cast<uintptr_t>(ptr) & 0x1) == 0);
}
inline bool is_big() const { return (rep_ & kTagMask) == kBigTag; }
inline Big* big() const {
DCHECK(is_big());
return reinterpret_cast<Big*>(rep_ & ~kTagMask);
}
inline static size_t big_size(size_t n) {
static constexpr size_t kMaxFit =
(std::numeric_limits<size_t>::max() - sizeof(Big)) / sizeof(T);
DCHECK_LE(n, kMaxFit);
const size_t result = sizeof(Big) + n * sizeof(T);
DCHECK_GE(result, sizeof(Big));
return result;
}
inline Big* MakeBig(size_t capacity) {
Big* big = static_cast<Big*>(malloc(big_size(capacity)));
big->size = 0;
big->capacity = capacity;
rep_ = reinterpret_cast<uintptr_t>(big) | kBigTag;
return big;
}
inline static void FreeBig(Big* big) { free(big); }
uintptr_t rep_;
};
template <class T>
inline PtrVec<T>::PtrVec() : rep_(kEmptyTag) {}
template <class T>
inline PtrVec<T>::~PtrVec() {
if (is_big()) FreeBig(big());
}
template <class T>
inline PtrVec<T>::PtrVec(const PtrVec& x) : rep_(kEmptyTag) {
*this = x;
}
template <class T>
inline PtrVec<T>& PtrVec<T>::operator=(const PtrVec& x) {
if (this == &x) {
return *this;
}
const size_t n = x.size();
Big* b;
if (!is_big()) {
if (n < 2) {
if (n == 0) {
rep_ = kEmptyTag;
return *this;
}
T single = x.front();
if (can_inline(single)) {
rep_ = reinterpret_cast<uintptr_t>(single);
DCHECK(!empty());
DCHECK(!is_big());
return *this;
}
}
b = MakeBig(x.size());
} else {
if (n == 0) {
clear();
return *this;
}
b = big();
if (b->capacity < n) {
FreeBig(b);
b = MakeBig(n);
}
}
memcpy(b->data, x.data(), n * sizeof(T));
b->size = n;
return *this;
}
template <class T>
inline PtrVec<T>::PtrVec(PtrVec&& x) : rep_(x.rep_) {
x.rep_ = kEmptyTag;
}
template <class T>
inline PtrVec<T>& PtrVec<T>::operator=(PtrVec&& x) {
if (this != &x) {
if (is_big()) {
FreeBig(big());
}
rep_ = x.rep_;
x.rep_ = kEmptyTag;
}
return *this;
}
template <class T>
inline size_t PtrVec<T>::size() const {
return is_big() ? big()->size : (rep_ != kEmptyTag ? 1 : 0);
}
template <class T>
inline bool PtrVec<T>::empty() const {
return rep_ == kEmptyTag;
}
template <class T>
inline T* PtrVec<T>::data() {
return is_big() ? big()->data : reinterpret_cast<T*>(&rep_);
}
template <class T>
inline T const* PtrVec<T>::data() const {
return is_big() ? big()->data : reinterpret_cast<T const*>(&rep_);
}
template <class T>
inline T& PtrVec<T>::operator[](size_t i) {
DCHECK_LT(i, size());
return *(data() + i);
}
template <class T>
inline T PtrVec<T>::operator[](size_t i) const {
DCHECK_LT(i, size());
return *(data() + i);
}
template <class T>
inline T PtrVec<T>::at(size_t i) const {
DCHECK_LT(i, size());
return *(data() + i);
}
template <class T>
inline T PtrVec<T>::front() const {
return (*this)[0];
}
template <class T>
inline T PtrVec<T>::back() const {
return (*this)[size() - 1];
}
template <class T>
inline typename PtrVec<T>::const_iterator PtrVec<T>::begin() const {
return data();
}
template <class T>
inline typename PtrVec<T>::const_iterator PtrVec<T>::end() const {
return data() + size();
}
template <class T>
inline void PtrVec<T>::clear() {
if (is_big()) {
FreeBig(big());
}
rep_ = kEmptyTag;
}
template <class T>
inline void PtrVec<T>::pop_back() {
DCHECK(!empty());
if (is_big()) {
big()->size--;
if (big()->size == 0) {
clear();
}
} else {
rep_ = kEmptyTag;
}
}
template <class T>
inline void PtrVec<T>::push_back(T x) {
if (!is_big()) {
if (rep_ == kEmptyTag) {
if (can_inline(x)) {
rep_ = reinterpret_cast<uintptr_t>(x);
DCHECK(!empty());
DCHECK(!is_big());
} else {
Big* b = MakeBig(1);
b->size = 1;
b->data[0] = x;
}
} else {
T singleton = front();
Big* b = MakeBig(2);
b->size = 2;
b->data[0] = singleton;
b->data[1] = x;
}
} else {
Big* b = big();
const size_t n = b->size;
DCHECK_LE(n, b->capacity);
if (n == b->capacity) {
Big* old = b;
b = MakeBig(std::max<size_t>(2, 2 * old->capacity));
memcpy(b->data, old->data, n * sizeof(T));
FreeBig(old);
}
b->data[n] = x;
b->size = n + 1;
}
}
template <class T>
inline void PtrVec<T>::erase(const_iterator iter) {
DCHECK_GE(iter, begin());
DCHECK_LT(iter, end());
if (!is_big()) {
rep_ = kEmptyTag;
} else {
Big* b = big();
const size_t index = iter - b->data;
memmove(b->data + index, b->data + index + 1,
(b->size - index - 1) * sizeof(T));
b->size--;
if (b->size == 0) {
clear();
}
}
}
template <class T>
inline PtrVec<T>::operator std::vector<T>() const {
if (empty()) return {};
return std::vector<T>(begin(), end());
}
template <typename T>
bool operator==(const PtrVec<T>& a, const PtrVec<T>& b) {
auto a_data = a.data();
auto b_data = b.data();
return std::equal(a_data, a_data + a.size(), b_data, b_data + b.size());
}
template <typename T>
bool operator!=(const PtrVec<T>& a, const PtrVec<T>& b) {
return !(a == b);
}
}
#endif | #include "xla/hlo/ir/ptrvec.h"
#include <cstdint>
#include <initializer_list>
#include <memory>
#include <utility>
#include <vector>
#include "absl/strings/str_format.h"
#include "absl/types/span.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/test.h"
#include "tsl/platform/test_benchmark.h"
namespace xla {
namespace {
class PtrVecTest : public testing::Test {
public:
int* NewInt(int v) {
ints_.push_back(std::make_unique<int>(v));
return ints_.back().get();
}
void Fill(PtrVec<int*>& dst, absl::Span<const int> src) {
for (int v : src) {
dst.push_back(NewInt(v));
}
}
std::vector<int> Pointees(const PtrVec<int*>& src) {
std::vector<int> result;
result.reserve(src.size());
for (int* ptr : src) {
result.push_back(*ptr);
}
return result;
}
private:
std::vector<std::unique_ptr<int>> ints_;
};
std::vector<std::vector<int>> TestCases() {
return std::vector<std::vector<int>>{
{},
{100},
{200, 300},
{1, 2, 3, 4, 5, 6, 7, 8, 9, 10},
};
}
TEST_F(PtrVecTest, Accessors) {
for (const auto& c : TestCases()) {
SCOPED_TRACE(c.size());
PtrVec<int*> v;
Fill(v, c);
ASSERT_EQ(v.empty(), c.empty());
ASSERT_EQ(v.size(), c.size());
if (!c.empty()) {
ASSERT_EQ(*v.front(), c.front());
ASSERT_EQ(*v.back(), c.back());
}
}
}
TEST_F(PtrVecTest, Iteration) {
for (const auto& c : TestCases()) {
SCOPED_TRACE(c.size());
PtrVec<int*> v;
Fill(v, c);
int i = 0;
for (auto ptr : v) {
ASSERT_EQ(*ptr, c[i]);
i++;
}
}
}
TEST_F(PtrVecTest, Indexing) {
for (const auto& c : TestCases()) {
SCOPED_TRACE(c.size());
PtrVec<int*> v;
Fill(v, c);
for (int i = 0; i < c.size(); i++) {
ASSERT_EQ(*v[i], c[i]);
ASSERT_EQ(*v.at(i), c[i]);
}
}
}
TEST_F(PtrVecTest, Data) {
for (const auto& c : TestCases()) {
SCOPED_TRACE(c.size());
PtrVec<int*> v;
Fill(v, c);
int** data = v.data();
for (int i = 0; i < c.size(); i++) {
ASSERT_EQ(*data[i], c[i]);
}
}
}
TEST_F(PtrVecTest, ConversionToVector) {
for (const auto& c : TestCases()) {
SCOPED_TRACE(c.size());
PtrVec<int*> v;
Fill(v, c);
std::vector<int*> vec = v;
ASSERT_EQ(vec.size(), c.size());
for (int i = 0; i < c.size(); i++) {
ASSERT_EQ(*vec[i], c[i]);
}
}
}
TEST_F(PtrVecTest, Clear) {
for (const auto& c : TestCases()) {
SCOPED_TRACE(c.size());
PtrVec<int*> v;
Fill(v, c);
v.clear();
EXPECT_EQ(Pointees(v), std::vector<int>{});
}
}
TEST_F(PtrVecTest, PopBack) {
for (const auto& c : TestCases()) {
SCOPED_TRACE(c.size());
PtrVec<int*> v;
Fill(v, c);
auto model = c;
while (!model.empty()) {
model.pop_back();
v.pop_back();
EXPECT_EQ(Pointees(v), model);
}
}
}
TEST_F(PtrVecTest, Erase) {
for (const auto& c : TestCases()) {
if (c.empty()) {
continue;
}
SCOPED_TRACE(c.size());
PtrVec<int*> v;
Fill(v, c);
auto model = c;
int offset = c.size() / 2;
model.erase(model.begin() + offset);
v.erase(v.begin() + offset);
EXPECT_EQ(Pointees(v), model);
}
}
TEST_F(PtrVecTest, Assign) {
const auto cases = TestCases();
for (const auto& x : cases) {
for (const auto& y : cases) {
SCOPED_TRACE(absl::StrFormat("from %d to %d", x.size(), y.size()));
{
PtrVec<int*> b;
Fill(b, y);
PtrVec<int*> a = b;
ASSERT_EQ(Pointees(a), y);
}
{
PtrVec<int*> b;
Fill(b, y);
PtrVec<int*> a = std::move(b);
ASSERT_EQ(Pointees(a), y);
ASSERT_EQ(Pointees(b), std::vector<int>{});
}
{
PtrVec<int*> a;
Fill(a, x);
ASSERT_EQ(Pointees(a), x);
PtrVec<int*> b;
Fill(b, y);
a = b;
ASSERT_EQ(Pointees(a), y);
}
{
PtrVec<int*> a;
Fill(a, x);
PtrVec<int*> b;
Fill(b, y);
a = std::move(b);
ASSERT_EQ(Pointees(a), y);
ASSERT_EQ(Pointees(b), std::vector<int>{});
}
}
}
}
TEST_F(PtrVecTest, ReducedAlignment) {
const char* str = "hello world";
for (int i = 0; i < 11; i++) {
PtrVec<const char*> vec;
vec.push_back(&str[i]);
EXPECT_EQ(vec.size(), 1);
EXPECT_EQ(vec[0], &str[i]);
PtrVec<const char*> copy;
copy = vec;
EXPECT_EQ(copy.size(), 1);
EXPECT_EQ(copy[0], &str[i]);
}
}
struct Elem {
int64_t number;
};
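// Microbenchmarks comparing pointer iteration over PtrVec with the equivalent
// std::vector across a range of sizes.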
void BM_PtrVecIter(::testing::benchmark::State& state) {
const int n = state.range(0);
std::vector<Elem> storage(n);
PtrVec<Elem*> vec;
for (int i = 0; i < n; i++) {
storage[i].number = i;
vec.push_back(&storage[i]);
}
uintptr_t sum = 0;
for (auto s : state) {
for (int i = 0; i < vec.size(); i++) {
sum += reinterpret_cast<uintptr_t>(vec[i]);
}
}
VLOG(1) << sum;
}
BENCHMARK(BM_PtrVecIter)->Arg(0)->Arg(1)->Arg(2)->Arg(4)->Arg(8)->Arg(1024);
void BM_StdVecIter(::testing::benchmark::State& state) {
const int n = state.range(0);
std::vector<Elem> storage(n);
std::vector<Elem*> vec;
for (int i = 0; i < n; i++) {
storage[i].number = i;
vec.push_back(&storage[i]);
}
uintptr_t sum = 0;
for (auto s : state) {
for (int i = 0; i < vec.size(); i++) {
sum += reinterpret_cast<uintptr_t>(vec[i]);
}
}
VLOG(1) << sum;
}
BENCHMARK(BM_StdVecIter)->Arg(0)->Arg(1)->Arg(2)->Arg(4)->Arg(8)->Arg(1024);
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/hlo/ir/ptrvec.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/hlo/ir/ptrvec_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
ef944928-2288-4478-a959-a120c27ef729 | cpp | tensorflow/tensorflow | graph_def_splitter | tensorflow/tools/proto_splitter/cc/graph_def_splitter.cc | tensorflow/tools/proto_splitter/cc/graph_def_splitter_test.cc | #include "tensorflow/tools/proto_splitter/cc/graph_def_splitter.h"
#include <cstddef>
#include <cstdint>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/memory/memory.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "tensorflow/core/framework/function.pb.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/tools/proto_splitter/cc/composable_splitter.h"
#include "tensorflow/tools/proto_splitter/cc/composable_splitter_base.h"
#include "tensorflow/tools/proto_splitter/cc/large_node_splitter.h"
#include "tensorflow/tools/proto_splitter/cc/max_size.h"
#include "tensorflow/tools/proto_splitter/cc/repeated_field_splitter.h"
#include "tensorflow/tools/proto_splitter/cc/size_splitter.h"
#include "tensorflow/tools/proto_splitter/cc/util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/protobuf.h"
#include "tsl/platform/statusor.h"
namespace tensorflow::tools::proto_splitter {
namespace {
using namespace std::string_literals;
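// Splits a large "Const" NodeDef by moving the (potentially multi-GB)
// tensor_content bytes into their own chunk, leaving only dtype, shape and
// version_number behind in the NodeDef. Tensors still stored in the typed
// repeated fields are first re-encoded into tensor_content form.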
class ConstantSplitter : public SizeSplitter {
public:
using SizeSplitter::SizeSplitter;
absl::StatusOr<int> BuildChunksReturnSize() override {
NodeDef* node = tsl::protobuf::DynamicCastToGenerated<NodeDef>(message());
std::vector<FieldType> tensor_field = {"attr"s, "value"s, "tensor"s};
std::vector<FieldType> content_field = {"attr"s, "value"s, "tensor"s,
"tensor_content"s};
TF_ASSIGN_OR_RETURN(auto ret, GetMutableField(node, tensor_field));
auto tensor_msg =
ret.parent->GetReflection()->MutableMessage(ret.parent, ret.field);
TensorProto* tensor_proto =
tsl::protobuf::DynamicCastToGenerated<TensorProto>(tensor_msg);
int size_diff;
if (tensor_proto->tensor_content().empty()) {
Tensor t;
if (!t.FromProto(*tensor_proto)) {
return absl::InvalidArgumentError(
"Invalid Const NodeDef.attr[\"value\"].tensor value.");
}
TensorProto container;
t.AsProtoTensorContent(&container);
size_diff = container.tensor_content().size();
auto x = std::make_unique<std::string>(
std::move(*container.mutable_tensor_content()));
auto y = std::make_unique<MessageBytes>(std::move(*x));
TF_RETURN_IF_ERROR(AddChunk(std::move(y), &content_field));
} else {
size_diff = tensor_proto->tensor_content().size();
auto x = std::make_unique<std::string>(
std::move(*tensor_proto->mutable_tensor_content()));
auto y = std::make_unique<MessageBytes>(std::move(*x));
TF_RETURN_IF_ERROR(AddChunk(std::move(y), &content_field));
}
auto dtype = tensor_proto->dtype();
auto tensor_shape = tensor_proto->tensor_shape();
auto version_number = tensor_proto->version_number();
tensor_proto->Clear();
tensor_proto->set_dtype(dtype);
*tensor_proto->mutable_tensor_shape() = tensor_shape;
tensor_proto->set_version_number(version_number);
return size_diff;
}
};
class ConstantSplitterFactory : public SizeSplitterFactory {
public:
using SizeSplitterFactory::SizeSplitterFactory;
absl::StatusOr<std::unique_ptr<SizeSplitter>> CreateSplitter(
tsl::protobuf::Message* message, ComposableSplitterBase* parent_splitter,
std::vector<FieldType>* fields_in_parent, int size) override {
if (size < GetMaxSize()) return nullptr;
NodeDef* node = tsl::protobuf::DynamicCastToGenerated<NodeDef>(message);
if (node->op() != "Const")
return absl::UnimplementedError(absl::StrCat(
"Currently only able to split 'Const' nodes that are larger than the "
"2GB maximum proto size. Got node of type '",
node->op(), "' with size: ", size, "."));
ConstantSplitter* splitter =
new ConstantSplitter(message, parent_splitter, fields_in_parent);
return absl::WrapUnique(splitter);
}
};
class FunctionDefSplitter : public SizeSplitter {
public:
using SizeSplitter::SizeSplitter;
absl::StatusOr<int> BuildChunksReturnSize() override {
size_t current_size = GetInitialSize();
uint64_t max_size = GetMaxSize();
std::vector<FieldType> fields = {};
if (LARGE_SIZE_CHECK(current_size, max_size) && current_size < max_size) {
auto splitter = LargeNodeSplitter<FunctionDef>(message(), this, &fields);
splitter.SetInitialSize(current_size);
return splitter.BuildChunksReturnSize();
} else if (current_size > max_size) {
ConstantSplitterFactory constant_splitter_factory;
LargeNodeSplitterFactory<NodeDef> large_node_splitter_factory;
std::vector<SizeSplitterFactory*> factories = {
&constant_splitter_factory, &large_node_splitter_factory};
auto ret = RepeatedFieldSplitter<FunctionDef, NodeDef>::Create(
message(), this, &fields, "node_def"s, &factories);
if (!ret.ok()) return ret.status();
auto splitter = ret.value();
return splitter.BuildChunksReturnSize();
}
return 0;
}
};
class FunctionDefSplitterFactory : public SizeSplitterFactory {
public:
using SizeSplitterFactory::SizeSplitterFactory;
absl::StatusOr<std::unique_ptr<SizeSplitter>> CreateSplitter(
tsl::protobuf::Message* message, ComposableSplitterBase* parent_splitter,
std::vector<FieldType>* fields_in_parent, int size) override {
FunctionDefSplitter* splitter =
new FunctionDefSplitter(message, parent_splitter, fields_in_parent);
return absl::WrapUnique(splitter);
}
};
}
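// Greedy chunking strategy: split whichever of the node list or the function
// library is currently larger, and split the other only if the graph is still
// over the size budget. If the GraphDef remains too large after both passes,
// the entire function library is moved into its own chunk as a last resort.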
absl::Status GraphDefSplitter::BuildChunks() {
TF_RETURN_IF_ERROR(SetMessageAsBaseChunk());
GraphDef* g = tsl::protobuf::DynamicCastToGenerated<GraphDef>(message());
uint64_t max_size = GetMaxSize();
size_t graph_size = GetInitialSize();
if (graph_size < max_size) return absl::OkStatus();
std::vector<FieldType> field_in_parent = {};
ConstantSplitterFactory constant_splitter_factory;
LargeNodeSplitterFactory<NodeDef> large_node_splitter_factory;
std::vector<SizeSplitterFactory*> factories = {&constant_splitter_factory,
&large_node_splitter_factory};
auto node_splitter_ret = RepeatedFieldSplitter<GraphDef, NodeDef>::Create(
g, this, &field_in_parent, "node"s, &factories);
if (!node_splitter_ret.ok()) return node_splitter_ret.status();
auto node_splitter = node_splitter_ret.value();
FunctionDefSplitterFactory function_splitter_factory;
std::vector<FieldType> library_field = {"library"s};
std::vector<SizeSplitterFactory*> fn_factories = {&function_splitter_factory};
auto library_splitter_ret =
RepeatedFieldSplitter<FunctionDefLibrary, FunctionDef>::Create(
g->mutable_library(), this, &library_field, "function"s,
&fn_factories);
if (!library_splitter_ret.ok()) return library_splitter_ret.status();
auto library_splitter = library_splitter_ret.value();
size_t library_size = g->library().ByteSizeLong();
library_splitter.SetInitialSize(library_size);
size_t approx_node_size = graph_size - library_size;
node_splitter.SetInitialSize(approx_node_size);
if (library_size > approx_node_size) {
TF_ASSIGN_OR_RETURN(int size_diff,
library_splitter.BuildChunksReturnSize());
library_size -= size_diff;
if (approx_node_size + library_size > max_size) {
TF_ASSIGN_OR_RETURN(int size_diff, node_splitter.BuildChunksReturnSize());
approx_node_size -= size_diff;
}
} else {
TF_ASSIGN_OR_RETURN(int size_diff, node_splitter.BuildChunksReturnSize());
approx_node_size -= size_diff;
if (approx_node_size + library_size > max_size) {
TF_ASSIGN_OR_RETURN(int size_diff,
library_splitter.BuildChunksReturnSize());
library_size -= size_diff;
}
}
if (g->ByteSizeLong() > max_size) {
LargeNodeSplitter<FunctionDefLibrary> entire_library_splitter(
g->mutable_library(), this, &library_field);
int index = 1;
entire_library_splitter.SetChunkIndex(&index);
TF_RETURN_IF_ERROR(entire_library_splitter.BuildChunks());
}
return absl::OkStatus();
}
} | #include "tensorflow/tools/proto_splitter/cc/graph_def_splitter.h"
#include <cstdint>
#include <memory>
#include <string>
#include <variant>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/strings/cord.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/path.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/tools/proto_splitter/cc/max_size.h"
#include "tensorflow/tools/proto_splitter/cc/test_util.h"
#include "tensorflow/tools/proto_splitter/cc/util.h"
#include "tensorflow/tools/proto_splitter/testdata/test_message.pb.h"
#include "tsl/platform/protobuf.h"
#include "tsl/platform/statusor.h"
namespace tensorflow {
namespace tools::proto_splitter {
namespace {
using ::tensorflow::proto_splitter::ChunkedMessage;
TEST(GraphDefSplitterTest, TestLargeConstant) {
GraphDef proto;
const std::string graph_def_path =
io::JoinPath(testing::TensorFlowSrcRoot(),
"tools/proto_splitter/testdata", "split-large-constant.pb");
int64_t max_size = 500;
DebugSetMaxSize(max_size);
TF_EXPECT_OK(tensorflow::ReadBinaryProto(tensorflow::Env::Default(),
graph_def_path, &proto));
EXPECT_GE(proto.ByteSizeLong(), GetMaxSize());
std::string large_constant_1, large_constant_2;
const std::variant<std::string, absl::Cord>& tensor_constant_1 =
proto.node(2).attr().at("value").tensor().tensor_content();
const std::variant<std::string, absl::Cord>& tensor_constant_2 =
proto.node(4).attr().at("value").tensor().tensor_content();
if (std::holds_alternative<std::string>(tensor_constant_1)) {
large_constant_1 = std::get<std::string>(tensor_constant_1);
} else {
absl::CopyCordToString(std::get<absl::Cord>(tensor_constant_1),
&large_constant_1);
}
if (std::holds_alternative<std::string>(tensor_constant_2)) {
large_constant_2 = std::get<std::string>(tensor_constant_2);
} else {
absl::CopyCordToString(std::get<absl::Cord>(tensor_constant_2),
&large_constant_2);
}
GraphDefSplitter splitter(&proto);
TF_ASSERT_OK_AND_ASSIGN(auto x, splitter.Split());
ChunkedMessage* chunked_message = x.chunked_message;
ASSERT_NE(chunked_message, nullptr);
EXPECT_THAT(*chunked_message,
EqualsProto(R"pb(chunk_index: 0
chunked_fields {
field_tag { field: 1 }
field_tag { index: 2 }
field_tag { field: 5 }
field_tag { map_key { s: "value" } }
field_tag { field: 8 }
field_tag { field: 4 }
message { chunk_index: 1 }
}
chunked_fields {
field_tag { field: 1 }
field_tag { index: 4 }
field_tag { field: 5 }
field_tag { map_key { s: "value" } }
field_tag { field: 8 }
field_tag { field: 4 }
message { chunk_index: 2 }
})pb"));
std::vector<MessageBytes>* chunks = x.chunks;
ASSERT_NE(chunks, nullptr);
EXPECT_CHUNK_SIZES(chunks, max_size);
EXPECT_THAT((*chunks)[1],
::testing::VariantWith<std::string>(large_constant_1));
EXPECT_THAT((*chunks)[2],
::testing::VariantWith<std::string>(large_constant_2));
}
TEST(GraphDefSplitterTest, TestLargeNodes) {
GraphDef proto;
const std::string graph_def_path =
io::JoinPath(testing::TensorFlowSrcRoot(),
"tools/proto_splitter/testdata", "split-large-nodes.pb");
int64_t max_size = 200;
DebugSetMaxSize(max_size);
TF_EXPECT_OK(tensorflow::ReadBinaryProto(tensorflow::Env::Default(),
graph_def_path, &proto));
EXPECT_GE(proto.ByteSize(), GetMaxSize());
NodeDef node_1 = proto.node(1);
NodeDef node_2 = proto.node(2);
NodeDef node_3 = proto.node(3);
NodeDef node_5 = proto.node(5);
GraphDefSplitter splitter(&proto);
TF_ASSERT_OK_AND_ASSIGN(auto x, splitter.Split());
ChunkedMessage* chunked_message = x.chunked_message;
ASSERT_NE(chunked_message, nullptr);
EXPECT_THAT(*chunked_message, EqualsProto(R"pb(chunk_index: 0
chunked_fields {
field_tag { field: 1 }
field_tag { index: 1 }
message { chunk_index: 1 }
}
chunked_fields {
field_tag { field: 1 }
field_tag { index: 2 }
message { chunk_index: 2 }
}
chunked_fields {
field_tag { field: 1 }
field_tag { index: 3 }
message { chunk_index: 3 }
}
chunked_fields {
field_tag { field: 1 }
field_tag { index: 5 }
message { chunk_index: 4 }
})pb"));
std::vector<MessageBytes>* chunks = x.chunks;
ASSERT_NE(chunks, nullptr);
EXPECT_CHUNK_SIZES(chunks, max_size);
EXPECT_TRUE(std::holds_alternative<std::shared_ptr<tsl::protobuf::Message>>(
(*chunks)[1]));
EXPECT_TRUE(std::holds_alternative<std::shared_ptr<tsl::protobuf::Message>>(
(*chunks)[2]));
EXPECT_TRUE(std::holds_alternative<std::shared_ptr<tsl::protobuf::Message>>(
(*chunks)[3]));
EXPECT_TRUE(std::holds_alternative<std::shared_ptr<tsl::protobuf::Message>>(
(*chunks)[4]));
EXPECT_THAT(
*std::get<std::shared_ptr<tsl::protobuf::Message>>((*chunks)[1]).get(),
EqualsProto(node_1));
EXPECT_THAT(
*std::get<std::shared_ptr<tsl::protobuf::Message>>((*chunks)[2]).get(),
EqualsProto(node_2));
EXPECT_THAT(
*std::get<std::shared_ptr<tsl::protobuf::Message>>((*chunks)[3]).get(),
EqualsProto(node_3));
EXPECT_THAT(
*std::get<std::shared_ptr<tsl::protobuf::Message>>((*chunks)[4]).get(),
EqualsProto(node_5));
}
TEST(GraphDefSplitterTest, TestLotsNodes) {
GraphDef proto;
const std::string graph_def_path =
io::JoinPath(testing::TensorFlowSrcRoot(),
"tools/proto_splitter/testdata", "split-lots-nodes.pb");
int64_t max_size = 96 * 5;
DebugSetMaxSize(max_size);
TF_EXPECT_OK(tensorflow::ReadBinaryProto(tensorflow::Env::Default(),
graph_def_path, &proto));
EXPECT_GE(proto.ByteSize(), GetMaxSize());
int expected_node_size = proto.node_size();
GraphDefSplitter splitter(&proto);
TF_ASSERT_OK_AND_ASSIGN(auto x, splitter.Split());
ChunkedMessage* chunked_message = x.chunked_message;
ASSERT_NE(chunked_message, nullptr);
EXPECT_THAT(
*chunked_message,
EqualsProto(R"pb(chunk_index: 0
chunked_fields { message { chunk_index: 1 } }
chunked_fields { message { chunk_index: 2 } }
chunked_fields { message { chunk_index: 3 } }
chunked_fields { message { chunk_index: 4 } })pb"));
std::vector<MessageBytes>* chunks = x.chunks;
ASSERT_NE(chunks, nullptr);
EXPECT_CHUNK_SIZES(chunks, max_size);
int actual_node_size = 0;
for (MessageBytes& chunk : *chunks) {
GraphDef* message = nullptr;
if (std::holds_alternative<std::shared_ptr<tsl::protobuf::Message>>(
chunk)) {
message = tsl::protobuf::DynamicCastToGenerated<GraphDef>(
std::get<std::shared_ptr<tsl::protobuf::Message>>(chunk).get());
} else if (std::holds_alternative<tsl::protobuf::Message*>(chunk)) {
message = tsl::protobuf::DynamicCastToGenerated<GraphDef>(
std::get<tsl::protobuf::Message*>(chunk));
} else {
EXPECT_FALSE(std::holds_alternative<std::string>(chunk));
}
actual_node_size += message->node_size();
}
EXPECT_EQ(actual_node_size, expected_node_size);
}
TEST(GraphDefSplitterTest, TestFunctionLotsOfNodes) {
GraphDef proto;
const std::string graph_def_path = io::JoinPath(
testing::TensorFlowSrcRoot(), "tools/proto_splitter/testdata",
"function-lots-of-nodes.pb");
int64_t max_size = 500;
DebugSetMaxSize(max_size);
TF_EXPECT_OK(tensorflow::ReadBinaryProto(tensorflow::Env::Default(),
graph_def_path, &proto));
EXPECT_GE(proto.ByteSize(), GetMaxSize());
GraphDefSplitter splitter(&proto);
TF_ASSERT_OK_AND_ASSIGN(auto x, splitter.Split());
std::vector<MessageBytes>* chunks = x.chunks;
ASSERT_NE(chunks, nullptr);
EXPECT_CHUNK_SIZES(chunks, max_size);
}
TEST(GraphDefSplitterTest, TestFunctionLargeNodes) {
GraphDef proto;
const std::string graph_def_path =
io::JoinPath(testing::TensorFlowSrcRoot(),
"tools/proto_splitter/testdata", "function-large-nodes.pb");
int64_t max_size = 200;
DebugSetMaxSize(max_size);
TF_EXPECT_OK(tensorflow::ReadBinaryProto(tensorflow::Env::Default(),
graph_def_path, &proto));
EXPECT_GE(proto.ByteSize(), GetMaxSize());
GraphDefSplitter splitter(&proto);
TF_ASSERT_OK_AND_ASSIGN(auto x, splitter.Split());
std::vector<MessageBytes>* chunks = x.chunks;
ASSERT_NE(chunks, nullptr);
EXPECT_CHUNK_SIZES(chunks, max_size);
}
TEST(GraphDefSplitterTest, TestGraphAndFunction) {
GraphDef proto;
const std::string graph_def_path = io::JoinPath(
testing::TensorFlowSrcRoot(), "tools/proto_splitter/testdata",
"graph-def-and-function.pb");
int64_t max_size = 200;
DebugSetMaxSize(max_size);
TF_EXPECT_OK(tensorflow::ReadBinaryProto(tensorflow::Env::Default(),
graph_def_path, &proto));
EXPECT_GE(proto.ByteSize(), GetMaxSize());
GraphDefSplitter splitter(&proto);
TF_ASSERT_OK_AND_ASSIGN(auto x, splitter.Split());
std::vector<MessageBytes>* chunks = x.chunks;
ASSERT_NE(chunks, nullptr);
EXPECT_CHUNK_SIZES(chunks, max_size);
TF_ASSERT_OK(splitter.Write("/tmp/hoi"));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/tools/proto_splitter/cc/graph_def_splitter.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/tools/proto_splitter/cc/graph_def_splitter_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
4c23af1c-2ff4-49b9-9dfc-748298738328 | cpp | tensorflow/tensorflow | substr_op | tensorflow/core/kernels/substr_op.cc | tensorflow/core/kernels/substr_op_test.cc | #include <cstddef>
#include <cstdlib>
#include <string>
#include "unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/core/framework/bounds_check.h"
#include "tensorflow/core/framework/kernel_def_builder.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_types.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/kernels/string_util.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/stringpiece.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/util/bcast.h"
namespace tensorflow {
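// Substr kernel. Three cases are handled below:
//  1. scalar pos/len applied to every input string;
//  2. pos/len tensors with exactly the input's shape (elementwise);
//  3. broadcasting between the input and pos/len shapes, implemented only for
//     rank-1 and rank-2 results.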
template <typename T>
class SubstrOp : public OpKernel {
public:
explicit SubstrOp(OpKernelConstruction* ctx) : OpKernel(ctx) {
string unit;
OP_REQUIRES_OK(ctx, ctx->GetAttr("unit", &unit));
OP_REQUIRES_OK(ctx, ParseCharUnit(unit, &unit_));
}
void Compute(OpKernelContext* context) override {
const Tensor& input_tensor = context->input(0);
const Tensor& pos_tensor = context->input(1);
const Tensor& len_tensor = context->input(2);
const TensorShape& input_shape = input_tensor.shape();
const TensorShape& pos_shape = pos_tensor.shape();
const TensorShape& len_shape = len_tensor.shape();
OP_REQUIRES(context, (pos_shape == len_shape),
errors::InvalidArgument(
"pos and len should have the same shape, got: ",
pos_shape.DebugString(), " vs. ", len_shape.DebugString()));
bool is_scalar = TensorShapeUtils::IsScalar(pos_shape);
if (is_scalar || input_shape == pos_shape) {
auto input = input_tensor.flat<tstring>();
Tensor* output_tensor = nullptr;
OP_REQUIRES_OK(context,
context->allocate_output("output", input_tensor.shape(),
&output_tensor));
auto output = output_tensor->flat<tstring>();
if (is_scalar) {
const T pos =
tensorflow::internal::SubtleMustCopy(pos_tensor.scalar<T>()());
const T len =
tensorflow::internal::SubtleMustCopy(len_tensor.scalar<T>()());
for (size_t i = 0; i < input_tensor.NumElements(); ++i) {
StringPiece in(input(i));
T byte_pos = pos;
T byte_len = len;
switch (unit_) {
case CharUnit::UTF8_CHAR:
OP_REQUIRES(
context, UpdatePosAndLenForUtf8(in, &byte_pos, &byte_len),
errors::InvalidArgument("pos ", pos, " out of range for ",
"string at index ", i));
break;
case CharUnit::BYTE:
byte_pos = AdjustedPosIndex(byte_pos, in);
OP_REQUIRES(
context, FastBoundsCheck(byte_pos, in.size() + 1),
errors::InvalidArgument("pos ", pos, " out of range for ",
"string b'", in, "' at index ", i));
}
StringPiece sub_in = in.substr(byte_pos, byte_len);
output(i).assign(sub_in.data(), sub_in.size());
}
} else {
auto pos_flat = pos_tensor.flat<T>();
auto len_flat = len_tensor.flat<T>();
for (size_t i = 0; i < input_tensor.NumElements(); ++i) {
StringPiece in(input(i));
const T pos = tensorflow::internal::SubtleMustCopy(pos_flat(i));
const T len = tensorflow::internal::SubtleMustCopy(len_flat(i));
T byte_pos = pos;
T byte_len = len;
switch (unit_) {
case CharUnit::UTF8_CHAR:
OP_REQUIRES(
context, UpdatePosAndLenForUtf8(in, &byte_pos, &byte_len),
errors::InvalidArgument("pos ", pos, " out of range for ",
"string at index ", i));
break;
case CharUnit::BYTE:
byte_pos = AdjustedPosIndex(byte_pos, in);
OP_REQUIRES(
context, FastBoundsCheck(byte_pos, in.size() + 1),
errors::InvalidArgument("pos ", pos, " out of range for ",
"string b'", in, "' at index ", i));
}
StringPiece sub_in = in.substr(byte_pos, byte_len);
output(i).assign(sub_in.data(), sub_in.size());
}
}
} else {
BCast bcast(BCast::FromShape(input_shape), BCast::FromShape(pos_shape),
false);
OP_REQUIRES(context, bcast.IsValid(),
errors::InvalidArgument(
"Incompatible shapes: ", input_shape.DebugString(),
" vs. ", pos_shape.DebugString()));
TensorShape output_shape = BCast::ToShape(bcast.result_shape());
int ndims = output_shape.dims();
Tensor* output_tensor = nullptr;
OP_REQUIRES_OK(context, context->allocate_output("output", output_shape,
&output_tensor));
switch (ndims) {
case 1: {
auto input = input_tensor.shaped<tstring, 1>(bcast.x_reshape());
auto output = output_tensor->shaped<tstring, 1>(bcast.result_shape());
auto pos_shaped = pos_tensor.shaped<T, 1>(bcast.y_reshape());
auto len_shaped = len_tensor.shaped<T, 1>(bcast.y_reshape());
Tensor pos_buffer;
OP_REQUIRES_OK(context,
context->allocate_temp(DataTypeToEnum<T>::v(),
output_shape, &pos_buffer));
typename TTypes<T, 1>::Tensor pos_bcast(
pos_buffer.shaped<T, 1>(bcast.result_shape()));
pos_bcast =
pos_shaped.broadcast(BCast::ToIndexArray<1>(bcast.y_bcast()));
Tensor len_buffer;
OP_REQUIRES_OK(context,
context->allocate_temp(DataTypeToEnum<T>::v(),
output_shape, &len_buffer));
typename TTypes<T, 1>::Tensor len_bcast(
len_buffer.shaped<T, 1>(bcast.result_shape()));
len_bcast =
len_shaped.broadcast(BCast::ToIndexArray<1>(bcast.y_bcast()));
for (int i = 0; i < output_shape.dim_size(0); ++i) {
StringPiece in(input(input.dimension(0) > 1 ? i : 0));
const T pos = tensorflow::internal::SubtleMustCopy(pos_bcast(i));
const T len = tensorflow::internal::SubtleMustCopy(len_bcast(i));
T byte_pos = pos;
T byte_len = len;
switch (unit_) {
case CharUnit::UTF8_CHAR:
OP_REQUIRES(
context, UpdatePosAndLenForUtf8(in, &byte_pos, &byte_len),
errors::InvalidArgument("pos ", pos, " out of range for ",
"string at index ", i));
break;
case CharUnit::BYTE:
byte_pos = AdjustedPosIndex(byte_pos, in);
OP_REQUIRES(
context, FastBoundsCheck(byte_pos, in.size() + 1),
errors::InvalidArgument("pos ", pos, " out of range for ",
"string b'", in, "' at index ", i));
}
StringPiece sub_in = in.substr(byte_pos, byte_len);
output(i).assign(sub_in.data(), sub_in.size());
}
break;
}
case 2: {
auto input = input_tensor.shaped<tstring, 2>(bcast.x_reshape());
auto output = output_tensor->shaped<tstring, 2>(bcast.result_shape());
auto pos_shaped = pos_tensor.shaped<T, 2>(bcast.y_reshape());
auto len_shaped = len_tensor.shaped<T, 2>(bcast.y_reshape());
Tensor pos_buffer;
OP_REQUIRES_OK(context,
context->allocate_temp(DataTypeToEnum<T>::v(),
output_shape, &pos_buffer));
typename TTypes<T, 2>::Tensor pos_bcast(
pos_buffer.shaped<T, 2>(bcast.result_shape()));
pos_bcast =
pos_shaped.broadcast(BCast::ToIndexArray<2>(bcast.y_bcast()));
Tensor len_buffer;
OP_REQUIRES_OK(context,
context->allocate_temp(DataTypeToEnum<T>::v(),
output_shape, &len_buffer));
typename TTypes<T, 2>::Tensor len_bcast(
len_buffer.shaped<T, 2>(bcast.result_shape()));
len_bcast =
len_shaped.broadcast(BCast::ToIndexArray<2>(bcast.y_bcast()));
for (int i = 0; i < output_shape.dim_size(0); ++i) {
for (int j = 0; j < output_shape.dim_size(1); ++j) {
StringPiece in(input(input.dimension(0) > 1 ? i : 0,
input.dimension(1) > 1 ? j : 0));
const T pos =
tensorflow::internal::SubtleMustCopy(pos_bcast(i, j));
const T len =
tensorflow::internal::SubtleMustCopy(len_bcast(i, j));
T byte_pos = pos;
T byte_len = len;
switch (unit_) {
case CharUnit::UTF8_CHAR:
OP_REQUIRES(
context, UpdatePosAndLenForUtf8(in, &byte_pos, &byte_len),
errors::InvalidArgument("pos ", pos, " out of range for ",
"string at index ", i));
break;
case CharUnit::BYTE:
byte_pos = AdjustedPosIndex(byte_pos, in);
OP_REQUIRES(
context, FastBoundsCheck(byte_pos, in.size() + 1),
errors::InvalidArgument("pos ", pos, " out of range for ",
"string b'", in, "' at index (",
i, ", ", j, ")"));
}
StringPiece sub_in = in.substr(byte_pos, byte_len);
output(i, j).assign(sub_in.data(), sub_in.size());
}
}
break;
}
default: {
context->SetStatus(errors::Unimplemented(
"Substr broadcast not implemented for ", ndims, " dimensions"));
}
}
}
}
private:
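  // Negative positions index from the end of the string (Python-style).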
static inline T AdjustedPosIndex(const T pos_requested, const StringPiece s) {
if (pos_requested < 0) {
return s.size() + pos_requested;
}
return pos_requested;
}
static inline bool UpdatePosAndLenForUtf8(const StringPiece in, T* pos,
T* len) {
if (*pos >= 0) {
return UpdatePositivePosAndLenForUtf8(in, *pos, *len, pos, len);
} else {
return UpdateNegativePosAndLenForUtf8(in, *pos, *len, pos, len);
}
}
static bool UpdatePositivePosAndLenForUtf8(const StringPiece in, const T pos,
const T len, T* char_pos,
T* char_len) {
*char_pos = 0;
if (!ForwardNUTF8CharPositions(in, pos, char_pos)) {
return false;
}
*char_len = *char_pos;
ForwardNUTF8CharPositions(in, len, char_len);
*char_len = *char_len - *char_pos;
return true;
}
static bool UpdateNegativePosAndLenForUtf8(const StringPiece in, const T pos,
const T len, T* char_pos,
T* char_len) {
*char_len = in.size();
T utf8_chars_to_skip = -pos - len;
if (utf8_chars_to_skip < 0) {
utf8_chars_to_skip = 0;
}
if (!BackNUTF8CharPositions(in, utf8_chars_to_skip, char_len)) {
return false;
}
*char_pos = *char_len;
if (!BackNUTF8CharPositions(in, -pos - utf8_chars_to_skip, char_pos)) {
return false;
}
*char_len = *char_len - *char_pos;
return true;
}
CharUnit unit_ = CharUnit::BYTE;
};
#define REGISTER_SUBSTR(type) \
REGISTER_KERNEL_BUILDER( \
Name("Substr").Device(DEVICE_CPU).TypeConstraint<type>("T"), \
SubstrOp<type>);
REGISTER_SUBSTR(int32);
REGISTER_SUBSTR(int64_t);
} | #include <string>
#include "tensorflow/core/common_runtime/kernel_benchmark_testlib.h"
#include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/fake_input.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/tensor_types.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/graph/node_builder.h"
#include "tensorflow/core/graph/testlib.h"
#include "tensorflow/core/kernels/ops_testutil.h"
#include "tensorflow/core/kernels/ops_util.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
const char* ascii_lines[] = {
"**TensorFlow** is an open source software library for numerical "
"computation using data flow graphs.",
"The graph nodes represent mathematical operations, while the graph edges "
"represent the multidimensional data arrays (tensors) that flow between "
"them.",
"This flexible architecture enables you to deploy computation to one or "
"more CPUs or GPUs in a desktop, server, or mobile device without "
"rewriting code.",
"TensorFlow also includes "
"[TensorBoard](https:
"summaries_and_tensorboard), a data visualization toolkit.",
"TensorFlow was originally developed by researchers and engineers working "
"on the Google Brain team within Google's Machine Intelligence Research "
"organization for the purposes of conducting machine learning and deep "
"neural networks research.",
"The system is general enough to be applicable in a wide variety of other "
"domains, as well.",
"TensorFlow provides stable Python API and C APIs as well as without API "
"backwards compatibility guarantee like C++, Go, Java, JavaScript and "
"Swift."};
const char* unicode_lines[] = {
"TensorFlow\xe6\x98\xaf\xe4\xb8\x80\xe4\xb8\xaa\xe4\xbd\xbf\xe7\x94\xa8\xe6"
"\x95\xb0\xe6\x8d\xae\xe6\xb5\x81\xe5\x9b\xbe\xe8\xbf\x9b\xe8\xa1\x8c\xe6"
"\x95\xb0\xe5\x80\xbc\xe8\xae\xa1\xe7\xae\x97\xe7\x9a\x84\xe5\xbc\x80\xe6"
"\xba\x90\xe8\xbd\xaf\xe4\xbb\xb6\xe5\xba\x93\xe3\x80\x82",
"\xe5\x9b\xbe\xe5\xbd\xa2\xe8\x8a\x82\xe7\x82\xb9\xe8\xa1\xa8\xe7\xa4\xba"
"\xe6\x95\xb0\xe5\xad\xa6\xe8\xbf\x90\xe7\xae\x97\xef\xbc\x8c\xe8\x80\x8c"
"\xe5\x9b\xbe\xe5\xbd\xa2\xe8\xbe\xb9\xe7\xbc\x98\xe8\xa1\xa8\xe7\xa4\xba"
"\xe5\x9c\xa8\xe5\xae\x83\xe4\xbb\xac\xe4\xb9\x8b\xe9\x97\xb4\xe6\xb5\x81"
"\xe5\x8a\xa8\xe7\x9a\x84\xe5\xa4\x9a\xe7\xbb\xb4\xe6\x95\xb0\xe6\x8d\xae"
"\xe9\x98\xb5\xe5\x88\x97\xef\xbc\x88\xe5\xbc\xa0\xe9\x87\x8f\xef\xbc\x89"
"\xe3\x80\x82",
"\xe8\xbf\x99\xe7\xa7\x8d\xe7\x81\xb5\xe6\xb4\xbb\xe7\x9a\x84\xe4\xbd\x93"
"\xe7\xb3\xbb\xe7\xbb\x93\xe6\x9e\x84\xe4\xbd\xbf\xe6\x82\xa8\xe5\x8f\xaf"
"\xe4\xbb\xa5\xe5\xb0\x86\xe8\xae\xa1\xe7\xae\x97\xe9\x83\xa8\xe7\xbd\xb2"
"\xe5\x88\xb0\xe6\xa1\x8c\xe9\x9d\xa2\xef\xbc\x8c\xe6\x9c\x8d\xe5\x8a\xa1"
"\xe5\x99\xa8\xe6\x88\x96\xe7\xa7\xbb\xe5\x8a\xa8\xe8\xae\xbe\xe5\xa4\x87"
"\xe4\xb8\xad\xe7\x9a\x84\xe4\xb8\x80\xe4\xb8\xaa\xe6\x88\x96\xe5\xa4\x9a"
"\xe4\xb8\xaa CPU\xe6\x88\x96GPU\xef\xbc\x8c\xe8\x80\x8c\xe6\x97\xa0\xe9"
"\x9c\x80\xe9\x87\x8d\xe5\x86\x99\xe4\xbb\xa3\xe7\xa0\x81\xe3\x80\x82",
"TensorFlow\xe8\xbf\x98\xe5\x8c\x85\xe6\x8b\xac[TensorBoard]\xef\xbc\x88"
"https:
"\xbc\x8c\xe8\xbf\x99\xe6\x98\xaf\xe4\xb8\x80\xe4\xb8\xaa\xe6\x95\xb0\xe6"
"\x8d\xae\xe5\x8f\xaf\xe8\xa7\x86\xe5\x8c\x96\xe5\xb7\xa5\xe5\x85\xb7\xe5"
"\x8c\x85\xe3\x80\x82",
"TensorFlow\xe6\x9c\x80\xe5\x88\x9d\xe6\x98\xaf\xe7\x94\xb1\xe7\xa0\x94\xe7"
"\xa9\xb6\xe4\xba\xba\xe5\x91\x98\xe5\x92\x8c\xe5\xb7\xa5\xe7\xa8\x8b\xe5"
"\xb8\x88\xe5\x9c\xa8Google\xe6\x9c\xba\xe5\x99\xa8\xe6\x99\xba\xe8\x83\xbd"
"\xe7\xa0\x94\xe7\xa9\xb6\xe7\xbb\x84\xe7\xbb\x87\xe7\x9a\x84Google Brain"
"\xe5\x9b\xa2\xe9\x98\x9f\xe5\xbc\x80\xe5\x8f\x91\xe7\x9a\x84\xef\xbc\x8c"
"\xe7\x9b\xae\xe7\x9a\x84\xe6\x98\xaf\xe8\xbf\x9b\xe8\xa1\x8c\xe6\x9c\xba"
"\xe5\x99\xa8\xe5\xad\xa6\xe4\xb9\xa0\xe5\x92\x8c\xe6\xb7\xb1\xe5\xba\xa6"
"\xe7\xa5\x9e\xe7\xbb\x8f\xe7\xbd\x91\xe7\xbb\x9c\xe7\xa0\x94\xe7\xa9\xb6"
"\xe3\x80\x82",
"\xe8\xaf\xa5\xe7\xb3\xbb\xe7\xbb\x9f\xe8\xb6\xb3\xe4\xbb\xa5\xe9\x80\x82"
"\xe7\x94\xa8\xe4\xba\x8e\xe5\x90\x84\xe7\xa7\x8d\xe5\x85\xb6\xe4\xbb\x96"
"\xe9\xa2\x86\xe5\x9f\x9f\xe4\xb9\x9f\xe6\x98\xaf\xe5\xa6\x82\xe6\xad\xa4"
"\xe3\x80\x82",
"TensorFlow\xe6\x8f\x90\xe4\xbe\x9b\xe7\xa8\xb3\xe5\xae\x9a\xe7\x9a\x84"
"Python API\xe5\x92\x8c C API\xef\xbc\x8c\xe4\xbb\xa5\xe5\x8f\x8a\xe6\xb2"
"\xa1\xe6\x9c\x89 API\xe5\x90\x91\xe5\x90\x8e\xe5\x85\xbc\xe5\xae\xb9\xe6"
"\x80\xa7\xe4\xbf\x9d\xe8\xaf\x81\xef\xbc\x8c\xe5\xa6\x82 C ++\xef\xbc\x8c"
"Go\xef\xbc\x8cJava\xef\xbc\x8cJavaScript\xe5\x92\x8cSwift\xe3\x80\x82",
};
const char* const kByteUnit = "BYTE";
const char* const kUTF8Unit = "UTF8_CHAR";
Tensor GetTestTensor(int batch) {
const int sz = TF_ARRAYSIZE(ascii_lines);
Tensor t(DT_STRING, {batch});
auto s = t.flat<tstring>();
for (int i = 0; i < batch; ++i) {
s(i) = ascii_lines[i % sz];
}
return t;
}
Tensor GetTestUTF8Tensor(int batch) {
const int sz = TF_ARRAYSIZE(unicode_lines);
Tensor t(DT_STRING, {batch});
auto s = t.flat<tstring>();
for (int i = 0; i < batch; ++i) {
s(i) = unicode_lines[i % sz];
}
return t;
}
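// Builds a minimal graph: input -> Substr(pos, len) with scalar position and
// length constants and the given unit ("BYTE" or "UTF8_CHAR").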
Graph* SetupSubstrGraph(const Tensor& input, const int32_t pos,
const int32_t len, const char* const unit) {
Graph* g = new Graph(OpRegistry::Global());
Tensor position(DT_INT32, TensorShape({}));
position.flat<int32>().setConstant(pos);
Tensor length(DT_INT32, TensorShape({}));
length.flat<int32>().setConstant(len);
TF_CHECK_OK(NodeBuilder("substr_op", "Substr")
.Input(test::graph::Constant(g, input))
.Input(test::graph::Constant(g, position))
.Input(test::graph::Constant(g, length))
.Attr("unit", unit)
.Finalize(g, nullptr ));
return g;
}
static void BM_SubstrByte(::testing::benchmark::State& state) {
const int batch_size = state.range(0);
Tensor input = GetTestTensor(batch_size);
Graph* g = SetupSubstrGraph(input, 3, 30, kByteUnit);
test::Benchmark("cpu", g, false).Run(state);
state.SetItemsProcessed(state.iterations());
}
static void BM_SubstrUTF8(::testing::benchmark::State& state) {
const int batch_size = state.range(0);
Tensor input = GetTestUTF8Tensor(batch_size);
Graph* g = SetupSubstrGraph(input, 3, 30, kUTF8Unit);
test::Benchmark("cpu", g, false).Run(state);
state.SetItemsProcessed(state.iterations());
}
BENCHMARK(BM_SubstrByte)
->UseRealTime()
->Arg(1)
->Arg(8)
->Arg(16)
->Arg(32)
->Arg(64)
->Arg(128)
->Arg(256);
BENCHMARK(BM_SubstrUTF8)
->UseRealTime()
->Arg(1)
->Arg(8)
->Arg(16)
->Arg(32)
->Arg(64)
->Arg(128)
->Arg(256);
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/substr_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/substr_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
662c17fd-2323-497a-ba1c-0d24992c1432 | cpp | tensorflow/tensorflow | quantization_ops | tensorflow/compiler/tf2tensorrt/convert/ops/quantization_ops.cc | tensorflow/compiler/tf2tensorrt/convert/ops/quantization_ops_test.cc | #if GOOGLE_CUDA && GOOGLE_TENSORRT
#include "tensorflow/compiler/tf2tensorrt/convert/ops/quantization_ops.h"
#include "absl/strings/str_format.h"
#include "tensorflow/cc/ops
#include "tensorflow/compiler/tf2tensorrt/common/utils.h"
#include "tensorflow/compiler/tf2tensorrt/convert/op_converter.h"
#include "tensorflow/compiler/tf2tensorrt/convert/op_converter_registry.h"
#include "tensorflow/compiler/tf2tensorrt/convert/ops/layer_utils.h"
#include "tensorflow/compiler/tf2tensorrt/convert/weights.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "third_party/tensorrt/NvInfer.h"
namespace tensorflow {
namespace tensorrt {
namespace convert {
bool IsQuantizeAndDequantizeOp(const Node* node) {
return absl::c_find(kQuantizationOpNames, node->def().op()) !=
kQuantizationOpNames.end();
}
namespace {
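// Mirrors the range computation of TF's QuantizeAndDequantize kernels: pick
// the scale implied by whichever of min_range/max_range is the tighter
// constraint, then nudge the other bound so the quantized grid covers both
// ends exactly.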
template <typename T>
QuantizationScales<T, 1> ComputeQuantizationRange(bool signed_input,
int num_bits,
bool narrow_range,
T* min_range, T* max_range) {
const int64_t min_quantized =
signed_input ? narrow_range ? -(1ULL << (num_bits - 1)) + 1
: -(1ULL << (num_bits - 1))
: 0;
const int64_t max_quantized =
signed_input ? (1ULL << (num_bits - 1)) - 1 : (1ULL << num_bits) - 1;
const T scale_from_min_side = (min_quantized * *min_range > 0)
? min_quantized / *min_range
: std::numeric_limits<T>::max();
const T scale_from_max_side = (max_quantized * *max_range > 0)
? max_quantized / *max_range
: std::numeric_limits<T>::max();
QuantizationScales<T, 1> scales;
if (scale_from_min_side < scale_from_max_side) {
scales.quantize_scale[0] = scale_from_min_side;
scales.dequantize_scale[0] = *min_range / min_quantized;
*max_range = max_quantized * scales.dequantize_scale[0];
} else {
scales.quantize_scale[0] = scale_from_max_side;
scales.dequantize_scale[0] = *max_range / max_quantized;
*min_range = min_quantized * scales.dequantize_scale[0];
}
return scales;
}
StatusOr<nvinfer1::ITensor*> ExplicitQDQInputToTensor(
TRTNetworkBuilder* builder, const OpConverterParams* params,
const TRT_TensorOrWeights& input) {
if (input.is_tensor()) {
return input.tensor()->trt_tensor();
}
if (!IS_TRT_VERSION_GE(8, 0, 0, 0) && input.weights().count() > 1) {
LOG(WARNING) << absl::StrCat(
"QDQ per-channel for weights not "
"implemented, assuming uniform scaling");
}
TRT_ShapedWeights trt_weights = input.weights();
StatusOr<nvinfer1::IConstantLayer*> weights_const =
builder->WeightsToConstant(trt_weights.GetTrtWeights(),
trt_weights.Shape());
TRT_ENSURE_PTR_OK(weights_const);
params->converter->SetLayerName(*weights_const, params->node_def, "const");
nvinfer1::ITensor* qdq_input = (*weights_const)->getOutput(0);
std::string name = absl::StrCat((*weights_const)->getName(), "_output");
qdq_input->setName(name.c_str());
return qdq_input;
}
}
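// QDQOpSpec is specialized for each supported TF quantization op. Each
// specialization declares the op's expected inputs (InputSpec), the attributes
// to extract, and how to validate/convert the op when building a TensorRT
// network in explicit-precision mode.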
template <typename T>
struct QDQOpSpec {};
template <>
struct QDQOpSpec<ops::QuantizeAndDequantizeV2> {
static constexpr std::array<InputArgSpec, 3> InputSpec() {
return {
InputArgSpec::Create("input", TrtInputArg::kBoth),
InputArgSpec::Create("input_min", TrtInputArg::kWeight),
InputArgSpec::Create("input_max", TrtInputArg::kWeight),
};
}
struct Attrs {
float min_range;
float max_range;
bool narrow_range;
std::string round_mode;
UniformQuantizationScales scales;
};
static Status ValidateQDQForExplicitPrecision(
const std::vector<TRT_TensorOrWeights>& inputs, const NodeDef& node_def,
Attrs* args) {
AttrSlice attrs(node_def);
TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "round_mode", &args->round_mode));
if (args->round_mode != "HALF_TO_EVEN") {
LOG(WARNING) << node_def.op() << ": " << node_def.name()
<< " has round_mode=" << args->round_mode
<< ", but for TensorRT conversion, "
"round_mode=HALF_TO_EVEN is recommended.";
}
TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "narrow_range", &args->narrow_range));
if (args->narrow_range) {
LOG(WARNING) << node_def.op() << ": " << node_def.name()
<< " has narrow_range=true, but for TensorRT conversion, "
"narrow_range=false is recommended.";
}
args->min_range = inputs.at(1).weights().template GetPointer<float>()[0];
args->max_range = inputs.at(2).weights().template GetPointer<float>()[0];
const int num_bits = 8;
args->scales = ComputeQuantizationRange<float>(
true, num_bits, args->narrow_range, &args->min_range,
&args->max_range);
TRT_ENSURE(args->scales.dequantize_scale[0] != 0);
TRT_ENSURE(args->scales.quantize_scale[0] != 0);
return OkStatus();
}
static Status ConvertExplicit(const OpConverterParams* params,
const Attrs& args) {
const auto& node_def = params->node_def;
StatusOr<TRTNetworkBuilder> builder = TRTNetworkBuilder::Create(
params->converter->network(), params->weight_store);
StatusOr<nvinfer1::ITensor*> qdq_input =
        ExplicitQDQInputToTensor(&*builder, params, params->inputs.at(0));
TRT_ENSURE_PTR_OK(qdq_input);
const int required_dims = params->use_implicit_batch ? 3 : 4;
const nvinfer1::Dims idims = (*qdq_input)->getDimensions();
nvinfer1::Dims intermediate_dims = idims;
TRT_ENSURE(idims.nbDims > 0);
if (idims.nbDims < required_dims) {
const int nb_extra_dims = required_dims - idims.nbDims;
intermediate_dims.nbDims = required_dims;
std::vector<int> ones(nb_extra_dims, 1);
TRT_ENSURE(ones.size() == nb_extra_dims && nb_extra_dims > 0);
if (!params->use_implicit_batch) {
intermediate_dims.d[0] = idims.d[0];
std::copy(ones.begin(), ones.end(), intermediate_dims.d + 1);
std::copy_n(idims.d + 1, idims.nbDims - 1,
intermediate_dims.d + ones.size() + 1);
} else {
std::copy(ones.begin(), ones.end(), intermediate_dims.d);
std::copy_n(idims.d, idims.nbDims, intermediate_dims.d + ones.size());
}
LOG(WARNING) << absl::StrCat(
node_def.name(), ":", node_def.op(), ": tensor ",
(*qdq_input)->getName(), " has shape ", DebugString(idims),
" but TRT scale layer requires at least 3 dims excluding batch dim, "
"trying to recover by inserting 1's to create shape ",
DebugString(intermediate_dims));
StatusOr<nvinfer1::IShuffleLayer*> reshape =
builder->Reshape(*qdq_input, intermediate_dims);
TRT_ENSURE_PTR_OK(reshape);
*qdq_input = (*reshape)->getOutput(0);
}
VLOG(1) << "[ExplicitPrecision]" << node_def.op() << ": " << node_def.name()
<< " computed scales: " << args.scales << " from min/max ranges "
<< args.min_range << "/" << args.max_range;
StatusOr<nvinfer1::ILayer*> qdq =
builder->UniformQuantizeDequantizeExplicit(
*qdq_input, args.scales.quantize_scale[0],
args.scales.dequantize_scale[0], node_def.name());
TRT_ENSURE_PTR_OK(qdq);
ITensorProxyPtr final_output = (*qdq)->getOutput(0);
if (idims.nbDims != intermediate_dims.nbDims) {
StatusOr<nvinfer1::IShuffleLayer*> undo_reshape =
builder->Reshape(*qdq_input, idims);
TRT_ENSURE_PTR_OK(undo_reshape);
final_output = (*undo_reshape)->getOutput(0);
}
params->outputs->push_back(final_output);
return OkStatus();
}
};
template <>
struct QDQOpSpec<ops::QuantizeAndDequantizeV3> {
static constexpr std::array<InputArgSpec, 4> InputSpec() {
return {
InputArgSpec::Create("input", TrtInputArg::kBoth),
InputArgSpec::Create("min", TrtInputArg::kWeight),
InputArgSpec::Create("max", TrtInputArg::kWeight),
InputArgSpec::Create("num_bits", TrtInputArg::kWeight),
};
}
using Attrs = QDQOpSpec<ops::QuantizeAndDequantizeV2>::Attrs;
static Status ValidateQDQForExplicitPrecision(
const std::vector<TRT_TensorOrWeights>& inputs, const NodeDef& node_def,
Attrs* args) {
return QDQOpSpec<
ops::QuantizeAndDequantizeV2>::ValidateQDQForExplicitPrecision(inputs,
node_def,
args);
}
static Status ConvertExplicit(const OpConverterParams* params,
const Attrs& args) {
return QDQOpSpec<ops::QuantizeAndDequantizeV2>::ConvertExplicit(params,
args);
}
};
template <>
struct QDQOpSpec<ops::FakeQuantWithMinMaxVars> {
static constexpr std::array<InputArgSpec, 3> InputSpec() {
return {
InputArgSpec::Create("input", TrtInputArg::kBoth),
InputArgSpec::Create("min", TrtInputArg::kWeight),
InputArgSpec::Create("max", TrtInputArg::kWeight),
};
}
struct Attrs {
int num_bits;
bool narrow_range;
};
static Status ValidateQDQForExplicitPrecision(
const std::vector<TRT_TensorOrWeights>& inputs, const NodeDef& node_def,
Attrs* args) {
return errors::Unimplemented("");
}
static Status ConvertExplicit(const OpConverterParams* params,
const Attrs& args) {
return errors::Unimplemented("");
}
};
template <>
struct QDQOpSpec<ops::FakeQuantWithMinMaxArgs> {
static constexpr std::array<InputArgSpec, 1> InputSpec() {
return {
InputArgSpec::Create("input", TrtInputArg::kBoth),
};
}
struct Attrs {
float min;
float max;
int num_bits;
bool narrow_range;
};
static Status ValidateQDQForExplicitPrecision(
const std::vector<TRT_TensorOrWeights>& inputs, const NodeDef& node_def,
Attrs* args) {
return errors::Unimplemented("");
}
static Status ConvertExplicit(const OpConverterParams* params,
const Attrs& args) {
return errors::Unimplemented("");
}
};
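// Implicit (dynamic-range) INT8 mode: the QDQ node itself is elided. Only its
// min/max range is recorded with the converter, and the op's input is passed
// through unchanged as the output.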
Status ConvertDynamicRangeMode(const OpConverterParams* params) {
const auto& inputs = params->inputs;
const auto& node_def = params->node_def;
float min_range = 0.0f;
float max_range = 0.0f;
const auto& op_name = node_def.op();
if (op_name == "FakeQuantWithMinMaxArgs") {
AttrSlice attrs(node_def);
TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "min", &min_range));
TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "max", &max_range));
} else if (op_name == "FakeQuantWithMinMaxVars" ||
op_name == "QuantizeAndDequantizeV2" ||
op_name == "QuantizeAndDequantizeV3") {
auto get_weights_value = [&inputs](int index) {
const auto* raw_weights = inputs.at(index).weights().GetPointer<float>();
return raw_weights[0];
};
min_range = get_weights_value(1);
max_range = get_weights_value(2);
} else {
return errors::InvalidArgument("Unknown quantization op ", op_name, ", at ",
node_def.name());
}
if (params->validation_only) {
return OkStatus();
}
ITensorProxyPtr input0 = inputs.at(0).tensor();
params->converter->ProvideQuantizationRange(&input0, min_range, max_range);
params->outputs->push_back(inputs.at(0));
return OkStatus();
}
template <typename TFOpType>
class ConvertQDQ : public OpConverterBase<ConvertQDQ<TFOpType>> {
public:
explicit ConvertQDQ(const OpConverterParams* params)
: OpConverterBase<ConvertQDQ<TFOpType>>(params) {}
static constexpr auto InputSpec() { return QDQOpSpec<TFOpType>::InputSpec(); }
static constexpr const char* NodeDefDataTypeAttributeName() { return ""; }
Status ValidateDynamicRangeINT8Mode() {
if (this->params_->validation_only) {
return ConvertDynamicRangeMode(this->params_);
}
return OkStatus();
}
Status Validate() {
if (!this->params_->use_explicit_precision) {
return ValidateDynamicRangeINT8Mode();
}
return OpSpec::ValidateQDQForExplicitPrecision(
this->params_->inputs, this->params_->node_def, &attrs_);
}
Status Convert() {
if (!this->params_->use_explicit_precision) {
return ConvertDynamicRangeMode(this->params_);
}
return OpSpec::ConvertExplicit(this->params_, attrs_);
}
using OpSpec = QDQOpSpec<TFOpType>;
using OpSpecAttrs = typename QDQOpSpec<TFOpType>::Attrs;
OpSpecAttrs attrs_;
};
REGISTER_DEFAULT_TRT_OP_CONVERTER(
MakeConverterFunction<ConvertQDQ<ops::QuantizeAndDequantizeV2>>(),
"QuantizeAndDequantizeV2");
REGISTER_DEFAULT_TRT_OP_CONVERTER(
MakeConverterFunction<ConvertQDQ<ops::QuantizeAndDequantizeV3>>(),
"QuantizeAndDequantizeV3");
REGISTER_DEFAULT_TRT_OP_CONVERTER(
MakeConverterFunction<ConvertQDQ<ops::FakeQuantWithMinMaxVars>>(),
"FakeQuantWithMinMaxVars");
REGISTER_DEFAULT_TRT_OP_CONVERTER(
MakeConverterFunction<ConvertQDQ<ops::FakeQuantWithMinMaxArgs>>(),
"FakeQuantWithMinMaxArgs");
}
}
}
#endif | #if GOOGLE_CUDA && GOOGLE_TENSORRT
#include "tensorflow/compiler/tf2tensorrt/convert/ops/quantization_ops.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
#include "tensorflow/cc/framework/ops.h"
#include "tensorflow/cc/framework/scope.h"
#include "tensorflow/cc/ops/array_ops.h"
#include "tensorflow/cc/ops/const_op.h"
#include "tensorflow/cc/ops/linalg_ops.h"
#include "tensorflow/cc/ops/math_ops.h"
#include "tensorflow/cc/ops/nn_ops.h"
#include "tensorflow/compiler/jit/shape_inference.h"
#include "tensorflow/compiler/tf2tensorrt/convert/convert_nodes.h"
#include "tensorflow/compiler/tf2tensorrt/convert/utils.h"
#include "tensorflow/compiler/tf2tensorrt/trt_convert_api.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/platform/status_matchers.h"
#include "tensorflow/core/protobuf/error_codes.pb.h"
#if IS_TRT_VERSION_GE(8, 0, 0, 0)
namespace tensorflow {
namespace tensorrt {
namespace convert {
namespace ops = ::tensorflow::ops;
using ::tensorflow::testing::StatusIs;
namespace {
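// Enumerates the epilogues appended after the Conv2D in the test graphs, so
// QDQ placement can be exercised against different conv/BN/ReLU fusion
// patterns.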
enum class ConvEpilogueType {
kNone,
kReLU,
kBatchNorm,
kReLUBatchnorm,
kBatchnormReLU
};
std::ostream& operator<<(std::ostream& os, ConvEpilogueType epilogue) {
switch (epilogue) {
case ConvEpilogueType::kNone:
return os << "None";
case ConvEpilogueType::kReLU:
return os << "ReLU only";
case ConvEpilogueType::kBatchNorm:
return os << "BatchNorm Only";
case ConvEpilogueType::kReLUBatchnorm:
return os << "ReLU+Batchnorm";
case ConvEpilogueType::kBatchnormReLU:
return os << "BatchNorm+ReLU";
}
}
std::string DebugString(ConvEpilogueType epilogue) {
std::stringstream ss;
ss << epilogue;
return ss.str();
}
ops::Placeholder AddInput(Scope scope, int input_idx,
const std::string data_format,
std::array<int, 3> size_chw = {1, 3, 3}) {
PartialTensorShape input_shape;
if (data_format == "NCHW") {
input_shape =
PartialTensorShape({1, size_chw[0], size_chw[1], size_chw[2]});
} else if (data_format == "NHWC") {
input_shape =
PartialTensorShape({1, size_chw[1], size_chw[2], size_chw[0]});
} else if (data_format == "NHW") {
input_shape = PartialTensorShape({1, size_chw[1], size_chw[2]});
} else {
LOG(FATAL) << "Unknown input shape type " << data_format;
}
auto input_attrs = ops::Placeholder::Attrs().Shape(input_shape);
return ops::Placeholder(scope.WithOpName(absl::StrCat("input_", input_idx)),
DT_FLOAT, input_attrs);
}
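// Wraps `input` in a QuantizeAndDequantizeV2 node with a fixed [-1, 1] range.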
Output AddQDQV2(Scope scope, Input input) {
auto input_min =
ops::Const<float>(scope.WithOpName("in_min"), -1.0f, TensorShape{});
auto input_max =
ops::Const<float>(scope.WithOpName("in_max"), 1.0f, TensorShape{});
return ops::QuantizeAndDequantizeV2(scope.WithOpName("qdq"), input, input_min,
input_max);
}
Output AddOutput(Scope scope, Output input, int idx, bool add_qdq) {
Output out = input;
if (add_qdq) {
out = AddQDQV2(scope, input);
}
return ops::Identity(scope.WithOpName(StrCat("output_", idx)), out);
}
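// Builds a Conv2D with constant weights, optional bias, and an optional
// epilogue (ReLU and/or FusedBatchNormV3). QDQ nodes are placed on the conv
// inputs by default, or on the output when `qdq_on_output` is set.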
Output AddConv2D(Scope scope, Input input, int in_channels, int out_channels,
std::array<int, 2> filter_size = {1, 1},
std::array<int, 2> stride = {1, 1},
const std::string& data_format = "NCHW", bool with_bias = true,
ConvEpilogueType epilogue = ConvEpilogueType::kBatchnormReLU,
bool qdq_on_output = false) {
auto weights_const = ops::Const(
scope.WithOpName("weights"), 1.0f,
TensorShape({filter_size[0], filter_size[1], in_channels, out_channels}));
auto conv_input =
!qdq_on_output ? AddQDQV2(scope.WithOpName("qdq_input"), input) : input;
Output result = ops::Conv2D(
scope.WithOpName("conv2d"), conv_input, AddQDQV2(scope, weights_const),
{1, 1, 1, 1},
"SAME", ops::Conv2D::Attrs().DataFormat(data_format));
if (with_bias) {
auto bias_const = ops::Const(scope.WithOpName("bias_weights"), 1.0f,
TensorShape({
out_channels,
}));
result = ops::BiasAdd(scope.WithOpName("bias"), result, bias_const,
ops::BiasAdd::Attrs().DataFormat(data_format));
}
auto add_bn = [scope, data_format](Input input,
const int channels) -> Output {
TensorShape constant_shape = TensorShape({channels});
auto bn_scale =
ops::Const(scope.WithOpName("bn_scale"), 1.0f, constant_shape);
auto bn_offset =
ops::Const(scope.WithOpName("bn_offset"), 1.0f, constant_shape);
auto bn_mean =
ops::Const(scope.WithOpName("bn_mean"), 0.1f, TensorShape({channels}));
auto bn_var =
ops::Const(scope.WithOpName("bn_var"), 1.0f, TensorShape({channels}));
Input conv_bn_input = IS_TRT_VERSION_GE(8, 0, 1, 0)
? input
: AddQDQV2(scope.WithOpName("qdq_input"), input);
return ops::FusedBatchNormV3(
scope.WithOpName("bn"), conv_bn_input, bn_scale, bn_offset,
bn_mean, bn_var,
ops::FusedBatchNormV3::Attrs().IsTraining(false).DataFormat(
data_format))
.y;
};
switch (epilogue) {
case ConvEpilogueType::kBatchNorm: {
result = add_bn(result, out_channels);
break;
}
case ConvEpilogueType::kReLU: {
result = ops::Relu(scope.WithOpName("relu"), result);
break;
}
case ConvEpilogueType::kReLUBatchnorm: {
result = ops::Relu(scope.WithOpName("relu"), result);
result = add_bn(result, out_channels);
break;
}
case ConvEpilogueType::kBatchnormReLU: {
result = add_bn(result, out_channels);
result = ops::Relu(scope.WithOpName("relu"), result);
break;
}
case ConvEpilogueType::kNone:
break;
}
if (qdq_on_output) {
result = AddQDQV2(scope.WithOpName("qdq_out"), result);
}
return result;
}
ops::BatchMatMulV2 AddMatMul(Scope scope, const std::string& name,
Input input) {
auto input_qdq = AddQDQV2(scope, input);
auto weights_const =
ops::Const(scope.WithOpName(name + "_weights"),
{1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f},
TensorShape({3, 3}));
auto weights_qdq = AddQDQV2(scope.WithOpName("weights_qdq"), weights_const);
return ops::BatchMatMulV2(scope.WithOpName(name), input_qdq, weights_qdq);
}
}
struct QDQTestOptions {
bool conv_has_bias{true};
std::string data_format{"NCHW"};
bool qdq_on_output{false};
bool final_qdq{true};
ConvEpilogueType conv_epilogue;
TfTrtConversionParams conversion_params{};
};
std::ostream& operator<<(std::ostream& os, const QDQTestOptions& opts) {
return os << absl::StrCat(
"QDQTestOptions(conv_has_bias=",
static_cast<int>(opts.conv_has_bias),
", qdq_on_output=", static_cast<int>(opts.qdq_on_output),
", data_format=", opts.data_format,
", conv_epilogue=", DebugString(opts.conv_epilogue),
", final_qdq=", opts.final_qdq, ")");
}
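// Enumerates the cartesian product of test options; NHWC combinations with a
// batch-norm epilogue are filtered out here.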
std::vector<QDQTestOptions> EnumerateQDQTestOptions() {
std::vector<QDQTestOptions> result;
for (const absl::string_view data_format : {"NCHW", "NHWC"}) {
for (auto use_bias : {true, false}) {
for (auto qdq_on_output : {false, true}) {
for (auto final_qdq : {true, false}) {
for (auto conv_epilogue :
{ConvEpilogueType::kReLU, ConvEpilogueType::kNone,
ConvEpilogueType::kBatchnormReLU}) {
if (data_format == "NHWC" &&
(conv_epilogue == ConvEpilogueType::kBatchnormReLU ||
conv_epilogue == ConvEpilogueType::kBatchNorm ||
conv_epilogue == ConvEpilogueType::kBatchnormReLU)) {
continue;
}
QDQTestOptions opts{};
opts.conv_has_bias = use_bias;
opts.data_format = data_format;
opts.qdq_on_output = qdq_on_output;
opts.final_qdq = final_qdq;
opts.conv_epilogue = conv_epilogue;
result.push_back(opts);
}
}
}
}
}
return result;
}
class QDQExplicitTest : public ::testing::Test,
public ::testing::WithParamInterface<QDQTestOptions> {
public:
static StatusOr<PartialTensorShape> GetShape(const std::string& name,
const GraphShapeInfo& shapes) {
TRT_ENSURE(shapes.find(name) != shapes.end());
TRT_ENSURE(shapes.at(name).size() == 1);
return shapes.at(name)[0].shape;
}
StatusOr<MetaGraphDef> GetModel(const GraphDef& graph_def,
const std::vector<const NodeDef*>& inputs,
const std::vector<const NodeDef*>& outputs,
const GraphShapeInfo& shapes) {
TRT_ENSURE(!inputs.empty());
TRT_ENSURE(!outputs.empty());
MetaGraphDef out;
out.mutable_graph_def()->CopyFrom(graph_def);
SignatureDef signature_def;
auto& mutable_inputs = *signature_def.mutable_inputs();
for (int i = 0; i < inputs.size(); i++) {
std::string input_name = inputs[i]->name();
auto& input = mutable_inputs[input_name];
input.set_name(input_name);
input.set_dtype(DT_FLOAT);
TRT_ENSURE(shapes.find(input_name) != shapes.end());
TRT_ENSURE(shapes.at(input_name).size() == 1);
PartialTensorShape input_shape = shapes.at(input_name)[0].shape;
input_shape.AsProto(input.mutable_tensor_shape());
}
auto& mutable_outputs = *signature_def.mutable_outputs();
for (int i = 0; i < outputs.size(); i++) {
std::string output_name = outputs[i]->name();
auto& output = mutable_outputs[output_name];
output.set_name(output_name);
output.set_dtype(DT_FLOAT);
TRT_ENSURE(shapes.find(output_name) != shapes.end());
TRT_ENSURE(shapes.at(output_name).size() == 1);
PartialTensorShape output_shape = shapes.at(output_name)[0].shape;
output_shape.AsProto(output.mutable_tensor_shape());
}
(*out.mutable_signature_def())["serving_default"] = signature_def;
return out;
}
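  // Verifies the converted graph contains exactly one TRTEngineOp carrying a
  // non-empty, statically built serialized engine.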
static Status CheckTrtNode(const GraphDef& converted_graph_def) {
int n_trt_ops = 0;
string op_name{"TRTEngineOp"};
for (const auto& node : converted_graph_def.node()) {
if (op_name == node.op()) {
n_trt_ops++;
const auto& attr = node.attr();
TRT_ENSURE(attr.at("static_engine").b());
VLOG(2) << "Found serialized segment with size "
<< attr.at("serialized_segment").s().size();
TRT_ENSURE(!attr.at("serialized_segment").s().empty());
}
}
TRT_ENSURE(n_trt_ops == 1);
return OkStatus();
}
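  // Builds a MetaGraphDef from the scope (nodes named "input_*"/"output_*"
  // become the signature inputs/outputs), infers shapes, runs the TF-TRT INT8
  // conversion with a static engine, and checks the result with CheckTrtNode.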
Status ConvertAndRun(Scope* scope) {
std::vector<const NodeDef*> inputs;
std::vector<const NodeDef*> outputs;
GraphDef gdef;
TF_RETURN_IF_ERROR(scope->ToGraphDef(&gdef));
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
TF_RETURN_IF_ERROR(scope->ToGraph(graph.get()));
GraphShapeInfo shape_info;
TF_RETURN_IF_ERROR(InferShapes(graph.get(), {},
nullptr, &shape_info));
for (const NodeDef& node : gdef.node()) {
if (absl::StartsWith(node.name(), "input_")) {
inputs.push_back(&node);
} else if (absl::StartsWith(node.name(), "output_")) {
outputs.push_back(&node);
}
}
StatusOr<MetaGraphDef> meta_graph_def =
GetModel(gdef, inputs, outputs, shape_info);
TRT_ENSURE_OK(meta_graph_def);
std::vector<Tensor> input_tensors;
std::vector<std::string> input_names;
for (const auto& input : inputs) {
input_names.push_back(input->name());
StatusOr<PartialTensorShape> input_shape =
GetShape(input->name(), shape_info);
TRT_ENSURE_OK(input_shape);
TensorShape shape;
input_shape->AsTensorShape(&shape);
Tensor tensor(DT_FLOAT, shape);
test::FillIota(&tensor, 1.0f);
input_tensors.push_back(tensor);
}
std::vector<std::string> output_names;
for (const auto& output : outputs) {
output_names.push_back(output->name());
}
TfTrtConversionParams conversion_params;
conversion_params.allow_build_at_runtime = true;
conversion_params.precision_mode = TrtPrecisionMode::INT8;
conversion_params.use_calibration = false;
conversion_params.convert_to_static_engine = true;
TRT_ENSURE(input_names.size() == input_tensors.size());
StatusOr<GraphDef> converted_gdef = tensorrt::ConvertAndBuild(
meta_graph_def->graph_def(), input_names, output_names, {input_tensors},
conversion_params);
TRT_ENSURE_OK(converted_gdef);
return CheckTrtNode(*converted_gdef);
}
protected:
TfTrtConversionParams params_;
TrtUniquePtrType<nvinfer1::ICudaEngine> engine_;
};
class TestQDQSuite : public QDQExplicitTest {};
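// Per-TRT-version expectations: on TRT < 8, QDQ on outputs and graphs without
// a final QDQ node are expected to fail conversion; on TRT >= 8, NHWC graphs
// with a batch-norm epilogue are expected to report UNIMPLEMENTED.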
#define EXPECT_QDQ_ON_OUTPUT_FAILURE(params, scope) \
if ((params).qdq_on_output) { \
EXPECT_THAT(ConvertAndRun(&(scope)), StatusIs(error::INTERNAL)); \
return; \
}
#define EXPECT_NO_FINAL_QDQ_FAILURE(params, scope) \
if (!(params).final_qdq) { \
EXPECT_THAT(ConvertAndRun(&(scope)), StatusIs(error::INTERNAL)); \
return; \
}
#define EXPECT_BUILD_OK(scope) TF_EXPECT_OK(ConvertAndRun(&(scope)))
#define POLICY_TRT7(params, scope) \
if (!IS_TRT_VERSION_GE(8, 0, 0, 0)) { \
EXPECT_QDQ_ON_OUTPUT_FAILURE(params, scope); \
EXPECT_NO_FINAL_QDQ_FAILURE(params, scope); \
EXPECT_BUILD_OK(scope); \
}
#define POLICY_TRT8(params, scope) \
if (IS_TRT_VERSION_GE(8, 0, 0, 0)) { \
if (((params).conv_epilogue == ConvEpilogueType::kBatchNorm || \
(params).conv_epilogue == ConvEpilogueType::kBatchnormReLU || \
(params).conv_epilogue == ConvEpilogueType::kReLUBatchnorm) && \
(params).data_format == "NHWC") { \
EXPECT_THAT(ConvertAndRun(&(scope)), StatusIs(error::UNIMPLEMENTED)); \
return; \
} \
EXPECT_BUILD_OK(scope); \
}
#define SKIP_TRT7(x) \
if (!IS_TRT_VERSION_GE(8, 0, 0, 0) && (x)) { \
GTEST_SKIP(); \
}
TEST_P(TestQDQSuite, TestConv2DBasic) {
SKIP_TRT7(GetParam().qdq_on_output);
SKIP_TRT7(GetParam().data_format != "NCHW");
SKIP_TRT7(!GetParam().final_qdq);
Scope scope = Scope::NewRootScope();
auto input = AddInput(scope, 0, GetParam().data_format, {3, 28, 28});
Output out = input;
const int num_conv = 1;
std::array<int, 2> in_channels = {3, 16};
std::array<int, 2> out_channels = {16, 32};
for (int i = 0; i < num_conv; i++) {
out = AddConv2D(scope.WithOpName(absl::StrCat("conv_", i)), out,
in_channels[i], out_channels[i], {3, 3},
{1, 1}, GetParam().data_format,
GetParam().conv_has_bias, GetParam().conv_epilogue,
GetParam().qdq_on_output);
}
out = AddOutput(scope, out, 0, GetParam().final_qdq);
POLICY_TRT7(GetParam(), scope);
POLICY_TRT8(GetParam(), scope);
}
TEST_P(TestQDQSuite, TestMatMulBasic) {
if (GetParam().data_format != "NCHW" || !GetParam().conv_has_bias ||
GetParam().qdq_on_output ||
GetParam().conv_epilogue != ConvEpilogueType::kReLU) {
GTEST_SKIP();
}
Scope scope = Scope::NewRootScope();
auto input = AddInput(scope, 0, "NHW");
auto matmul_op = AddMatMul(scope, "matmul", input);
auto out = AddOutput(scope, matmul_op, 0, GetParam().final_qdq);
TF_EXPECT_OK(ConvertAndRun(&scope));
}
TEST_P(TestQDQSuite, AddBothBranchesQDQConvSingleInput) {
SKIP_TRT7(!GetParam().final_qdq);
SKIP_TRT7(GetParam().data_format != "NCHW");
Scope scope = Scope::NewRootScope();
auto input1 = AddInput(scope, 0, GetParam().data_format,
{3, 28, 28});
auto conv1 =
AddConv2D(scope, input1, 3, 16, {3, 3}, {1, 1},
GetParam().data_format, GetParam().conv_has_bias,
GetParam().conv_epilogue, GetParam().qdq_on_output);
auto conv2 =
AddConv2D(scope, input1, 3, 16, {3, 3},
{1, 1}, GetParam().data_format, GetParam().conv_has_bias,
GetParam().conv_epilogue, GetParam().qdq_on_output);
auto add =
ops::Add(scope.WithOpName("add"),
!GetParam().qdq_on_output ? AddQDQV2(scope, conv1) : conv1,
!GetParam().qdq_on_output ? AddQDQV2(scope, conv2) : conv2);
auto conv3 =
AddConv2D(scope.WithOpName("conv3"), conv2, 16, 16, {1, 1}, {1, 1},
GetParam().data_format, GetParam().conv_has_bias,
GetParam().conv_epilogue, GetParam().qdq_on_output);
auto out =
AddOutput(scope.WithOpName("output"), conv3, 0, GetParam().final_qdq);
POLICY_TRT7(GetParam(), scope);
POLICY_TRT8(GetParam(), scope);
}
TEST_P(TestQDQSuite, AddBothBranchesQDQMultipleInput) {
SKIP_TRT7(true);
Scope scope = Scope::NewRootScope();
auto input1 = AddInput(scope, 0, GetParam().data_format);
auto input2 = AddInput(scope, 1, GetParam().data_format);
auto add =
ops::Add(scope.WithOpName("add"),
!GetParam().qdq_on_output ? AddQDQV2(scope, input1) : input1,
!GetParam().qdq_on_output ? AddQDQV2(scope, input2) : input2);
auto output = AddOutput(scope, add, 0, true);
TF_EXPECT_OK(ConvertAndRun(&scope));
}
TEST_P(TestQDQSuite, TestConvMaxpool) {
SKIP_TRT7(!GetParam().final_qdq);
SKIP_TRT7(GetParam().data_format != "NCHW");
Scope scope = Scope::NewRootScope();
auto input = AddInput(scope, 0, GetParam().data_format,
{3, 28, 28});
auto conv1 =
AddConv2D(scope, input, 3, 16, {3, 3}, {1, 1},
GetParam().data_format, GetParam().conv_has_bias,
GetParam().conv_epilogue, GetParam().qdq_on_output);
ops::MaxPool maxpool =
ops::MaxPool(scope.WithOpName("maxpool"),
AddQDQV2(scope.WithOpName("mp_qdq_in"), conv1), {1, 1, 1, 1},
{1, 1, 1, 1}, "SAME",
ops::MaxPool::Attrs().DataFormat(GetParam().data_format));
auto output =
AddOutput(scope.WithOpName("output"), maxpool, 0, GetParam().final_qdq);
POLICY_TRT7(GetParam(), scope);
POLICY_TRT8(GetParam(), scope);
}
TEST_P(TestQDQSuite, TestConvMaxpoolConv) {
SKIP_TRT7(!GetParam().final_qdq);
SKIP_TRT7(GetParam().data_format != "NCHW");
Scope scope = Scope::NewRootScope();
auto input = AddInput(scope, 0, GetParam().data_format,
{3, 28, 28});
auto conv1 =
AddConv2D(scope, input, 3, 16, {3, 3}, {1, 1},
GetParam().data_format, GetParam().conv_has_bias,
GetParam().conv_epilogue, GetParam().qdq_on_output);
ops::MaxPool maxpool =
ops::MaxPool(scope.WithOpName("maxpool"),
AddQDQV2(scope.WithOpName("mp_qdq_in"), conv1), {1, 1, 1, 1},
{1, 1, 1, 1}, "SAME",
ops::MaxPool::Attrs().DataFormat(GetParam().data_format));
auto conv2 = AddConv2D(scope, maxpool, 16, 16, {3, 3}, {1, 1},
GetParam().data_format, GetParam().conv_has_bias,
GetParam().conv_epilogue, GetParam().qdq_on_output);
auto output =
AddOutput(scope.WithOpName("out"), conv2, 0, GetParam().final_qdq);
POLICY_TRT7(GetParam(), scope);
POLICY_TRT8(GetParam(), scope);
}
INSTANTIATE_TEST_SUITE_P(TestQDQSuiteInst, TestQDQSuite,
::testing::ValuesIn(EnumerateQDQTestOptions()));
}
}
}
#endif
#endif | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2tensorrt/convert/ops/quantization_ops.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2tensorrt/convert/ops/quantization_ops_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
ef36b79c-43e5-4468-b0d3-2d13d55df0fe | cpp | tensorflow/tensorflow | negate | tensorflow/lite/experimental/shlo/ops/negate.cc | tensorflow/lite/experimental/shlo/ops/negate_test.cc | #include "tensorflow/lite/experimental/shlo/ops/negate.h"
#include <functional>
#include "absl/status/status.h"
#include "tensorflow/lite/experimental/shlo/dispatch.h"
#include "tensorflow/lite/experimental/shlo/ops/unary_elementwise.h"
#include "tensorflow/lite/experimental/shlo/ops/util.h"
#include "tensorflow/lite/experimental/shlo/tensor.h"
namespace shlo_ref {
struct Negate : std::negate<void> {};
NegateOp Create(NegateOp::Attributes) { return {}; }
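// Checks that the output shape can be propagated from the input and that the
// tensor type is one negate supports: signed integer, float, or per-tensor
// quantized, with matching baseline types.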
absl::Status Prepare(NegateOp& op, const Tensor& input, Tensor& output) {
SHLO_REF_RETURN_ON_ERROR(Propagate(input.shape(), output.shape()));
SHLO_REF_RETURN_ON_ERROR(CheckSupportedTypes(CheckCtx("negate"), input,
IsSignedIntTensor, IsFloatTensor,
IsQuantizedPerTensorTensor));
SHLO_REF_RETURN_ON_ERROR(
CheckSameBaselineType(CheckCtx("negate"), input, output));
return absl::OkStatus();
}
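// Dispatches a dequantize-negate-requantize loop for per-tensor quantized
// tensors, or a plain elementwise loop for int/float tensors; any other type
// falls through to the FailedPrecondition error.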
absl::Status Evaluate(NegateOp& op, const Tensor& input, Tensor& output) {
Negate negate;
if (input.IsPerTensorQuantized()) {
DISPATCH_QUANTIZED(
detail::DequantizeOpQuantizePerTensor,
input.quantized_per_tensor_element_type().StorageType(),
input.quantized_per_tensor_element_type().ExpressedType(), negate,
input, output)
} else if (IsSignedIntTensor(input) || IsFloatTensor(input)) {
DISPATCH_INT_FLOAT(detail::EvaluateNoQuantization,
input.tensor_element_type(), negate, input, output);
}
return absl::FailedPreconditionError(
"stablehlo.negate: Unsupported tensor type.");
}
} | #include "tensorflow/lite/experimental/shlo/ops/negate.h"
#include <functional>
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/experimental/shlo/ops/test_util.h"
#include "tensorflow/lite/experimental/shlo/ops/unary_elementwise_test_util.h"
#include "tensorflow/lite/experimental/shlo/quantize.h"
#include "tensorflow/lite/experimental/shlo/quantized_tensor_element_type.h"
#include "tensorflow/lite/experimental/shlo/shape.h"
#include "tensorflow/lite/experimental/shlo/status_matcher.h"
#include "tensorflow/lite/experimental/shlo/tensor.h"
using testing::ElementsAreArray;
using testing::NanSensitiveFloatEq;
using testing::Pointwise;
namespace shlo_ref {
template <>
struct ParamName<NegateOp> {
static std::string Get() { return "Negate"; }
};
namespace {
struct Negate : std::negate<void> {
} negate_ref;
INSTANTIATE_TYPED_TEST_SUITE_P(Negate, UnaryElementwiseOpShapePropagationTest,
NegateOp, TestParamNames);
INSTANTIATE_TYPED_TEST_SUITE_P(
Negate, UnaryElementwiseSameBaselineElementTypeConstraintTest,
UnaryElementwiseConstraint1Types<NegateOp>, TestParamNames);
using UnsupportedTypes =
WithOpTypes<NegateOp, ConcatTypes<BoolTestType, PerAxisQuantizedTestTypes>>;
INSTANTIATE_TYPED_TEST_SUITE_P(Negate, UnaryElementwiseUnsupportedTypeTest,
UnsupportedTypes, TestParamNames);
template <class T>
struct NegateTest : ::testing::Test {};
TYPED_TEST_SUITE(NegateTest, ArithmeticTestTypes, TestParamNames);
TYPED_TEST(NegateTest, ArithmeticTensorsWork) {
using StorageT = typename TypeParam::StorageT;
const Shape shape({2, 3, 4});
Vector<StorageT> input_data = RandomBuffer<TypeParam::kStorage>(shape);
Vector<StorageT> output_data(shape.NumElements());
Tensor input_tensor{
.type = TensorType{.shape = shape, .element_type = TypeParam::kStorage},
.data = input_data.data()};
Tensor output_tensor{
.type = TensorType{.shape = shape, .element_type = TypeParam::kStorage},
.data = output_data.data()};
Vector<StorageT> expected_data(shape.NumElements());
absl::c_transform(input_data, expected_data.begin(), negate_ref);
auto op = Create(NegateOp::Attributes{});
ASSERT_OK(Prepare(op, input_tensor, output_tensor));
ASSERT_OK(Evaluate(op, input_tensor, output_tensor));
EXPECT_THAT(output_data, Pointwise(NanSensitiveFloatEq(), expected_data));
}
template <class T>
struct QuantizedNegateTest : ::testing::Test {};
TYPED_TEST_SUITE(QuantizedNegateTest, QuantizedTestTypes, TestParamNames);
TYPED_TEST(QuantizedNegateTest, PerTensorWorks) {
using StorageT = typename TypeParam::StorageT;
using ExpressedT = typename TypeParam::ExpressedT;
const Shape shape({2, 3, 4});
Vector<StorageT> input_data = RandomBuffer<TypeParam::kStorage>(shape);
Vector<StorageT> output_data(shape.NumElements());
const ExpressedT scale = static_cast<ExpressedT>(1.5);
const StorageT zero_point = static_cast<StorageT>(5);
const QuantizedElementTypePerTensor tensor_type =
QuantizedElementTypePerTensor(TypeParam::kStorage, zero_point,
TypeParam::kExpressed, scale);
Tensor input_tensor{
.type = QuantizedPerTensorTensorType{.shape = shape,
.element_type = tensor_type},
.data = input_data.data()};
Tensor output_tensor{
.type = QuantizedPerTensorTensorType{.shape = shape,
.element_type = tensor_type},
.data = output_data.data()};
Vector<StorageT> expected_data(shape.NumElements());
absl::c_transform(
input_data, expected_data.begin(), [zero_point, scale](auto v) {
const ExpressedT dequantized_input = Dequantize(v, zero_point, scale);
const ExpressedT dequantized_res = negate_ref(dequantized_input);
return Quantize<TypeParam::kStorage, TypeParam::kExpressed>(
dequantized_res, zero_point, static_cast<ExpressedT>(1.) / scale);
});
auto op = Create(NegateOp::Attributes{});
ASSERT_OK(Prepare(op, input_tensor, output_tensor));
ASSERT_OK(Evaluate(op, input_tensor, output_tensor));
EXPECT_THAT(output_data, ElementsAreArray(expected_data));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/shlo/ops/negate.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/shlo/ops/negate_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
847a5dd2-48c1-4e5d-839a-ac7295ef96e3 | cpp | abseil/abseil-cpp | traits | absl/random/internal/traits.h | absl/random/internal/traits_test.cc | #ifndef ABSL_RANDOM_INTERNAL_TRAITS_H_
#define ABSL_RANDOM_INTERNAL_TRAITS_H_
#include <cstdint>
#include <limits>
#include <type_traits>
#include "absl/base/config.h"
#include "absl/numeric/bits.h"
#include "absl/numeric/int128.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace random_internal {
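// is_widening_convertible<A, B>::value is true when every value of type A is
// exactly representable in B: B must have at least as many digits as A, and
// B's "rank" (unsigned integral < signed integral < floating point) must be
// at least A's.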
template <typename A, typename B>
class is_widening_convertible {
template <class T>
static constexpr int rank() {
return !std::numeric_limits<T>::is_integer +
std::numeric_limits<T>::is_signed;
}
public:
static constexpr bool value =
std::numeric_limits<A>::digits <= std::numeric_limits<B>::digits &&
rank<A>() <= rank<B>();
};
template <typename T>
struct IsIntegral : std::is_integral<T> {};
template <>
struct IsIntegral<absl::int128> : std::true_type {};
template <>
struct IsIntegral<absl::uint128> : std::true_type {};
template <typename T>
struct MakeUnsigned : std::make_unsigned<T> {};
template <>
struct MakeUnsigned<absl::int128> {
using type = absl::uint128;
};
template <>
struct MakeUnsigned<absl::uint128> {
using type = absl::uint128;
};
template <typename T>
struct IsUnsigned : std::is_unsigned<T> {};
template <>
struct IsUnsigned<absl::int128> : std::false_type {};
template <>
struct IsUnsigned<absl::uint128> : std::true_type {};
template <size_t N>
struct unsigned_bits;
template <>
struct unsigned_bits<8> {
using type = uint8_t;
};
template <>
struct unsigned_bits<16> {
using type = uint16_t;
};
template <>
struct unsigned_bits<32> {
using type = uint32_t;
};
template <>
struct unsigned_bits<64> {
using type = uint64_t;
};
template <>
struct unsigned_bits<128> {
using type = absl::uint128;
};
struct U256 {
uint128 hi;
uint128 lo;
};
template <>
struct unsigned_bits<256> {
using type = U256;
};
template <typename IntType>
struct make_unsigned_bits {
using type = typename unsigned_bits<
std::numeric_limits<typename MakeUnsigned<IntType>::type>::digits>::type;
};
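// Returns the bit width of `v`. absl::bit_width only accepts built-in unsigned
// types, so for 128-bit values the high 64 bits are checked first and
// half_bits is added when they are nonzero.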
template <typename T>
int BitWidth(T v) {
constexpr int half_bits = sizeof(T) * 8 / 2;
if (sizeof(T) == 16 && (v >> half_bits) != 0) {
return bit_width(static_cast<uint64_t>(v >> half_bits)) + half_bits;
} else {
return bit_width(static_cast<uint64_t>(v));
}
}
}
ABSL_NAMESPACE_END
}
#endif | #include "absl/random/internal/traits.h"
#include <cstdint>
#include <type_traits>
#include "gtest/gtest.h"
namespace {
using absl::random_internal::is_widening_convertible;
template <typename T>
void CheckWideningConvertsToSelf() {
static_assert(is_widening_convertible<T, T>::value,
"Type is not convertible to self!");
}
template <typename T, typename Next, typename... Args>
void CheckWideningConvertsToSelf() {
CheckWideningConvertsToSelf<T>();
CheckWideningConvertsToSelf<Next, Args...>();
}
template <typename T>
void CheckNotWideningConvertibleWithSigned() {
using signed_t = typename std::make_signed<T>::type;
static_assert(!is_widening_convertible<T, signed_t>::value,
"Unsigned type is convertible to same-sized signed-type!");
static_assert(!is_widening_convertible<signed_t, T>::value,
"Signed type is convertible to same-sized unsigned-type!");
}
template <typename T, typename Next, typename... Args>
void CheckNotWideningConvertibleWithSigned() {
CheckNotWideningConvertibleWithSigned<T>();
  CheckNotWideningConvertibleWithSigned<Next, Args...>();
}
template <typename T, typename Higher>
void CheckWideningConvertsToLargerTypes() {
using signed_t = typename std::make_signed<T>::type;
using higher_t = Higher;
using signed_higher_t = typename std::make_signed<Higher>::type;
static_assert(is_widening_convertible<T, higher_t>::value,
"Type not embeddable into larger type!");
static_assert(is_widening_convertible<T, signed_higher_t>::value,
"Type not embeddable into larger signed type!");
static_assert(!is_widening_convertible<signed_t, higher_t>::value,
"Signed type is embeddable into larger unsigned type!");
static_assert(is_widening_convertible<signed_t, signed_higher_t>::value,
"Signed type not embeddable into larger signed type!");
}
template <typename T, typename Higher, typename Next, typename... Args>
void CheckWideningConvertsToLargerTypes() {
CheckWideningConvertsToLargerTypes<T, Higher>();
CheckWideningConvertsToLargerTypes<Higher, Next, Args...>();
}
template <typename T, typename U, bool expect = true>
void CheckWideningConvertsTo() {
static_assert(is_widening_convertible<T, U>::value == expect,
"Unexpected result for is_widening_convertible<T, U>!");
}
TEST(TraitsTest, IsWideningConvertibleTest) {
constexpr bool kInvalid = false;
CheckWideningConvertsToSelf<
uint8_t, uint16_t, uint32_t, uint64_t,
int8_t, int16_t, int32_t, int64_t,
float, double>();
CheckNotWideningConvertibleWithSigned<
uint8_t, uint16_t, uint32_t, uint64_t>();
CheckWideningConvertsToLargerTypes<
uint8_t, uint16_t, uint32_t, uint64_t>();
CheckWideningConvertsTo<float, double>();
CheckWideningConvertsTo<uint16_t, float>();
CheckWideningConvertsTo<uint32_t, double>();
CheckWideningConvertsTo<uint64_t, double, kInvalid>();
CheckWideningConvertsTo<double, float, kInvalid>();
CheckWideningConvertsTo<bool, int>();
CheckWideningConvertsTo<bool, float>();
}
} | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/random/internal/traits.h | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/random/internal/traits_test.cc | 03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4 |
8dcaa8a0-e5c8-4330-b8cd-5a56f66cfb40 | cpp | google/cel-cpp | time_functions | runtime/standard/time_functions.cc | runtime/standard/time_functions_test.cc | #include "runtime/standard/time_functions.h"
#include <functional>
#include <string>
#include "absl/status/status.h"
#include "absl/strings/match.h"
#include "absl/strings/str_replace.h"
#include "absl/strings/string_view.h"
#include "base/builtins.h"
#include "base/function_adapter.h"
#include "common/value.h"
#include "common/value_manager.h"
#include "internal/overflow.h"
#include "internal/status_macros.h"
namespace cel {
namespace {
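// Resolves `tz` and breaks `timestamp` down into civil time. An empty string
// means UTC, a name like "America/New_York" is looked up in the timezone
// database, and a fixed offset such as "-8:00" is rewritten into a duration
// ("-8h00m") and applied directly.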
absl::Status FindTimeBreakdown(absl::Time timestamp, absl::string_view tz,
absl::TimeZone::CivilInfo* breakdown) {
absl::TimeZone time_zone;
if (tz.empty()) {
*breakdown = time_zone.At(timestamp);
return absl::OkStatus();
}
if (absl::LoadTimeZone(tz, &time_zone)) {
*breakdown = time_zone.At(timestamp);
return absl::OkStatus();
}
if (absl::StrContains(tz, ":")) {
std::string dur = absl::StrCat(tz, "m");
absl::StrReplaceAll({{":", "h"}}, &dur);
absl::Duration d;
if (absl::ParseDuration(dur, &d)) {
timestamp += d;
*breakdown = time_zone.At(timestamp);
return absl::OkStatus();
}
}
return absl::InvalidArgumentError("Invalid timezone");
}
Value GetTimeBreakdownPart(
ValueManager& value_factory, absl::Time timestamp, absl::string_view tz,
const std::function<int64_t(const absl::TimeZone::CivilInfo&)>&
extractor_func) {
absl::TimeZone::CivilInfo breakdown;
auto status = FindTimeBreakdown(timestamp, tz, &breakdown);
if (!status.ok()) {
return value_factory.CreateErrorValue(status);
}
return value_factory.CreateIntValue(extractor_func(breakdown));
}
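// CEL's month, day-of-year, and day-of-month accessors are zero-based (getDate
// is the one-based variant), hence the `- 1` adjustments below.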
Value GetFullYear(ValueManager& value_factory, absl::Time timestamp,
absl::string_view tz) {
return GetTimeBreakdownPart(value_factory, timestamp, tz,
[](const absl::TimeZone::CivilInfo& breakdown) {
return breakdown.cs.year();
});
}
Value GetMonth(ValueManager& value_factory, absl::Time timestamp,
absl::string_view tz) {
return GetTimeBreakdownPart(value_factory, timestamp, tz,
[](const absl::TimeZone::CivilInfo& breakdown) {
return breakdown.cs.month() - 1;
});
}
Value GetDayOfYear(ValueManager& value_factory, absl::Time timestamp,
absl::string_view tz) {
return GetTimeBreakdownPart(
value_factory, timestamp, tz,
[](const absl::TimeZone::CivilInfo& breakdown) {
return absl::GetYearDay(absl::CivilDay(breakdown.cs)) - 1;
});
}
Value GetDayOfMonth(ValueManager& value_factory, absl::Time timestamp,
absl::string_view tz) {
return GetTimeBreakdownPart(value_factory, timestamp, tz,
[](const absl::TimeZone::CivilInfo& breakdown) {
return breakdown.cs.day() - 1;
});
}
Value GetDate(ValueManager& value_factory, absl::Time timestamp,
absl::string_view tz) {
return GetTimeBreakdownPart(value_factory, timestamp, tz,
[](const absl::TimeZone::CivilInfo& breakdown) {
return breakdown.cs.day();
});
}
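// absl::Weekday numbers Monday as 0 through Sunday as 6, while CEL expects
// Sunday as 0, so the weekday value is rotated before being returned.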
Value GetDayOfWeek(ValueManager& value_factory, absl::Time timestamp,
absl::string_view tz) {
return GetTimeBreakdownPart(
value_factory, timestamp, tz,
[](const absl::TimeZone::CivilInfo& breakdown) {
absl::Weekday weekday = absl::GetWeekday(breakdown.cs);
int weekday_num = static_cast<int>(weekday);
weekday_num = (weekday_num == 6) ? 0 : weekday_num + 1;
return weekday_num;
});
}
Value GetHours(ValueManager& value_factory, absl::Time timestamp,
absl::string_view tz) {
return GetTimeBreakdownPart(value_factory, timestamp, tz,
[](const absl::TimeZone::CivilInfo& breakdown) {
return breakdown.cs.hour();
});
}
Value GetMinutes(ValueManager& value_factory, absl::Time timestamp,
absl::string_view tz) {
return GetTimeBreakdownPart(value_factory, timestamp, tz,
[](const absl::TimeZone::CivilInfo& breakdown) {
return breakdown.cs.minute();
});
}
Value GetSeconds(ValueManager& value_factory, absl::Time timestamp,
absl::string_view tz) {
return GetTimeBreakdownPart(value_factory, timestamp, tz,
[](const absl::TimeZone::CivilInfo& breakdown) {
return breakdown.cs.second();
});
}
Value GetMilliseconds(ValueManager& value_factory, absl::Time timestamp,
absl::string_view tz) {
return GetTimeBreakdownPart(
value_factory, timestamp, tz,
[](const absl::TimeZone::CivilInfo& breakdown) {
return absl::ToInt64Milliseconds(breakdown.subsecond);
});
}
absl::Status RegisterTimestampFunctions(FunctionRegistry& registry,
const RuntimeOptions& options) {
CEL_RETURN_IF_ERROR(registry.Register(
BinaryFunctionAdapter<Value, absl::Time, const StringValue&>::
CreateDescriptor(builtin::kFullYear, true),
BinaryFunctionAdapter<Value, absl::Time, const StringValue&>::
WrapFunction([](ValueManager& value_factory, absl::Time ts,
const StringValue& tz) -> Value {
return GetFullYear(value_factory, ts, tz.ToString());
})));
CEL_RETURN_IF_ERROR(registry.Register(
UnaryFunctionAdapter<Value, absl::Time>::CreateDescriptor(
builtin::kFullYear, true),
UnaryFunctionAdapter<Value, absl::Time>::WrapFunction(
[](ValueManager& value_factory, absl::Time ts) -> Value {
return GetFullYear(value_factory, ts, "");
})));
CEL_RETURN_IF_ERROR(registry.Register(
BinaryFunctionAdapter<Value, absl::Time, const StringValue&>::
CreateDescriptor(builtin::kMonth, true),
BinaryFunctionAdapter<Value, absl::Time, const StringValue&>::
WrapFunction([](ValueManager& value_factory, absl::Time ts,
const StringValue& tz) -> Value {
return GetMonth(value_factory, ts, tz.ToString());
})));
CEL_RETURN_IF_ERROR(registry.Register(
UnaryFunctionAdapter<Value, absl::Time>::CreateDescriptor(builtin::kMonth,
true),
UnaryFunctionAdapter<Value, absl::Time>::WrapFunction(
[](ValueManager& value_factory, absl::Time ts) -> Value {
return GetMonth(value_factory, ts, "");
})));
CEL_RETURN_IF_ERROR(registry.Register(
BinaryFunctionAdapter<Value, absl::Time, const StringValue&>::
CreateDescriptor(builtin::kDayOfYear, true),
BinaryFunctionAdapter<Value, absl::Time, const StringValue&>::
WrapFunction([](ValueManager& value_factory, absl::Time ts,
const StringValue& tz) -> Value {
return GetDayOfYear(value_factory, ts, tz.ToString());
})));
CEL_RETURN_IF_ERROR(registry.Register(
UnaryFunctionAdapter<Value, absl::Time>::CreateDescriptor(
builtin::kDayOfYear, true),
UnaryFunctionAdapter<Value, absl::Time>::WrapFunction(
[](ValueManager& value_factory, absl::Time ts) -> Value {
return GetDayOfYear(value_factory, ts, "");
})));
CEL_RETURN_IF_ERROR(registry.Register(
BinaryFunctionAdapter<Value, absl::Time, const StringValue&>::
CreateDescriptor(builtin::kDayOfMonth, true),
BinaryFunctionAdapter<Value, absl::Time, const StringValue&>::
WrapFunction([](ValueManager& value_factory, absl::Time ts,
const StringValue& tz) -> Value {
return GetDayOfMonth(value_factory, ts, tz.ToString());
})));
CEL_RETURN_IF_ERROR(registry.Register(
UnaryFunctionAdapter<Value, absl::Time>::CreateDescriptor(
builtin::kDayOfMonth, true),
UnaryFunctionAdapter<Value, absl::Time>::WrapFunction(
[](ValueManager& value_factory, absl::Time ts) -> Value {
return GetDayOfMonth(value_factory, ts, "");
})));
CEL_RETURN_IF_ERROR(registry.Register(
BinaryFunctionAdapter<Value, absl::Time, const StringValue&>::
CreateDescriptor(builtin::kDate, true),
BinaryFunctionAdapter<Value, absl::Time, const StringValue&>::
WrapFunction([](ValueManager& value_factory, absl::Time ts,
const StringValue& tz) -> Value {
return GetDate(value_factory, ts, tz.ToString());
})));
CEL_RETURN_IF_ERROR(registry.Register(
UnaryFunctionAdapter<Value, absl::Time>::CreateDescriptor(builtin::kDate,
true),
UnaryFunctionAdapter<Value, absl::Time>::WrapFunction(
[](ValueManager& value_factory, absl::Time ts) -> Value {
return GetDate(value_factory, ts, "");
})));
CEL_RETURN_IF_ERROR(registry.Register(
BinaryFunctionAdapter<Value, absl::Time, const StringValue&>::
CreateDescriptor(builtin::kDayOfWeek, true),
BinaryFunctionAdapter<Value, absl::Time, const StringValue&>::
WrapFunction([](ValueManager& value_factory, absl::Time ts,
const StringValue& tz) -> Value {
return GetDayOfWeek(value_factory, ts, tz.ToString());
})));
CEL_RETURN_IF_ERROR(registry.Register(
UnaryFunctionAdapter<Value, absl::Time>::CreateDescriptor(
builtin::kDayOfWeek, true),
UnaryFunctionAdapter<Value, absl::Time>::WrapFunction(
[](ValueManager& value_factory, absl::Time ts) -> Value {
return GetDayOfWeek(value_factory, ts, "");
})));
CEL_RETURN_IF_ERROR(registry.Register(
BinaryFunctionAdapter<Value, absl::Time, const StringValue&>::
CreateDescriptor(builtin::kHours, true),
BinaryFunctionAdapter<Value, absl::Time, const StringValue&>::
WrapFunction([](ValueManager& value_factory, absl::Time ts,
const StringValue& tz) -> Value {
return GetHours(value_factory, ts, tz.ToString());
})));
CEL_RETURN_IF_ERROR(registry.Register(
UnaryFunctionAdapter<Value, absl::Time>::CreateDescriptor(builtin::kHours,
true),
UnaryFunctionAdapter<Value, absl::Time>::WrapFunction(
[](ValueManager& value_factory, absl::Time ts) -> Value {
return GetHours(value_factory, ts, "");
})));
CEL_RETURN_IF_ERROR(registry.Register(
BinaryFunctionAdapter<Value, absl::Time, const StringValue&>::
CreateDescriptor(builtin::kMinutes, true),
BinaryFunctionAdapter<Value, absl::Time, const StringValue&>::
WrapFunction([](ValueManager& value_factory, absl::Time ts,
const StringValue& tz) -> Value {
return GetMinutes(value_factory, ts, tz.ToString());
})));
CEL_RETURN_IF_ERROR(registry.Register(
UnaryFunctionAdapter<Value, absl::Time>::CreateDescriptor(
builtin::kMinutes, true),
UnaryFunctionAdapter<Value, absl::Time>::WrapFunction(
[](ValueManager& value_factory, absl::Time ts) -> Value {
return GetMinutes(value_factory, ts, "");
})));
CEL_RETURN_IF_ERROR(registry.Register(
BinaryFunctionAdapter<Value, absl::Time, const StringValue&>::
CreateDescriptor(builtin::kSeconds, true),
BinaryFunctionAdapter<Value, absl::Time, const StringValue&>::
WrapFunction([](ValueManager& value_factory, absl::Time ts,
const StringValue& tz) -> Value {
return GetSeconds(value_factory, ts, tz.ToString());
})));
CEL_RETURN_IF_ERROR(registry.Register(
UnaryFunctionAdapter<Value, absl::Time>::CreateDescriptor(
builtin::kSeconds, true),
UnaryFunctionAdapter<Value, absl::Time>::WrapFunction(
[](ValueManager& value_factory, absl::Time ts) -> Value {
return GetSeconds(value_factory, ts, "");
})));
CEL_RETURN_IF_ERROR(registry.Register(
BinaryFunctionAdapter<Value, absl::Time, const StringValue&>::
CreateDescriptor(builtin::kMilliseconds, true),
BinaryFunctionAdapter<Value, absl::Time, const StringValue&>::
WrapFunction([](ValueManager& value_factory, absl::Time ts,
const StringValue& tz) -> Value {
return GetMilliseconds(value_factory, ts, tz.ToString());
})));
return registry.Register(
UnaryFunctionAdapter<Value, absl::Time>::CreateDescriptor(
builtin::kMilliseconds, true),
UnaryFunctionAdapter<Value, absl::Time>::WrapFunction(
[](ValueManager& value_factory, absl::Time ts) -> Value {
return GetMilliseconds(value_factory, ts, "");
}));
}
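// Registers _+_ and _-_ over timestamp/duration operands using
// overflow-checked arithmetic; on overflow the result is an error value.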
absl::Status RegisterCheckedTimeArithmeticFunctions(
FunctionRegistry& registry) {
CEL_RETURN_IF_ERROR(registry.Register(
BinaryFunctionAdapter<Value, absl::Time,
absl::Duration>::CreateDescriptor(builtin::kAdd,
false),
BinaryFunctionAdapter<absl::StatusOr<Value>, absl::Time, absl::Duration>::
WrapFunction([](ValueManager& value_factory, absl::Time t1,
absl::Duration d2) -> absl::StatusOr<Value> {
auto sum = cel::internal::CheckedAdd(t1, d2);
if (!sum.ok()) {
return value_factory.CreateErrorValue(sum.status());
}
return value_factory.CreateTimestampValue(*sum);
})));
CEL_RETURN_IF_ERROR(registry.Register(
BinaryFunctionAdapter<absl::StatusOr<Value>, absl::Duration,
absl::Time>::CreateDescriptor(builtin::kAdd, false),
BinaryFunctionAdapter<absl::StatusOr<Value>, absl::Duration, absl::Time>::
WrapFunction([](ValueManager& value_factory, absl::Duration d2,
absl::Time t1) -> absl::StatusOr<Value> {
auto sum = cel::internal::CheckedAdd(t1, d2);
if (!sum.ok()) {
return value_factory.CreateErrorValue(sum.status());
}
return value_factory.CreateTimestampValue(*sum);
})));
CEL_RETURN_IF_ERROR(registry.Register(
BinaryFunctionAdapter<absl::StatusOr<Value>, absl::Duration,
absl::Duration>::CreateDescriptor(builtin::kAdd,
false),
BinaryFunctionAdapter<absl::StatusOr<Value>, absl::Duration,
absl::Duration>::
WrapFunction([](ValueManager& value_factory, absl::Duration d1,
absl::Duration d2) -> absl::StatusOr<Value> {
auto sum = cel::internal::CheckedAdd(d1, d2);
if (!sum.ok()) {
return value_factory.CreateErrorValue(sum.status());
}
return value_factory.CreateDurationValue(*sum);
})));
CEL_RETURN_IF_ERROR(registry.Register(
BinaryFunctionAdapter<absl::StatusOr<Value>, absl::Time, absl::Duration>::
CreateDescriptor(builtin::kSubtract, false),
BinaryFunctionAdapter<absl::StatusOr<Value>, absl::Time, absl::Duration>::
WrapFunction([](ValueManager& value_factory, absl::Time t1,
absl::Duration d2) -> absl::StatusOr<Value> {
auto diff = cel::internal::CheckedSub(t1, d2);
if (!diff.ok()) {
return value_factory.CreateErrorValue(diff.status());
}
return value_factory.CreateTimestampValue(*diff);
})));
CEL_RETURN_IF_ERROR(registry.Register(
BinaryFunctionAdapter<absl::StatusOr<Value>, absl::Time,
absl::Time>::CreateDescriptor(builtin::kSubtract,
false),
BinaryFunctionAdapter<absl::StatusOr<Value>, absl::Time, absl::Time>::
WrapFunction([](ValueManager& value_factory, absl::Time t1,
absl::Time t2) -> absl::StatusOr<Value> {
auto diff = cel::internal::CheckedSub(t1, t2);
if (!diff.ok()) {
return value_factory.CreateErrorValue(diff.status());
}
return value_factory.CreateDurationValue(*diff);
})));
CEL_RETURN_IF_ERROR(registry.Register(
BinaryFunctionAdapter<
absl::StatusOr<Value>, absl::Duration,
absl::Duration>::CreateDescriptor(builtin::kSubtract, false),
BinaryFunctionAdapter<absl::StatusOr<Value>, absl::Duration,
absl::Duration>::
WrapFunction([](ValueManager& value_factory, absl::Duration d1,
absl::Duration d2) -> absl::StatusOr<Value> {
auto diff = cel::internal::CheckedSub(d1, d2);
if (!diff.ok()) {
return value_factory.CreateErrorValue(diff.status());
}
return value_factory.CreateDurationValue(*diff);
})));
return absl::OkStatus();
}
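// Same operator set, but with plain (unchecked) absl time arithmetic; used
// when timestamp/duration overflow errors are disabled in RuntimeOptions.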
absl::Status RegisterUncheckedTimeArithmeticFunctions(
FunctionRegistry& registry) {
CEL_RETURN_IF_ERROR(registry.Register(
BinaryFunctionAdapter<Value, absl::Time,
absl::Duration>::CreateDescriptor(builtin::kAdd,
false),
BinaryFunctionAdapter<Value, absl::Time, absl::Duration>::WrapFunction(
[](ValueManager& value_factory, absl::Time t1,
absl::Duration d2) -> Value {
return value_factory.CreateUncheckedTimestampValue(t1 + d2);
})));
CEL_RETURN_IF_ERROR(registry.Register(
BinaryFunctionAdapter<Value, absl::Duration,
absl::Time>::CreateDescriptor(builtin::kAdd, false),
BinaryFunctionAdapter<Value, absl::Duration, absl::Time>::WrapFunction(
[](ValueManager& value_factory, absl::Duration d2,
absl::Time t1) -> Value {
return value_factory.CreateUncheckedTimestampValue(t1 + d2);
})));
CEL_RETURN_IF_ERROR(registry.Register(
BinaryFunctionAdapter<Value, absl::Duration,
absl::Duration>::CreateDescriptor(builtin::kAdd,
false),
BinaryFunctionAdapter<Value, absl::Duration, absl::Duration>::
WrapFunction([](ValueManager& value_factory, absl::Duration d1,
absl::Duration d2) -> Value {
return value_factory.CreateUncheckedDurationValue(d1 + d2);
})));
CEL_RETURN_IF_ERROR(registry.Register(
BinaryFunctionAdapter<Value, absl::Time, absl::Duration>::
CreateDescriptor(builtin::kSubtract, false),
BinaryFunctionAdapter<Value, absl::Time, absl::Duration>::WrapFunction(
[](ValueManager& value_factory, absl::Time t1,
absl::Duration d2) -> Value {
return value_factory.CreateUncheckedTimestampValue(t1 - d2);
})));
CEL_RETURN_IF_ERROR(registry.Register(
BinaryFunctionAdapter<Value, absl::Time, absl::Time>::CreateDescriptor(
builtin::kSubtract, false),
BinaryFunctionAdapter<Value, absl::Time, absl::Time>::WrapFunction(
[](ValueManager& value_factory, absl::Time t1,
absl::Time t2) -> Value {
return value_factory.CreateUncheckedDurationValue(t1 - t2);
})));
CEL_RETURN_IF_ERROR(registry.Register(
BinaryFunctionAdapter<Value, absl::Duration, absl::Duration>::
CreateDescriptor(builtin::kSubtract, false),
BinaryFunctionAdapter<Value, absl::Duration, absl::Duration>::
WrapFunction([](ValueManager& value_factory, absl::Duration d1,
absl::Duration d2) -> Value {
return value_factory.CreateUncheckedDurationValue(d1 - d2);
})));
return absl::OkStatus();
}
absl::Status RegisterDurationFunctions(FunctionRegistry& registry) {
using DurationAccessorFunction =
UnaryFunctionAdapter<int64_t, absl::Duration>;
CEL_RETURN_IF_ERROR(registry.Register(
DurationAccessorFunction::CreateDescriptor(builtin::kHours, true),
DurationAccessorFunction::WrapFunction(
[](ValueManager&, absl::Duration d) -> int64_t {
return absl::ToInt64Hours(d);
})));
CEL_RETURN_IF_ERROR(registry.Register(
DurationAccessorFunction::CreateDescriptor(builtin::kMinutes, true),
DurationAccessorFunction::WrapFunction(
[](ValueManager&, absl::Duration d) -> int64_t {
return absl::ToInt64Minutes(d);
})));
CEL_RETURN_IF_ERROR(registry.Register(
DurationAccessorFunction::CreateDescriptor(builtin::kSeconds, true),
DurationAccessorFunction::WrapFunction(
[](ValueManager&, absl::Duration d) -> int64_t {
return absl::ToInt64Seconds(d);
})));
return registry.Register(
DurationAccessorFunction::CreateDescriptor(builtin::kMilliseconds, true),
DurationAccessorFunction::WrapFunction(
[](ValueManager&, absl::Duration d) -> int64_t {
constexpr int64_t millis_per_second = 1000L;
return absl::ToInt64Milliseconds(d) % millis_per_second;
}));
}
}
absl::Status RegisterTimeFunctions(FunctionRegistry& registry,
const RuntimeOptions& options) {
CEL_RETURN_IF_ERROR(RegisterTimestampFunctions(registry, options));
CEL_RETURN_IF_ERROR(RegisterDurationFunctions(registry));
if (options.enable_timestamp_duration_overflow_errors) {
return RegisterCheckedTimeArithmeticFunctions(registry);
}
return RegisterUncheckedTimeArithmeticFunctions(registry);
}
} | #include "runtime/standard/time_functions.h"
#include <vector>
#include "base/builtins.h"
#include "base/function_descriptor.h"
#include "internal/testing.h"
namespace cel {
namespace {
using ::testing::UnorderedElementsAre;
MATCHER_P3(MatchesOperatorDescriptor, name, expected_kind1, expected_kind2,
"") {
const FunctionDescriptor& descriptor = *arg;
std::vector<Kind> types{expected_kind1, expected_kind2};
return descriptor.name() == name && descriptor.receiver_style() == false &&
descriptor.types() == types;
}
MATCHER_P2(MatchesTimeAccessor, name, kind, "") {
const FunctionDescriptor& descriptor = *arg;
std::vector<Kind> types{kind};
return descriptor.name() == name && descriptor.receiver_style() == true &&
descriptor.types() == types;
}
MATCHER_P2(MatchesTimezoneTimeAccessor, name, kind, "") {
const FunctionDescriptor& descriptor = *arg;
std::vector<Kind> types{kind, Kind::kString};
return descriptor.name() == name && descriptor.receiver_style() == true &&
descriptor.types() == types;
}
TEST(RegisterTimeFunctions, MathOperatorsRegistered) {
FunctionRegistry registry;
RuntimeOptions options;
ASSERT_OK(RegisterTimeFunctions(registry, options));
auto registered_functions = registry.ListFunctions();
EXPECT_THAT(registered_functions[builtin::kAdd],
UnorderedElementsAre(
MatchesOperatorDescriptor(builtin::kAdd, Kind::kDuration,
Kind::kDuration),
MatchesOperatorDescriptor(builtin::kAdd, Kind::kTimestamp,
Kind::kDuration),
MatchesOperatorDescriptor(builtin::kAdd, Kind::kDuration,
Kind::kTimestamp)));
EXPECT_THAT(registered_functions[builtin::kSubtract],
UnorderedElementsAre(
MatchesOperatorDescriptor(builtin::kSubtract, Kind::kDuration,
Kind::kDuration),
MatchesOperatorDescriptor(builtin::kSubtract,
Kind::kTimestamp, Kind::kDuration),
MatchesOperatorDescriptor(
builtin::kSubtract, Kind::kTimestamp, Kind::kTimestamp)));
}
TEST(RegisterTimeFunctions, AccessorsRegistered) {
FunctionRegistry registry;
RuntimeOptions options;
ASSERT_OK(RegisterTimeFunctions(registry, options));
auto registered_functions = registry.ListFunctions();
EXPECT_THAT(
registered_functions[builtin::kFullYear],
UnorderedElementsAre(
MatchesTimeAccessor(builtin::kFullYear, Kind::kTimestamp),
MatchesTimezoneTimeAccessor(builtin::kFullYear, Kind::kTimestamp)));
EXPECT_THAT(
registered_functions[builtin::kDate],
UnorderedElementsAre(
MatchesTimeAccessor(builtin::kDate, Kind::kTimestamp),
MatchesTimezoneTimeAccessor(builtin::kDate, Kind::kTimestamp)));
EXPECT_THAT(
registered_functions[builtin::kMonth],
UnorderedElementsAre(
MatchesTimeAccessor(builtin::kMonth, Kind::kTimestamp),
MatchesTimezoneTimeAccessor(builtin::kMonth, Kind::kTimestamp)));
EXPECT_THAT(
registered_functions[builtin::kDayOfYear],
UnorderedElementsAre(
MatchesTimeAccessor(builtin::kDayOfYear, Kind::kTimestamp),
MatchesTimezoneTimeAccessor(builtin::kDayOfYear, Kind::kTimestamp)));
EXPECT_THAT(
registered_functions[builtin::kDayOfMonth],
UnorderedElementsAre(
MatchesTimeAccessor(builtin::kDayOfMonth, Kind::kTimestamp),
MatchesTimezoneTimeAccessor(builtin::kDayOfMonth, Kind::kTimestamp)));
EXPECT_THAT(
registered_functions[builtin::kDayOfWeek],
UnorderedElementsAre(
MatchesTimeAccessor(builtin::kDayOfWeek, Kind::kTimestamp),
MatchesTimezoneTimeAccessor(builtin::kDayOfWeek, Kind::kTimestamp)));
EXPECT_THAT(
registered_functions[builtin::kHours],
UnorderedElementsAre(
MatchesTimeAccessor(builtin::kHours, Kind::kTimestamp),
MatchesTimezoneTimeAccessor(builtin::kHours, Kind::kTimestamp),
MatchesTimeAccessor(builtin::kHours, Kind::kDuration)));
EXPECT_THAT(
registered_functions[builtin::kMinutes],
UnorderedElementsAre(
MatchesTimeAccessor(builtin::kMinutes, Kind::kTimestamp),
MatchesTimezoneTimeAccessor(builtin::kMinutes, Kind::kTimestamp),
MatchesTimeAccessor(builtin::kMinutes, Kind::kDuration)));
EXPECT_THAT(
registered_functions[builtin::kSeconds],
UnorderedElementsAre(
MatchesTimeAccessor(builtin::kSeconds, Kind::kTimestamp),
MatchesTimezoneTimeAccessor(builtin::kSeconds, Kind::kTimestamp),
MatchesTimeAccessor(builtin::kSeconds, Kind::kDuration)));
EXPECT_THAT(
registered_functions[builtin::kMilliseconds],
UnorderedElementsAre(
MatchesTimeAccessor(builtin::kMilliseconds, Kind::kTimestamp),
MatchesTimezoneTimeAccessor(builtin::kMilliseconds, Kind::kTimestamp),
MatchesTimeAccessor(builtin::kMilliseconds, Kind::kDuration)));
}
}
} | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/runtime/standard/time_functions.cc | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/runtime/standard/time_functions_test.cc | 4552db5798fb0853b131b783d8875794334fae7f |
779e5c14-9a64-4b41-8d98-5656de7f3bb9 | cpp | tensorflow/tensorflow | extract_outside_compilation_pass | tensorflow/compiler/jit/extract_outside_compilation_pass.cc | tensorflow/compiler/jit/extract_outside_compilation_pass_test.cc | #include "tensorflow/compiler/jit/extract_outside_compilation_pass.h"
#include "absl/container/flat_hash_map.h"
#include "absl/strings/match.h"
#include "absl/strings/str_cat.h"
#include "tensorflow/compiler/jit/encapsulate_subgraphs_pass.h"
#include "tensorflow/compiler/jit/encapsulate_util.h"
#include "tensorflow/compiler/tf2xla/side_effect_util.h"
#include "tensorflow/compiler/tf2xla/tf2xla_util.h"
#include "xla/status_macros.h"
#include "xla/xla_data.pb.h"
#include "tensorflow/core/common_runtime/function.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/graph_to_functiondef.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/graph/algorithm.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/gtl/cleanup.h"
#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/util/dump_graph.h"
namespace tensorflow {
namespace {
std::optional<string> HostGraphControlRetMapping(const Node* n) {
if (HasNodeAttr(n->def(), kXlaHasHostTransferAttrName)) {
return n->name();
}
return std::nullopt;
}
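// Adds a "<xla_cluster_name>_key_placeholder" Placeholder node; it carries the
// compilation key that the _XlaRecvAtHost/_XlaSendFromHost nodes use to pair
// host transfers with the right XLA computation.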
absl::StatusOr<Node*> AddHostComputeKeyPlaceholder(
const string& xla_cluster_name, Graph* g) {
NodeDef key_def;
NodeDefBuilder builder(absl::StrCat(xla_cluster_name, "_key_placeholder"),
"Placeholder");
builder.Attr("dtype", DT_STRING);
builder.Attr("shape", PartialTensorShape({2}));
builder.Attr("_host_compute_call_node", xla_cluster_name);
Status s = builder.Finalize(&key_def);
if (!s.ok()) return s;
Node* n = g->AddNode(key_def, &s);
if (!s.ok()) return s;
return n;
}
bool IsKeyPlaceholderNode(const Node& n) {
return n.type_string() == "Placeholder" &&
absl::EndsWith(n.name(), "_key_placeholder");
}
std::vector<Node*> GatherNodesWithType(const Graph& g, const string& type) {
std::vector<Node*> result;
for (Node* n : g.nodes()) {
if (n->type_string() == type) {
result.push_back(n);
}
}
return result;
}
Status GetArgDataTypes(const std::vector<Node*>& arg_nodes,
std::vector<DataType>* recv_at_host_dtypes) {
recv_at_host_dtypes->resize(arg_nodes.size(), DT_INVALID);
for (auto* n : arg_nodes) {
int index;
TF_RETURN_IF_ERROR(GetNodeAttr(n->attrs(), "index", &index));
DataType dtype;
TF_RETURN_IF_ERROR(GetNodeAttr(n->attrs(), "T", &dtype));
(*recv_at_host_dtypes)[index] = dtype;
}
for (int i = 0, end = recv_at_host_dtypes->size(); i < end; i++) {
if ((*recv_at_host_dtypes)[i] == DT_INVALID) {
return errors::Internal("Cannot get datatype for input ", i);
}
}
return absl::OkStatus();
}
absl::StatusOr<Node*> BuildRecvAtHostNode(
Graph* g, const string& oc_cluster_name,
const std::vector<DataType>& recv_at_host_dtypes, Node* key_placeholder) {
NodeDefBuilder recv_at_host_builder(
absl::StrCat("outside_compilation_", oc_cluster_name, "_recv"),
"_XlaRecvAtHost");
NodeDef recv_at_host_def;
recv_at_host_builder.Attr("Toutputs", recv_at_host_dtypes);
AttrValue device_ordinal_value;
device_ordinal_value.set_placeholder("_device_ordinal");
recv_at_host_builder.Attr("device_ordinal", device_ordinal_value);
recv_at_host_builder.Attr(
"key", absl::StrCat("host_compute_channel_", oc_cluster_name));
recv_at_host_builder.Attr(kXlaHasHostTransferAttrName, true);
recv_at_host_builder.Input(key_placeholder->name(), 0, DT_STRING);
TF_RETURN_IF_ERROR(recv_at_host_builder.Finalize(&recv_at_host_def));
TF_ASSIGN_OR_RETURN(Node * recv_at_host_node, g->AddNode(recv_at_host_def));
return recv_at_host_node;
}
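// Replaces all _Arg nodes in the outside compilation graph with a single
// _XlaRecvAtHost node; each former _Arg's consumers are rewired to output
// `index` of the recv node.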
absl::StatusOr<Node*> ReplaceArgNodesWithRecvAtHostNode(
Graph* g, const string& oc_cluster_name,
std::vector<DataType>* recv_at_host_dtypes, Node* key_placeholder) {
std::vector<Node*> arg_nodes = GatherNodesWithType(*g, "_Arg");
TF_RETURN_IF_ERROR(GetArgDataTypes(arg_nodes, recv_at_host_dtypes));
TF_ASSIGN_OR_RETURN(
Node * recv_at_host_node,
BuildRecvAtHostNode(g, oc_cluster_name, *recv_at_host_dtypes,
key_placeholder));
for (auto* n : arg_nodes) {
int index;
TF_RETURN_IF_ERROR(GetNodeAttr(n->attrs(), "index", &index));
std::vector<OutEdgeInfo> out_edge_info;
out_edge_info.reserve(n->out_edges().size());
for (auto edge : n->out_edges()) {
out_edge_info.push_back(
{edge->dst(), edge->src_output(), edge->dst_input()});
}
g->RemoveNode(n);
for (const OutEdgeInfo& edge : out_edge_info) {
if (edge.dst_input == Graph::kControlSlot) {
g->AddControlEdge(recv_at_host_node, edge.dst);
} else {
g->AddEdge(recv_at_host_node, index, edge.dst, edge.dst_input);
}
}
for (int i = 0, end = out_edge_info.size(); i < end; i++) {
const OutEdgeInfo edge = out_edge_info[i];
if (edge.dst_input == Graph::kControlSlot) {
continue;
}
Node* dst = edge.dst;
NodeDef new_def = dst->def();
*new_def.mutable_input(edge.dst_input) =
absl::StrCat(recv_at_host_node->name(), ":", index);
TF_ASSIGN_OR_RETURN(Node * dst_replace, ReplaceNode(g, dst, new_def));
for (int j = i + 1, end = out_edge_info.size(); j < end; j++) {
if (out_edge_info[j].dst == dst) {
out_edge_info[j].dst = dst_replace;
}
}
}
}
g->AddEdge(key_placeholder, 0, recv_at_host_node, 0);
return recv_at_host_node;
}
Status GetRetDataTypes(const std::vector<Node*>& ret_nodes,
std::vector<DataType>* send_from_host_dtypes) {
send_from_host_dtypes->resize(ret_nodes.size(), DT_INVALID);
for (auto* n : ret_nodes) {
int index;
TF_RETURN_IF_ERROR(GetNodeAttr(n->attrs(), "index", &index));
DataType dtype;
TF_RETURN_IF_ERROR(GetNodeAttr(n->attrs(), "T", &dtype));
(*send_from_host_dtypes)[index] = dtype;
}
for (int i = 0, end = send_from_host_dtypes->size(); i < end; i++) {
if ((*send_from_host_dtypes)[i] == DT_INVALID) {
return errors::Internal("Cannot get datatype for output ", i);
}
}
return absl::OkStatus();
}
absl::StatusOr<Node*> BuildSendFromHostNode(
Graph* g, const string& oc_cluster_name,
const std::vector<Node*>& ret_nodes,
const std::vector<DataType>& send_from_host_dtypes, Node* key_placeholder) {
NodeDefBuilder send_from_host_builder(
absl::StrCat("outside_compilation_", oc_cluster_name, "_send"),
"_XlaSendFromHost");
NodeDef send_from_host_def;
send_from_host_builder.Attr("Tinputs", send_from_host_dtypes);
AttrValue device_ordinal_value;
device_ordinal_value.set_placeholder("_device_ordinal");
send_from_host_builder.Attr("device_ordinal", device_ordinal_value);
send_from_host_builder.Attr(
"key", absl::StrCat("host_compute_channel_", oc_cluster_name));
send_from_host_builder.Attr(kXlaHasHostTransferAttrName, true);
std::vector<NodeDefBuilder::NodeOut> inputs(send_from_host_dtypes.size());
for (auto* n : ret_nodes) {
int index;
TF_RETURN_IF_ERROR(GetNodeAttr(n->attrs(), "index", &index));
const int num_dtypes = send_from_host_dtypes.size();
if (index < 0 || index >= num_dtypes) {
return errors::Internal("Invalid _Retval index: ", index);
}
for (auto edge : n->in_edges()) {
inputs[index] =
NodeDefBuilder::NodeOut{edge->src()->name(), edge->src_output(),
edge->src()->output_type(edge->src_output())};
}
}
send_from_host_builder.Input(inputs);
send_from_host_builder.Input(key_placeholder->name(), 0, DT_STRING);
TF_RETURN_IF_ERROR(send_from_host_builder.Finalize(&send_from_host_def));
TF_ASSIGN_OR_RETURN(Node * send_from_host_node,
g->AddNode(send_from_host_def));
return send_from_host_node;
}
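// Replaces all _Retval nodes with a single _XlaSendFromHost node whose input
// `index` is fed by the edge that previously fed the corresponding _Retval.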
absl::StatusOr<Node*> ReplaceRetNodesWithSendFromHostNode(
Graph* g, const string& oc_cluster_name,
std::vector<DataType>* send_from_host_dtypes, Node* key_placeholder) {
std::vector<Node*> ret_nodes = GatherNodesWithType(*g, "_Retval");
TF_RETURN_IF_ERROR(GetRetDataTypes(ret_nodes, send_from_host_dtypes));
TF_ASSIGN_OR_RETURN(
Node * send_from_host_node,
BuildSendFromHostNode(g, oc_cluster_name, ret_nodes,
*send_from_host_dtypes, key_placeholder));
for (auto* n : ret_nodes) {
int index;
TF_RETURN_IF_ERROR(GetNodeAttr(n->attrs(), "index", &index));
for (auto edge : n->in_edges()) {
if (edge->src_output() == Graph::kControlSlot) {
g->AddControlEdge(edge->src(), send_from_host_node);
} else {
g->AddEdge(edge->src(), edge->src_output(), send_from_host_node, index);
}
}
g->RemoveNode(n);
}
g->AddEdge(key_placeholder, 0, send_from_host_node,
send_from_host_dtypes->size());
return send_from_host_node;
}
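// Returns the statically known shape of each input of `send_from_host_node`,
// read from the producers' kXlaInferredShapesAttrName attribute, or
// std::nullopt if any shape is missing or not fully defined.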
std::optional<std::vector<PartialTensorShape>> GetInferredInputShapes(
int num_inputs, Node* send_from_host_node) {
std::vector<PartialTensorShape> results(num_inputs);
for (int i = 0; i < num_inputs; i++) {
const Edge* e;
if (!send_from_host_node->input_edge(i, &e).ok()) {
return std::nullopt;
}
std::vector<PartialTensorShape> shapes;
if (!GetNodeAttr(e->src()->attrs(), kXlaInferredShapesAttrName, &shapes)
.ok()) {
return std::nullopt;
}
const PartialTensorShape shape = shapes[e->src_output()];
if (!shape.IsFullyDefined()) {
return std::nullopt;
}
results[e->dst_input()] = shape;
}
return results;
}
string host_compute_node_name(const string& original_oc_name) {
return absl::StrCat("outside_compilation_", original_oc_name,
"_host_compute");
}
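// Builds the XlaHostCompute NodeDef that replaces an outside compilation call
// node: copies the call node's attributes, optionally pins the computation to
// a TPU core, records token dependencies on other outside compilation
// clusters, and mirrors the call node's data inputs.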
absl::StatusOr<NodeDef> BuildXlaHostComputeNodeDef(
const Node* call_node, const std::map<string, int>& host_compute_core,
const absl::flat_hash_map<string, std::vector<string>>& cluster_deps) {
string original_oc_name;
TF_RETURN_IF_ERROR(GetNodeAttr(
call_node->attrs(), "_outside_compilation_subgraph", &original_oc_name));
NodeDefBuilder host_compute_builder(host_compute_node_name(original_oc_name),
"XlaHostCompute");
host_compute_builder.Attr(kXlaOriginalOutsideCompilationNodeName,
host_compute_builder.node_name());
for (const auto& attr : call_node->attrs()) {
host_compute_builder.Attr(attr.first, attr.second);
}
const auto iter = host_compute_core.find(original_oc_name);
if (iter != host_compute_core.end()) {
int core = iter->second;
host_compute_builder.Attr("tpu_core", core);
}
std::vector<string> xla_token_input_nodes;
xla_token_input_nodes.emplace_back(kXlaTokenArgNodeName);
auto cluster_deps_it = cluster_deps.find(original_oc_name);
if (cluster_deps_it != cluster_deps.end()) {
for (const auto& dep : cluster_deps_it->second) {
xla_token_input_nodes.emplace_back(host_compute_node_name(dep));
}
}
host_compute_builder.Attr(kXlaTokenInputNodesAttrName, xla_token_input_nodes);
std::vector<DataType> input_dtypes;
TF_RETURN_IF_ERROR(GetNodeAttr(call_node->attrs(), "Tinputs", &input_dtypes));
std::vector<NodeDefBuilder::NodeOut> inputs(input_dtypes.size());
for (auto e : call_node->in_edges()) {
if (e->IsControlEdge()) {
continue;
}
const int input_dtypes_size = input_dtypes.size();
if (e->dst_input() < 0 || e->dst_input() >= input_dtypes_size) {
return errors::Internal("Invalid dst_input: ", e->dst_input());
}
inputs[e->dst_input()] = NodeDefBuilder::NodeOut{
e->src()->name(), e->src_output(), input_dtypes[e->dst_input()]};
}
host_compute_builder.Input(inputs);
NodeDef new_def;
TF_RETURN_IF_ERROR(host_compute_builder.Finalize(&new_def));
return new_def;
}
TF_ATTRIBUTE_NOINLINE absl::StatusOr<Node*> ReplaceOutsideCompilationCallNode(
Graph* g, Node* call_node, const std::map<string, int>& host_compute_core,
const absl::flat_hash_map<string, std::vector<string>>& cluster_deps) {
TF_ASSIGN_OR_RETURN(
NodeDef node_def,
BuildXlaHostComputeNodeDef(call_node, host_compute_core, cluster_deps));
TF_ASSIGN_OR_RETURN(Node * host_compute_node,
ReplaceNode(g, call_node, node_def));
VLOG(4) << "Added HostCompute node: " << host_compute_node->DebugString();
return host_compute_node;
}
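// Resets the device ordinal of every host-transfer node in `g` to a
// placeholder value, so the graph can later be instantiated for any device.
// For If/While nodes the placeholder is propagated into the branch/loop
// function attributes instead.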
Status ResetDeviceOrdinalToPlaceholderValue(Graph* g) {
AttrValue device_ordinal_value;
device_ordinal_value.set_placeholder("_device_ordinal");
for (Node* n : g->nodes()) {
if (!HasNodeAttr(n->def(), kXlaHasHostTransferAttrName)) {
continue;
}
if (n->type_string() == "_XlaRecvAtHost" ||
n->type_string() == "_XlaSendFromHost") {
n->ClearAttr("device_ordinal");
n->AddAttr("device_ordinal", device_ordinal_value);
} else if (n->IsIfNode()) {
for (const string& attr_name :
std::vector<string>{"then_branch", "else_branch"}) {
NameAttrList branch_func;
TF_RETURN_IF_ERROR(GetNodeAttr(n->attrs(), attr_name, &branch_func));
(*branch_func.mutable_attr())["_device_ordinal"] = device_ordinal_value;
n->ClearAttr(attr_name);
n->AddAttr(attr_name, branch_func);
}
} else if (n->IsWhileNode()) {
for (const string& attr_name : std::vector<string>{"cond", "body"}) {
NameAttrList branch_func;
TF_RETURN_IF_ERROR(GetNodeAttr(n->attrs(), attr_name, &branch_func));
(*branch_func.mutable_attr())["_device_ordinal"] = device_ordinal_value;
n->ClearAttr(attr_name);
n->AddAttr(attr_name, branch_func);
}
} else if (HasNodeAttr(n->def(), "_device_ordinal")) {
n->ClearAttr("_device_ordinal");
n->AddAttr("_device_ordinal", device_ordinal_value);
} else {
return errors::Internal("Unknown node marked with ",
kXlaHasHostTransferAttrName, ": ",
n->DebugString());
}
}
return absl::OkStatus();
}
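// Returns true if `function_def` contains a lifted-argument Placeholder node,
// i.e. one tagged with kXlaLiftedArgOutsideCompilationAttrName.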
bool HasLiftedArgs(const FunctionDef& function_def) {
return absl::c_any_of(function_def.node_def(), [](const NodeDef& node_def) {
return (node_def.op() == "Placeholder" &&
node_def.attr().find(kXlaLiftedArgOutsideCompilationAttrName) !=
node_def.attr().end());
});
}
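// Pairs each lifted-argument Placeholder in `function_body` with the outside
// compilation node it was lifted from, looked up by cluster name.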
absl::StatusOr<std::vector<std::pair<Node*, Node*>>>
LiftedArgsAndOutsideCompilationNodesInFunctionBody(
const FunctionBody& function_body,
const std::unordered_map<string, Node*>& outside_compilation_attr_to_node) {
std::vector<std::pair<Node*, Node*>>
lifted_arg_nodes_and_outside_compilation_nodes;
for (Node* n : function_body.graph->op_nodes()) {
string oc_cluster;
if (n->type_string() == "Placeholder" &&
GetNodeAttr(n->def(), kXlaLiftedArgOutsideCompilationAttrName,
&oc_cluster)
.ok()) {
TF_RET_CHECK(outside_compilation_attr_to_node.find(oc_cluster) !=
outside_compilation_attr_to_node.end());
lifted_arg_nodes_and_outside_compilation_nodes.emplace_back(
n, outside_compilation_attr_to_node.at(oc_cluster));
}
}
return lifted_arg_nodes_and_outside_compilation_nodes;
}
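// Appends the data type of each outside compilation node to `n`'s
// `type_attr_name` list attribute and returns the updated type list.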
absl::StatusOr<std::vector<DataType>> UpdateTypesAttribute(
const std::vector<std::pair<Node*, Node*>>&
lifted_arg_nodes_and_outside_compilation_nodes,
const string& type_attr_name, Node* n) {
  std::vector<DataType> data_types;
  TF_RETURN_IF_ERROR(GetNodeAttr(n->def(), type_attr_name, &data_types));
  data_types.reserve(data_types.size() +
                     lifted_arg_nodes_and_outside_compilation_nodes.size());
for (auto pair : lifted_arg_nodes_and_outside_compilation_nodes) {
Node* outside_compilation_node = pair.second;
DataType data_type;
TF_RET_CHECK(outside_compilation_node->IsIdentity() ||
outside_compilation_node->type_string() == "Placeholder");
if (outside_compilation_node->IsIdentity()) {
TF_RETURN_IF_ERROR(
GetNodeAttr(outside_compilation_node->def(), "T", &data_type));
} else {
TF_RETURN_IF_ERROR(
GetNodeAttr(outside_compilation_node->def(), "dtype", &data_type));
}
data_types.push_back(data_type);
}
n->ClearAttr(type_attr_name);
n->AddAttr(type_attr_name, data_types);
return data_types;
}
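// Adds a data edge from each outside compilation node to the corresponding
// newly appended input of `n`. `arg_to_input_edge_offset` accounts for inputs
// that precede the arguments (e.g. an If node's predicate).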
void AddEdgesFromOutsideCompilationNodes(
const int original_arg_count, const int arg_to_input_edge_offset,
const std::vector<DataType>& data_types,
const std::vector<Node*>& outside_compilation_nodes, Graph* g, Node* n) {
for (int i = original_arg_count, end = data_types.size(); i < end; i++) {
Node* outside_compilation_node =
outside_compilation_nodes[i - original_arg_count];
g->AddEdge(outside_compilation_node, 0, n, i + arg_to_input_edge_offset);
}
}
absl::StatusOr<Node*> AddOutsideCompilationInputArgToFunctionBody(
const FunctionBody& function_body, const int arg_idx,
const DataType& data_type) {
NodeDefBuilder arg_builder(absl::StrCat("arg_", arg_idx), "_Arg");
arg_builder.Attr("T", data_type);
arg_builder.Attr("index", arg_idx);
NodeDef arg_def;
TF_RETURN_IF_ERROR(arg_builder.Finalize(&arg_def));
TF_ASSIGN_OR_RETURN(Node * arg_node, function_body.graph->AddNode(arg_def));
return arg_node;
}
Status AddMatchingRetvalNode(const FunctionBody& function_body,
const int arg_idx, const DataType& data_type,
Node* arg_node) {
NodeDefBuilder ret_builder(absl::StrCat("ret_", arg_idx), "_Retval");
ret_builder.Attr("T", data_type);
ret_builder.Attr("index", arg_idx);
ret_builder.Input(arg_node->name(), 0, data_type);
NodeDef ret_def;
TF_RETURN_IF_ERROR(ret_builder.Finalize(&ret_def));
TF_ASSIGN_OR_RETURN(Node * ret_node, function_body.graph->AddNode(ret_def));
function_body.graph->AddEdge(arg_node, 0, ret_node, 0);
return absl::OkStatus();
}
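// Reroutes all out-edges of the lifted-argument Placeholder to `arg_node` and
// removes the Placeholder. A nullptr entry means this function has no
// matching lifted argument, in which case this is a no-op.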
void ReplaceLiftedArgNodePlaceholderWithArg(
const FunctionBody& function_body, const int original_arg_count,
const int arg_idx, const std::vector<Node*>& lifted_arg_nodes,
Node* arg_node) {
Node* lifted_arg_node = lifted_arg_nodes[arg_idx - original_arg_count];
if (!lifted_arg_node) {
return;
}
for (const Edge* e : lifted_arg_node->out_edges()) {
if (e->IsControlEdge()) {
function_body.graph->AddControlEdge(arg_node, e->dst());
} else {
function_body.graph->AddEdge(arg_node, 0, e->dst(), e->dst_input());
}
}
function_body.graph->RemoveNode(lifted_arg_node);
}
Status AddFunctionWithNewName(const std::string& new_name,
const std::string& func_attr_name,
const FunctionDef& function_def,
NameAttrList* func_attr, Node* callsite_node,
FunctionLibraryDefinition* fld) {
TF_RETURN_IF_ERROR(fld->AddFunctionDef(function_def));
func_attr->set_name(new_name);
callsite_node->ClearAttr(func_attr_name);
callsite_node->AddAttr(func_attr_name, *func_attr);
return absl::OkStatus();
}
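// Rewrites a While node whose body contains lifted arguments: extends the
// node's "T" attribute, feeds the outside compilation nodes in as extra loop
// variables, adds matching _Arg/_Retval pairs to the body and extra _Arg
// nodes to the cond, and installs the rewritten functions under new names.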
Status PostprocessLiftedArgsForWhile(
const std::unordered_map<string, Node*>& outside_compilation_attr_to_node,
Graph* g, Node* n, FunctionLibraryDefinition* fld) {
TF_RET_CHECK(n->IsWhileNode());
NameAttrList body_func;
TF_RETURN_IF_ERROR(GetNodeAttr(n->def(), "body", &body_func));
const FunctionDef* body_function_def = fld->Find(body_func.name());
TF_RET_CHECK(body_function_def);
if (!HasLiftedArgs(*body_function_def)) {
return absl::OkStatus();
}
std::unique_ptr<FunctionBody> body_function_body;
TF_RETURN_IF_ERROR(FunctionDefToBodyHelper(*body_function_def,
AttrSlice(&body_func.attr()), fld,
&body_function_body));
int original_arg_count = body_function_body->arg_nodes.size();
TF_ASSIGN_OR_RETURN(
auto lifted_arg_nodes_and_outside_compilation_nodes,
LiftedArgsAndOutsideCompilationNodesInFunctionBody(
*body_function_body, outside_compilation_attr_to_node));
TF_ASSIGN_OR_RETURN(
std::vector<DataType> data_types,
UpdateTypesAttribute(lifted_arg_nodes_and_outside_compilation_nodes, "T",
n));
std::vector<Node*> outside_compilation_nodes;
outside_compilation_nodes.reserve(
lifted_arg_nodes_and_outside_compilation_nodes.size());
std::transform(
lifted_arg_nodes_and_outside_compilation_nodes.begin(),
lifted_arg_nodes_and_outside_compilation_nodes.end(),
std::back_inserter(outside_compilation_nodes),
[](const std::pair<Node*, Node*>& pair) { return pair.second; });
AddEdgesFromOutsideCompilationNodes(original_arg_count,
0,
data_types, outside_compilation_nodes, g,
n);
std::vector<Node*> lifted_arg_nodes;
lifted_arg_nodes.reserve(
lifted_arg_nodes_and_outside_compilation_nodes.size());
std::transform(
lifted_arg_nodes_and_outside_compilation_nodes.begin(),
lifted_arg_nodes_and_outside_compilation_nodes.end(),
std::back_inserter(lifted_arg_nodes),
[](const std::pair<Node*, Node*>& pair) { return pair.first; });
for (int i = original_arg_count, end = data_types.size(); i < end; i++) {
TF_ASSIGN_OR_RETURN(Node * arg_node,
AddOutsideCompilationInputArgToFunctionBody(
*body_function_body, i, data_types[i]));
TF_RETURN_IF_ERROR(
AddMatchingRetvalNode(*body_function_body, i, data_types[i], arg_node));
ReplaceLiftedArgNodePlaceholderWithArg(
*body_function_body, original_arg_count, i, lifted_arg_nodes, arg_node);
}
const auto new_body_function_name =
fld->UniqueFunctionName(absl::StrCat(body_func.name(), "_lifted_arg_"));
FunctionDef rewritten_body_function_def;
TF_RETURN_IF_ERROR(GraphToFunctionDef(
*body_function_body->graph, new_body_function_name,
HostGraphControlRetMapping, &rewritten_body_function_def));
TF_RETURN_IF_ERROR(AddFunctionWithNewName(new_body_function_name, "body",
rewritten_body_function_def,
&body_func, n, fld));
NameAttrList cond_func;
TF_RETURN_IF_ERROR(GetNodeAttr(n->def(), "cond", &cond_func));
const FunctionDef* cond_function_def = fld->Find(cond_func.name());
TF_RET_CHECK(cond_function_def);
std::unique_ptr<FunctionBody> cond_function_body;
TF_RETURN_IF_ERROR(FunctionDefToBodyHelper(*cond_function_def,
AttrSlice(&cond_func.attr()), fld,
&cond_function_body));
for (int i = original_arg_count, end = data_types.size(); i < end; i++) {
absl::StatusOr<Node*> arg_node_or =
AddOutsideCompilationInputArgToFunctionBody(*cond_function_body, i,
data_types[i]);
TF_RETURN_IF_ERROR(arg_node_or.status());
}
const auto new_cond_function_name =
fld->UniqueFunctionName(absl::StrCat(cond_func.name(), "_lifted_arg_"));
FunctionDef rewritten_cond_function_def;
TF_RETURN_IF_ERROR(GraphToFunctionDef(
*cond_function_body->graph, new_cond_function_name,
HostGraphControlRetMapping, &rewritten_cond_function_def));
TF_RETURN_IF_ERROR(AddFunctionWithNewName(new_cond_function_name, "cond",
rewritten_cond_function_def,
&cond_func, n, fld));
return absl::OkStatus();
}
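// Rewrites an If node whose branches contain lifted arguments. The union of
// lifted arguments across both branches is appended to "Tin", and each branch
// gets an _Arg node per new input (a nullptr lifted-arg entry means the
// branch does not use that argument).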
Status PostprocessLiftedArgsForIf(
const std::unordered_map<string, Node*>& outside_compilation_attr_to_node,
Graph* g, Node* n, FunctionLibraryDefinition* fld) {
TF_RET_CHECK(n->IsIfNode());
NameAttrList then_branch_func;
TF_RETURN_IF_ERROR(GetNodeAttr(n->def(), "then_branch", &then_branch_func));
const FunctionDef* then_branch_function_def =
fld->Find(then_branch_func.name());
TF_RET_CHECK(then_branch_function_def);
NameAttrList else_branch_func;
TF_RETURN_IF_ERROR(GetNodeAttr(n->def(), "else_branch", &else_branch_func));
const FunctionDef* else_branch_function_def =
fld->Find(else_branch_func.name());
TF_RET_CHECK(else_branch_function_def);
if (!HasLiftedArgs(*then_branch_function_def) &&
!HasLiftedArgs(*else_branch_function_def)) {
return absl::OkStatus();
}
std::unique_ptr<FunctionBody> then_branch_function_body;
TF_RETURN_IF_ERROR(FunctionDefToBodyHelper(
*then_branch_function_def, AttrSlice(&then_branch_func.attr()), fld,
&then_branch_function_body));
std::unique_ptr<FunctionBody> else_branch_function_body;
TF_RETURN_IF_ERROR(FunctionDefToBodyHelper(
*else_branch_function_def, AttrSlice(&else_branch_func.attr()), fld,
&else_branch_function_body));
int original_arg_count = then_branch_function_body->arg_nodes.size();
TF_ASSIGN_OR_RETURN(
auto then_branch_lifted_arg_nodes_and_outside_compilation_nodes,
LiftedArgsAndOutsideCompilationNodesInFunctionBody(
*then_branch_function_body, outside_compilation_attr_to_node));
TF_ASSIGN_OR_RETURN(
auto else_branch_lifted_arg_nodes_and_outside_compilation_nodes,
LiftedArgsAndOutsideCompilationNodesInFunctionBody(
*else_branch_function_body, outside_compilation_attr_to_node));
std::vector<Node*> outside_compilation_nodes;
std::vector<Node*> then_branch_lifted_arg_nodes;
outside_compilation_nodes.reserve(
then_branch_lifted_arg_nodes_and_outside_compilation_nodes.size());
then_branch_lifted_arg_nodes.reserve(
then_branch_lifted_arg_nodes_and_outside_compilation_nodes.size());
for (const auto& pair :
then_branch_lifted_arg_nodes_and_outside_compilation_nodes) {
outside_compilation_nodes.push_back(pair.second);
then_branch_lifted_arg_nodes.push_back(pair.first);
}
for (const auto& pair :
else_branch_lifted_arg_nodes_and_outside_compilation_nodes) {
if (std::find(outside_compilation_nodes.begin(),
outside_compilation_nodes.end(),
pair.second) == outside_compilation_nodes.end()) {
outside_compilation_nodes.push_back(pair.second);
then_branch_lifted_arg_nodes.push_back(nullptr);
}
}
std::vector<Node*> else_branch_lifted_arg_nodes(
outside_compilation_nodes.size());
for (const auto& pair :
else_branch_lifted_arg_nodes_and_outside_compilation_nodes) {
auto iter = std::find(outside_compilation_nodes.begin(),
outside_compilation_nodes.end(), pair.second);
TF_RET_CHECK(iter != outside_compilation_nodes.end());
int index = iter - outside_compilation_nodes.begin();
else_branch_lifted_arg_nodes[index] = pair.first;
}
std::vector<DataType> data_types;
data_types.reserve(outside_compilation_nodes.size());
TF_RETURN_IF_ERROR(GetNodeAttr(n->def(), "Tin", &data_types));
  for (Node* oc_node : outside_compilation_nodes) {
    data_types.push_back(oc_node->output_type(0));
  }
n->ClearAttr("Tin");
n->AddAttr("Tin", data_types);
AddEdgesFromOutsideCompilationNodes(original_arg_count,
1,
data_types, outside_compilation_nodes, g,
n);
for (int i = original_arg_count, end = data_types.size(); i < end; ++i) {
TF_ASSIGN_OR_RETURN(Node * then_branch_arg_node,
AddOutsideCompilationInputArgToFunctionBody(
*then_branch_function_body, i, data_types[i]));
ReplaceLiftedArgNodePlaceholderWithArg(
*then_branch_function_body, original_arg_count, i,
then_branch_lifted_arg_nodes, then_branch_arg_node);
TF_ASSIGN_OR_RETURN(Node * else_branch_arg_node,
AddOutsideCompilationInputArgToFunctionBody(
*else_branch_function_body, i, data_types[i]));
ReplaceLiftedArgNodePlaceholderWithArg(
*else_branch_function_body, original_arg_count, i,
else_branch_lifted_arg_nodes, else_branch_arg_node);
}
const auto new_then_function_name = fld->UniqueFunctionName(
absl::StrCat(then_branch_func.name(), "_lifted_arg_"));
FunctionDef rewritten_then_branch_function_def;
TF_RETURN_IF_ERROR(GraphToFunctionDef(
*then_branch_function_body->graph, new_then_function_name,
HostGraphControlRetMapping, &rewritten_then_branch_function_def));
TF_RETURN_IF_ERROR(AddFunctionWithNewName(
new_then_function_name, "then_branch", rewritten_then_branch_function_def,
&then_branch_func, n, fld));
const auto new_else_function_name = fld->UniqueFunctionName(
absl::StrCat(else_branch_func.name(), "_lifted_arg_"));
FunctionDef rewritten_else_branch_function_def;
TF_RETURN_IF_ERROR(GraphToFunctionDef(
*else_branch_function_body->graph, new_else_function_name,
HostGraphControlRetMapping, &rewritten_else_branch_function_def));
TF_RETURN_IF_ERROR(AddFunctionWithNewName(
new_else_function_name, "else_branch", rewritten_else_branch_function_def,
&else_branch_func, n, fld));
return absl::OkStatus();
}
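// Rewrites a function call node whose function contains lifted arguments:
// the rewritten function takes the outside compilation nodes as extra inputs,
// and the call node is replaced by one that passes them in.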
Status PostprocessLiftedArgsForCall(
const std::unordered_map<string, Node*>& outside_compilation_attr_to_node,
Graph* g, Node* n, FunctionLibraryDefinition* fld) {
const FunctionDef* fdef = fld->Find(n->type_string());
TF_RET_CHECK(fdef);
if (!HasLiftedArgs(*fdef)) {
return absl::OkStatus();
}
std::unique_ptr<FunctionBody> fbody;
TF_RETURN_IF_ERROR(FunctionDefToBodyHelper(*fdef, n->attrs(), fld, &fbody));
int original_arg_count = fbody->arg_nodes.size();
TF_ASSIGN_OR_RETURN(auto lifted_arg_nodes_and_outside_compilation_nodes,
LiftedArgsAndOutsideCompilationNodesInFunctionBody(
*fbody, outside_compilation_attr_to_node));
std::vector<DataType> data_types(n->input_types().begin(),
n->input_types().end());
for (auto pair : lifted_arg_nodes_and_outside_compilation_nodes) {
Node* outside_compilation_node = pair.second;
DataType data_type;
TF_RET_CHECK(outside_compilation_node->IsIdentity() ||
outside_compilation_node->type_string() == "Placeholder");
if (outside_compilation_node->IsIdentity()) {
TF_RETURN_IF_ERROR(
GetNodeAttr(outside_compilation_node->def(), "T", &data_type));
} else {
TF_RETURN_IF_ERROR(
GetNodeAttr(outside_compilation_node->def(), "dtype", &data_type));
}
data_types.push_back(data_type);
}
std::vector<Node*> lifted_arg_nodes;
lifted_arg_nodes.reserve(
lifted_arg_nodes_and_outside_compilation_nodes.size());
std::transform(
lifted_arg_nodes_and_outside_compilation_nodes.begin(),
lifted_arg_nodes_and_outside_compilation_nodes.end(),
std::back_inserter(lifted_arg_nodes),
[](const std::pair<Node*, Node*>& pair) { return pair.first; });
for (int i = original_arg_count, end = data_types.size(); i < end; ++i) {
TF_ASSIGN_OR_RETURN(
Node * arg_node,
AddOutsideCompilationInputArgToFunctionBody(*fbody, i, data_types[i]));
ReplaceLiftedArgNodePlaceholderWithArg(*fbody, original_arg_count, i,
lifted_arg_nodes, arg_node);
}
FunctionDef rewritten_fdef;
TF_RETURN_IF_ERROR(GraphToFunctionDef(*fbody->graph, n->type_string(),
HostGraphControlRetMapping,
&rewritten_fdef));
const auto new_function_name =
fld->UniqueFunctionName(absl::StrCat(n->type_string(), "_lifted_arg_"));
rewritten_fdef.mutable_signature()->set_name(new_function_name);
TF_RETURN_IF_ERROR(fld->AddFunctionDef(rewritten_fdef));
NodeDef node_def = n->def();
*node_def.mutable_op() = new_function_name;
for (int i = original_arg_count, end = data_types.size(); i < end; i++) {
Node* outside_compilation_node =
lifted_arg_nodes_and_outside_compilation_nodes[i - original_arg_count]
.second;
node_def.add_input(absl::StrCat(outside_compilation_node->name(), ":", 0));
}
TF_ASSIGN_OR_RETURN(n, ReplaceNode(g, n, node_def));
std::vector<Node*> outside_compilation_nodes;
outside_compilation_nodes.reserve(
lifted_arg_nodes_and_outside_compilation_nodes.size());
std::transform(
lifted_arg_nodes_and_outside_compilation_nodes.begin(),
lifted_arg_nodes_and_outside_compilation_nodes.end(),
std::back_inserter(outside_compilation_nodes),
[](const std::pair<Node*, Node*>& pair) { return pair.second; });
AddEdgesFromOutsideCompilationNodes(original_arg_count,
0,
data_types, outside_compilation_nodes, g,
n);
return absl::OkStatus();
}
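// Maps each outside compilation cluster name to its lifted-argument source
// node, an Identity or Placeholder tagged with kXlaIsLiftedArgAttrName.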
absl::StatusOr<std::unordered_map<string, Node*>> OutsideCompilationAttrToNode(
const Graph& g) {
std::unordered_map<string, Node*> outside_compilation_attr_to_node;
for (Node* n : g.op_nodes()) {
bool is_lifted_arg;
string outside_compilation_attr;
if (TryGetNodeAttr(n->def(), kXlaIsLiftedArgAttrName, &is_lifted_arg) &&
TryGetNodeAttr(n->def(), "_xla_outside_compilation",
&outside_compilation_attr)) {
TF_RET_CHECK(is_lifted_arg);
TF_RET_CHECK(n->IsIdentity() || n->type_string() == "Placeholder");
outside_compilation_attr_to_node[outside_compilation_attr] = n;
}
}
return outside_compilation_attr_to_node;
}
Status PostprocessLiftedArgs(Graph* g, FunctionLibraryDefinition* fld) {
TF_ASSIGN_OR_RETURN(auto outside_compilation_attr_to_node,
OutsideCompilationAttrToNode(*g));
std::vector<Node*> call_nodes;
for (Node* n : g->op_nodes()) {
if (!HasNodeAttr(n->def(), kXlaHasHostTransferAttrName)) {
continue;
}
if (n->IsWhileNode()) {
TF_RETURN_IF_ERROR(PostprocessLiftedArgsForWhile(
outside_compilation_attr_to_node, g, n, fld));
}
if (n->IsIfNode()) {
TF_RETURN_IF_ERROR(PostprocessLiftedArgsForIf(
outside_compilation_attr_to_node, g, n, fld));
}
if (fld->Contains(n->type_string())) {
call_nodes.push_back(n);
}
}
for (Node* n : call_nodes) {
TF_RETURN_IF_ERROR(PostprocessLiftedArgsForCall(
outside_compilation_attr_to_node, g, n, fld));
}
return absl::OkStatus();
}
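// Builds the combined host graph for `xla_cluster_name`: copies every
// per-cluster host graph function into one graph that shares a single key
// placeholder, sequences host-transfer nodes behind a NoOp sequencer, resets
// device ordinals to placeholders, prunes nodes unreachable from the sink,
// and post-processes cross-cluster edges and lifted arguments.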
Status ConstructHostGraph(
const string& xla_cluster_name, const string& outside_compilation_attr_name,
const std::vector<string>& outside_compilation_host_graphs,
FunctionLibraryDefinition* fld, std::unique_ptr<Graph>* host_graph) {
  *host_graph = std::make_unique<Graph>(fld);
NodeDefBuilder sequencer_builder(absl::StrCat(xla_cluster_name, "_sequencer"),
"NoOp");
sequencer_builder.Attr("_xla_host_transfer_sequencer", xla_cluster_name);
NodeDef sequencer_def;
TF_RETURN_IF_ERROR(sequencer_builder.Finalize(&sequencer_def));
TF_ASSIGN_OR_RETURN(Node * sequencer, (*host_graph)->AddNode(sequencer_def));
TF_ASSIGN_OR_RETURN(
Node * key_placeholder,
AddHostComputeKeyPlaceholder(xla_cluster_name, host_graph->get()));
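  // Copy each per-cluster host graph function into the combined host graph,
  // reusing the shared key placeholder and adding a control edge from every
  // host-transfer node to the sequencer.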
for (const string& host_func : outside_compilation_host_graphs) {
VLOG(4) << "Expanding host graph " << host_func;
AttrValue device_ordinal_attr;
device_ordinal_attr.set_i(0);
protobuf::Map<string, AttrValue> attrs;
attrs["_device_ordinal"] = device_ordinal_attr;
std::unique_ptr<FunctionBody> host_fbody;
const FunctionDef* host_fdef = fld->Find(host_func);
TF_RET_CHECK(host_fdef);
TF_RETURN_IF_ERROR(FunctionDefToBodyHelper(*host_fdef, AttrSlice(&attrs),
fld, &host_fbody));
FixupSourceAndSinkEdges(host_fbody->graph);
std::map<const Node*, Node*> node_map;
node_map[host_fbody->graph->source_node()] = (*host_graph)->source_node();
node_map[host_fbody->graph->sink_node()] = (*host_graph)->sink_node();
Status s;
ReverseDFS(
*host_fbody->graph, nullptr,
[&](const Node* n) {
if (!s.ok()) {
return;
}
Node* copy;
if (node_map.find(n) != node_map.end()) {
copy = node_map.at(n);
} else if (IsKeyPlaceholderNode(*n)) {
copy = key_placeholder;
node_map[n] = copy;
} else {
NodeDef copy_def = n->def();
copy_def.clear_device();
copy = (*host_graph)->AddNode(copy_def, &s);
if (!s.ok()) {
return;
}
node_map[n] = copy;
}
for (auto e : n->in_edges()) {
if (node_map.find(e->src()) == node_map.end()) {
s = errors::Internal("Cannot find node image for ",
e->src()->DebugString());
return;
}
(*host_graph)
->AddEdge(node_map[e->src()], e->src_output(), copy,
e->dst_input());
}
if (HasNodeAttr(copy->def(), kXlaHasHostTransferAttrName)) {
(*host_graph)->AddControlEdge(copy, sequencer);
}
},
NodeComparatorID());
if (!s.ok()) {
return s;
}
}
TF_RETURN_IF_ERROR(ResetDeviceOrdinalToPlaceholderValue(host_graph->get()));
if (!sequencer->in_edges().empty()) {
(*host_graph)->AddControlEdge(sequencer, (*host_graph)->sink_node());
}
PruneForReverseReachability(
host_graph->get(),
std::unordered_set<const Node*>{(*host_graph)->sink_node()});
TF_RETURN_IF_ERROR(PostprocessEdgesBetweenOutsideCompilations(
host_graph->get(), outside_compilation_attr_name));
TF_RETURN_IF_ERROR(PostprocessLiftedArgs(host_graph->get(), fld));
if (VLOG_IS_ON(4)) {
DumpGraphToFile(absl::StrCat("extract_outside_compilation_host_graph_for_",
xla_cluster_name),
**host_graph, fld);
}
return absl::OkStatus();
}
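// Instantiates the host graph function (with device ordinal 0) and copies it
// into `main_graph`, rooted at `pivot_node` when one is given. The sequencer
// NoOp gets a control edge to `xla_computation_node` so host transfers are
// ordered with respect to the XLA computation.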
Status ExpandHostGraphIntoMainGraph(Graph* main_graph,
FunctionLibraryDefinition* fld,
const string& host_graph_func_name,
Node* xla_computation_node,
Node* pivot_node) {
AttrValue device_ordinal_attr;
device_ordinal_attr.set_i(0);
protobuf::Map<string, AttrValue> attrs;
attrs["_device_ordinal"] = device_ordinal_attr;
std::unique_ptr<FunctionBody> fbody;
const FunctionDef* host_graph_func = fld->Find(host_graph_func_name);
TF_RET_CHECK(host_graph_func);
TF_RETURN_IF_ERROR(FunctionDefToBodyHelper(*host_graph_func,
AttrSlice(&attrs), fld, &fbody));
Graph* host_graph = fbody->graph;
FixupSourceAndSinkEdges(host_graph);
std::map<const Node*, Node*> node_map;
if (pivot_node) {
node_map[host_graph->source_node()] = pivot_node;
} else {
node_map[host_graph->source_node()] = main_graph->source_node();
}
node_map[host_graph->sink_node()] = main_graph->sink_node();
Status s = absl::OkStatus();
auto copy_node_fn = [&](const Node* n) {
if (!s.ok()) {
return;
}
Node* copy;
if (node_map.find(n) != node_map.end()) {
copy = node_map.at(n);
} else {
NodeDef copy_def = n->def();
copy = main_graph->AddNode(copy_def, &s);
if (!s.ok()) {
return;
}
node_map[n] = copy;
}
for (auto e : n->in_edges()) {
if (node_map.find(e->src()) == node_map.end()) {
s = errors::Internal("Cannot find node image for ",
e->src()->DebugString());
return;
}
main_graph->AddEdge(node_map[e->src()], e->src_output(), copy,
e->dst_input());
}
if (copy->type_string() == "NoOp" &&
HasNodeAttr(copy->def(), "_xla_host_transfer_sequencer")) {
main_graph->AddControlEdge(copy, xla_computation_node);
}
};
ReverseDFS(*host_graph, nullptr, copy_node_fn, NodeComparatorID());
return s;
}
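// Rewrites the shape inference graph so that it contains only the transitive
// fanin of its _XlaSendFromHost node. If the send node also exists in
// `host_graph`, that fanin is copied over from the host graph first.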
Status RewriteShapeInferenceGraph(const string& shape_inference_graph_name,
Graph* host_graph, Node* pivot_node,
FunctionLibraryDefinition* fld) {
AttrValue device_ordinal_attr;
device_ordinal_attr.set_i(0);
protobuf::Map<string, AttrValue> attrs;
attrs["_device_ordinal"] = device_ordinal_attr;
std::unique_ptr<FunctionBody> fbody;
const FunctionDef* shape_inference_graph =
fld->Find(shape_inference_graph_name);
TF_RET_CHECK(shape_inference_graph);
TF_RETURN_IF_ERROR(FunctionDefToBodyHelper(*shape_inference_graph,
AttrSlice(&attrs), fld, &fbody));
Graph* g = fbody->graph;
Node* send_from_host = nullptr;
for (Node* n : g->nodes()) {
if (n->type_string() == "_XlaSendFromHost") {
send_from_host = n;
break;
}
}
if (!send_from_host) {
return errors::Internal("Shape inference graph ",
shape_inference_graph_name,
" does not have _XlaSendFromHost node.");
}
Node* send_node_in_host_graph = nullptr;
for (Node* n : host_graph->nodes()) {
if (n->name() == send_from_host->name()) {
send_node_in_host_graph = n;
break;
}
}
if (send_node_in_host_graph) {
std::vector<Node*> nodes;
nodes.reserve(g->num_op_nodes());
for (Node* n : g->op_nodes()) {
nodes.push_back(n);
}
for (Node* n : nodes) {
g->RemoveNode(n);
}
Node* start_node = pivot_node ? pivot_node : host_graph->source_node();
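    // Iterative post-order DFS from the send node: each node's inputs are
    // copied into `g` before the node itself, stopping at `start_node`.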
struct Visit {
Node* n;
bool is_exiting;
};
std::vector<Visit> stack{{send_node_in_host_graph, false}};
std::map<Node*, Node*> node_map;
node_map[host_graph->source_node()] = g->source_node();
while (!stack.empty()) {
Visit& curr = stack.back();
if (curr.is_exiting) {
if (node_map.find(curr.n) == node_map.end()) {
Node* copy = g->CopyNode(curr.n);
if (curr.n != start_node) {
for (const Edge* e : curr.n->in_edges()) {
auto node_iter = node_map.find(e->src());
if (node_iter == node_map.end()) {
return errors::Internal("Cannot find node image for ",
e->src()->DebugString());
}
g->AddEdge(node_iter->second, e->src_output(), copy,
e->dst_input());
}
}
node_map[curr.n] = copy;
}
stack.pop_back();
} else {
curr.is_exiting = true;
if (curr.n != start_node) {
for (const Edge* e : curr.n->in_edges()) {
if (node_map.find(e->src()) != node_map.end()) {
continue;
}
stack.push_back({e->src(), false});
}
}
}
}
send_from_host = node_map[send_node_in_host_graph];
  } else {
    // The send node was not copied into the host graph; the shape inference
    // graph already contains it and its inputs, so leave `g` untouched here.
  }
for (auto e : g->edges()) {
if (e->IsControlEdge()) {
g->RemoveEdge(e);
}
}
PruneForReverseReachability(g,
std::unordered_set<const Node*>{send_from_host});
if (VLOG_IS_ON(4)) {
DumpGraphToFile(shape_inference_graph_name, *g, fld);
}
FunctionDef fdef_replace;
TF_RETURN_IF_ERROR(
GraphToFunctionDef(*g, shape_inference_graph_name, &fdef_replace));
TF_RETURN_IF_ERROR(
fld->ReplaceFunction(shape_inference_graph_name, fdef_replace));
return absl::OkStatus();
}
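// Marks the node with maximal (single-device) sharding on device 0, so XLA
// places the op on exactly one core.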
void SetMaximalSharding(NodeDefBuilder& node_builder) {
xla::OpSharding sharding;
sharding.set_type(xla::OpSharding::MAXIMAL);
sharding.add_tile_assignment_dimensions(1);
sharding.add_tile_assignment_devices(0);
node_builder.Attr("_XlaSharding", sharding.SerializeAsString());
}
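// Builds an XlaSendToHost node that ships the If predicate to the host, with
// maximal sharding so it runs on a single core.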
TF_ATTRIBUTE_NOINLINE absl::StatusOr<Node*> BuildSendIfPredNode(
const string& name, const string& host_transfer_key, Node* pred_node,
Graph* g) {
NodeDefBuilder send_pred_builder(name, "XlaSendToHost");
send_pred_builder.Attr("Tinput", DT_BOOL);
send_pred_builder.Attr("key", absl::StrCat(host_transfer_key, "_dtoh_0"));
send_pred_builder.Attr(kXlaTokenInputNodesAttrName,
std::vector<string>{kXlaTokenArgNodeName});
send_pred_builder.Attr(kXlaOriginalOutsideCompilationNodeName, name);
SetMaximalSharding(send_pred_builder);
send_pred_builder.Input(pred_node->name(), 0, DT_BOOL);
NodeDef send_pred_def;
TF_RETURN_IF_ERROR(send_pred_builder.Finalize(&send_pred_def));
TF_ASSIGN_OR_RETURN(Node * send_pred_node, g->AddNode(send_pred_def));
g->AddEdge(pred_node, 0, send_pred_node, 0);
return send_pred_node;
}
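// Replaces the key Placeholder in function `func_name` with a string _Arg
// node at index 0 (adding one first if the function has no key placeholder)
// and re-registers the rewritten function in `fld`.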
Status ReplaceKeyPlaceholderWithArgNode(const string& xla_cluster_name,
const string& func_name,
FunctionLibraryDefinition* fld) {
AttrValue device_ordinal_attr;
device_ordinal_attr.set_i(0);
protobuf::Map<string, AttrValue> attrs;
attrs["_device_ordinal"] = device_ordinal_attr;
std::unique_ptr<FunctionBody> fbody;
const FunctionDef* func = fld->Find(func_name);
TF_RETURN_IF_ERROR(
FunctionDefToBodyHelper(*func, AttrSlice(&attrs), fld, &fbody));
Graph* g = fbody->graph;
Node* key_placeholder = nullptr;
for (Node* n : g->nodes()) {
if (IsKeyPlaceholderNode(*n)) {
key_placeholder = n;
break;
}
}
if (!key_placeholder) {
TF_ASSIGN_OR_RETURN(key_placeholder,
AddHostComputeKeyPlaceholder(xla_cluster_name, g));
}
NodeDefBuilder arg_builder("key_arg", FunctionLibraryDefinition::kArgOp);
arg_builder.Attr("T", DT_STRING);
arg_builder.Attr("index", 0);
NodeDef arg_def;
TF_RETURN_IF_ERROR(arg_builder.Finalize(&arg_def));
TF_RETURN_IF_ERROR(ReplaceNode(g, key_placeholder, arg_def).status());
TF_RETURN_IF_ERROR(ResetDeviceOrdinalToPlaceholderValue(g));
FunctionDef replace_fdef;
TF_RETURN_IF_ERROR(GraphToFunctionDef(
*g, func_name, HostGraphControlRetMapping, &replace_fdef));
TF_RETURN_IF_ERROR(fld->ReplaceFunction(func_name, replace_fdef));
return absl::OkStatus();
}
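// Builds the host-side graph for an outside-compiled If node: a
// _XlaRecvAtHost node receives the predicate from the device, and a host If
// node dispatches to the host versions of the two branches, each taking the
// key string as its only argument.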
TF_ATTRIBUTE_NOINLINE Status BuildHostGraphForIfNode(
const string& xla_cluster_attr_name,
const string& outside_compilation_attr_name, const string& xla_cluster_name,
const string& if_node_name, const string& host_transfer_key,
const string& host_graph_func_name, FunctionLibraryDefinition* fld,
const string& then_branch_host_func_name,
const string& else_branch_host_func_name) {
Graph host_graph(fld);
string outside_compilation_name = absl::StrCat("oc_if_", if_node_name);
AttrValue device_ordinal_value;
device_ordinal_value.set_placeholder("_device_ordinal");
TF_ASSIGN_OR_RETURN(
Node * key_placeholder,
AddHostComputeKeyPlaceholder(xla_cluster_name, &host_graph));
NodeDefBuilder recv_pred_builder(
absl::StrCat("recv_oc_if_pred_", if_node_name), "_XlaRecvAtHost");
recv_pred_builder.Attr("Toutputs", std::vector<DataType>{DT_BOOL});
recv_pred_builder.Attr("key", host_transfer_key);
recv_pred_builder.Attr("device_ordinal", device_ordinal_value);
recv_pred_builder.Attr(xla_cluster_attr_name, xla_cluster_name);
recv_pred_builder.Attr(outside_compilation_attr_name,
outside_compilation_name);
recv_pred_builder.Attr(kXlaHasHostTransferAttrName, true);
recv_pred_builder.Input(key_placeholder->name(), 0, DT_STRING);
NodeDef recv_pred_def;
TF_RETURN_IF_ERROR(recv_pred_builder.Finalize(&recv_pred_def));
TF_ASSIGN_OR_RETURN(Node * recv_pred_node, host_graph.AddNode(recv_pred_def));
host_graph.AddEdge(key_placeholder, 0, recv_pred_node, 0);
TF_RETURN_IF_ERROR(ReplaceKeyPlaceholderWithArgNode(
xla_cluster_name, then_branch_host_func_name, fld));
TF_RETURN_IF_ERROR(ReplaceKeyPlaceholderWithArgNode(
xla_cluster_name, else_branch_host_func_name, fld));
NodeDefBuilder if_builder(absl::StrCat("oc_if_", if_node_name), "If");
if_builder.Attr("Tcond", DT_BOOL);
if_builder.Attr("Tin", std::vector<DataType>{DT_STRING});
if_builder.Attr("Tout", std::vector<DataType>{});
NameAttrList host_then_branch, host_else_branch;
host_then_branch.set_name(then_branch_host_func_name);
(*host_then_branch.mutable_attr())["_device_ordinal"] = device_ordinal_value;
host_else_branch.set_name(else_branch_host_func_name);
(*host_else_branch.mutable_attr())["_device_ordinal"] = device_ordinal_value;
if_builder.Attr("then_branch", host_then_branch);
if_builder.Attr("else_branch", host_else_branch);
if_builder.Attr(kXlaHasHostTransferAttrName, true);
if_builder.Attr(xla_cluster_attr_name, xla_cluster_name);
if_builder.Attr(outside_compilation_attr_name, outside_compilation_name);
if_builder.Input(recv_pred_node->name(), 0, DT_BOOL);
std::vector<NodeDefBuilder::NodeOut> if_inputs{
{key_placeholder->name(), 0, DT_STRING}};
if_builder.Input(if_inputs);
NodeDef if_def;
TF_RETURN_IF_ERROR(if_builder.Finalize(&if_def));
TF_ASSIGN_OR_RETURN(Node * if_node, host_graph.AddNode(if_def));
host_graph.AddEdge(recv_pred_node, 0, if_node, 0);
host_graph.AddEdge(key_placeholder, 0, if_node, 1);
FunctionDef oc_host_graph_fdef;
TF_RETURN_IF_ERROR(GraphToFunctionDef(host_graph, host_graph_func_name,
&oc_host_graph_fdef));
if (fld->Find(host_graph_func_name)) {
TF_RETURN_IF_ERROR(
fld->ReplaceFunction(host_graph_func_name, oc_host_graph_fdef));
} else {
TF_RETURN_IF_ERROR(fld->AddFunctionDef(oc_host_graph_fdef));
}
return absl::OkStatus();
}
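// Appends an XlaSendToHost node to the loop cond function so the device sends
// each iteration's predicate to the host. The rewritten cond replaces the
// original function in place, or is installed under a new name otherwise.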
TF_ATTRIBUTE_NOINLINE Status AddSendLoopPredToLoopCond(
const string& cond_xla_func_name, const string& host_transfer_key,
NameAttrList* loop_cond_func, FunctionLibraryDefinition* fld,
Node* while_node) {
std::unique_ptr<FunctionBody> fbody;
const FunctionDef* loop_cond_fdef = fld->Find(loop_cond_func->name());
TF_RET_CHECK(loop_cond_fdef);
TF_RETURN_IF_ERROR(FunctionDefToBodyHelper(
*loop_cond_fdef, AttrSlice(&loop_cond_func->attr()), fld, &fbody));
Graph* g = fbody->graph;
Node* ret_node = nullptr;
for (Node* n : g->nodes()) {
if (n->type_string() == "_Retval") {
if (ret_node) {
return errors::Internal("Multiple return node for loop cond function ",
loop_cond_func->name(), ": ",
ret_node->DebugString(), " and ",
n->DebugString());
} else {
ret_node = n;
}
}
}
if (!ret_node) {
return errors::Internal("No _Retval node for loop cond function ",
loop_cond_func->name());
}
Node* loop_cond;
TF_RETURN_IF_ERROR(ret_node->input_node(0, &loop_cond));
NodeDefBuilder send_loop_cond_builder(
absl::StrCat("send_oc_while_cond_", while_node->name()), "XlaSendToHost");
send_loop_cond_builder.Attr("Tinput", DT_BOOL);
send_loop_cond_builder.Attr("key",
absl::StrCat(host_transfer_key, "_dtoh_0"));
send_loop_cond_builder.Attr(kXlaTokenInputNodesAttrName,
std::vector<string>{kXlaTokenArgNodeName});
send_loop_cond_builder.Attr(kXlaOriginalOutsideCompilationNodeName,
send_loop_cond_builder.node_name());
SetMaximalSharding(send_loop_cond_builder);
send_loop_cond_builder.Input(loop_cond->name(), 0, DT_BOOL);
NodeDef send_loop_cond_def;
TF_RETURN_IF_ERROR(send_loop_cond_builder.Finalize(&send_loop_cond_def));
TF_ASSIGN_OR_RETURN(Node * send_loop_cond_node,
g->AddNode(send_loop_cond_def));
g->AddEdge(loop_cond, 0, send_loop_cond_node, 0);
FunctionDef replace_fdef;
if (loop_cond_func->name() == cond_xla_func_name) {
TF_RETURN_IF_ERROR(
GraphToFunctionDef(*g, loop_cond_func->name(), &replace_fdef));
TF_RETURN_IF_ERROR(
fld->ReplaceFunction(loop_cond_func->name(), replace_fdef));
} else {
const auto new_name = fld->UniqueFunctionName(
absl::StrCat(loop_cond_func->name(), "_send_pred_added_"));
TF_RETURN_IF_ERROR(GraphToFunctionDef(*g, new_name, &replace_fdef));
TF_RETURN_IF_ERROR(fld->AddFunctionDef(replace_fdef));
loop_cond_func->set_name(new_name);
while_node->ClearAttr("cond");
while_node->AddAttr("cond", *loop_cond_func);
}
return absl::OkStatus();
}
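// Rewrites the host-side while loop cond: the key placeholder becomes an
// _Arg, a _XlaRecvAtHost node receives the device's predicate, and that
// predicate is returned as the cond result.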
Status RewriteHostWhileLoopCond(
const string& cond_host_func_name, const string& while_node_name,
const string& host_transfer_key, const string& xla_cluster_attr_name,
const string& xla_cluster_name, const string& outside_compilation_attr_name,
const string& outside_compilation_name, FunctionLibraryDefinition* fld) {
TF_RETURN_IF_ERROR(ReplaceKeyPlaceholderWithArgNode(
xla_cluster_name, cond_host_func_name, fld));
AttrValue device_ordinal_temp_value;
device_ordinal_temp_value.set_i(0);
protobuf::Map<string, AttrValue> attrs;
attrs["_device_ordinal"] = device_ordinal_temp_value;
std::unique_ptr<FunctionBody> cond_fbody;
const FunctionDef* cond_host_func = fld->Find(cond_host_func_name);
TF_RETURN_IF_ERROR(FunctionDefToBodyHelper(*cond_host_func, AttrSlice(&attrs),
fld, &cond_fbody));
Graph* cond_graph = cond_fbody->graph;
Node* key_arg = nullptr;
for (Node* n : cond_graph->nodes()) {
if (n->type_string() == "_Arg") {
key_arg = n;
}
}
if (!key_arg) {
return errors::Internal(
"No _Arg node found for host compute key in function ",
cond_host_func_name);
}
NodeDefBuilder recv_pred_builder(
absl::StrCat("recv_oc_while_cond_", while_node_name), "_XlaRecvAtHost");
recv_pred_builder.Attr("Toutputs", std::vector<DataType>{DT_BOOL});
recv_pred_builder.Attr("key", host_transfer_key);
AttrValue device_ordinal_value;
device_ordinal_value.set_placeholder("_device_ordinal");
recv_pred_builder.Attr("device_ordinal", device_ordinal_value);
recv_pred_builder.Attr(xla_cluster_attr_name, xla_cluster_name);
recv_pred_builder.Attr(outside_compilation_attr_name,
outside_compilation_name);
recv_pred_builder.Attr(kXlaHasHostTransferAttrName, true);
recv_pred_builder.Input(key_arg->name(), 0, DT_STRING);
NodeDef recv_pred_def;
TF_RETURN_IF_ERROR(recv_pred_builder.Finalize(&recv_pred_def));
TF_ASSIGN_OR_RETURN(Node * recv_pred_node,
cond_graph->AddNode(recv_pred_def));
cond_graph->AddEdge(key_arg, 0, recv_pred_node, 0);
NodeDefBuilder ret_builder(
absl::StrCat("recv_oc_while_cond_ret_", while_node_name), "_Retval");
ret_builder.Attr("T", DT_BOOL);
ret_builder.Attr("index", 0);
ret_builder.Input(recv_pred_node->name(), 0, DT_BOOL);
NodeDef ret_def;
TF_RETURN_IF_ERROR(ret_builder.Finalize(&ret_def));
TF_ASSIGN_OR_RETURN(Node * ret_node, cond_graph->AddNode(ret_def));
cond_graph->AddEdge(recv_pred_node, 0, ret_node, 0);
TF_RETURN_IF_ERROR(ResetDeviceOrdinalToPlaceholderValue(cond_graph));
FunctionDef cond_replace_fdef;
TF_RETURN_IF_ERROR(GraphToFunctionDef(*cond_graph, cond_host_func_name,
HostGraphControlRetMapping,
&cond_replace_fdef));
TF_RETURN_IF_ERROR(
fld->ReplaceFunction(cond_host_func_name, cond_replace_fdef));
return absl::OkStatus();
}
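// Rewrites the host-side while loop body: the key placeholder becomes an
// _Arg that is returned unchanged as the single _Retval, so the key string is
// threaded through every iteration.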
Status RewriteHostWhileLoopBody(
const string& body_host_func_name, const string& while_node_name,
const string& host_transfer_key, const string& xla_cluster_attr_name,
const string& xla_cluster_name, const string& outside_compilation_attr_name,
const string& outside_compilation_name, FunctionLibraryDefinition* fld) {
TF_RETURN_IF_ERROR(ReplaceKeyPlaceholderWithArgNode(
xla_cluster_name, body_host_func_name, fld));
AttrValue device_ordinal_temp_value;
device_ordinal_temp_value.set_i(0);
protobuf::Map<string, AttrValue> attrs;
attrs["_device_ordinal"] = device_ordinal_temp_value;
std::unique_ptr<FunctionBody> body_fbody;
const FunctionDef* body_host_func = fld->Find(body_host_func_name);
TF_RET_CHECK(body_host_func);
TF_RETURN_IF_ERROR(FunctionDefToBodyHelper(*body_host_func, AttrSlice(&attrs),
fld, &body_fbody));
Graph* body_graph = body_fbody->graph;
Node* key_arg = nullptr;
for (Node* n : body_graph->nodes()) {
if (n->type_string() == "_Arg") {
key_arg = n;
}
}
if (!key_arg) {
return errors::Internal(
"No _Arg node found for host compute key in function ",
body_host_func_name);
}
NodeDefBuilder ret_builder(
absl::StrCat("recv_oc_while_body_ret_", while_node_name), "_Retval");
ret_builder.Attr("T", DT_STRING);
ret_builder.Attr("index", 0);
ret_builder.Input(key_arg->name(), 0, DT_STRING);
NodeDef ret_def;
TF_RETURN_IF_ERROR(ret_builder.Finalize(&ret_def));
TF_ASSIGN_OR_RETURN(Node * ret_node, body_graph->AddNode(ret_def));
body_graph->AddEdge(key_arg, 0, ret_node, 0);
TF_RETURN_IF_ERROR(ResetDeviceOrdinalToPlaceholderValue(body_graph));
FunctionDef body_replace_fdef;
TF_RETURN_IF_ERROR(GraphToFunctionDef(*body_graph, body_host_func_name,
HostGraphControlRetMapping,
&body_replace_fdef));
TF_RETURN_IF_ERROR(
fld->ReplaceFunction(body_host_func_name, body_replace_fdef));
return absl::OkStatus();
}
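// Builds the host-side graph for an outside-compiled While node: a host While
// over a single string loop variable (the key), running the rewritten host
// cond and body functions with parallel_iterations fixed at 1.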
TF_ATTRIBUTE_NOINLINE Status BuildHostGraphForWhileNode(
const string& xla_cluster_attr_name,
const string& outside_compilation_attr_name, const string& xla_cluster_name,
const string& while_node_name, const string& host_transfer_key,
const string& host_graph_func_name, FunctionLibraryDefinition* fld,
const string& cond_host_func_name, const string& body_host_func_name) {
Graph host_graph(fld);
string outside_compilation_name = absl::StrCat("oc_while_", while_node_name);
TF_ASSIGN_OR_RETURN(
Node * key_placeholder,
AddHostComputeKeyPlaceholder(xla_cluster_name, &host_graph));
TF_RETURN_IF_ERROR(RewriteHostWhileLoopCond(
cond_host_func_name, while_node_name, host_transfer_key,
xla_cluster_attr_name, xla_cluster_name, outside_compilation_attr_name,
outside_compilation_name, fld));
TF_RETURN_IF_ERROR(RewriteHostWhileLoopBody(
body_host_func_name, while_node_name, host_transfer_key,
xla_cluster_attr_name, xla_cluster_name, outside_compilation_attr_name,
outside_compilation_name, fld));
NodeDefBuilder while_builder(absl::StrCat("oc_while_", while_node_name),
"While");
while_builder.Attr("T", std::vector<DataType>{DT_STRING});
NameAttrList func;
AttrValue device_ordinal_value;
device_ordinal_value.set_placeholder("_device_ordinal");
(*func.mutable_attr())["_device_ordinal"] = device_ordinal_value;
func.set_name(cond_host_func_name);
while_builder.Attr("cond", func);
func.set_name(body_host_func_name);
while_builder.Attr("body", func);
while_builder.Attr(kXlaHasHostTransferAttrName, true);
while_builder.Attr(xla_cluster_attr_name, xla_cluster_name);
while_builder.Attr(outside_compilation_attr_name, outside_compilation_name);
while_builder.Attr("parallel_iterations", 1);
std::vector<NodeDefBuilder::NodeOut> while_inputs{
{key_placeholder->name(), 0, DT_STRING}};
while_builder.Input(while_inputs);
NodeDef while_def;
TF_RETURN_IF_ERROR(while_builder.Finalize(&while_def));
TF_ASSIGN_OR_RETURN(Node * while_node, host_graph.AddNode(while_def));
host_graph.AddEdge(key_placeholder, 0, while_node, 0);
FunctionDef oc_host_graph_fdef;
TF_RETURN_IF_ERROR(GraphToFunctionDef(host_graph, host_graph_func_name,
&oc_host_graph_fdef));
if (fld->Find(host_graph_func_name)) {
TF_RETURN_IF_ERROR(
fld->ReplaceFunction(host_graph_func_name, oc_host_graph_fdef));
} else {
TF_RETURN_IF_ERROR(fld->AddFunctionDef(oc_host_graph_fdef));
}
return absl::OkStatus();
}
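// Builds the host-side graph for an outside-compiled function call: a single
// call node that invokes the host version of the function with the key
// placeholder as its input.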
Status BuildHostGraphForFuncCallNode(
const string& xla_cluster_attr_name, const string& xla_cluster_name,
const string& outside_compilation_attr_name,
const string& func_call_node_name, const string& func_call_host_func_name,
const string& host_graph_func_name, FunctionLibraryDefinition* fld) {
Graph host_graph(fld);
AttrValue device_ordinal_value;
device_ordinal_value.set_placeholder("_device_ordinal");
TF_ASSIGN_OR_RETURN(
Node * key_placeholder,
AddHostComputeKeyPlaceholder(xla_cluster_name, &host_graph));
TF_RETURN_IF_ERROR(ReplaceKeyPlaceholderWithArgNode(
xla_cluster_name, func_call_host_func_name, fld));
NodeDefBuilder call_builder(absl::StrCat("oc_call_", func_call_node_name),
func_call_host_func_name, fld);
call_builder.Input(key_placeholder->name(), 0, DT_STRING);
call_builder.Attr("_device_ordinal", device_ordinal_value);
call_builder.Attr(kXlaHasHostTransferAttrName, true);
call_builder.Attr(xla_cluster_attr_name, xla_cluster_name);
call_builder.Attr(outside_compilation_attr_name, call_builder.node_name());
NodeDef call_def;
TF_RETURN_IF_ERROR(call_builder.Finalize(&call_def));
TF_ASSIGN_OR_RETURN(Node * call_node, host_graph.AddNode(call_def));
host_graph.AddEdge(key_placeholder, 0, call_node, 0);
FunctionDef oc_host_graph_fdef;
TF_RETURN_IF_ERROR(GraphToFunctionDef(host_graph, host_graph_func_name,
HostGraphControlRetMapping,
&oc_host_graph_fdef));
if (fld->Find(host_graph_func_name)) {
TF_RETURN_IF_ERROR(
fld->ReplaceFunction(host_graph_func_name, oc_host_graph_fdef));
} else {
TF_RETURN_IF_ERROR(fld->AddFunctionDef(oc_host_graph_fdef));
}
return absl::OkStatus();
}
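// Extracts outside compilation from a function call node (direct call,
// PartitionedCall, or SymbolicGradient): the called function is rewritten,
// the call node is replaced with one invoking the rewritten function (with
// token attributes attached), and its host graph is recorded.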
TF_ATTRIBUTE_NOINLINE Status ExtractOutsideCompilationForFuncCallNode(
const string& xla_cluster_attr_name,
const string& outside_compilation_attr_name, const string& xla_cluster_name,
const std::map<string, int>& host_compute_core, Graph* g, Node* n,
FunctionLibraryRuntime* flr, FunctionLibraryDefinition* fld,
std::vector<string>* host_graphs,
std::vector<string>* shape_inference_graphs,
bool* has_outside_compilation) {
bool func_has_outside_compilation = false;
NameAttrList func;
if (fld->Contains(n->type_string())) {
func.set_name(n->type_string());
typedef protobuf::Map<string, AttrValue> AttrMap;
*func.mutable_attr() = AttrMap(n->attrs().begin(), n->attrs().end());
} else if (n->IsPartitionedCall()) {
TF_RETURN_IF_ERROR(GetNodeAttr(n->def(), "f", &func));
} else {
TF_RET_CHECK(n->type_string() == FunctionLibraryDefinition::kGradientOp);
func.set_name(FunctionLibraryDefinition::kGradientOp);
*func.mutable_attr() = n->def().attr();
}
string canonical_func_name;
if (func.name() == FunctionLibraryDefinition::kGradientOp) {
NameAttrList forward_func;
TF_RETURN_IF_ERROR(GetNodeAttr(n->def(), "f", &forward_func));
canonical_func_name = absl::StrCat("gradient_", forward_func.name());
} else {
canonical_func_name = func.name();
}
string new_func_name = absl::StrCat(canonical_func_name, "_oc");
string host_func_name =
absl::StrCat("oc_func_call_host_", canonical_func_name);
TF_RETURN_IF_ERROR(ExtractOutsideCompilationForFunction(
xla_cluster_attr_name, outside_compilation_attr_name, xla_cluster_name,
func, new_func_name, host_func_name, host_compute_core, flr, fld,
shape_inference_graphs, &func_has_outside_compilation));
if (!func_has_outside_compilation) {
return absl::OkStatus();
}
*has_outside_compilation = true;
auto replace_builder =
std::make_unique<NodeDefBuilder>(n->name(), new_func_name, fld);
std::vector<NodeDefBuilder::NodeOut> inputs(n->num_inputs());
for (const Edge* e : n->in_edges()) {
if (e->IsControlEdge()) {
continue;
}
const bool input_size_check =
e->dst_input() < static_cast<int>(inputs.size());
TF_RET_CHECK(e->dst_input() >= 0 && input_size_check);
inputs[e->dst_input()] =
NodeDefBuilder::NodeOut{e->src()->name(), e->src_output(),
e->src()->output_type(e->src_output())};
}
for (const auto& input : inputs) {
replace_builder->Input(input);
}
for (const auto& attr : n->attrs()) {
replace_builder->Attr(attr.first, attr.second);
}
auto replace_def = std::make_unique<NodeDef>();
TF_RETURN_IF_ERROR(replace_builder->Finalize(replace_def.get()));
TF_ASSIGN_OR_RETURN(Node * replace, ReplaceNode(g, n, *replace_def));
replace->AddAttr(kXlaTokenInputNodesAttrName,
std::vector<string>{kXlaTokenArgNodeName});
replace->AddAttr(kXlaOriginalOutsideCompilationNodeName, replace->name());
string oc_host_graph_name =
absl::StrCat("oc_func_host_graph_", replace->name());
TF_RETURN_IF_ERROR(BuildHostGraphForFuncCallNode(
xla_cluster_attr_name, xla_cluster_name, outside_compilation_attr_name,
replace->name(), host_func_name, oc_host_graph_name, fld));
host_graphs->push_back(oc_host_graph_name);
return absl::OkStatus();
}
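// Extracts outside compilation from an If node: both branches are rewritten,
// the predicate is sent to the host, and a matching host-side If graph is
// built. A branch without outside compilation still gets an (empty) host
// function so that both host branches exist.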
Status ExtractOutsideCompilationForIfNode(
const string& xla_cluster_attr_name,
const string& outside_compilation_attr_name, const string& xla_cluster_name,
const std::map<string, int>& host_compute_core, Graph* g, Node* n,
FunctionLibraryRuntime* flr, FunctionLibraryDefinition* fld,
std::vector<string>* host_graphs,
std::vector<string>* shape_inference_graphs,
bool* has_outside_compilation) {
NameAttrList then_branch, else_branch;
TF_RETURN_IF_ERROR(GetNodeAttr(n->attrs(), "then_branch", &then_branch));
TF_RETURN_IF_ERROR(GetNodeAttr(n->attrs(), "else_branch", &else_branch));
bool then_branch_has_outside_compilation = false;
bool else_branch_has_outside_compilation = false;
string then_branch_host_func_name =
absl::StrCat("oc_then_branch_host_if_", then_branch.name()),
else_branch_host_func_name =
absl::StrCat("oc_else_branch_host_if_", else_branch.name());
string then_branch_xla_func_name = absl::StrCat(then_branch.name(), "_oc"),
else_branch_xla_func_name = absl::StrCat(else_branch.name(), "_oc");
TF_RETURN_IF_ERROR(ExtractOutsideCompilationForFunction(
xla_cluster_attr_name, outside_compilation_attr_name, xla_cluster_name,
then_branch, then_branch_xla_func_name, then_branch_host_func_name,
host_compute_core, flr, fld, shape_inference_graphs,
&then_branch_has_outside_compilation));
TF_RETURN_IF_ERROR(ExtractOutsideCompilationForFunction(
xla_cluster_attr_name, outside_compilation_attr_name, xla_cluster_name,
else_branch, else_branch_xla_func_name, else_branch_host_func_name,
host_compute_core, flr, fld, shape_inference_graphs,
&else_branch_has_outside_compilation));
if (!then_branch_has_outside_compilation &&
!else_branch_has_outside_compilation) {
return absl::OkStatus();
}
*has_outside_compilation = true;
if (then_branch_has_outside_compilation) {
then_branch.set_name(then_branch_xla_func_name);
n->ClearAttr("then_branch");
n->AddAttr("then_branch", then_branch);
}
if (else_branch_has_outside_compilation) {
else_branch.set_name(else_branch_xla_func_name);
n->ClearAttr("else_branch");
n->AddAttr("else_branch", else_branch);
}
n->AddAttr(kXlaOriginalOutsideCompilationNodeName, n->name());
string host_transfer_key = absl::StrCat("oc_if_pred_", n->name());
Node* pred_node;
TF_RETURN_IF_ERROR(n->input_node(0, &pred_node));
TF_ASSIGN_OR_RETURN(
Node * send_pred_node,
BuildSendIfPredNode(absl::StrCat("send_oc_if_pred_", n->name()),
host_transfer_key, pred_node, g));
n->AddAttr(kXlaTokenInputNodesAttrName,
std::vector<string>{send_pred_node->name()});
g->AddControlEdge(send_pred_node, n);
if (!then_branch_has_outside_compilation) {
std::unique_ptr<Graph> then_branch_host_graph(new Graph(fld));
std::vector<string> then_branch_host_graphs;
TF_RETURN_IF_ERROR(ConstructHostGraph(
xla_cluster_name, outside_compilation_attr_name,
then_branch_host_graphs, fld, &then_branch_host_graph));
FunctionDef then_branch_host_fdef;
TF_RETURN_IF_ERROR(GraphToFunctionDef(*then_branch_host_graph,
then_branch_host_func_name,
&then_branch_host_fdef));
if (fld->Find(then_branch_host_func_name)) {
TF_RETURN_IF_ERROR(fld->ReplaceFunction(then_branch_host_func_name,
then_branch_host_fdef));
} else {
TF_RETURN_IF_ERROR(fld->AddFunctionDef(then_branch_host_fdef));
}
}
if (!else_branch_has_outside_compilation) {
std::unique_ptr<Graph> else_branch_host_graph(new Graph(fld));
std::vector<string> else_branch_host_graphs;
TF_RETURN_IF_ERROR(ConstructHostGraph(
xla_cluster_name, outside_compilation_attr_name,
else_branch_host_graphs, fld, &else_branch_host_graph));
FunctionDef else_branch_host_fdef;
TF_RETURN_IF_ERROR(GraphToFunctionDef(*else_branch_host_graph,
else_branch_host_func_name,
&else_branch_host_fdef));
if (fld->Find(else_branch_host_func_name)) {
TF_RETURN_IF_ERROR(fld->ReplaceFunction(else_branch_host_func_name,
else_branch_host_fdef));
} else {
TF_RETURN_IF_ERROR(fld->AddFunctionDef(else_branch_host_fdef));
}
}
string oc_host_graph_name = absl::StrCat("oc_if_host_graph_", n->name());
TF_RETURN_IF_ERROR(BuildHostGraphForIfNode(
xla_cluster_attr_name, outside_compilation_attr_name, xla_cluster_name,
n->name(), host_transfer_key, oc_host_graph_name, fld,
then_branch_host_func_name, else_branch_host_func_name));
host_graphs->push_back(oc_host_graph_name);
return absl::OkStatus();
}
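// Extracts outside compilation from a While node: cond and body are
// rewritten, the loop predicate is sent to the host on every iteration, and a
// matching host-side While graph is built. Cond/body without outside
// compilation still get (empty) host functions.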
Status ExtractOutsideCompilationForWhileNode(
const string& xla_cluster_attr_name,
const string& outside_compilation_attr_name, const string& xla_cluster_name,
const std::map<string, int>& host_compute_core, Graph* g, Node* n,
FunctionLibraryRuntime* flr, FunctionLibraryDefinition* fld,
std::vector<string>* host_graphs,
std::vector<string>* shape_inference_graphs,
bool* has_outside_compilation) {
NameAttrList cond, body;
TF_RETURN_IF_ERROR(GetNodeAttr(n->attrs(), "cond", &cond));
TF_RETURN_IF_ERROR(GetNodeAttr(n->attrs(), "body", &body));
bool cond_has_outside_compilation = false;
bool body_has_outside_compilation = false;
string cond_host_func_name = absl::StrCat("oc_cond_host_while_", cond.name()),
body_host_func_name = absl::StrCat("oc_body_host_while_", body.name());
string cond_xla_func_name = absl::StrCat(cond.name(), "_oc"),
body_xla_func_name = absl::StrCat(body.name(), "_oc");
TF_RETURN_IF_ERROR(ExtractOutsideCompilationForFunction(
xla_cluster_attr_name, outside_compilation_attr_name, xla_cluster_name,
cond, cond_xla_func_name, cond_host_func_name, host_compute_core, flr,
fld, shape_inference_graphs, &cond_has_outside_compilation));
TF_RETURN_IF_ERROR(ExtractOutsideCompilationForFunction(
xla_cluster_attr_name, outside_compilation_attr_name, xla_cluster_name,
body, body_xla_func_name, body_host_func_name, host_compute_core, flr,
fld, shape_inference_graphs, &body_has_outside_compilation));
if (!cond_has_outside_compilation && !body_has_outside_compilation) {
return absl::OkStatus();
}
*has_outside_compilation = true;
if (cond_has_outside_compilation) {
cond.set_name(cond_xla_func_name);
n->ClearAttr("cond");
n->AddAttr("cond", cond);
}
if (body_has_outside_compilation) {
body.set_name(body_xla_func_name);
n->ClearAttr("body");
n->AddAttr("body", body);
}
n->AddAttr(kXlaOriginalOutsideCompilationNodeName, n->name());
string host_transfer_key = absl::StrCat("oc_while_pred_", n->name());
TF_RETURN_IF_ERROR(AddSendLoopPredToLoopCond(
cond_xla_func_name, host_transfer_key, &cond, fld, n));
n->AddAttr(kXlaTokenInputNodesAttrName,
std::vector<string>{kXlaTokenArgNodeName});
if (!cond_has_outside_compilation) {
std::unique_ptr<Graph> cond_host_graph(new Graph(fld));
    std::vector<string> empty_host_graphs;
    TF_RETURN_IF_ERROR(
        ConstructHostGraph(xla_cluster_name, outside_compilation_attr_name,
                           empty_host_graphs, fld, &cond_host_graph));
FunctionDef cond_host_fdef;
TF_RETURN_IF_ERROR(GraphToFunctionDef(*cond_host_graph, cond_host_func_name,
&cond_host_fdef));
if (fld->Find(cond_host_func_name)) {
TF_RETURN_IF_ERROR(
fld->ReplaceFunction(cond_host_func_name, cond_host_fdef));
} else {
TF_RETURN_IF_ERROR(fld->AddFunctionDef(cond_host_fdef));
}
}
if (!body_has_outside_compilation) {
std::unique_ptr<Graph> body_host_graph(new Graph(fld));
    std::vector<string> empty_host_graphs;
    TF_RETURN_IF_ERROR(
        ConstructHostGraph(xla_cluster_name, outside_compilation_attr_name,
                           empty_host_graphs, fld, &body_host_graph));
FunctionDef body_host_fdef;
TF_RETURN_IF_ERROR(GraphToFunctionDef(*body_host_graph, body_host_func_name,
&body_host_fdef));
if (fld->Find(body_host_func_name)) {
TF_RETURN_IF_ERROR(
fld->ReplaceFunction(body_host_func_name, body_host_fdef));
} else {
TF_RETURN_IF_ERROR(fld->AddFunctionDef(body_host_fdef));
}
}
string oc_host_graph_name = absl::StrCat("oc_while_host_graph_", n->name());
TF_RETURN_IF_ERROR(BuildHostGraphForWhileNode(
xla_cluster_attr_name, outside_compilation_attr_name, xla_cluster_name,
n->name(), host_transfer_key, oc_host_graph_name, fld,
cond_host_func_name, body_host_func_name));
host_graphs->push_back(oc_host_graph_name);
return absl::OkStatus();
}
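// Walks the graph and recursively extracts outside compilation from every
// node that carries an associated function: plain function calls first, then
// If nodes, then While nodes.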
Status ExtractOutsideCompilationForNodesWithAssociatedFunctions(
Graph* g, const string& xla_cluster_attr_name,
const string& outside_compilation_attr_name, const string& xla_cluster_name,
const std::map<string, int>& host_compute_core, FunctionLibraryRuntime* flr,
FunctionLibraryDefinition* fld, std::vector<string>* host_graphs,
std::vector<string>* shape_inference_graphs,
bool* has_outside_compilation) {
std::vector<Node*> if_nodes, while_nodes, func_call_nodes;
for (Node* n : g->nodes()) {
if (n->IsIfNode()) {
if_nodes.push_back(n);
} else if (n->IsWhileNode()) {
while_nodes.push_back(n);
} else if (IsFunctionCall(*fld, *n)) {
func_call_nodes.push_back(n);
}
}
for (Node* n : func_call_nodes) {
TF_RETURN_IF_ERROR(ExtractOutsideCompilationForFuncCallNode(
xla_cluster_attr_name, outside_compilation_attr_name, xla_cluster_name,
host_compute_core, g, n, flr, fld, host_graphs, shape_inference_graphs,
has_outside_compilation));
}
for (Node* n : if_nodes) {
TF_RETURN_IF_ERROR(ExtractOutsideCompilationForIfNode(
xla_cluster_attr_name, outside_compilation_attr_name, xla_cluster_name,
host_compute_core, g, n, flr, fld, host_graphs, shape_inference_graphs,
has_outside_compilation));
}
for (Node* n : while_nodes) {
TF_RETURN_IF_ERROR(ExtractOutsideCompilationForWhileNode(
xla_cluster_attr_name, outside_compilation_attr_name, xla_cluster_name,
host_compute_core, g, n, flr, fld, host_graphs, shape_inference_graphs,
has_outside_compilation));
}
return absl::OkStatus();
}
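// An outside-compilation Const may also feed nodes that stay inside the XLA
// cluster. For each such Const, add a copy without the outside-compilation
// attribute, mirror the original's control inputs onto it, and move the
// non-outside-compilation data edges over to the copy.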
Status CopyOutsideCompilationConstNodes(
Graph* g, const string& outside_compilation_attr_name) {
for (Node* n : g->op_nodes()) {
if (!n->IsConstant() ||
!HasNodeAttr(n->def(), outside_compilation_attr_name)) {
continue;
}
std::vector<const Edge*> out_edges(n->out_edges().begin(),
n->out_edges().end());
bool has_non_oc_output = false;
for (const Edge* e : out_edges) {
if (!e->IsControlEdge() &&
!HasNodeAttr(e->dst()->def(), outside_compilation_attr_name)) {
has_non_oc_output = true;
break;
}
}
if (!has_non_oc_output) {
continue;
}
NodeDef copy_def = n->def();
copy_def.set_name(g->NewName(n->name()));
copy_def.mutable_attr()->erase(outside_compilation_attr_name);
TF_ASSIGN_OR_RETURN(Node * copy_node, g->AddNode(copy_def));
for (const Edge* e : n->in_edges()) {
if (e->IsControlEdge()) {
g->AddControlEdge(e->src(), copy_node);
}
}
for (const Edge* e : out_edges) {
if (!e->IsControlEdge() &&
!HasNodeAttr(e->dst()->def(), outside_compilation_attr_name)) {
Node* dst = e->dst();
int dst_input = e->dst_input();
g->RemoveEdge(e);
g->AddEdge(copy_node, 0, dst, dst_input);
}
}
}
return absl::OkStatus();
}
}
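// Rewrite callback passed to the encapsulation pass. It turns one
// outside-compilation subgraph into a host-side function: _Arg nodes collapse
// into a single _XlaRecvAtHost, _Retval nodes into a single _XlaSendFromHost,
// and the resulting call node is annotated with dtypes, shapes (or a shape
// inference graph when shapes are unknown), and the transfer channel key.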
Status RewriteOutsideCompilationSubgraphFn::operator()(
const std::vector<OutputTensor>& arg_source_tensors,
std::unique_ptr<Graph>* graph, std::vector<int>* input_permutation,
std::vector<int>* output_permutation, NodeDef* node_def) {
string old_name = node_def->op();
string new_name =
absl::StrCat(xla_cluster_name_, "_", new_function_name_, "_", old_name);
node_def->set_op(new_name);
node_def->set_name(new_name);
FixupSourceAndSinkEdges(graph->get());
TF_ASSIGN_OR_RETURN(
Node * key_placeholder,
AddHostComputeKeyPlaceholder(xla_cluster_name_, graph->get()));
std::vector<DataType> recv_at_host_dtypes;
TF_ASSIGN_OR_RETURN(
Node * recv_at_host_node,
ReplaceArgNodesWithRecvAtHostNode(graph->get(), new_name,
&recv_at_host_dtypes, key_placeholder));
std::vector<DataType> send_from_host_dtypes;
TF_ASSIGN_OR_RETURN(
Node * send_from_host_node,
ReplaceRetNodesWithSendFromHostNode(
graph->get(), new_name, &send_from_host_dtypes, key_placeholder));
for (Node* n : (*graph)->nodes()) {
if (IsKeyPlaceholderNode(*n)) {
continue;
}
n->AddAttr(xla_cluster_attr_name_, xla_cluster_name_);
n->AddAttr(outside_compilation_attr_name_, old_name);
}
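  // If all shapes sent back to XLA were statically inferred, record them on
  // the call node; otherwise a shape inference graph is attached below.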
std::optional<std::vector<PartialTensorShape>> shapes =
GetInferredInputShapes(send_from_host_dtypes.size(), send_from_host_node);
for (Node* n : (*graph)->nodes()) {
n->ClearAttr(kXlaInferredShapesAttrName);
}
for (Node* n : (*graph)->nodes()) {
if (HasNodeAttr(n->def(), kXlaConnectedToXlaComputationAttrName)) {
(*graph)->AddControlEdge(n, send_from_host_node);
n->ClearAttr(kXlaConnectedToXlaComputationAttrName);
}
if (HasNodeAttr(n->def(), kXlaConnectedFromXlaComputationAttrName)) {
(*graph)->AddControlEdge(recv_at_host_node, n);
n->ClearAttr(kXlaConnectedFromXlaComputationAttrName);
}
}
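  // The send node always has the key placeholder as one input; any extra
  // in-edge means real data flows back, so pin the node to the sink so the
  // reverse-reachability prune below keeps it (and its producers) alive.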
if (send_from_host_node->in_edges().size() > 1) {
(*graph)->AddControlEdge(send_from_host_node, (*graph)->sink_node());
}
PruneForReverseReachability(
graph->get(), std::unordered_set<const Node*>{(*graph)->sink_node()});
AddNodeAttr("_outside_compilation_subgraph", old_name, node_def);
if (shapes) {
NameAttrList shape_inference_graph;
AddNodeAttr("shape_inference_graph", shape_inference_graph, node_def);
AddNodeAttr("shapes", *shapes, node_def);
} else {
string shape_inference_func_name =
absl::StrCat("_outside_compilation_shape_inference_", new_name);
NameAttrList shape_inference_graph;
shape_inference_graph.set_name(shape_inference_func_name);
AddNodeAttr("shape_inference_graph", shape_inference_graph, node_def);
AddNodeAttr("shapes", std::vector<TensorShapeProto>{}, node_def);
}
AddNodeAttr("ancestors", std::vector<string>{}, node_def);
AddNodeAttr("Tinputs", recv_at_host_dtypes, node_def);
AddNodeAttr("Toutputs", send_from_host_dtypes, node_def);
AddNodeAttr("key", absl::StrCat("host_compute_channel_", new_name), node_def);
return absl::OkStatus();
}
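// Rewrites one function for outside compilation: encapsulates the marked
// subgraphs, replaces each encapsulated call with an XlaHostCompute node,
// adds control edges between host computes according to their token
// dependencies, recurses into If/While/function-call nodes, and finally
// writes back both the rewritten XLA function (new_func_name) and the
// combined host graph (host_graph_func_name).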
Status ExtractOutsideCompilationForFunction(
const string& xla_cluster_attr_name,
const string& outside_compilation_attr_name, const string& xla_cluster_name,
const NameAttrList& func_name_attrs, const string& new_func_name,
const string& host_graph_func_name,
const std::map<string, int>& host_compute_core, FunctionLibraryRuntime* flr,
FunctionLibraryDefinition* fld, std::vector<string>* shape_inference_graphs,
bool* has_outside_compilation) {
const string& func_name = func_name_attrs.name();
FunctionLibraryRuntime::Handle handle;
TF_RETURN_IF_ERROR(
flr->Instantiate(func_name, AttrSlice(&func_name_attrs.attr()), &handle));
Status ret_status = absl::OkStatus();
auto cleanup_handle = gtl::MakeCleanup([&]() {
auto s = flr->ReleaseHandle(handle);
if (!s.ok()) {
ret_status.Update(s);
}
});
const FunctionBody* fbody = flr->GetFunctionBody(handle);
*has_outside_compilation = false;
for (Node* n : fbody->graph->nodes()) {
if (HasNodeAttr(n->def(), outside_compilation_attr_name)) {
*has_outside_compilation = true;
break;
}
}
if (VLOG_IS_ON(4)) {
DumpGraphToFile(
absl::StrCat("extract_outside_compilation_for_func_before_", func_name),
*fbody->graph, fld);
}
std::unique_ptr<Graph> graph_out;
std::vector<string> outside_compilation_host_graphs;
std::vector<string> shape_inference_graphs_to_rewrite;
if (*has_outside_compilation) {
TF_RETURN_IF_ERROR(CopyOutsideCompilationConstNodes(
fbody->graph, outside_compilation_attr_name));
TF_ASSIGN_OR_RETURN(auto cluster_deps,
OutsideCompilationClusterDependencies(
fbody->graph, outside_compilation_attr_name));
TF_RETURN_IF_ERROR(PreprocessEdgesBetweenOutsideCompilations(
fbody->graph, outside_compilation_attr_name));
auto rewrite_fn = std::make_unique<RewriteOutsideCompilationSubgraphFn>(
xla_cluster_attr_name, outside_compilation_attr_name, xla_cluster_name,
new_func_name);
TF_RETURN_IF_ERROR(EncapsulateSubgraphsInFunctions(
outside_compilation_attr_name, *fbody->graph, *rewrite_fn,
      /*reuse_existing_functions=*/true, &graph_out, fld));
std::vector<Node*> outside_compilation_nodes;
for (Node* n : graph_out->nodes()) {
if (HasNodeAttr(n->def(), "_outside_compilation_subgraph")) {
outside_compilation_nodes.push_back(n);
outside_compilation_host_graphs.push_back(n->name());
auto shape_inference_graph = std::make_unique<NameAttrList>();
TF_RETURN_IF_ERROR(GetNodeAttr(n->attrs(), "shape_inference_graph",
shape_inference_graph.get()));
if (!shape_inference_graph->name().empty()) {
shape_inference_graphs->push_back(shape_inference_graph->name());
shape_inference_graphs_to_rewrite.push_back(
shape_inference_graph->name());
const FunctionDef* xla_fdef = fld->Find(n->name());
if (!xla_fdef) {
return errors::Internal("Cannot find XLA function ", n->name());
}
auto shape_inference_fdef = std::make_unique<FunctionDef>(*xla_fdef);
shape_inference_fdef->mutable_signature()->set_name(
shape_inference_graph->name());
if (fld->Find(shape_inference_graph->name())) {
TF_RETURN_IF_ERROR(fld->ReplaceFunction(
shape_inference_graph->name(), *shape_inference_fdef));
} else {
TF_RETURN_IF_ERROR(fld->AddFunctionDef(*shape_inference_fdef));
}
}
}
}
std::map<string, Node*> host_compute_nodes;
for (Node* n : outside_compilation_nodes) {
auto host_compute_node_or = ReplaceOutsideCompilationCallNode(
graph_out.get(), n, host_compute_core, *cluster_deps);
TF_RETURN_IF_ERROR(host_compute_node_or.status());
Node* host_compute_node = host_compute_node_or.value();
host_compute_nodes[host_compute_node->name()] = host_compute_node;
}
for (const auto& iter : host_compute_nodes) {
Node* host_compute_node = iter.second;
std::vector<string> token_input_node_names;
TF_RETURN_IF_ERROR(GetNodeAttr(host_compute_node->def(),
kXlaTokenInputNodesAttrName,
&token_input_node_names));
for (const string& node_name : token_input_node_names) {
if (node_name == kXlaTokenArgNodeName) {
continue;
}
      auto input_iter = host_compute_nodes.find(node_name);
      TF_RET_CHECK(input_iter != host_compute_nodes.end());
      graph_out->AddControlEdge(input_iter->second, host_compute_node);
}
}
}
Graph* g = (*has_outside_compilation) ? graph_out.get() : fbody->graph;
TF_RETURN_IF_ERROR(ExtractOutsideCompilationForNodesWithAssociatedFunctions(
g, xla_cluster_attr_name, outside_compilation_attr_name, xla_cluster_name,
host_compute_core, flr, fld, &outside_compilation_host_graphs,
shape_inference_graphs, has_outside_compilation));
if (*has_outside_compilation) {
std::unique_ptr<Graph> host_graph;
TF_RETURN_IF_ERROR(
ConstructHostGraph(xla_cluster_name, outside_compilation_attr_name,
outside_compilation_host_graphs, fld, &host_graph));
auto host_graph_fdef = std::make_unique<FunctionDef>();
TF_RETURN_IF_ERROR(GraphToFunctionDef(*host_graph, host_graph_func_name,
HostGraphControlRetMapping,
host_graph_fdef.get()));
if (fld->Find(host_graph_func_name)) {
TF_RETURN_IF_ERROR(
fld->ReplaceFunction(host_graph_func_name, *host_graph_fdef));
} else {
TF_RETURN_IF_ERROR(fld->AddFunctionDef(*host_graph_fdef));
}
for (const string& shape_inference_graph :
shape_inference_graphs_to_rewrite) {
TF_RETURN_IF_ERROR(
RewriteShapeInferenceGraph(shape_inference_graph, host_graph.get(),
                                   /*pivot_node=*/nullptr, fld));
}
for (const string& func : outside_compilation_host_graphs) {
TF_RETURN_IF_ERROR(fld->RemoveFunction(func));
}
auto updated_fdef = std::make_unique<FunctionDef>();
TF_RETURN_IF_ERROR(
GraphToFunctionDef(*g, new_func_name, updated_fdef.get()));
updated_fdef->mutable_signature()->set_is_stateful(true);
const FunctionDef* original_fdef = fld->Find(func_name);
if (original_fdef) {
for (const auto& attr : original_fdef->attr()) {
(*updated_fdef->mutable_attr())[attr.first] = attr.second;
}
}
if (fld->Find(new_func_name)) {
TF_RETURN_IF_ERROR(fld->ReplaceFunction(new_func_name, *updated_fdef));
} else {
TF_RETURN_IF_ERROR(fld->AddFunctionDef(*updated_fdef));
}
if (VLOG_IS_ON(4)) {
DumpGraphToFile(
absl::StrCat("extract_outside_compilation_for_func_after_",
func_name),
*g, fld);
}
}
return ret_status;
}
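// Top-level entry point: extracts outside compilation for every XLA cluster,
// then expands each cluster's host graph back into the main graph next to the
// cluster's pivot node and rewrites any shape inference graphs.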
Status ExtractOutsideCompilation(
const string& xla_cluster_attr_name,
const string& outside_compilation_attr_name,
const std::unordered_map<string, XlaClusterInfo>& clusters, Graph* g,
FunctionLibraryRuntime* flr, FunctionLibraryDefinition* fld,
bool* modified) {
if (VLOG_IS_ON(4)) {
DumpGraphToFile("extract_outside_compilation_before", *g, fld);
}
*modified = false;
auto node_name_index = g->BuildNodeNameIndex();
for (auto& iter : clusters) {
string xla_cluster_name = iter.first;
Node* n = iter.second.node;
auto const& func_name_attrs = iter.second.func_name_attrs;
auto const& host_compute_core = iter.second.host_compute_core;
std::vector<string> shape_inference_graphs;
bool has_outside_compilation;
string host_graph_func_name =
absl::StrCat("oc_host_graph_", xla_cluster_name);
TF_RETURN_IF_ERROR(ExtractOutsideCompilationForFunction(
xla_cluster_attr_name, outside_compilation_attr_name, xla_cluster_name,
func_name_attrs, func_name_attrs.name(), host_graph_func_name,
host_compute_core, flr, fld, &shape_inference_graphs,
&has_outside_compilation));
*modified |= has_outside_compilation;
if (has_outside_compilation) {
string pivot_name = absl::StrCat(xla_cluster_name, "/pivot");
Node* pivot_node = node_name_index[pivot_name];
TF_RETURN_IF_ERROR(ExpandHostGraphIntoMainGraph(
g, fld, host_graph_func_name, n, pivot_node));
TF_RETURN_IF_ERROR(fld->RemoveFunction(host_graph_func_name));
for (const auto& shape_inference_graph_name : shape_inference_graphs) {
TF_RETURN_IF_ERROR(RewriteShapeInferenceGraph(
shape_inference_graph_name, g, pivot_node, fld));
}
}
}
if (VLOG_IS_ON(4)) {
DumpGraphToFile("extract_outside_compilation_after", *g, fld);
}
return absl::OkStatus();
}
} | #include "tensorflow/compiler/jit/extract_outside_compilation_pass.h"
#include "absl/strings/match.h"
#include "tensorflow/cc/framework/scope.h"
#include "tensorflow/cc/ops/array_ops.h"
#include "tensorflow/cc/ops/function_ops.h"
#include "tensorflow/cc/ops/functional_ops.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/compiler/jit/encapsulate_util.h"
#include "xla/test.h"
#include "tensorflow/core/common_runtime/device_factory.h"
#include "tensorflow/core/common_runtime/function.h"
#include "tensorflow/core/framework/common_shape_fns.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/graph_to_functiondef.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/public/session_options.h"
#include "tensorflow/core/public/version.h"
namespace tensorflow {
TEST(RewriteOutsideCompilationSubgraphFnTest, Basic) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output arg0 = ops::_Arg(s.WithOpName("arg0"), DT_INT32, 0);
Output arg1 = ops::_Arg(s.WithOpName("arg1"), DT_FLOAT, 1);
Output arg2 = ops::_Arg(s.WithOpName("arg2"), DT_INT32, 2);
Output add = ops::Add(s.WithOpName("add"), arg0, arg0);
auto ret0 = ops::_Retval(s.WithOpName("ret0"), add, 0);
auto ret1 = ops::_Retval(s.WithOpName("ret1"), arg1, 1);
std::unique_ptr<Graph> g(new Graph(OpRegistry::Global()));
TF_CHECK_OK(s.ToGraph(g.get()));
auto node_name_image = g->BuildNodeNameIndex();
Node *add_node = node_name_image["add"];
EXPECT_NE(add_node, nullptr);
add_node->AddAttr(kXlaConnectedToXlaComputationAttrName, "cluster");
add_node->AddAttr(kXlaConnectedFromXlaComputationAttrName, "cluster");
RewriteOutsideCompilationSubgraphFn rewrite_fn("_xla", "_oc", "cluster", "");
std::vector<OutputTensor> arg_source_tensors;
NodeDef call_node_def;
call_node_def.set_op("0");
TF_CHECK_OK(
rewrite_fn(arg_source_tensors, &g, nullptr, nullptr, &call_node_def));
node_name_image = g->BuildNodeNameIndex();
Node *key_placeholder = node_name_image["cluster_key_placeholder"];
EXPECT_NE(key_placeholder, nullptr);
for (Node *n : g->nodes()) {
EXPECT_NE(n->type_string(), "_Arg");
}
Node *recv_at_host = node_name_image["outside_compilation_cluster__0_recv"];
EXPECT_NE(recv_at_host, nullptr);
std::vector<DataType> recv_at_host_dtypes;
TF_CHECK_OK(
GetNodeAttr(recv_at_host->attrs(), "Toutputs", &recv_at_host_dtypes));
EXPECT_EQ(recv_at_host_dtypes.size(), 3);
EXPECT_EQ(recv_at_host_dtypes[0], DT_INT32);
EXPECT_EQ(recv_at_host_dtypes[1], DT_FLOAT);
EXPECT_EQ(recv_at_host_dtypes[2], DT_INT32);
for (Node *n : g->nodes()) {
EXPECT_NE(n->type_string(), "_Retval");
}
Node *send_from_host = node_name_image["outside_compilation_cluster__0_send"];
EXPECT_NE(send_from_host, nullptr);
std::vector<DataType> send_from_host_dtypes;
TF_CHECK_OK(
GetNodeAttr(send_from_host->attrs(), "Tinputs", &send_from_host_dtypes));
EXPECT_EQ(send_from_host_dtypes.size(), 2);
EXPECT_EQ(send_from_host_dtypes[0], DT_INT32);
EXPECT_EQ(send_from_host_dtypes[1], DT_FLOAT);
add_node = node_name_image["add"];
EXPECT_NE(add_node, nullptr);
EXPECT_TRUE(HasNodeAttr(add_node->def(), "_xla"));
EXPECT_TRUE(HasNodeAttr(add_node->def(), "_oc"));
bool has_control_edge_from_recv_at_host = false;
for (auto e : add_node->in_edges()) {
if (e->IsControlEdge() && e->src() == recv_at_host) {
has_control_edge_from_recv_at_host = true;
}
}
EXPECT_TRUE(has_control_edge_from_recv_at_host);
bool has_control_edge_to_send_from_host = false;
for (auto e : add_node->out_edges()) {
if (e->IsControlEdge() && e->dst() == send_from_host) {
has_control_edge_to_send_from_host = true;
}
}
EXPECT_TRUE(has_control_edge_to_send_from_host);
NameAttrList shape_inference_graph;
TF_CHECK_OK(GetNodeAttr(AttrSlice(&call_node_def.attr()),
"shape_inference_graph", &shape_inference_graph));
EXPECT_EQ(shape_inference_graph.name(),
"_outside_compilation_shape_inference_cluster__0");
}
TEST(RewriteOutsideCompilationSubgraphFnTest, NoSendFromHost) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output arg0 = ops::_Arg(s.WithOpName("arg0"), DT_INT32, 0);
std::unique_ptr<Graph> g(new Graph(OpRegistry::Global()));
TF_CHECK_OK(s.ToGraph(g.get()));
RewriteOutsideCompilationSubgraphFn rewrite_fn("_xla", "_oc", "cluster", "");
std::vector<OutputTensor> arg_source_tensors;
NodeDef call_node_def;
call_node_def.set_op("0");
TF_CHECK_OK(
rewrite_fn(arg_source_tensors, &g, nullptr, nullptr, &call_node_def));
auto node_name_image = g->BuildNodeNameIndex();
Node *key_placeholder = node_name_image["cluster_key_placeholder"];
EXPECT_NE(key_placeholder, nullptr);
Node *recv_at_host = node_name_image["outside_compilation_cluster__0_recv"];
EXPECT_NE(recv_at_host, nullptr);
Node *send_from_host = node_name_image["outside_compilation_cluster__0_send"];
EXPECT_EQ(send_from_host, nullptr);
}
TEST(RewriteOutsideCompilationSubgraphFnTest, NoRecvAtHost) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output const0 = ops::Const(s.WithOpName("const0"), 1, {2});
auto ret = ops::_Retval(s.WithOpName("ret"), const0, 0);
std::unique_ptr<Graph> g(new Graph(OpRegistry::Global()));
TF_CHECK_OK(s.ToGraph(g.get()));
RewriteOutsideCompilationSubgraphFn rewrite_fn("_xla", "_oc", "cluster", "");
std::vector<OutputTensor> arg_source_tensors;
NodeDef call_node_def;
call_node_def.set_op("0");
TF_CHECK_OK(
rewrite_fn(arg_source_tensors, &g, nullptr, nullptr, &call_node_def));
auto node_name_image = g->BuildNodeNameIndex();
Node *key_placeholder = node_name_image["cluster_key_placeholder"];
EXPECT_NE(key_placeholder, nullptr);
Node *recv_at_host = node_name_image["outside_compilation_cluster__0_recv"];
EXPECT_EQ(recv_at_host, nullptr);
Node *send_from_host = node_name_image["outside_compilation_cluster__0_send"];
EXPECT_NE(send_from_host, nullptr);
}
TEST(RewriteOutsideCompilationSubgraphFnTest, NoKeyPlaceholder) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output const0 = ops::Const(s.WithOpName("const0"), 1, {2});
std::unique_ptr<Graph> g(new Graph(OpRegistry::Global()));
TF_CHECK_OK(s.ToGraph(g.get()));
RewriteOutsideCompilationSubgraphFn rewrite_fn("_xla", "_oc", "cluster", "");
std::vector<OutputTensor> arg_source_tensors;
NodeDef call_node_def;
call_node_def.set_op("0");
TF_CHECK_OK(
rewrite_fn(arg_source_tensors, &g, nullptr, nullptr, &call_node_def));
auto node_name_image = g->BuildNodeNameIndex();
Node *key_placeholder = node_name_image["cluster_key_placeholder"];
EXPECT_EQ(key_placeholder, nullptr);
Node *recv_at_host = node_name_image["outside_compilation_cluster__0_recv"];
EXPECT_EQ(recv_at_host, nullptr);
Node *send_from_host = node_name_image["outside_compilation_cluster__0_send"];
EXPECT_EQ(send_from_host, nullptr);
}
TEST(RewriteOutsideCompilationSubgraphFnTest, ShapesInferred) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output const0 = ops::Const(s.WithOpName("const0"), 1, {2});
auto ret = ops::_Retval(s.WithOpName("ret"), const0, 0);
std::unique_ptr<Graph> g(new Graph(OpRegistry::Global()));
TF_CHECK_OK(s.ToGraph(g.get()));
auto node_name_image = g->BuildNodeNameIndex();
Node *const0_node = node_name_image["const0"];
EXPECT_NE(const0_node, nullptr);
PartialTensorShape shape({2});
const0_node->AddAttr(kXlaInferredShapesAttrName,
std::vector<PartialTensorShape>{shape});
RewriteOutsideCompilationSubgraphFn rewrite_fn("_xla", "_oc", "cluster", "");
std::vector<OutputTensor> arg_source_tensors;
NodeDef call_node_def;
call_node_def.set_op("0");
TF_CHECK_OK(
rewrite_fn(arg_source_tensors, &g, nullptr, nullptr, &call_node_def));
node_name_image = g->BuildNodeNameIndex();
std::vector<TensorShapeProto> shapes;
TF_CHECK_OK(GetNodeAttr(AttrSlice(&call_node_def.attr()), "shapes", &shapes));
EXPECT_EQ(shapes.size(), 1);
EXPECT_EQ(shapes[0].dim_size(), 1);
}
class ExtractOutsideCompilationForFunctionTest : public ::testing::Test {
public:
void SetUp() override {
SessionOptions session_options;
std::vector<std::unique_ptr<Device>> devices;
TF_CHECK_OK(DeviceFactory::AddDevices(
session_options, "/job:localhost/replica:0/task:0", &devices));
device_mgr_ = std::make_unique<StaticDeviceMgr>(std::move(devices));
}
Status ExtractOutsideCompilationTest(
const string &xla_cluster_attr_name,
const string &outside_compilation_attr_name,
const string &xla_cluster_name, const NameAttrList &func_name_attrs,
const string &new_func_name, const string &host_graph_func_name,
const std::map<string, int> &host_compute_core,
FunctionLibraryDefinition *fld,
std::vector<string> *shape_inference_graphs,
bool *has_outside_compilation) {
OptimizerOptions opts;
pflr_ = std::make_unique<ProcessFunctionLibraryRuntime>(
        device_mgr_.get(), Env::Default(), /*config=*/nullptr,
        TF_GRAPH_DEF_VERSION, fld, opts,
        /*default_thread_pool=*/nullptr);
auto flr = pflr_->GetFLR("/job:localhost/replica:0/task:0/cpu:0");
return ExtractOutsideCompilationForFunction(
xla_cluster_attr_name, outside_compilation_attr_name, xla_cluster_name,
func_name_attrs, new_func_name, host_graph_func_name, host_compute_core,
flr, fld, shape_inference_graphs, has_outside_compilation);
}
private:
std::unique_ptr<DeviceMgr> device_mgr_;
std::unique_ptr<ProcessFunctionLibraryRuntime> pflr_;
};
TEST_F(ExtractOutsideCompilationForFunctionTest, Basic) {
FunctionDefLibrary fdl;
{
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output const0 = ops::Const(s.WithOpName("const0"), 1, {2});
Output identity0 = ops::Identity(s.WithOpName("identity0"), const0);
Output identity1 = ops::Identity(s.WithOpName("identity1"), identity0);
Output identity2 = ops::Identity(s.WithOpName("identity2"), identity1);
std::unique_ptr<Graph> g(new Graph(OpRegistry::Global()));
TF_CHECK_OK(s.ToGraph(g.get()));
auto node_name_image = g->BuildNodeNameIndex();
node_name_image["identity0"]->AddAttr("_oc", "0");
node_name_image["identity1"]->AddAttr("_oc", "1");
PartialTensorShape shape({2});
node_name_image["identity1"]->AddAttr(
kXlaInferredShapesAttrName, std::vector<PartialTensorShape>{shape});
FunctionDef *xla_fdef = fdl.add_function();
TF_CHECK_OK(GraphToFunctionDef(*g, "cluster", xla_fdef));
}
FunctionLibraryDefinition fld(OpRegistry::Global(), fdl);
protobuf::Map<string, tensorflow::AttrValue> attrs;
std::map<string, int> host_compute_core = {{"0", 1}, {"1", 0}};
std::vector<string> shape_inference_graphs;
bool has_outside_compilation;
NameAttrList name_attrs;
name_attrs.set_name("cluster");
*name_attrs.mutable_attr() = attrs;
TF_CHECK_OK(ExtractOutsideCompilationTest(
"_xla", "_oc", "cluster", name_attrs, "cluster_rewritten", "host_graph",
host_compute_core, &fld, &shape_inference_graphs,
&has_outside_compilation));
std::unique_ptr<FunctionBody> xla_fbody;
TF_CHECK_OK(FunctionDefToBodyHelper(*fld.Find("cluster_rewritten"),
AttrSlice(), &fld, &xla_fbody));
auto node_name_index = xla_fbody->graph->BuildNodeNameIndex();
Node *host_compute_0 = node_name_index["outside_compilation_0_host_compute"];
EXPECT_NE(host_compute_0, nullptr);
Node *host_compute_1 = node_name_index["outside_compilation_1_host_compute"];
EXPECT_NE(host_compute_1, nullptr);
int tpu_core;
TF_CHECK_OK(GetNodeAttr(host_compute_0->attrs(), "tpu_core", &tpu_core));
EXPECT_EQ(tpu_core, 1);
TF_CHECK_OK(GetNodeAttr(host_compute_1->attrs(), "tpu_core", &tpu_core));
EXPECT_EQ(tpu_core, 0);
std::vector<TensorShapeProto> shapes;
TF_CHECK_OK(GetNodeAttr(host_compute_0->attrs(), "shapes", &shapes));
EXPECT_EQ(shapes.size(), 0);
TF_CHECK_OK(GetNodeAttr(host_compute_1->attrs(), "shapes", &shapes));
EXPECT_EQ(shapes.size(), 1);
EXPECT_EQ(shapes[0].dim_size(), 1);
NameAttrList shape_inference_graph;
TF_CHECK_OK(GetNodeAttr(host_compute_0->attrs(), "shape_inference_graph",
&shape_inference_graph));
EXPECT_EQ(shape_inference_graph.name(), "");
TF_CHECK_OK(GetNodeAttr(host_compute_1->attrs(), "shape_inference_graph",
&shape_inference_graph));
EXPECT_EQ(shape_inference_graph.name(), "");
EXPECT_EQ(shape_inference_graphs.size(), 0);
std::unique_ptr<FunctionBody> host_fbody;
AttrValue device_ordinal_temp_value;
device_ordinal_temp_value.set_i(0);
protobuf::Map<string, AttrValue> host_func_attrs;
host_func_attrs["_device_ordinal"] = device_ordinal_temp_value;
TF_CHECK_OK(FunctionDefToBodyHelper(
*fld.Find("host_graph"), AttrSlice(&host_func_attrs), &fld, &host_fbody));
Graph *host_graph = host_fbody->graph;
Node *key_placeholder = nullptr, *sequencer = nullptr;
for (Node *n : host_graph->nodes()) {
if (n->type_string() == "Placeholder" &&
absl::EndsWith(n->name(), "_key_placeholder")) {
EXPECT_EQ(key_placeholder, nullptr);
key_placeholder = n;
} else if (HasNodeAttr(n->def(), "_xla_host_transfer_sequencer")) {
EXPECT_EQ(sequencer, nullptr);
sequencer = n;
}
}
EXPECT_NE(key_placeholder, nullptr);
EXPECT_NE(sequencer, nullptr);
int num_send_from_host = 0, num_recv_at_host = 0;
std::vector<Node *> send_recv_nodes;
for (Node *n : host_graph->nodes()) {
if (n->type_string() == "_XlaSendFromHost") {
num_send_from_host++;
send_recv_nodes.push_back(n);
} else if (n->type_string() == "_XlaRecvAtHost") {
num_recv_at_host++;
send_recv_nodes.push_back(n);
}
}
EXPECT_EQ(num_send_from_host, 1);
EXPECT_EQ(num_recv_at_host, 1);
for (Node *n : send_recv_nodes) {
Node *input_node;
TF_CHECK_OK(n->input_node(n->num_inputs() - 1, &input_node));
EXPECT_EQ(input_node, key_placeholder);
bool has_control_edge_to_sequencer = false;
for (const Edge *e : n->out_edges()) {
if (e->IsControlEdge() && e->dst() == sequencer) {
has_control_edge_to_sequencer = true;
break;
}
}
EXPECT_TRUE(has_control_edge_to_sequencer);
}
}
TEST_F(ExtractOutsideCompilationForFunctionTest, NoHostGraph) {
FunctionDefLibrary fdl;
{
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output const0 = ops::Const(s.WithOpName("const0"), 1, {2});
std::unique_ptr<Graph> g(new Graph(OpRegistry::Global()));
TF_CHECK_OK(s.ToGraph(g.get()));
FunctionDef *xla_fdef = fdl.add_function();
TF_CHECK_OK(GraphToFunctionDef(*g, "cluster", xla_fdef));
}
FunctionLibraryDefinition fld(OpRegistry::Global(), fdl);
protobuf::Map<string, tensorflow::AttrValue> attrs;
std::map<string, int> host_compute_core = {{"0", 1}, {"1", 0}};
std::vector<string> shape_inference_graphs;
bool has_outside_compilation;
NameAttrList name_attrs;
name_attrs.set_name("cluster");
*name_attrs.mutable_attr() = attrs;
TF_CHECK_OK(ExtractOutsideCompilationTest(
"_xla", "_oc", "cluster", name_attrs, "cluster_rewritten", "host_graph",
host_compute_core, &fld, &shape_inference_graphs,
&has_outside_compilation));
EXPECT_EQ(fld.Find("host_graph"), nullptr);
}
TEST_F(ExtractOutsideCompilationForFunctionTest, OutsideCompilationInIf) {
FunctionDefLibrary fdl;
{
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output arg = ops::_Arg(s.WithOpName("arg"), DT_INT32, 0);
Output identity = ops::Identity(s.WithOpName("identity_true_fn"), arg);
ops::_Retval retval(s.WithOpName("retval"), identity, 0);
std::unique_ptr<Graph> g(new Graph(OpRegistry::Global()));
TF_CHECK_OK(s.ToGraph(g.get()));
auto node_name_image = g->BuildNodeNameIndex();
node_name_image["identity_true_fn"]->AddAttr("_oc", "0");
PartialTensorShape shape({2});
node_name_image["identity_true_fn"]->AddAttr(
kXlaInferredShapesAttrName, std::vector<PartialTensorShape>{shape});
FunctionDef *true_fn_fdef = fdl.add_function();
TF_CHECK_OK(GraphToFunctionDef(*g, "true_fn", true_fn_fdef));
}
{
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output arg = ops::_Arg(s.WithOpName("arg"), DT_INT32, 0);
Output identity = ops::Identity(s.WithOpName("identity_false_fn"), arg);
ops::_Retval retval(s.WithOpName("retval"), identity, 0);
std::unique_ptr<Graph> g(new Graph(OpRegistry::Global()));
TF_CHECK_OK(s.ToGraph(g.get()));
auto node_name_image = g->BuildNodeNameIndex();
node_name_image["identity_false_fn"]->AddAttr("_oc", "0");
PartialTensorShape shape({2});
node_name_image["identity_false_fn"]->AddAttr(
kXlaInferredShapesAttrName, std::vector<PartialTensorShape>{shape});
FunctionDef *false_fn_fdef = fdl.add_function();
TF_CHECK_OK(GraphToFunctionDef(*g, "false_fn", false_fn_fdef));
}
{
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output cond = ops::Const(s.WithOpName("const0"), true, {2});
Output input = ops::Const(s.WithOpName("const1"), 1, {2});
NameAttrList true_fn;
true_fn.set_name("true_fn");
NameAttrList false_fn;
false_fn.set_name("false_fn");
auto if_op = ops::If(s.WithOpName("if"), cond,
std::initializer_list<Input>{cond, input}, {DT_INT32},
true_fn, false_fn);
ops::_Retval retval(s.WithOpName("retval"), if_op.output[0], 0);
std::unique_ptr<Graph> g(new Graph(OpRegistry::Global()));
TF_CHECK_OK(s.ToGraph(g.get()));
FunctionDef *xla_fdef = fdl.add_function();
TF_CHECK_OK(GraphToFunctionDef(*g, "cluster", xla_fdef));
}
FunctionLibraryDefinition fld(OpRegistry::Global(), fdl);
protobuf::Map<string, tensorflow::AttrValue> attrs;
std::map<string, int> host_compute_core;
std::vector<string> shape_inference_graphs;
bool has_outside_compilation;
NameAttrList name_attrs;
name_attrs.set_name("cluster");
*name_attrs.mutable_attr() = attrs;
TF_CHECK_OK(ExtractOutsideCompilationTest(
"_xla", "_oc", "cluster", name_attrs, "cluster_rewritten", "host_graph",
host_compute_core, &fld, &shape_inference_graphs,
&has_outside_compilation));
{
std::unique_ptr<FunctionBody> host_fbody;
AttrValue device_ordinal_temp_value;
device_ordinal_temp_value.set_i(0);
protobuf::Map<string, AttrValue> host_func_attrs;
host_func_attrs["_device_ordinal"] = device_ordinal_temp_value;
TF_CHECK_OK(FunctionDefToBodyHelper(*fld.Find("host_graph"),
AttrSlice(&host_func_attrs), &fld,
&host_fbody));
Graph *host_graph = host_fbody->graph;
auto node_name_index = host_graph->BuildNodeNameIndex();
Node *recv_if_pred_node = node_name_index["recv_oc_if_pred_if"];
EXPECT_NE(recv_if_pred_node, nullptr);
Node *if_oc_node = node_name_index["oc_if_if"];
EXPECT_NE(if_oc_node, nullptr);
Node *if_oc_node_cond_input;
TF_CHECK_OK(if_oc_node->input_node(0, &if_oc_node_cond_input));
EXPECT_EQ(if_oc_node_cond_input, recv_if_pred_node);
const FunctionDef *true_def = fld.Find("oc_then_branch_host_if_true_fn");
EXPECT_NE(true_def, nullptr);
bool has_identity_true_fn_node = false;
for (const auto &node_def : true_def->node_def()) {
if (node_def.name() == "identity_true_fn") {
has_identity_true_fn_node = true;
break;
}
}
EXPECT_TRUE(has_identity_true_fn_node);
const FunctionDef *false_def = fld.Find("oc_else_branch_host_if_false_fn");
EXPECT_NE(false_def, nullptr);
bool has_identity_false_fn_node = false;
for (const auto &node_def : false_def->node_def()) {
if (node_def.name() == "identity_false_fn") {
has_identity_false_fn_node = true;
break;
}
}
EXPECT_TRUE(has_identity_false_fn_node);
}
{
std::unique_ptr<FunctionBody> xla_fbody;
TF_CHECK_OK(FunctionDefToBodyHelper(*fld.Find("cluster_rewritten"),
AttrSlice(), &fld, &xla_fbody));
Graph *xla_graph = xla_fbody->graph;
auto node_name_index = xla_graph->BuildNodeNameIndex();
Node *send_if_pred_node = node_name_index["send_oc_if_pred_if"];
EXPECT_NE(send_if_pred_node, nullptr);
bool has_control_edge_to_if = false;
for (const Edge *e : send_if_pred_node->out_edges()) {
if (e->IsControlEdge() && e->dst()->name() == "if") {
has_control_edge_to_if = true;
break;
}
}
EXPECT_TRUE(has_control_edge_to_if);
Node *if_node = node_name_index["if"];
EXPECT_NE(if_node, nullptr);
std::vector<string> token_inputs;
TF_CHECK_OK(
GetNodeAttr(if_node->def(), "_xla_token_input_nodes", &token_inputs));
EXPECT_THAT(token_inputs, ::testing::ElementsAre("send_oc_if_pred_if"));
}
}
TEST_F(ExtractOutsideCompilationForFunctionTest, OutsideCompilationInWhile) {
FunctionDefLibrary fdl;
{
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output arg = ops::_Arg(s.WithOpName("arg"), DT_BOOL, 0);
Output identity = ops::Identity(s.WithOpName("identity_cond_fn"), arg);
ops::_Retval retval(s.WithOpName("retval"), identity, 0);
std::unique_ptr<Graph> g(new Graph(OpRegistry::Global()));
TF_CHECK_OK(s.ToGraph(g.get()));
auto node_name_image = g->BuildNodeNameIndex();
node_name_image["identity_cond_fn"]->AddAttr("_oc", "0");
PartialTensorShape shape({2});
node_name_image["identity_cond_fn"]->AddAttr(
kXlaInferredShapesAttrName, std::vector<PartialTensorShape>{shape});
FunctionDef *cond_fn_fdef = fdl.add_function();
TF_CHECK_OK(GraphToFunctionDef(*g, "cond_fn", cond_fn_fdef));
}
{
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output arg = ops::_Arg(s.WithOpName("arg"), DT_BOOL, 0);
Output identity = ops::Identity(s.WithOpName("identity_body_fn"), arg);
ops::_Retval retval(s.WithOpName("retval"), identity, 0);
std::unique_ptr<Graph> g(new Graph(OpRegistry::Global()));
TF_CHECK_OK(s.ToGraph(g.get()));
auto node_name_image = g->BuildNodeNameIndex();
node_name_image["identity_body_fn"]->AddAttr("_oc", "0");
PartialTensorShape shape({2});
node_name_image["identity_body_fn"]->AddAttr(
kXlaInferredShapesAttrName, std::vector<PartialTensorShape>{shape});
FunctionDef *body_fn_fdef = fdl.add_function();
TF_CHECK_OK(GraphToFunctionDef(*g, "body_fn", body_fn_fdef));
}
{
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output input = ops::Const(s.WithOpName("const0"), true, {2});
NameAttrList cond_fn;
cond_fn.set_name("cond_fn");
NameAttrList body_fn;
body_fn.set_name("body_fn");
auto while_op =
ops::While(s.WithOpName("while"), std::initializer_list<Input>{input},
cond_fn, body_fn);
ops::_Retval retval(s.WithOpName("retval"), while_op.output[0], 0);
std::unique_ptr<Graph> g(new Graph(OpRegistry::Global()));
TF_CHECK_OK(s.ToGraph(g.get()));
FunctionDef *xla_fdef = fdl.add_function();
TF_CHECK_OK(GraphToFunctionDef(*g, "cluster", xla_fdef));
}
FunctionLibraryDefinition fld(OpRegistry::Global(), fdl);
protobuf::Map<string, tensorflow::AttrValue> attrs;
std::map<string, int> host_compute_core;
std::vector<string> shape_inference_graphs;
bool has_outside_compilation;
NameAttrList name_attrs;
name_attrs.set_name("cluster");
*name_attrs.mutable_attr() = attrs;
TF_CHECK_OK(ExtractOutsideCompilationTest(
"_xla", "_oc", "cluster", name_attrs, "cluster_rewritten", "host_graph",
host_compute_core, &fld, &shape_inference_graphs,
&has_outside_compilation));
{
std::unique_ptr<FunctionBody> host_fbody;
AttrValue device_ordinal_temp_value;
device_ordinal_temp_value.set_i(0);
protobuf::Map<string, AttrValue> host_func_attrs;
host_func_attrs["_device_ordinal"] = device_ordinal_temp_value;
TF_CHECK_OK(FunctionDefToBodyHelper(*fld.Find("host_graph"),
AttrSlice(&host_func_attrs), &fld,
&host_fbody));
Graph *host_graph = host_fbody->graph;
auto node_name_index = host_graph->BuildNodeNameIndex();
Node *while_oc_node = node_name_index["oc_while_while"];
EXPECT_NE(while_oc_node, nullptr);
const FunctionDef *cond_def = fld.Find("oc_cond_host_while_cond_fn");
EXPECT_NE(cond_def, nullptr);
bool has_identity_cond_fn_node = false;
for (const auto &node_def : cond_def->node_def()) {
if (node_def.name() == "identity_cond_fn") {
has_identity_cond_fn_node = true;
break;
}
}
EXPECT_TRUE(has_identity_cond_fn_node);
const FunctionDef *body_def = fld.Find("oc_body_host_while_body_fn");
EXPECT_NE(body_def, nullptr);
bool has_identity_body_fn_node = false;
for (const auto &node_def : body_def->node_def()) {
if (node_def.name() == "identity_body_fn") {
has_identity_body_fn_node = true;
break;
}
}
EXPECT_TRUE(has_identity_body_fn_node);
}
{
const FunctionDef *cond_def = fld.Find("cond_fn_oc");
EXPECT_NE(cond_def, nullptr);
bool has_send_oc_while_cond_node = false;
for (const auto &node_def : cond_def->node_def()) {
if (node_def.name() == "send_oc_while_cond_while") {
has_send_oc_while_cond_node = true;
break;
}
}
EXPECT_TRUE(has_send_oc_while_cond_node);
}
}
TEST_F(ExtractOutsideCompilationForFunctionTest, OutsideCompilationInFunction) {
FunctionDefLibrary fdl;
{
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output arg = ops::_Arg(s.WithOpName("arg"), DT_INT32, 0);
Output identity = ops::Identity(s.WithOpName("identity"), arg);
ops::_Retval retval(s.WithOpName("retval"), identity, 0);
std::unique_ptr<Graph> g(new Graph(OpRegistry::Global()));
TF_CHECK_OK(s.ToGraph(g.get()));
auto node_name_image = g->BuildNodeNameIndex();
node_name_image["identity"]->AddAttr("_oc", "0");
PartialTensorShape shape({2});
node_name_image["identity"]->AddAttr(
kXlaInferredShapesAttrName, std::vector<PartialTensorShape>{shape});
FunctionDef *true_fn_fdef = fdl.add_function();
TF_CHECK_OK(GraphToFunctionDef(*g, "fn", true_fn_fdef));
}
FunctionLibraryDefinition fld(OpRegistry::Global(), fdl);
{
std::unique_ptr<Graph> g(new Graph(&fld));
tensorflow::TensorProto tensor_proto;
tensor_proto.set_dtype(tensorflow::DT_INT32);
tensorflow::TensorShapeProto shape;
shape.add_dim()->set_size(2);
*tensor_proto.mutable_tensor_shape() = shape;
for (int i = 0; i < 2; ++i) {
tensor_proto.add_int_val(1);
}
NodeDef const_def;
TF_CHECK_OK(NodeDefBuilder("const", "Const")
.Attr("dtype", DT_INT32)
.Attr("value", tensor_proto)
.Finalize(&const_def));
Status s;
Node *const_node = g->AddNode(const_def, &s);
TF_CHECK_OK(s);
NodeDef fn_def;
TF_CHECK_OK(NodeDefBuilder("fn", "fn", &fld)
.Input("const", 0, DT_INT32)
.Finalize(&fn_def));
Node *fn_node = g->AddNode(fn_def, &s);
TF_CHECK_OK(s);
g->AddEdge(const_node, 0, fn_node, 0);
NodeDef ret_def;
TF_CHECK_OK(NodeDefBuilder("ret", "_Retval")
.Attr("index", 0)
.Attr("T", DT_INT32)
.Input("fn", 0, DT_INT32)
.Finalize(&ret_def));
Node *ret_node = g->AddNode(ret_def, &s);
TF_CHECK_OK(s);
g->AddEdge(fn_node, 0, ret_node, 0);
FunctionDef *xla_fdef = fdl.add_function();
TF_CHECK_OK(GraphToFunctionDef(*g, "cluster", xla_fdef));
TF_CHECK_OK(fld.AddFunctionDef(*xla_fdef));
}
protobuf::Map<string, tensorflow::AttrValue> attrs;
std::map<string, int> host_compute_core;
std::vector<string> shape_inference_graphs;
bool has_outside_compilation;
NameAttrList name_attrs;
name_attrs.set_name("cluster");
*name_attrs.mutable_attr() = attrs;
TF_CHECK_OK(ExtractOutsideCompilationTest(
"_xla", "_oc", "cluster", name_attrs, "cluster_rewritten", "host_graph",
host_compute_core, &fld, &shape_inference_graphs,
&has_outside_compilation));
{
std::unique_ptr<FunctionBody> host_fbody;
AttrValue device_ordinal_temp_value;
device_ordinal_temp_value.set_i(0);
protobuf::Map<string, AttrValue> host_func_attrs;
host_func_attrs["_device_ordinal"] = device_ordinal_temp_value;
TF_CHECK_OK(FunctionDefToBodyHelper(*fld.Find("host_graph"),
AttrSlice(&host_func_attrs), &fld,
&host_fbody));
Graph *host_graph = host_fbody->graph;
auto node_name_index = host_graph->BuildNodeNameIndex();
Node *call_node = node_name_index["oc_call_fn"];
EXPECT_NE(call_node, nullptr);
std::unique_ptr<FunctionBody> call_fbody;
TF_CHECK_OK(FunctionDefToBodyHelper(*fld.Find("oc_func_call_host_fn"),
AttrSlice(&host_func_attrs), &fld,
&call_fbody));
bool has_recv = false, has_send = false;
for (Node *n : call_fbody->graph->nodes()) {
if (n->type_string() == "_XlaRecvAtHost") {
has_recv = true;
} else if (n->type_string() == "_XlaSendFromHost") {
has_send = true;
}
}
EXPECT_TRUE(has_recv);
EXPECT_TRUE(has_send);
}
{
std::unique_ptr<FunctionBody> xla_fbody;
TF_CHECK_OK(FunctionDefToBodyHelper(*fld.Find("cluster_rewritten"),
AttrSlice(), &fld, &xla_fbody));
Graph *xla_graph = xla_fbody->graph;
auto node_name_index = xla_graph->BuildNodeNameIndex();
Node *fn_node = node_name_index["fn"];
EXPECT_NE(fn_node, nullptr);
EXPECT_EQ(fn_node->type_string(), "fn_oc");
std::unique_ptr<FunctionBody> call_fbody;
TF_CHECK_OK(FunctionDefToBodyHelper(*fld.Find("fn_oc"), AttrSlice(), &fld,
&call_fbody));
bool has_hc = false;
for (Node *n : call_fbody->graph->nodes()) {
if (n->type_string() == "XlaHostCompute") {
has_hc = true;
}
}
EXPECT_TRUE(has_hc);
}
}
TEST_F(ExtractOutsideCompilationForFunctionTest,
OutsideCompilationClusterDataDependency) {
FunctionDefLibrary fdl;
{
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output const0 = ops::Const(s.WithOpName("const0"), 1, {2});
Output identity0 = ops::Identity(s.WithOpName("identity0"), const0);
Output identity1 = ops::Identity(s.WithOpName("identity1"), identity0);
Output identity2 = ops::Identity(s.WithOpName("identity2"), identity1);
std::unique_ptr<Graph> g(new Graph(OpRegistry::Global()));
TF_CHECK_OK(s.ToGraph(g.get()));
std::cout << "Graph is " << (*g).ToGraphDefDebug().DebugString()
<< std::endl;
auto node_name_image = g->BuildNodeNameIndex();
node_name_image["identity0"]->AddAttr("_oc", "0");
node_name_image["identity1"]->AddAttr("_oc", "1");
PartialTensorShape shape({2});
node_name_image["identity1"]->AddAttr(
kXlaInferredShapesAttrName, std::vector<PartialTensorShape>{shape});
FunctionDef *xla_fdef = fdl.add_function();
TF_CHECK_OK(GraphToFunctionDef(*g, "cluster", xla_fdef));
}
FunctionLibraryDefinition fld(OpRegistry::Global(), fdl);
protobuf::Map<string, tensorflow::AttrValue> attrs;
std::map<string, int> host_compute_core = {{"0", 1}, {"1", 0}};
std::vector<string> shape_inference_graphs;
bool has_outside_compilation;
NameAttrList name_attrs;
name_attrs.set_name("cluster");
*name_attrs.mutable_attr() = attrs;
TF_CHECK_OK(ExtractOutsideCompilationTest(
"_xla", "_oc", "cluster", name_attrs, "cluster_rewritten", "host_graph",
host_compute_core, &fld, &shape_inference_graphs,
&has_outside_compilation));
std::unique_ptr<FunctionBody> xla_fbody;
TF_CHECK_OK(FunctionDefToBodyHelper(*fld.Find("cluster_rewritten"),
AttrSlice(), &fld, &xla_fbody));
auto node_name_index = xla_fbody->graph->BuildNodeNameIndex();
Node *host_compute_0 = node_name_index["outside_compilation_0_host_compute"];
EXPECT_NE(host_compute_0, nullptr);
Node *host_compute_1 = node_name_index["outside_compilation_1_host_compute"];
EXPECT_NE(host_compute_1, nullptr);
std::vector<string> token_input_nodes;
TF_CHECK_OK(GetNodeAttr(AttrSlice(host_compute_0->attrs()),
"_xla_token_input_nodes", &token_input_nodes));
std::vector<string> expected_token_input_nodes_0({"_xla_token_arg_node"});
EXPECT_EQ(token_input_nodes, expected_token_input_nodes_0);
token_input_nodes.clear();
std::vector<string> expected_token_input_nodes_1(
{"_xla_token_arg_node", "outside_compilation_0_host_compute"});
TF_CHECK_OK(GetNodeAttr(AttrSlice(host_compute_1->attrs()),
"_xla_token_input_nodes", &token_input_nodes));
EXPECT_EQ(token_input_nodes, expected_token_input_nodes_1);
bool has_control_edge = false;
for (const Edge *e : host_compute_1->in_edges()) {
if (e->IsControlEdge() && e->src() == host_compute_0) {
has_control_edge = true;
break;
}
}
EXPECT_TRUE(has_control_edge);
}
TEST_F(ExtractOutsideCompilationForFunctionTest,
OutsideCompilationClusterControlDependency) {
FunctionDefLibrary fdl;
{
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output const0 = ops::Const(s.WithOpName("const0"), 1, {2});
Output identity0 = ops::Identity(s.WithOpName("identity0"), const0);
Output identity1 = ops::Identity(
s.WithOpName("identity1").WithControlDependencies(identity0), const0);
Output identity2 = ops::Identity(s.WithOpName("identity2"), identity1);
std::unique_ptr<Graph> g(new Graph(OpRegistry::Global()));
TF_CHECK_OK(s.ToGraph(g.get()));
std::cout << "Graph is " << (*g).ToGraphDefDebug().DebugString()
<< std::endl;
auto node_name_image = g->BuildNodeNameIndex();
node_name_image["identity0"]->AddAttr("_oc", "0");
node_name_image["identity1"]->AddAttr("_oc", "1");
PartialTensorShape shape({2});
node_name_image["identity1"]->AddAttr(
kXlaInferredShapesAttrName, std::vector<PartialTensorShape>{shape});
FunctionDef *xla_fdef = fdl.add_function();
TF_CHECK_OK(GraphToFunctionDef(*g, "cluster", xla_fdef));
}
FunctionLibraryDefinition fld(OpRegistry::Global(), fdl);
protobuf::Map<string, tensorflow::AttrValue> attrs;
std::map<string, int> host_compute_core = {{"0", 1}, {"1", 0}};
std::vector<string> shape_inference_graphs;
bool has_outside_compilation;
NameAttrList name_attrs;
name_attrs.set_name("cluster");
*name_attrs.mutable_attr() = attrs;
TF_CHECK_OK(ExtractOutsideCompilationTest(
"_xla", "_oc", "cluster", name_attrs, "cluster_rewritten", "host_graph",
host_compute_core, &fld, &shape_inference_graphs,
&has_outside_compilation));
std::unique_ptr<FunctionBody> xla_fbody;
TF_CHECK_OK(FunctionDefToBodyHelper(*fld.Find("cluster_rewritten"),
AttrSlice(), &fld, &xla_fbody));
auto node_name_index = xla_fbody->graph->BuildNodeNameIndex();
Node *host_compute_0 = node_name_index["outside_compilation_0_host_compute"];
EXPECT_NE(host_compute_0, nullptr);
Node *host_compute_1 = node_name_index["outside_compilation_1_host_compute"];
EXPECT_NE(host_compute_1, nullptr);
std::vector<string> token_input_nodes;
TF_CHECK_OK(GetNodeAttr(AttrSlice(host_compute_0->attrs()),
"_xla_token_input_nodes", &token_input_nodes));
std::vector<string> expected_token_input_nodes_0({"_xla_token_arg_node"});
EXPECT_EQ(token_input_nodes, expected_token_input_nodes_0);
token_input_nodes.clear();
std::vector<string> expected_token_input_nodes_1(
{"_xla_token_arg_node", "outside_compilation_0_host_compute"});
TF_CHECK_OK(GetNodeAttr(AttrSlice(host_compute_1->attrs()),
"_xla_token_input_nodes", &token_input_nodes));
EXPECT_EQ(token_input_nodes, expected_token_input_nodes_1);
bool has_control_edge = false;
for (const Edge *e : host_compute_1->in_edges()) {
if (e->IsControlEdge() && e->src() == host_compute_0) {
has_control_edge = true;
break;
}
}
EXPECT_TRUE(has_control_edge);
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/jit/extract_outside_compilation_pass.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/jit/extract_outside_compilation_pass_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
35a86109-ab28-4382-a184-1d795c9d4f32 | cpp | tensorflow/tensorflow | make_padding | tensorflow/lite/delegates/gpu/common/transformations/make_padding.cc | tensorflow/lite/delegates/gpu/common/transformations/make_padding_test.cc | #include "tensorflow/lite/delegates/gpu/common/transformations/make_padding.h"
#include <any>
#include <memory>
#include <string>
#include <vector>
#include "absl/types/any.h"
#include "tensorflow/lite/delegates/gpu/common/model.h"
#include "tensorflow/lite/delegates/gpu/common/model_transformer.h"
#include "tensorflow/lite/delegates/gpu/common/operations.h"
#include "tensorflow/lite/delegates/gpu/common/shape.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
#include "tensorflow/lite/delegates/gpu/common/tensor.h"
namespace tflite {
namespace gpu {
namespace {
bool IsConstZeros(const Node& node) {
if (node.operation.type != ToString(OperationType::CONSTANT)) {
return false;
}
auto& attr =
std::any_cast<const ConstTensorAttributes&>(node.operation.attributes);
for (auto f : attr.tensor.data) {
if (f != 0) {
return false;
}
}
return true;
}
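// Rewrites a two-input Concat whose first or second input is an all-zeros
// constant into a single Pad node. For example, Concat(axis=HEIGHT,
// zeros[1,5,3,5], x[1,2,3,5]) becomes Pad(x) with prepended.h = 5. Only the
// HEIGHT, WIDTH, and CHANNELS axes are supported.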
class MakePaddingFromZerosConcat : public NodeTransformation {
public:
TransformResult ApplyToNode(Node* node, GraphFloat32* graph) final {
if (node->operation.type != ToString(OperationType::CONCAT)) {
return {TransformStatus::SKIPPED, ""};
}
auto inputs = graph->FindInputs(node->id);
if (inputs.size() != 2) {
return {TransformStatus::SKIPPED, ""};
}
bool first = true;
for (auto input : inputs) {
auto dep = graph->FindProducer(input->id);
if (dep != nullptr && IsConstZeros(*dep)) {
auto& concat_attr =
std::any_cast<const ConcatAttributes&>(node->operation.attributes);
PadAttributes pad_attr;
pad_attr.type = PaddingContentType::ZEROS;
pad_attr.appended = BHWC(0, 0, 0, 0);
pad_attr.prepended = BHWC(0, 0, 0, 0);
BHWC* p = first ? &pad_attr.prepended : &pad_attr.appended;
switch (concat_attr.axis) {
case Axis::HEIGHT:
p->h = input->tensor.shape.h;
break;
case Axis::WIDTH:
p->w = input->tensor.shape.w;
break;
case Axis::CHANNELS:
p->c = input->tensor.shape.c;
break;
default:
return {TransformStatus::DECLINED,
"Padding for concat axis is unsupported: " +
ToString(concat_attr.axis)};
}
absl::Status status = RemovePrecedingNode(graph, dep, node);
if (!status.ok()) {
return {TransformStatus::INVALID, "Unable to remove const node: " +
std::string(status.message())};
}
node->operation.attributes = pad_attr;
node->operation.type = ToString(OperationType::PAD);
return {TransformStatus::APPLIED, "Replaced concat with padding"};
}
first = false;
}
return {TransformStatus::SKIPPED, ""};
}
};
}
std::unique_ptr<NodeTransformation> NewMakePaddingFromConcat() {
return std::make_unique<MakePaddingFromZerosConcat>();
}
}
} | #include "tensorflow/lite/delegates/gpu/common/transformations/make_padding.h"
#include <any>
#include <memory>
#include <string>
#include <vector>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "absl/types/any.h"
#include "tensorflow/lite/delegates/gpu/common/model.h"
#include "tensorflow/lite/delegates/gpu/common/model_transformer.h"
#include "tensorflow/lite/delegates/gpu/common/operations.h"
#include "tensorflow/lite/delegates/gpu/common/shape.h"
#include "tensorflow/lite/delegates/gpu/common/tensor.h"
namespace tflite {
namespace gpu {
namespace {
TEST(MakePadding, Smoke) {
GraphFloat32 graph;
auto input = graph.NewValue();
input->tensor.shape = BHWC(1, 2, 3, 5);
auto concat_node = graph.NewNode();
ASSERT_TRUE(graph.AddConsumer(concat_node->id, input->id).ok());
concat_node->operation.type = ToString(OperationType::CONCAT);
ConcatAttributes attr;
attr.axis = Axis::HEIGHT;
concat_node->operation.attributes = attr;
Value* output = nullptr;
ASSERT_TRUE(AddOutput(&graph, concat_node, &output).ok());
output->tensor.shape = BHWC(1, 7, 3, 5);
auto const_node = graph.NewNode();
const_node->operation.type = ToString(OperationType::CONSTANT);
ConstTensorAttributes const_attr;
const_attr.tensor.shape = BHWC(1, 5, 3, 5);
const_attr.tensor.data =
std::vector<float>(const_attr.tensor.shape.DimensionsProduct(), 0);
const_node->operation.attributes = const_attr;
Value* const_link = nullptr;
ASSERT_TRUE(
ConnectTwoNodes(&graph, const_node, concat_node, &const_link).ok());
const_link->tensor.shape = const_attr.tensor.shape;
ASSERT_EQ(2, graph.nodes().size());
auto transformation = NewMakePaddingFromConcat();
ModelTransformer transformer(&graph);
transformer.Apply("make_padding", transformation.get());
ASSERT_EQ(1, graph.nodes().size());
ASSERT_EQ(2, graph.values().size());
auto pad_node = graph.nodes()[0];
ASSERT_EQ(ToString(OperationType::PAD), pad_node->operation.type);
auto pad_attr = std::any_cast<PadAttributes>(pad_node->operation.attributes);
EXPECT_EQ(BHWC(0, 0, 0, 0), pad_attr.prepended);
EXPECT_EQ(BHWC(0, 5, 0, 0), pad_attr.appended);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/common/transformations/make_padding.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/common/transformations/make_padding_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
df710905-4c94-4794-b5f8-5d19921229f4 | cpp | google/arolla | unspecified_qtype | arolla/qtype/unspecified_qtype.cc | arolla/qtype/unspecified_qtype_test.cc | #include "arolla/qtype/unspecified_qtype.h"
#include "absl/base/no_destructor.h"
#include "absl/strings/string_view.h"
#include "arolla/memory/frame.h"
#include "arolla/qtype/qtype.h"
#include "arolla/qtype/typed_value.h"
#include "arolla/util/fingerprint.h"
#include "arolla/util/repr.h"
namespace arolla {
namespace {
struct Unspecified {};
class UnspecifiedQType final : public QType {
public:
UnspecifiedQType()
: QType(ConstructorArgs{.name = "UNSPECIFIED",
.type_info = typeid(Unspecified),
.type_layout = MakeTypeLayout<Unspecified>()}) {}
ReprToken UnsafeReprToken(const void* source) const override {
return ReprToken{"unspecified"};
}
  void UnsafeCopy(const void* /*source*/,
                  void* /*destination*/) const override {}
  void UnsafeCombineToFingerprintHasher(
      const void* /*source*/, FingerprintHasher* hasher) const override {
hasher->Combine(absl::string_view("::arolla::UnspecifiedQValue"));
}
};
}
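// Process-lifetime singletons: absl::NoDestructor constructs them lazily on
// first use and never runs their destructors.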
QTypePtr GetUnspecifiedQType() {
static const absl::NoDestructor<UnspecifiedQType> result;
return result.get();
}
const TypedValue& GetUnspecifiedQValue() {
static const absl::NoDestructor<TypedValue> result(
TypedValue::UnsafeFromTypeDefaultConstructed(GetUnspecifiedQType()));
return *result;
}
} | #include "arolla/qtype/unspecified_qtype.h"
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "arolla/qtype/qtype.h"
#include "arolla/qtype/typed_value.h"
#include "arolla/util/testing/repr_token_eq.h"
namespace arolla {
namespace {
using ::arolla::testing::ReprTokenEq;
TEST(UnspecifiedQTypeTest, UnspecifiedQType) {
const auto unspecified_qtype = GetUnspecifiedQType();
EXPECT_EQ(unspecified_qtype->name(), "UNSPECIFIED");
EXPECT_EQ(unspecified_qtype->type_layout().AllocSize(), 1);
EXPECT_EQ(unspecified_qtype->type_layout().AllocAlignment().value, 1);
EXPECT_TRUE(unspecified_qtype->type_fields().empty());
EXPECT_EQ(unspecified_qtype->value_qtype(), nullptr);
}
TEST(UnspecifiedQTypeTest, UnspecifiedQValue) {
const auto unspecified_qvalue = GetUnspecifiedQValue();
EXPECT_EQ(unspecified_qvalue.GetType(), GetUnspecifiedQType());
EXPECT_THAT(unspecified_qvalue.GenReprToken(), ReprTokenEq("unspecified"));
}
}
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/qtype/unspecified_qtype.cc | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/qtype/unspecified_qtype_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
1728e1b9-6e09-4311-b043-433a6e8d79b4 | cpp | google/cel-cpp | cel_proto_wrap_util | eval/public/structs/cel_proto_wrap_util.cc | eval/public/structs/cel_proto_wrap_util_test.cc | #include "eval/public/structs/cel_proto_wrap_util.h"
#include <math.h>
#include <cstdint>
#include <limits>
#include <memory>
#include <string>
#include <type_traits>
#include <utility>
#include <vector>
#include "google/protobuf/any.pb.h"
#include "google/protobuf/duration.pb.h"
#include "google/protobuf/struct.pb.h"
#include "google/protobuf/timestamp.pb.h"
#include "google/protobuf/wrappers.pb.h"
#include "google/protobuf/message.h"
#include "absl/base/attributes.h"
#include "absl/base/optimization.h"
#include "absl/container/flat_hash_map.h"
#include "absl/status/status.h"
#include "absl/strings/cord.h"
#include "absl/strings/escaping.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/strings/substitute.h"
#include "absl/synchronization/mutex.h"
#include "absl/time/time.h"
#include "absl/types/optional.h"
#include "common/any.h"
#include "eval/public/cel_value.h"
#include "eval/public/structs/protobuf_value_factory.h"
#include "eval/testutil/test_message.pb.h"
#include "extensions/protobuf/internal/any.h"
#include "extensions/protobuf/internal/duration.h"
#include "extensions/protobuf/internal/struct.h"
#include "extensions/protobuf/internal/timestamp.h"
#include "extensions/protobuf/internal/wrappers.h"
#include "internal/overflow.h"
#include "internal/proto_time_encoding.h"
#include "internal/time.h"
#include "google/protobuf/arena.h"
#include "google/protobuf/descriptor.h"
#include "google/protobuf/message.h"
#include "google/protobuf/message_lite.h"
namespace google::api::expr::runtime::internal {
namespace {
using cel::internal::DecodeDuration;
using cel::internal::DecodeTime;
using cel::internal::EncodeTime;
using google::protobuf::Any;
using google::protobuf::BoolValue;
using google::protobuf::BytesValue;
using google::protobuf::DoubleValue;
using google::protobuf::Duration;
using google::protobuf::FloatValue;
using google::protobuf::Int32Value;
using google::protobuf::Int64Value;
using google::protobuf::ListValue;
using google::protobuf::StringValue;
using google::protobuf::Struct;
using google::protobuf::Timestamp;
using google::protobuf::UInt32Value;
using google::protobuf::UInt64Value;
using google::protobuf::Value;
using google::protobuf::Arena;
using google::protobuf::Descriptor;
using google::protobuf::DescriptorPool;
using google::protobuf::Message;
using google::protobuf::MessageFactory;
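// JSON numbers are IEEE-754 doubles, so integers are only exactly
// representable up to 2^53 - 1; values outside this range are emitted as
// decimal strings instead of numbers.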
constexpr int64_t kMaxIntJSON = (1ll << 53) - 1;
constexpr int64_t kMinIntJSON = -kMaxIntJSON;
static bool IsJSONSafe(int64_t i) {
return i >= kMinIntJSON && i <= kMaxIntJSON;
}
static bool IsJSONSafe(uint64_t i) {
return i <= static_cast<uint64_t>(kMaxIntJSON);
}
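// CelList adapter over google.protobuf.ListValue. Elements are converted
// lazily on access through a ValueManager.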
class DynamicList : public CelList {
public:
DynamicList(const ListValue* values, ProtobufValueFactory factory,
Arena* arena)
: arena_(arena), factory_(std::move(factory)), values_(values) {}
CelValue operator[](int index) const override;
int size() const override { return values_->values_size(); }
private:
Arena* arena_;
ProtobufValueFactory factory_;
const ListValue* values_;
};
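// CelMap adapter over google.protobuf.Struct. Keys are strings; the key list
// is materialized lazily under a mutex on first access.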
class DynamicMap : public CelMap {
public:
DynamicMap(const Struct* values, ProtobufValueFactory factory, Arena* arena)
: arena_(arena),
factory_(std::move(factory)),
values_(values),
key_list_(values) {}
absl::StatusOr<bool> Has(const CelValue& key) const override {
CelValue::StringHolder str_key;
if (!key.GetValue(&str_key)) {
return absl::InvalidArgumentError(absl::StrCat(
"Invalid map key type: '", CelValue::TypeName(key.type()), "'"));
}
return values_->fields().contains(std::string(str_key.value()));
}
absl::optional<CelValue> operator[](CelValue key) const override;
int size() const override { return values_->fields_size(); }
absl::StatusOr<const CelList*> ListKeys() const override {
return &key_list_;
}
private:
class DynamicMapKeyList : public CelList {
public:
explicit DynamicMapKeyList(const Struct* values)
: values_(values), keys_(), initialized_(false) {}
CelValue operator[](int index) const override {
CheckInit();
return keys_[index];
}
int size() const override {
CheckInit();
return values_->fields_size();
}
private:
void CheckInit() const {
absl::MutexLock lock(&mutex_);
if (!initialized_) {
for (const auto& it : values_->fields()) {
keys_.push_back(CelValue::CreateString(&it.first));
}
initialized_ = true;
}
}
const Struct* values_;
mutable absl::Mutex mutex_;
mutable std::vector<CelValue> keys_;
mutable bool initialized_;
};
Arena* arena_;
ProtobufValueFactory factory_;
const Struct* values_;
const DynamicMapKeyList key_list_;
};
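// Converts well-known-type messages (wrapper types, Any, Duration, Timestamp,
// Struct, Value, ListValue) into CelValues. The google::protobuf::Message overloads
// handle dynamic messages via the protobuf_internal unwrap helpers; failures
// surface as CEL error values allocated on the arena.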
class ValueManager {
public:
ValueManager(const ProtobufValueFactory& value_factory,
const google::protobuf::DescriptorPool* descriptor_pool,
google::protobuf::Arena* arena, google::protobuf::MessageFactory* message_factory)
: value_factory_(value_factory),
descriptor_pool_(descriptor_pool),
arena_(arena),
message_factory_(message_factory) {}
ValueManager(const ProtobufValueFactory& value_factory, google::protobuf::Arena* arena)
: value_factory_(value_factory),
descriptor_pool_(DescriptorPool::generated_pool()),
arena_(arena),
message_factory_(MessageFactory::generated_factory()) {}
static CelValue ValueFromDuration(absl::Duration duration) {
return CelValue::CreateDuration(duration);
}
CelValue ValueFromDuration(const google::protobuf::Message* message) {
auto status_or_unwrapped =
cel::extensions::protobuf_internal::UnwrapDynamicDurationProto(
*message);
if (!status_or_unwrapped.ok()) {
return CreateErrorValue(arena_, status_or_unwrapped.status());
}
return ValueFromDuration(*status_or_unwrapped);
}
CelValue ValueFromMessage(const Duration* duration) {
return ValueFromDuration(DecodeDuration(*duration));
}
CelValue ValueFromTimestamp(const google::protobuf::Message* message) {
auto status_or_unwrapped =
cel::extensions::protobuf_internal::UnwrapDynamicTimestampProto(
*message);
if (!status_or_unwrapped.ok()) {
return CreateErrorValue(arena_, status_or_unwrapped.status());
}
return ValueFromTimestamp(*status_or_unwrapped);
}
static CelValue ValueFromTimestamp(absl::Time timestamp) {
return CelValue::CreateTimestamp(timestamp);
}
CelValue ValueFromMessage(const Timestamp* timestamp) {
return ValueFromTimestamp(DecodeTime(*timestamp));
}
CelValue ValueFromMessage(const ListValue* list_values) {
return CelValue::CreateList(Arena::Create<DynamicList>(
arena_, list_values, value_factory_, arena_));
}
CelValue ValueFromMessage(const Struct* struct_value) {
return CelValue::CreateMap(Arena::Create<DynamicMap>(
arena_, struct_value, value_factory_, arena_));
}
CelValue ValueFromAny(const google::protobuf::Message* message) {
auto status_or_unwrapped =
cel::extensions::protobuf_internal::UnwrapDynamicAnyProto(*message);
if (!status_or_unwrapped.ok()) {
return CreateErrorValue(arena_, status_or_unwrapped.status());
}
return ValueFromAny(status_or_unwrapped->type_url(),
cel::GetAnyValueAsCord(*status_or_unwrapped),
descriptor_pool_, message_factory_);
}
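// Resolves the Any type_url against the descriptor pool, parses the payload
// into a newly allocated message, and recursively unwraps the result.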
CelValue ValueFromAny(absl::string_view type_url, const absl::Cord& payload,
const DescriptorPool* descriptor_pool,
MessageFactory* message_factory) {
auto pos = type_url.find_last_of('/');
if (pos == absl::string_view::npos) {
return CreateErrorValue(arena_, "Malformed type_url string");
}
std::string full_name = std::string(type_url.substr(pos + 1));
const Descriptor* nested_descriptor =
descriptor_pool->FindMessageTypeByName(full_name);
if (nested_descriptor == nullptr) {
return CreateErrorValue(arena_, "Descriptor not found");
}
const Message* prototype = message_factory->GetPrototype(nested_descriptor);
if (prototype == nullptr) {
return CreateErrorValue(arena_, "Prototype not found");
}
Message* nested_message = prototype->New(arena_);
if (!nested_message->ParseFromCord(payload)) {
return CreateErrorValue(arena_, "Failed to unpack Any into message");
}
return UnwrapMessageToValue(nested_message, value_factory_, arena_);
}
CelValue ValueFromMessage(const Any* any_value,
const DescriptorPool* descriptor_pool,
MessageFactory* message_factory) {
return ValueFromAny(any_value->type_url(), absl::Cord(any_value->value()),
descriptor_pool, message_factory);
}
CelValue ValueFromMessage(const Any* any_value) {
return ValueFromMessage(any_value, descriptor_pool_, message_factory_);
}
CelValue ValueFromBool(const google::protobuf::Message* message) {
auto status_or_unwrapped =
cel::extensions::protobuf_internal::UnwrapDynamicBoolValueProto(
*message);
if (!status_or_unwrapped.ok()) {
return CreateErrorValue(arena_, status_or_unwrapped.status());
}
return ValueFromBool(*status_or_unwrapped);
}
static CelValue ValueFromBool(bool value) {
return CelValue::CreateBool(value);
}
CelValue ValueFromMessage(const BoolValue* wrapper) {
return ValueFromBool(wrapper->value());
}
CelValue ValueFromInt32(const google::protobuf::Message* message) {
auto status_or_unwrapped =
cel::extensions::protobuf_internal::UnwrapDynamicInt32ValueProto(
*message);
if (!status_or_unwrapped.ok()) {
return CreateErrorValue(arena_, status_or_unwrapped.status());
}
return ValueFromInt32(*status_or_unwrapped);
}
static CelValue ValueFromInt32(int32_t value) {
return CelValue::CreateInt64(value);
}
CelValue ValueFromMessage(const Int32Value* wrapper) {
return ValueFromInt32(wrapper->value());
}
CelValue ValueFromUInt32(const google::protobuf::Message* message) {
auto status_or_unwrapped =
cel::extensions::protobuf_internal::UnwrapDynamicUInt32ValueProto(
*message);
if (!status_or_unwrapped.ok()) {
return CreateErrorValue(arena_, status_or_unwrapped.status());
}
return ValueFromUInt32(*status_or_unwrapped);
}
static CelValue ValueFromUInt32(uint32_t value) {
return CelValue::CreateUint64(value);
}
CelValue ValueFromMessage(const UInt32Value* wrapper) {
return ValueFromUInt32(wrapper->value());
}
CelValue ValueFromInt64(const google::protobuf::Message* message) {
auto status_or_unwrapped =
cel::extensions::protobuf_internal::UnwrapDynamicInt64ValueProto(
*message);
if (!status_or_unwrapped.ok()) {
return CreateErrorValue(arena_, status_or_unwrapped.status());
}
return ValueFromInt64(*status_or_unwrapped);
}
static CelValue ValueFromInt64(int64_t value) {
return CelValue::CreateInt64(value);
}
CelValue ValueFromMessage(const Int64Value* wrapper) {
return ValueFromInt64(wrapper->value());
}
CelValue ValueFromUInt64(const google::protobuf::Message* message) {
auto status_or_unwrapped =
cel::extensions::protobuf_internal::UnwrapDynamicUInt64ValueProto(
*message);
if (!status_or_unwrapped.ok()) {
return CreateErrorValue(arena_, status_or_unwrapped.status());
}
return ValueFromUInt64(*status_or_unwrapped);
}
static CelValue ValueFromUInt64(uint64_t value) {
return CelValue::CreateUint64(value);
}
CelValue ValueFromMessage(const UInt64Value* wrapper) {
return ValueFromUInt64(wrapper->value());
}
CelValue ValueFromFloat(const google::protobuf::Message* message) {
auto status_or_unwrapped =
cel::extensions::protobuf_internal::UnwrapDynamicFloatValueProto(
*message);
if (!status_or_unwrapped.ok()) {
return CreateErrorValue(arena_, status_or_unwrapped.status());
}
return ValueFromFloat(*status_or_unwrapped);
}
static CelValue ValueFromFloat(float value) {
return CelValue::CreateDouble(value);
}
CelValue ValueFromMessage(const FloatValue* wrapper) {
return ValueFromFloat(wrapper->value());
}
CelValue ValueFromDouble(const google::protobuf::Message* message) {
auto status_or_unwrapped =
cel::extensions::protobuf_internal::UnwrapDynamicDoubleValueProto(
*message);
if (!status_or_unwrapped.ok()) {
return CreateErrorValue(arena_, status_or_unwrapped.status());
}
return ValueFromDouble(*status_or_unwrapped);
}
static CelValue ValueFromDouble(double value) {
return CelValue::CreateDouble(value);
}
CelValue ValueFromMessage(const DoubleValue* wrapper) {
return ValueFromDouble(wrapper->value());
}
CelValue ValueFromString(const google::protobuf::Message* message) {
auto status_or_unwrapped =
cel::extensions::protobuf_internal::UnwrapDynamicStringValueProto(
*message);
if (!status_or_unwrapped.ok()) {
return CreateErrorValue(arena_, status_or_unwrapped.status());
}
return ValueFromString(*status_or_unwrapped);
}
CelValue ValueFromString(const absl::Cord& value) {
return CelValue::CreateString(
Arena::Create<std::string>(arena_, static_cast<std::string>(value)));
}
static CelValue ValueFromString(const std::string* value) {
return CelValue::CreateString(value);
}
CelValue ValueFromMessage(const StringValue* wrapper) {
return ValueFromString(&wrapper->value());
}
CelValue ValueFromBytes(const google::protobuf::Message* message) {
auto status_or_unwrapped =
cel::extensions::protobuf_internal::UnwrapDynamicBytesValueProto(
*message);
if (!status_or_unwrapped.ok()) {
return CreateErrorValue(arena_, status_or_unwrapped.status());
}
return ValueFromBytes(*status_or_unwrapped);
}
CelValue ValueFromBytes(const absl::Cord& value) {
return CelValue::CreateBytes(
Arena::Create<std::string>(arena_, static_cast<std::string>(value)));
}
static CelValue ValueFromBytes(google::protobuf::Arena* arena, std::string value) {
return CelValue::CreateBytes(
Arena::Create<std::string>(arena, std::move(value)));
}
CelValue ValueFromMessage(const BytesValue* wrapper) {
return CelValue::CreateBytes(
Arena::Create<std::string>(arena_, std::string(wrapper->value())));
}
CelValue ValueFromMessage(const Value* value) {
switch (value->kind_case()) {
case Value::KindCase::kNullValue:
return CelValue::CreateNull();
case Value::KindCase::kNumberValue:
return CelValue::CreateDouble(value->number_value());
case Value::KindCase::kStringValue:
return CelValue::CreateString(&value->string_value());
case Value::KindCase::kBoolValue:
return CelValue::CreateBool(value->bool_value());
case Value::KindCase::kStructValue:
return ValueFromMessage(&value->struct_value());
case Value::KindCase::kListValue:
return ValueFromMessage(&value->list_value());
default:
return CelValue::CreateNull();
}
}
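// Bridges a possibly-dynamic message to its generated counterpart T: tries a
// direct down-cast first, otherwise round-trips through serialization.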
template <typename T>
CelValue ValueFromGeneratedMessageLite(const google::protobuf::Message* message) {
const auto* downcast_message = google::protobuf::DynamicCastToGenerated<T>(message);
if (downcast_message != nullptr) {
return ValueFromMessage(downcast_message);
}
auto* value = google::protobuf::Arena::Create<T>(arena_);
absl::Cord serialized;
if (!message->SerializeToCord(&serialized)) {
return CreateErrorValue(
arena_, absl::UnknownError(
absl::StrCat("failed to serialize dynamic message: ",
message->GetTypeName())));
}
if (!value->ParseFromCord(serialized)) {
return CreateErrorValue(arena_, absl::UnknownError(absl::StrCat(
"failed to parse generated message: ",
value->GetTypeName())));
}
return ValueFromMessage(value);
}
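// Compile-time dispatch from the well-known type T to the matching
// conversion routine above.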
template <typename T>
CelValue ValueFromMessage(const google::protobuf::Message* message) {
if constexpr (std::is_same_v<Any, T>) {
return ValueFromAny(message);
} else if constexpr (std::is_same_v<BoolValue, T>) {
return ValueFromBool(message);
} else if constexpr (std::is_same_v<BytesValue, T>) {
return ValueFromBytes(message);
} else if constexpr (std::is_same_v<DoubleValue, T>) {
return ValueFromDouble(message);
} else if constexpr (std::is_same_v<Duration, T>) {
return ValueFromDuration(message);
} else if constexpr (std::is_same_v<FloatValue, T>) {
return ValueFromFloat(message);
} else if constexpr (std::is_same_v<Int32Value, T>) {
return ValueFromInt32(message);
} else if constexpr (std::is_same_v<Int64Value, T>) {
return ValueFromInt64(message);
} else if constexpr (std::is_same_v<ListValue, T>) {
return ValueFromGeneratedMessageLite<ListValue>(message);
} else if constexpr (std::is_same_v<StringValue, T>) {
return ValueFromString(message);
} else if constexpr (std::is_same_v<Struct, T>) {
return ValueFromGeneratedMessageLite<Struct>(message);
} else if constexpr (std::is_same_v<Timestamp, T>) {
return ValueFromTimestamp(message);
} else if constexpr (std::is_same_v<UInt32Value, T>) {
return ValueFromUInt32(message);
} else if constexpr (std::is_same_v<UInt64Value, T>) {
return ValueFromUInt64(message);
} else if constexpr (std::is_same_v<Value, T>) {
return ValueFromGeneratedMessageLite<Value>(message);
} else {
ABSL_UNREACHABLE();
}
}
private:
const ProtobufValueFactory& value_factory_;
const google::protobuf::DescriptorPool* descriptor_pool_;
google::protobuf::Arena* arena_;
MessageFactory* message_factory_;
};
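// Maps a message's WellKnownType to the corresponding typed conversion.
// Returns absl::nullopt for ordinary messages, in which case the caller
// falls back to the ProtobufValueFactory.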
class ValueFromMessageMaker {
public:
template <class MessageType>
static CelValue CreateWellknownTypeValue(const google::protobuf::Message* msg,
const ProtobufValueFactory& factory,
Arena* arena) {
google::protobuf::MessageFactory* message_factory =
msg->GetReflection()->GetMessageFactory();
const google::protobuf::DescriptorPool* pool = msg->GetDescriptor()->file()->pool();
return ValueManager(factory, pool, arena, message_factory)
.ValueFromMessage<MessageType>(msg);
}
static absl::optional<CelValue> CreateValue(
const google::protobuf::Message* message, const ProtobufValueFactory& factory,
Arena* arena) {
switch (message->GetDescriptor()->well_known_type()) {
case google::protobuf::Descriptor::WELLKNOWNTYPE_DOUBLEVALUE:
return CreateWellknownTypeValue<DoubleValue>(message, factory, arena);
case google::protobuf::Descriptor::WELLKNOWNTYPE_FLOATVALUE:
return CreateWellknownTypeValue<FloatValue>(message, factory, arena);
case google::protobuf::Descriptor::WELLKNOWNTYPE_INT64VALUE:
return CreateWellknownTypeValue<Int64Value>(message, factory, arena);
case google::protobuf::Descriptor::WELLKNOWNTYPE_UINT64VALUE:
return CreateWellknownTypeValue<UInt64Value>(message, factory, arena);
case google::protobuf::Descriptor::WELLKNOWNTYPE_INT32VALUE:
return CreateWellknownTypeValue<Int32Value>(message, factory, arena);
case google::protobuf::Descriptor::WELLKNOWNTYPE_UINT32VALUE:
return CreateWellknownTypeValue<UInt32Value>(message, factory, arena);
case google::protobuf::Descriptor::WELLKNOWNTYPE_STRINGVALUE:
return CreateWellknownTypeValue<StringValue>(message, factory, arena);
case google::protobuf::Descriptor::WELLKNOWNTYPE_BYTESVALUE:
return CreateWellknownTypeValue<BytesValue>(message, factory, arena);
case google::protobuf::Descriptor::WELLKNOWNTYPE_BOOLVALUE:
return CreateWellknownTypeValue<BoolValue>(message, factory, arena);
case google::protobuf::Descriptor::WELLKNOWNTYPE_ANY:
return CreateWellknownTypeValue<Any>(message, factory, arena);
case google::protobuf::Descriptor::WELLKNOWNTYPE_DURATION:
return CreateWellknownTypeValue<Duration>(message, factory, arena);
case google::protobuf::Descriptor::WELLKNOWNTYPE_TIMESTAMP:
return CreateWellknownTypeValue<Timestamp>(message, factory, arena);
case google::protobuf::Descriptor::WELLKNOWNTYPE_VALUE:
return CreateWellknownTypeValue<Value>(message, factory, arena);
case google::protobuf::Descriptor::WELLKNOWNTYPE_LISTVALUE:
return CreateWellknownTypeValue<ListValue>(message, factory, arena);
case google::protobuf::Descriptor::WELLKNOWNTYPE_STRUCT:
return CreateWellknownTypeValue<Struct>(message, factory, arena);
default:
return absl::nullopt;
}
}
ValueFromMessageMaker(const ValueFromMessageMaker&) = delete;
ValueFromMessageMaker& operator=(const ValueFromMessageMaker&) = delete;
};
CelValue DynamicList::operator[](int index) const {
return ValueManager(factory_, arena_)
.ValueFromMessage(&values_->values(index));
}
absl::optional<CelValue> DynamicMap::operator[](CelValue key) const {
CelValue::StringHolder str_key;
if (!key.GetValue(&str_key)) {
return CreateErrorValue(arena_, absl::InvalidArgumentError(absl::StrCat(
"Invalid map key type: '",
CelValue::TypeName(key.type()), "'")));
}
auto it = values_->fields().find(std::string(str_key.value()));
if (it == values_->fields().end()) {
return absl::nullopt;
}
return ValueManager(factory_, arena_).ValueFromMessage(&it->second);
}
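// The *FromValue helpers below adapt a CelValue to a specific well-known
// message type, returning nullptr when the value's kind or range does not
// match so the caller can leave the value unwrapped.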
google::protobuf::Message* DurationFromValue(const google::protobuf::Message* prototype,
const CelValue& value,
google::protobuf::Arena* arena) {
absl::Duration val;
if (!value.GetValue(&val)) {
return nullptr;
}
if (!cel::internal::ValidateDuration(val).ok()) {
return nullptr;
}
auto* message = prototype->New(arena);
auto status_or_wrapped =
cel::extensions::protobuf_internal::WrapDynamicDurationProto(val,
*message);
if (!status_or_wrapped.ok()) {
status_or_wrapped.IgnoreError();
return nullptr;
}
return message;
}
google::protobuf::Message* BoolFromValue(const google::protobuf::Message* prototype,
const CelValue& value, google::protobuf::Arena* arena) {
bool val;
if (!value.GetValue(&val)) {
return nullptr;
}
auto* message = prototype->New(arena);
auto status_or_wrapped =
cel::extensions::protobuf_internal::WrapDynamicBoolValueProto(val,
*message);
if (!status_or_wrapped.ok()) {
status_or_wrapped.IgnoreError();
return nullptr;
}
return message;
}
google::protobuf::Message* BytesFromValue(const google::protobuf::Message* prototype,
const CelValue& value, google::protobuf::Arena* arena) {
CelValue::BytesHolder view_val;
if (!value.GetValue(&view_val)) {
return nullptr;
}
auto* message = prototype->New(arena);
auto status_or_wrapped =
cel::extensions::protobuf_internal::WrapDynamicBytesValueProto(
absl::Cord(view_val.value()), *message);
if (!status_or_wrapped.ok()) {
status_or_wrapped.IgnoreError();
return nullptr;
}
return message;
}
google::protobuf::Message* DoubleFromValue(const google::protobuf::Message* prototype,
const CelValue& value, google::protobuf::Arena* arena) {
double val;
if (!value.GetValue(&val)) {
return nullptr;
}
auto* message = prototype->New(arena);
auto status_or_wrapped =
cel::extensions::protobuf_internal::WrapDynamicDoubleValueProto(val,
*message);
if (!status_or_wrapped.ok()) {
status_or_wrapped.IgnoreError();
return nullptr;
}
return message;
}
google::protobuf::Message* FloatFromValue(const google::protobuf::Message* prototype,
const CelValue& value, google::protobuf::Arena* arena) {
double val;
if (!value.GetValue(&val)) {
return nullptr;
}
  // Clamp to +/-infinity before narrowing: converting an out-of-range double
  // to float is undefined behavior, so check the range first.
  float fval;
  if (val > std::numeric_limits<float>::max()) {
    fval = std::numeric_limits<float>::infinity();
  } else if (val < std::numeric_limits<float>::lowest()) {
    fval = -std::numeric_limits<float>::infinity();
  } else {
    fval = static_cast<float>(val);
  }
auto* message = prototype->New(arena);
auto status_or_wrapped =
cel::extensions::protobuf_internal::WrapDynamicFloatValueProto(fval,
*message);
if (!status_or_wrapped.ok()) {
status_or_wrapped.IgnoreError();
return nullptr;
}
return message;
}
google::protobuf::Message* Int32FromValue(const google::protobuf::Message* prototype,
const CelValue& value, google::protobuf::Arena* arena) {
int64_t val;
if (!value.GetValue(&val)) {
return nullptr;
}
if (!cel::internal::CheckedInt64ToInt32(val).ok()) {
return nullptr;
}
int32_t ival = static_cast<int32_t>(val);
auto* message = prototype->New(arena);
auto status_or_wrapped =
cel::extensions::protobuf_internal::WrapDynamicInt32ValueProto(ival,
*message);
if (!status_or_wrapped.ok()) {
status_or_wrapped.IgnoreError();
return nullptr;
}
return message;
}
google::protobuf::Message* Int64FromValue(const google::protobuf::Message* prototype,
const CelValue& value, google::protobuf::Arena* arena) {
int64_t val;
if (!value.GetValue(&val)) {
return nullptr;
}
auto* message = prototype->New(arena);
auto status_or_wrapped =
cel::extensions::protobuf_internal::WrapDynamicInt64ValueProto(val,
*message);
if (!status_or_wrapped.ok()) {
status_or_wrapped.IgnoreError();
return nullptr;
}
return message;
}
google::protobuf::Message* StringFromValue(const google::protobuf::Message* prototype,
const CelValue& value, google::protobuf::Arena* arena) {
CelValue::StringHolder view_val;
if (!value.GetValue(&view_val)) {
return nullptr;
}
auto* message = prototype->New(arena);
auto status_or_wrapped =
cel::extensions::protobuf_internal::WrapDynamicStringValueProto(
absl::Cord(view_val.value()), *message);
if (!status_or_wrapped.ok()) {
status_or_wrapped.IgnoreError();
return nullptr;
}
return message;
}
google::protobuf::Message* TimestampFromValue(const google::protobuf::Message* prototype,
const CelValue& value,
google::protobuf::Arena* arena) {
absl::Time val;
if (!value.GetValue(&val)) {
return nullptr;
}
if (!cel::internal::ValidateTimestamp(val).ok()) {
return nullptr;
}
auto* message = prototype->New(arena);
auto status_or_wrapped =
cel::extensions::protobuf_internal::WrapDynamicTimestampProto(val,
*message);
if (!status_or_wrapped.ok()) {
status_or_wrapped.IgnoreError();
return nullptr;
}
return message;
}
google::protobuf::Message* UInt32FromValue(const google::protobuf::Message* prototype,
const CelValue& value, google::protobuf::Arena* arena) {
uint64_t val;
if (!value.GetValue(&val)) {
return nullptr;
}
if (!cel::internal::CheckedUint64ToUint32(val).ok()) {
return nullptr;
}
uint32_t ival = static_cast<uint32_t>(val);
auto* message = prototype->New(arena);
auto status_or_wrapped =
cel::extensions::protobuf_internal::WrapDynamicUInt32ValueProto(ival,
*message);
if (!status_or_wrapped.ok()) {
status_or_wrapped.IgnoreError();
return nullptr;
}
return message;
}
google::protobuf::Message* UInt64FromValue(const google::protobuf::Message* prototype,
const CelValue& value, google::protobuf::Arena* arena) {
uint64_t val;
if (!value.GetValue(&val)) {
return nullptr;
}
auto* message = prototype->New(arena);
auto status_or_wrapped =
cel::extensions::protobuf_internal::WrapDynamicUInt64ValueProto(val,
*message);
if (!status_or_wrapped.ok()) {
status_or_wrapped.IgnoreError();
return nullptr;
}
return message;
}
google::protobuf::Message* ValueFromValue(google::protobuf::Message* message, const CelValue& value,
google::protobuf::Arena* arena);
google::protobuf::Message* ValueFromValue(const google::protobuf::Message* prototype,
const CelValue& value, google::protobuf::Arena* arena) {
return ValueFromValue(prototype->New(arena), value, arena);
}
google::protobuf::Message* ListFromValue(google::protobuf::Message* message, const CelValue& value,
google::protobuf::Arena* arena) {
if (!value.IsList()) {
return nullptr;
}
const CelList& list = *value.ListOrDie();
for (int i = 0; i < list.size(); i++) {
auto e = list.Get(arena, i);
auto status_or_elem =
cel::extensions::protobuf_internal::DynamicListValueProtoAddElement(
message);
if (!status_or_elem.ok()) {
return nullptr;
}
if (ValueFromValue(*status_or_elem, e, arena) == nullptr) {
return nullptr;
}
}
return message;
}
google::protobuf::Message* ListFromValue(const google::protobuf::Message* prototype,
const CelValue& value, google::protobuf::Arena* arena) {
if (!value.IsList()) {
return nullptr;
}
return ListFromValue(prototype->New(arena), value, arena);
}
google::protobuf::Message* StructFromValue(google::protobuf::Message* message,
const CelValue& value, google::protobuf::Arena* arena) {
if (!value.IsMap()) {
return nullptr;
}
const CelMap& map = *value.MapOrDie();
absl::StatusOr<const CelList*> keys_or = map.ListKeys(arena);
if (!keys_or.ok()) {
return nullptr;
}
const CelList& keys = **keys_or;
for (int i = 0; i < keys.size(); i++) {
auto k = keys.Get(arena, i);
if (!k.IsString()) {
return nullptr;
}
absl::string_view key = k.StringOrDie().value();
auto v = map.Get(arena, k);
if (!v.has_value()) {
return nullptr;
}
auto status_or_value =
cel::extensions::protobuf_internal::DynamicStructValueProtoAddField(
key, message);
if (!status_or_value.ok()) {
return nullptr;
}
if (ValueFromValue(*status_or_value, *v, arena) == nullptr) {
return nullptr;
}
}
return message;
}
google::protobuf::Message* StructFromValue(const google::protobuf::Message* prototype,
const CelValue& value, google::protobuf::Arena* arena) {
if (!value.IsMap()) {
return nullptr;
}
return StructFromValue(prototype->New(arena), value, arena);
}
google::protobuf::Message* ValueFromValue(google::protobuf::Message* message, const CelValue& value,
google::protobuf::Arena* arena) {
switch (value.type()) {
case CelValue::Type::kBool: {
bool val;
if (value.GetValue(&val)) {
if (cel::extensions::protobuf_internal::DynamicValueProtoSetBoolValue(
val, message)
.ok()) {
return message;
}
}
} break;
case CelValue::Type::kBytes: {
CelValue::BytesHolder val;
if (value.GetValue(&val)) {
if (cel::extensions::protobuf_internal::DynamicValueProtoSetStringValue(
absl::Base64Escape(val.value()), message)
.ok()) {
return message;
}
}
} break;
case CelValue::Type::kDouble: {
double val;
if (value.GetValue(&val)) {
if (cel::extensions::protobuf_internal::DynamicValueProtoSetNumberValue(
val, message)
.ok()) {
return message;
}
}
} break;
case CelValue::Type::kDuration: {
absl::Duration val;
if (value.GetValue(&val)) {
auto encode = cel::internal::EncodeDurationToString(val);
if (!encode.ok()) {
return nullptr;
}
if (cel::extensions::protobuf_internal::DynamicValueProtoSetStringValue(
*encode, message)
.ok()) {
return message;
}
}
} break;
case CelValue::Type::kInt64: {
int64_t val;
if (value.GetValue(&val)) {
if (IsJSONSafe(val)) {
if (cel::extensions::protobuf_internal::
DynamicValueProtoSetNumberValue(static_cast<double>(val),
message)
.ok()) {
return message;
}
} else {
if (cel::extensions::protobuf_internal::
DynamicValueProtoSetStringValue(absl::StrCat(val), message)
.ok()) {
return message;
}
}
}
} break;
case CelValue::Type::kString: {
CelValue::StringHolder val;
if (value.GetValue(&val)) {
if (cel::extensions::protobuf_internal::DynamicValueProtoSetStringValue(
val.value(), message)
.ok()) {
return message;
}
}
} break;
case CelValue::Type::kTimestamp: {
absl::Time val;
if (value.GetValue(&val)) {
auto encode = cel::internal::EncodeTimeToString(val);
if (!encode.ok()) {
return nullptr;
}
if (cel::extensions::protobuf_internal::DynamicValueProtoSetStringValue(
*encode, message)
.ok()) {
return message;
}
}
} break;
case CelValue::Type::kUint64: {
uint64_t val;
if (value.GetValue(&val)) {
if (IsJSONSafe(val)) {
if (cel::extensions::protobuf_internal::
DynamicValueProtoSetNumberValue(static_cast<double>(val),
message)
.ok()) {
return message;
}
} else {
if (cel::extensions::protobuf_internal::
DynamicValueProtoSetStringValue(absl::StrCat(val), message)
.ok()) {
return message;
}
}
}
} break;
case CelValue::Type::kList: {
auto status_or_list =
cel::extensions::protobuf_internal::DynamicValueProtoMutableListValue(
message);
if (!status_or_list.ok()) {
return nullptr;
}
if (ListFromValue(*status_or_list, value, arena) != nullptr) {
return message;
}
} break;
case CelValue::Type::kMap: {
auto status_or_struct = cel::extensions::protobuf_internal::
DynamicValueProtoMutableStructValue(message);
if (!status_or_struct.ok()) {
return nullptr;
}
if (StructFromValue(*status_or_struct, value, arena) != nullptr) {
return message;
}
} break;
case CelValue::Type::kNullType:
if (cel::extensions::protobuf_internal::DynamicValueProtoSetNullValue(
message)
.ok()) {
return message;
}
break;
default:
return nullptr;
}
return nullptr;
}
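// Generated-message counterparts of the dynamic JSON conversions above; used
// when packing values into google.protobuf.Any.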
bool ValueFromValue(Value* json, const CelValue& value, google::protobuf::Arena* arena);
bool ListFromValue(ListValue* json_list, const CelValue& value,
google::protobuf::Arena* arena) {
if (!value.IsList()) {
return false;
}
const CelList& list = *value.ListOrDie();
for (int i = 0; i < list.size(); i++) {
auto e = list.Get(arena, i);
Value* elem = json_list->add_values();
if (!ValueFromValue(elem, e, arena)) {
return false;
}
}
return true;
}
bool StructFromValue(Struct* json_struct, const CelValue& value,
google::protobuf::Arena* arena) {
if (!value.IsMap()) {
return false;
}
const CelMap& map = *value.MapOrDie();
absl::StatusOr<const CelList*> keys_or = map.ListKeys(arena);
if (!keys_or.ok()) {
return false;
}
const CelList& keys = **keys_or;
auto fields = json_struct->mutable_fields();
for (int i = 0; i < keys.size(); i++) {
auto k = keys.Get(arena, i);
if (!k.IsString()) {
return false;
}
absl::string_view key = k.StringOrDie().value();
auto v = map.Get(arena, k);
if (!v.has_value()) {
return false;
}
Value field_value;
if (!ValueFromValue(&field_value, *v, arena)) {
return false;
}
(*fields)[std::string(key)] = field_value;
}
return true;
}
bool ValueFromValue(Value* json, const CelValue& value, google::protobuf::Arena* arena) {
switch (value.type()) {
case CelValue::Type::kBool: {
bool val;
if (value.GetValue(&val)) {
json->set_bool_value(val);
return true;
}
} break;
case CelValue::Type::kBytes: {
CelValue::BytesHolder val;
if (value.GetValue(&val)) {
json->set_string_value(absl::Base64Escape(val.value()));
return true;
}
} break;
case CelValue::Type::kDouble: {
double val;
if (value.GetValue(&val)) {
json->set_number_value(val);
return true;
}
} break;
case CelValue::Type::kDuration: {
absl::Duration val;
if (value.GetValue(&val)) {
auto encode = cel::internal::EncodeDurationToString(val);
if (!encode.ok()) {
return false;
}
json->set_string_value(*encode);
return true;
}
} break;
case CelValue::Type::kInt64: {
int64_t val;
if (value.GetValue(&val)) {
if (IsJSONSafe(val)) {
json->set_number_value(val);
} else {
json->set_string_value(absl::StrCat(val));
}
return true;
}
} break;
case CelValue::Type::kString: {
CelValue::StringHolder val;
if (value.GetValue(&val)) {
json->set_string_value(val.value());
return true;
}
} break;
case CelValue::Type::kTimestamp: {
absl::Time val;
if (value.GetValue(&val)) {
auto encode = cel::internal::EncodeTimeToString(val);
if (!encode.ok()) {
return false;
}
json->set_string_value(*encode);
return true;
}
} break;
case CelValue::Type::kUint64: {
uint64_t val;
if (value.GetValue(&val)) {
if (IsJSONSafe(val)) {
json->set_number_value(val);
} else {
json->set_string_value(absl::StrCat(val));
}
return true;
}
} break;
case CelValue::Type::kList:
return ListFromValue(json->mutable_list_value(), value, arena);
case CelValue::Type::kMap:
return StructFromValue(json->mutable_struct_value(), value, arena);
case CelValue::Type::kNullType:
json->set_null_value(protobuf::NULL_VALUE);
return true;
default:
return false;
}
return false;
}
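// Packs a CelValue into google.protobuf.Any by serializing its natural
// well-known-type representation under the standard type.googleapis.com URL.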
google::protobuf::Message* AnyFromValue(const google::protobuf::Message* prototype,
const CelValue& value, google::protobuf::Arena* arena) {
std::string type_name;
absl::Cord payload;
switch (value.type()) {
case CelValue::Type::kBool: {
BoolValue v;
type_name = v.GetTypeName();
v.set_value(value.BoolOrDie());
payload = v.SerializeAsCord();
} break;
case CelValue::Type::kBytes: {
BytesValue v;
type_name = v.GetTypeName();
v.set_value(std::string(value.BytesOrDie().value()));
payload = v.SerializeAsCord();
} break;
case CelValue::Type::kDouble: {
DoubleValue v;
type_name = v.GetTypeName();
v.set_value(value.DoubleOrDie());
payload = v.SerializeAsCord();
} break;
case CelValue::Type::kDuration: {
Duration v;
if (!cel::internal::EncodeDuration(value.DurationOrDie(), &v).ok()) {
return nullptr;
}
type_name = v.GetTypeName();
payload = v.SerializeAsCord();
} break;
case CelValue::Type::kInt64: {
Int64Value v;
type_name = v.GetTypeName();
v.set_value(value.Int64OrDie());
payload = v.SerializeAsCord();
} break;
case CelValue::Type::kString: {
StringValue v;
type_name = v.GetTypeName();
v.set_value(std::string(value.StringOrDie().value()));
payload = v.SerializeAsCord();
} break;
case CelValue::Type::kTimestamp: {
Timestamp v;
if (!cel::internal::EncodeTime(value.TimestampOrDie(), &v).ok()) {
return nullptr;
}
type_name = v.GetTypeName();
payload = v.SerializeAsCord();
} break;
case CelValue::Type::kUint64: {
UInt64Value v;
type_name = v.GetTypeName();
v.set_value(value.Uint64OrDie());
payload = v.SerializeAsCord();
} break;
case CelValue::Type::kList: {
ListValue v;
if (!ListFromValue(&v, value, arena)) {
return nullptr;
}
type_name = v.GetTypeName();
payload = v.SerializeAsCord();
} break;
case CelValue::Type::kMap: {
Struct v;
if (!StructFromValue(&v, value, arena)) {
return nullptr;
}
type_name = v.GetTypeName();
payload = v.SerializeAsCord();
} break;
case CelValue::Type::kNullType: {
Value v;
type_name = v.GetTypeName();
v.set_null_value(google::protobuf::NULL_VALUE);
payload = v.SerializeAsCord();
} break;
case CelValue::Type::kMessage: {
type_name = value.MessageWrapperOrDie().message_ptr()->GetTypeName();
payload = value.MessageWrapperOrDie().message_ptr()->SerializeAsCord();
} break;
default:
return nullptr;
}
auto* message = prototype->New(arena);
if (cel::extensions::protobuf_internal::WrapDynamicAnyProto(
absl::StrCat("type.googleapis.com/", type_name), payload, *message)
.ok()) {
return message;
}
return nullptr;
}
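// Guards against double-wrapping: if the value already holds a message of the
// target well-known type, the wrap is skipped and the message is used as-is.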
bool IsAlreadyWrapped(google::protobuf::Descriptor::WellKnownType wkt,
const CelValue& value) {
if (value.IsMessage()) {
const auto* msg = value.MessageOrDie();
if (wkt == msg->GetDescriptor()->well_known_type()) {
return true;
}
}
return false;
}
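// Inverse of ValueFromMessageMaker: adapts a CelValue to the well-known
// message type named by the target descriptor, or returns nullptr.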
class MessageFromValueMaker {
public:
MessageFromValueMaker(const MessageFromValueMaker&) = delete;
MessageFromValueMaker& operator=(const MessageFromValueMaker&) = delete;
static google::protobuf::Message* MaybeWrapMessage(const google::protobuf::Descriptor* descriptor,
google::protobuf::MessageFactory* factory,
const CelValue& value,
Arena* arena) {
switch (descriptor->well_known_type()) {
case google::protobuf::Descriptor::WELLKNOWNTYPE_DOUBLEVALUE:
if (IsAlreadyWrapped(descriptor->well_known_type(), value)) {
return nullptr;
}
return DoubleFromValue(factory->GetPrototype(descriptor), value, arena);
case google::protobuf::Descriptor::WELLKNOWNTYPE_FLOATVALUE:
if (IsAlreadyWrapped(descriptor->well_known_type(), value)) {
return nullptr;
}
return FloatFromValue(factory->GetPrototype(descriptor), value, arena);
case google::protobuf::Descriptor::WELLKNOWNTYPE_INT64VALUE:
if (IsAlreadyWrapped(descriptor->well_known_type(), value)) {
return nullptr;
}
return Int64FromValue(factory->GetPrototype(descriptor), value, arena);
case google::protobuf::Descriptor::WELLKNOWNTYPE_UINT64VALUE:
if (IsAlreadyWrapped(descriptor->well_known_type(), value)) {
return nullptr;
}
return UInt64FromValue(factory->GetPrototype(descriptor), value, arena);
case google::protobuf::Descriptor::WELLKNOWNTYPE_INT32VALUE:
if (IsAlreadyWrapped(descriptor->well_known_type(), value)) {
return nullptr;
}
return Int32FromValue(factory->GetPrototype(descriptor), value, arena);
case google::protobuf::Descriptor::WELLKNOWNTYPE_UINT32VALUE:
if (IsAlreadyWrapped(descriptor->well_known_type(), value)) {
return nullptr;
}
return UInt32FromValue(factory->GetPrototype(descriptor), value, arena);
case google::protobuf::Descriptor::WELLKNOWNTYPE_STRINGVALUE:
if (IsAlreadyWrapped(descriptor->well_known_type(), value)) {
return nullptr;
}
return StringFromValue(factory->GetPrototype(descriptor), value, arena);
case google::protobuf::Descriptor::WELLKNOWNTYPE_BYTESVALUE:
if (IsAlreadyWrapped(descriptor->well_known_type(), value)) {
return nullptr;
}
return BytesFromValue(factory->GetPrototype(descriptor), value, arena);
case google::protobuf::Descriptor::WELLKNOWNTYPE_BOOLVALUE:
if (IsAlreadyWrapped(descriptor->well_known_type(), value)) {
return nullptr;
}
return BoolFromValue(factory->GetPrototype(descriptor), value, arena);
case google::protobuf::Descriptor::WELLKNOWNTYPE_ANY:
if (IsAlreadyWrapped(descriptor->well_known_type(), value)) {
return nullptr;
}
return AnyFromValue(factory->GetPrototype(descriptor), value, arena);
case google::protobuf::Descriptor::WELLKNOWNTYPE_DURATION:
if (IsAlreadyWrapped(descriptor->well_known_type(), value)) {
return nullptr;
}
return DurationFromValue(factory->GetPrototype(descriptor), value,
arena);
case google::protobuf::Descriptor::WELLKNOWNTYPE_TIMESTAMP:
if (IsAlreadyWrapped(descriptor->well_known_type(), value)) {
return nullptr;
}
return TimestampFromValue(factory->GetPrototype(descriptor), value,
arena);
case google::protobuf::Descriptor::WELLKNOWNTYPE_VALUE:
if (IsAlreadyWrapped(descriptor->well_known_type(), value)) {
return nullptr;
}
return ValueFromValue(factory->GetPrototype(descriptor), value, arena);
case google::protobuf::Descriptor::WELLKNOWNTYPE_LISTVALUE:
if (IsAlreadyWrapped(descriptor->well_known_type(), value)) {
return nullptr;
}
return ListFromValue(factory->GetPrototype(descriptor), value, arena);
case google::protobuf::Descriptor::WELLKNOWNTYPE_STRUCT:
if (IsAlreadyWrapped(descriptor->well_known_type(), value)) {
return nullptr;
}
return StructFromValue(factory->GetPrototype(descriptor), value, arena);
default:
return nullptr;
}
}
};
}
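// Public entry points. UnwrapMessageToValue special-cases well-known types;
// all other messages are deferred to the supplied factory.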
CelValue UnwrapMessageToValue(const google::protobuf::Message* value,
const ProtobufValueFactory& factory,
Arena* arena) {
if (value == nullptr) {
return CelValue::CreateNull();
}
absl::optional<CelValue> special_value =
ValueFromMessageMaker::CreateValue(value, factory, arena);
if (special_value.has_value()) {
return *special_value;
}
return factory(value);
}
const google::protobuf::Message* MaybeWrapValueToMessage(
    const google::protobuf::Descriptor* descriptor, google::protobuf::MessageFactory* factory,
    const CelValue& value, Arena* arena) {
  return MessageFromValueMaker::MaybeWrapMessage(descriptor, factory, value,
                                                 arena);
}
} | #include "eval/public/structs/cel_proto_wrap_util.h"
#include <cassert>
#include <limits>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "google/protobuf/any.pb.h"
#include "google/protobuf/duration.pb.h"
#include "google/protobuf/empty.pb.h"
#include "google/protobuf/struct.pb.h"
#include "google/protobuf/wrappers.pb.h"
#include "google/protobuf/dynamic_message.h"
#include "google/protobuf/message.h"
#include "absl/base/no_destructor.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/time/time.h"
#include "eval/public/cel_value.h"
#include "eval/public/containers/container_backed_list_impl.h"
#include "eval/public/containers/container_backed_map_impl.h"
#include "eval/public/message_wrapper.h"
#include "eval/public/structs/protobuf_value_factory.h"
#include "eval/public/structs/trivial_legacy_type_info.h"
#include "eval/testutil/test_message.pb.h"
#include "internal/proto_time_encoding.h"
#include "internal/status_macros.h"
#include "internal/testing.h"
#include "testutil/util.h"
namespace google::api::expr::runtime::internal {
namespace {
using ::testing::Eq;
using ::testing::UnorderedPointwise;
using google::protobuf::Duration;
using google::protobuf::ListValue;
using google::protobuf::Struct;
using google::protobuf::Timestamp;
using google::protobuf::Value;
using google::protobuf::Any;
using google::protobuf::BoolValue;
using google::protobuf::BytesValue;
using google::protobuf::DoubleValue;
using google::protobuf::FloatValue;
using google::protobuf::Int32Value;
using google::protobuf::Int64Value;
using google::protobuf::StringValue;
using google::protobuf::UInt32Value;
using google::protobuf::UInt64Value;
using google::protobuf::Arena;
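// Test-only factory that wraps arbitrary messages with trivial type info,
// letting the tests exercise wrapping/unwrapping without a type registry.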
CelValue ProtobufValueFactoryImpl(const google::protobuf::Message* m) {
return CelValue::CreateMessageWrapper(
CelValue::MessageWrapper(m, TrivialTypeInfo::GetInstance()));
}
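// The fixture round-trips each value through both the generated message and a
// DynamicMessageFactory copy, so the dynamic code paths are covered too.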
class CelProtoWrapperTest : public ::testing::Test {
protected:
  CelProtoWrapperTest() = default;
void ExpectWrappedMessage(const CelValue& value,
const google::protobuf::Message& message) {
auto* result = MaybeWrapValueToMessage(
message.GetDescriptor(), message.GetReflection()->GetMessageFactory(),
value, arena());
EXPECT_TRUE(result != nullptr);
EXPECT_THAT(result, testutil::EqualsProto(message));
auto* identity = MaybeWrapValueToMessage(
message.GetDescriptor(), message.GetReflection()->GetMessageFactory(),
ProtobufValueFactoryImpl(result), arena());
EXPECT_TRUE(identity == nullptr);
result = MaybeWrapValueToMessage(
ReflectedCopy(message)->GetDescriptor(),
ReflectedCopy(message)->GetReflection()->GetMessageFactory(), value,
arena());
EXPECT_TRUE(result != nullptr);
EXPECT_THAT(result, testutil::EqualsProto(message));
}
void ExpectNotWrapped(const CelValue& value, const google::protobuf::Message& message) {
auto result = MaybeWrapValueToMessage(
message.GetDescriptor(), message.GetReflection()->GetMessageFactory(),
value, arena());
EXPECT_TRUE(result == nullptr);
}
template <class T>
void ExpectUnwrappedPrimitive(const google::protobuf::Message& message, T result) {
CelValue cel_value =
UnwrapMessageToValue(&message, &ProtobufValueFactoryImpl, arena());
    T value;
    // Assert rather than expect: comparing an uninitialized T after a failed
    // GetValue would read indeterminate memory.
    ASSERT_TRUE(cel_value.GetValue(&value));
    EXPECT_THAT(value, Eq(result));
    T dyn_value;
    CelValue cel_dyn_value = UnwrapMessageToValue(
        ReflectedCopy(message).get(), &ProtobufValueFactoryImpl, arena());
    EXPECT_THAT(cel_dyn_value.type(), Eq(cel_value.type()));
    ASSERT_TRUE(cel_dyn_value.GetValue(&dyn_value));
    EXPECT_THAT(value, Eq(dyn_value));
}
void ExpectUnwrappedMessage(const google::protobuf::Message& message,
google::protobuf::Message* result) {
CelValue cel_value =
UnwrapMessageToValue(&message, &ProtobufValueFactoryImpl, arena());
if (result == nullptr) {
EXPECT_TRUE(cel_value.IsNull());
return;
}
EXPECT_TRUE(cel_value.IsMessage());
EXPECT_THAT(cel_value.MessageOrDie(), testutil::EqualsProto(*result));
}
std::unique_ptr<google::protobuf::Message> ReflectedCopy(
const google::protobuf::Message& message) {
std::unique_ptr<google::protobuf::Message> dynamic_value(
factory_.GetPrototype(message.GetDescriptor())->New());
dynamic_value->CopyFrom(message);
return dynamic_value;
}
Arena* arena() { return &arena_; }
private:
Arena arena_;
google::protobuf::DynamicMessageFactory factory_;
};
TEST_F(CelProtoWrapperTest, TestType) {
Duration msg_duration;
msg_duration.set_seconds(2);
msg_duration.set_nanos(3);
CelValue value_duration2 =
UnwrapMessageToValue(&msg_duration, &ProtobufValueFactoryImpl, arena());
EXPECT_THAT(value_duration2.type(), Eq(CelValue::Type::kDuration));
Timestamp msg_timestamp;
msg_timestamp.set_seconds(2);
msg_timestamp.set_nanos(3);
CelValue value_timestamp2 =
UnwrapMessageToValue(&msg_timestamp, &ProtobufValueFactoryImpl, arena());
EXPECT_THAT(value_timestamp2.type(), Eq(CelValue::Type::kTimestamp));
}
TEST_F(CelProtoWrapperTest, TestDuration) {
Duration msg_duration;
msg_duration.set_seconds(2);
msg_duration.set_nanos(3);
CelValue value =
UnwrapMessageToValue(&msg_duration, &ProtobufValueFactoryImpl, arena());
EXPECT_THAT(value.type(), Eq(CelValue::Type::kDuration));
Duration out;
auto status = cel::internal::EncodeDuration(value.DurationOrDie(), &out);
EXPECT_TRUE(status.ok());
EXPECT_THAT(out, testutil::EqualsProto(msg_duration));
}
TEST_F(CelProtoWrapperTest, TestTimestamp) {
Timestamp msg_timestamp;
msg_timestamp.set_seconds(2);
msg_timestamp.set_nanos(3);
CelValue value =
UnwrapMessageToValue(&msg_timestamp, &ProtobufValueFactoryImpl, arena());
EXPECT_TRUE(value.IsTimestamp());
Timestamp out;
auto status = cel::internal::EncodeTime(value.TimestampOrDie(), &out);
EXPECT_TRUE(status.ok());
EXPECT_THAT(out, testutil::EqualsProto(msg_timestamp));
}
TEST_F(CelProtoWrapperTest, UnwrapMessageToValueNull) {
Value json;
json.set_null_value(google::protobuf::NullValue::NULL_VALUE);
ExpectUnwrappedMessage(json, nullptr);
}
TEST_F(CelProtoWrapperTest, UnwrapDynamicValueNull) {
Value value_msg;
value_msg.set_null_value(protobuf::NULL_VALUE);
CelValue value = UnwrapMessageToValue(ReflectedCopy(value_msg).get(),
&ProtobufValueFactoryImpl, arena());
EXPECT_TRUE(value.IsNull());
}
TEST_F(CelProtoWrapperTest, UnwrapMessageToValueBool) {
bool value = true;
Value json;
json.set_bool_value(true);
ExpectUnwrappedPrimitive(json, value);
}
TEST_F(CelProtoWrapperTest, UnwrapMessageToValueNumber) {
double value = 1.0;
Value json;
json.set_number_value(value);
ExpectUnwrappedPrimitive(json, value);
}
TEST_F(CelProtoWrapperTest, UnwrapMessageToValueString) {
const std::string test = "test";
auto value = CelValue::StringHolder(&test);
Value json;
json.set_string_value(test);
ExpectUnwrappedPrimitive(json, value);
}
TEST_F(CelProtoWrapperTest, UnwrapMessageToValueStruct) {
const std::vector<std::string> kFields = {"field1", "field2", "field3"};
Struct value_struct;
auto& value1 = (*value_struct.mutable_fields())[kFields[0]];
value1.set_bool_value(true);
auto& value2 = (*value_struct.mutable_fields())[kFields[1]];
value2.set_number_value(1.0);
auto& value3 = (*value_struct.mutable_fields())[kFields[2]];
value3.set_string_value("test");
CelValue value =
UnwrapMessageToValue(&value_struct, &ProtobufValueFactoryImpl, arena());
ASSERT_TRUE(value.IsMap());
const CelMap* cel_map = value.MapOrDie();
CelValue field1 = CelValue::CreateString(&kFields[0]);
auto field1_presence = cel_map->Has(field1);
ASSERT_OK(field1_presence);
EXPECT_TRUE(*field1_presence);
auto lookup1 = (*cel_map)[field1];
ASSERT_TRUE(lookup1.has_value());
ASSERT_TRUE(lookup1->IsBool());
EXPECT_EQ(lookup1->BoolOrDie(), true);
CelValue field2 = CelValue::CreateString(&kFields[1]);
auto field2_presence = cel_map->Has(field2);
ASSERT_OK(field2_presence);
EXPECT_TRUE(*field2_presence);
auto lookup2 = (*cel_map)[field2];
ASSERT_TRUE(lookup2.has_value());
ASSERT_TRUE(lookup2->IsDouble());
EXPECT_DOUBLE_EQ(lookup2->DoubleOrDie(), 1.0);
CelValue field3 = CelValue::CreateString(&kFields[2]);
auto field3_presence = cel_map->Has(field3);
ASSERT_OK(field3_presence);
EXPECT_TRUE(*field3_presence);
auto lookup3 = (*cel_map)[field3];
ASSERT_TRUE(lookup3.has_value());
ASSERT_TRUE(lookup3->IsString());
EXPECT_EQ(lookup3->StringOrDie().value(), "test");
std::string missing = "missing_field";
CelValue missing_field = CelValue::CreateString(&missing);
auto missing_field_presence = cel_map->Has(missing_field);
ASSERT_OK(missing_field_presence);
EXPECT_FALSE(*missing_field_presence);
const CelList* key_list = cel_map->ListKeys().value();
ASSERT_EQ(key_list->size(), kFields.size());
std::vector<std::string> result_keys;
for (int i = 0; i < key_list->size(); i++) {
CelValue key = (*key_list)[i];
ASSERT_TRUE(key.IsString());
result_keys.push_back(std::string(key.StringOrDie().value()));
}
EXPECT_THAT(result_keys, UnorderedPointwise(Eq(), kFields));
}
TEST_F(CelProtoWrapperTest, UnwrapDynamicStruct) {
Struct struct_msg;
const std::string kFieldInt = "field_int";
const std::string kFieldBool = "field_bool";
(*struct_msg.mutable_fields())[kFieldInt].set_number_value(1.);
(*struct_msg.mutable_fields())[kFieldBool].set_bool_value(true);
CelValue value = UnwrapMessageToValue(ReflectedCopy(struct_msg).get(),
&ProtobufValueFactoryImpl, arena());
EXPECT_TRUE(value.IsMap());
const CelMap* cel_map = value.MapOrDie();
ASSERT_TRUE(cel_map != nullptr);
{
auto lookup = (*cel_map)[CelValue::CreateString(&kFieldInt)];
ASSERT_TRUE(lookup.has_value());
auto v = lookup.value();
ASSERT_TRUE(v.IsDouble());
EXPECT_THAT(v.DoubleOrDie(), testing::DoubleEq(1.));
}
{
auto lookup = (*cel_map)[CelValue::CreateString(&kFieldBool)];
ASSERT_TRUE(lookup.has_value());
auto v = lookup.value();
ASSERT_TRUE(v.IsBool());
EXPECT_EQ(v.BoolOrDie(), true);
}
{
auto presence = cel_map->Has(CelValue::CreateBool(true));
ASSERT_FALSE(presence.ok());
EXPECT_EQ(presence.status().code(), absl::StatusCode::kInvalidArgument);
auto lookup = (*cel_map)[CelValue::CreateBool(true)];
ASSERT_TRUE(lookup.has_value());
auto v = lookup.value();
ASSERT_TRUE(v.IsError());
}
}
TEST_F(CelProtoWrapperTest, UnwrapDynamicValueStruct) {
const std::string kField1 = "field1";
const std::string kField2 = "field2";
Value value_msg;
(*value_msg.mutable_struct_value()->mutable_fields())[kField1]
.set_number_value(1);
(*value_msg.mutable_struct_value()->mutable_fields())[kField2]
.set_number_value(2);
CelValue value = UnwrapMessageToValue(ReflectedCopy(value_msg).get(),
&ProtobufValueFactoryImpl, arena());
EXPECT_TRUE(value.IsMap());
EXPECT_TRUE(
(*value.MapOrDie())[CelValue::CreateString(&kField1)].has_value());
EXPECT_TRUE(
(*value.MapOrDie())[CelValue::CreateString(&kField2)].has_value());
}
TEST_F(CelProtoWrapperTest, UnwrapMessageToValueList) {
const std::vector<std::string> kFields = {"field1", "field2", "field3"};
ListValue list_value;
list_value.add_values()->set_bool_value(true);
list_value.add_values()->set_number_value(1.0);
list_value.add_values()->set_string_value("test");
CelValue value =
UnwrapMessageToValue(&list_value, &ProtobufValueFactoryImpl, arena());
ASSERT_TRUE(value.IsList());
const CelList* cel_list = value.ListOrDie();
ASSERT_EQ(cel_list->size(), 3);
CelValue value1 = (*cel_list)[0];
ASSERT_TRUE(value1.IsBool());
EXPECT_EQ(value1.BoolOrDie(), true);
auto value2 = (*cel_list)[1];
ASSERT_TRUE(value2.IsDouble());
EXPECT_DOUBLE_EQ(value2.DoubleOrDie(), 1.0);
auto value3 = (*cel_list)[2];
ASSERT_TRUE(value3.IsString());
EXPECT_EQ(value3.StringOrDie().value(), "test");
}
TEST_F(CelProtoWrapperTest, UnwrapDynamicValueListValue) {
Value value_msg;
value_msg.mutable_list_value()->add_values()->set_number_value(1.);
value_msg.mutable_list_value()->add_values()->set_number_value(2.);
CelValue value = UnwrapMessageToValue(ReflectedCopy(value_msg).get(),
&ProtobufValueFactoryImpl, arena());
EXPECT_TRUE(value.IsList());
EXPECT_THAT((*value.ListOrDie())[0].DoubleOrDie(), testing::DoubleEq(1));
EXPECT_THAT((*value.ListOrDie())[1].DoubleOrDie(), testing::DoubleEq(2));
}
TEST_F(CelProtoWrapperTest, UnwrapAnyValue) {
TestMessage test_message;
test_message.set_string_value("test");
Any any;
any.PackFrom(test_message);
ExpectUnwrappedMessage(any, &test_message);
}
TEST_F(CelProtoWrapperTest, UnwrapInvalidAny) {
Any any;
CelValue value =
UnwrapMessageToValue(&any, &ProtobufValueFactoryImpl, arena());
ASSERT_TRUE(value.IsError());
any.set_type_url("/");
ASSERT_TRUE(
UnwrapMessageToValue(&any, &ProtobufValueFactoryImpl, arena()).IsError());
any.set_type_url("/invalid.proto.name");
ASSERT_TRUE(
UnwrapMessageToValue(&any, &ProtobufValueFactoryImpl, arena()).IsError());
}
TEST_F(CelProtoWrapperTest, UnwrapBoolWrapper) {
bool value = true;
BoolValue wrapper;
wrapper.set_value(value);
ExpectUnwrappedPrimitive(wrapper, value);
}
TEST_F(CelProtoWrapperTest, UnwrapInt32Wrapper) {
int64_t value = 12;
Int32Value wrapper;
wrapper.set_value(value);
ExpectUnwrappedPrimitive(wrapper, value);
}
TEST_F(CelProtoWrapperTest, UnwrapUInt32Wrapper) {
uint64_t value = 12;
UInt32Value wrapper;
wrapper.set_value(value);
ExpectUnwrappedPrimitive(wrapper, value);
}
TEST_F(CelProtoWrapperTest, UnwrapInt64Wrapper) {
int64_t value = 12;
Int64Value wrapper;
wrapper.set_value(value);
ExpectUnwrappedPrimitive(wrapper, value);
}
TEST_F(CelProtoWrapperTest, UnwrapUInt64Wrapper) {
uint64_t value = 12;
UInt64Value wrapper;
wrapper.set_value(value);
ExpectUnwrappedPrimitive(wrapper, value);
}
TEST_F(CelProtoWrapperTest, UnwrapFloatWrapper) {
double value = 42.5;
FloatValue wrapper;
wrapper.set_value(value);
ExpectUnwrappedPrimitive(wrapper, value);
}
TEST_F(CelProtoWrapperTest, UnwrapDoubleWrapper) {
double value = 42.5;
DoubleValue wrapper;
wrapper.set_value(value);
ExpectUnwrappedPrimitive(wrapper, value);
}
TEST_F(CelProtoWrapperTest, UnwrapStringWrapper) {
std::string text = "42";
auto value = CelValue::StringHolder(&text);
StringValue wrapper;
wrapper.set_value(text);
ExpectUnwrappedPrimitive(wrapper, value);
}
TEST_F(CelProtoWrapperTest, UnwrapBytesWrapper) {
std::string text = "42";
auto value = CelValue::BytesHolder(&text);
BytesValue wrapper;
wrapper.set_value("42");
ExpectUnwrappedPrimitive(wrapper, value);
}
TEST_F(CelProtoWrapperTest, WrapNull) {
auto cel_value = CelValue::CreateNull();
Value json;
json.set_null_value(protobuf::NULL_VALUE);
ExpectWrappedMessage(cel_value, json);
Any any;
any.PackFrom(json);
ExpectWrappedMessage(cel_value, any);
}
TEST_F(CelProtoWrapperTest, WrapBool) {
auto cel_value = CelValue::CreateBool(true);
Value json;
json.set_bool_value(true);
ExpectWrappedMessage(cel_value, json);
BoolValue wrapper;
wrapper.set_value(true);
ExpectWrappedMessage(cel_value, wrapper);
Any any;
any.PackFrom(wrapper);
ExpectWrappedMessage(cel_value, any);
}
TEST_F(CelProtoWrapperTest, WrapBytes) {
std::string str = "hello world";
auto cel_value = CelValue::CreateBytes(CelValue::BytesHolder(&str));
BytesValue wrapper;
wrapper.set_value(str);
ExpectWrappedMessage(cel_value, wrapper);
Any any;
any.PackFrom(wrapper);
ExpectWrappedMessage(cel_value, any);
}
TEST_F(CelProtoWrapperTest, WrapBytesToValue) {
std::string str = "hello world";
auto cel_value = CelValue::CreateBytes(CelValue::BytesHolder(&str));
Value json;
json.set_string_value("aGVsbG8gd29ybGQ=");
ExpectWrappedMessage(cel_value, json);
}
TEST_F(CelProtoWrapperTest, WrapDuration) {
auto cel_value = CelValue::CreateDuration(absl::Seconds(300));
Duration d;
d.set_seconds(300);
ExpectWrappedMessage(cel_value, d);
Any any;
any.PackFrom(d);
ExpectWrappedMessage(cel_value, any);
}
TEST_F(CelProtoWrapperTest, WrapDurationToValue) {
auto cel_value = CelValue::CreateDuration(absl::Seconds(300));
Value json;
json.set_string_value("300s");
ExpectWrappedMessage(cel_value, json);
}
TEST_F(CelProtoWrapperTest, WrapDouble) {
double num = 1.5;
auto cel_value = CelValue::CreateDouble(num);
Value json;
json.set_number_value(num);
ExpectWrappedMessage(cel_value, json);
DoubleValue wrapper;
wrapper.set_value(num);
ExpectWrappedMessage(cel_value, wrapper);
Any any;
any.PackFrom(wrapper);
ExpectWrappedMessage(cel_value, any);
}
TEST_F(CelProtoWrapperTest, WrapDoubleToFloatValue) {
double num = 1.5;
auto cel_value = CelValue::CreateDouble(num);
FloatValue wrapper;
wrapper.set_value(num);
ExpectWrappedMessage(cel_value, wrapper);
double small_num = -9.9e-100;
wrapper.set_value(small_num);
cel_value = CelValue::CreateDouble(small_num);
ExpectWrappedMessage(cel_value, wrapper);
}
TEST_F(CelProtoWrapperTest, WrapDoubleOverflow) {
double lowest_double = std::numeric_limits<double>::lowest();
auto cel_value = CelValue::CreateDouble(lowest_double);
FloatValue wrapper;
wrapper.set_value(-std::numeric_limits<float>::infinity());
ExpectWrappedMessage(cel_value, wrapper);
double max_double = std::numeric_limits<double>::max();
cel_value = CelValue::CreateDouble(max_double);
wrapper.set_value(std::numeric_limits<float>::infinity());
ExpectWrappedMessage(cel_value, wrapper);
}
TEST_F(CelProtoWrapperTest, WrapInt64) {
int32_t num = std::numeric_limits<int32_t>::lowest();
auto cel_value = CelValue::CreateInt64(num);
Value json;
json.set_number_value(static_cast<double>(num));
ExpectWrappedMessage(cel_value, json);
Int64Value wrapper;
wrapper.set_value(num);
ExpectWrappedMessage(cel_value, wrapper);
Any any;
any.PackFrom(wrapper);
ExpectWrappedMessage(cel_value, any);
}
TEST_F(CelProtoWrapperTest, WrapInt64ToInt32Value) {
int32_t num = std::numeric_limits<int32_t>::lowest();
auto cel_value = CelValue::CreateInt64(num);
Int32Value wrapper;
wrapper.set_value(num);
ExpectWrappedMessage(cel_value, wrapper);
}
TEST_F(CelProtoWrapperTest, WrapFailureInt64ToInt32Value) {
int64_t num = std::numeric_limits<int64_t>::lowest();
auto cel_value = CelValue::CreateInt64(num);
Int32Value wrapper;
ExpectNotWrapped(cel_value, wrapper);
}
TEST_F(CelProtoWrapperTest, WrapInt64ToValue) {
int64_t max = std::numeric_limits<int64_t>::max();
auto cel_value = CelValue::CreateInt64(max);
Value json;
json.set_string_value(absl::StrCat(max));
ExpectWrappedMessage(cel_value, json);
int64_t min = std::numeric_limits<int64_t>::min();
cel_value = CelValue::CreateInt64(min);
json.set_string_value(absl::StrCat(min));
ExpectWrappedMessage(cel_value, json);
}
TEST_F(CelProtoWrapperTest, WrapUint64) {
uint32_t num = std::numeric_limits<uint32_t>::max();
auto cel_value = CelValue::CreateUint64(num);
Value json;
json.set_number_value(static_cast<double>(num));
ExpectWrappedMessage(cel_value, json);
UInt64Value wrapper;
wrapper.set_value(num);
ExpectWrappedMessage(cel_value, wrapper);
Any any;
any.PackFrom(wrapper);
ExpectWrappedMessage(cel_value, any);
}
TEST_F(CelProtoWrapperTest, WrapUint64ToUint32Value) {
uint32_t num = std::numeric_limits<uint32_t>::max();
auto cel_value = CelValue::CreateUint64(num);
UInt32Value wrapper;
wrapper.set_value(num);
ExpectWrappedMessage(cel_value, wrapper);
}
TEST_F(CelProtoWrapperTest, WrapUint64ToValue) {
uint64_t num = std::numeric_limits<uint64_t>::max();
auto cel_value = CelValue::CreateUint64(num);
Value json;
json.set_string_value(absl::StrCat(num));
ExpectWrappedMessage(cel_value, json);
}
TEST_F(CelProtoWrapperTest, WrapFailureUint64ToUint32Value) {
uint64_t num = std::numeric_limits<uint64_t>::max();
auto cel_value = CelValue::CreateUint64(num);
UInt32Value wrapper;
ExpectNotWrapped(cel_value, wrapper);
}
TEST_F(CelProtoWrapperTest, WrapString) {
std::string str = "test";
auto cel_value = CelValue::CreateString(CelValue::StringHolder(&str));
Value json;
json.set_string_value(str);
ExpectWrappedMessage(cel_value, json);
StringValue wrapper;
wrapper.set_value(str);
ExpectWrappedMessage(cel_value, wrapper);
Any any;
any.PackFrom(wrapper);
ExpectWrappedMessage(cel_value, any);
}
TEST_F(CelProtoWrapperTest, WrapTimestamp) {
absl::Time ts = absl::FromUnixSeconds(1615852799);
auto cel_value = CelValue::CreateTimestamp(ts);
Timestamp t;
t.set_seconds(1615852799);
ExpectWrappedMessage(cel_value, t);
Any any;
any.PackFrom(t);
ExpectWrappedMessage(cel_value, any);
}
TEST_F(CelProtoWrapperTest, WrapTimestampToValue) {
absl::Time ts = absl::FromUnixSeconds(1615852799);
auto cel_value = CelValue::CreateTimestamp(ts);
Value json;
json.set_string_value("2021-03-15T23:59:59Z");
ExpectWrappedMessage(cel_value, json);
}
TEST_F(CelProtoWrapperTest, WrapList) {
std::vector<CelValue> list_elems = {
CelValue::CreateDouble(1.5),
CelValue::CreateInt64(-2L),
};
ContainerBackedListImpl list(std::move(list_elems));
auto cel_value = CelValue::CreateList(&list);
Value json;
json.mutable_list_value()->add_values()->set_number_value(1.5);
json.mutable_list_value()->add_values()->set_number_value(-2.);
ExpectWrappedMessage(cel_value, json);
ExpectWrappedMessage(cel_value, json.list_value());
Any any;
any.PackFrom(json.list_value());
ExpectWrappedMessage(cel_value, any);
}
TEST_F(CelProtoWrapperTest, WrapFailureListValueBadJSON) {
TestMessage message;
std::vector<CelValue> list_elems = {
CelValue::CreateDouble(1.5),
UnwrapMessageToValue(&message, &ProtobufValueFactoryImpl, arena()),
};
ContainerBackedListImpl list(std::move(list_elems));
auto cel_value = CelValue::CreateList(&list);
Value json;
ExpectNotWrapped(cel_value, json);
}
TEST_F(CelProtoWrapperTest, WrapStruct) {
const std::string kField1 = "field1";
std::vector<std::pair<CelValue, CelValue>> args = {
{CelValue::CreateString(CelValue::StringHolder(&kField1)),
CelValue::CreateBool(true)}};
auto cel_map =
CreateContainerBackedMap(
absl::Span<std::pair<CelValue, CelValue>>(args.data(), args.size()))
.value();
auto cel_value = CelValue::CreateMap(cel_map.get());
Value json;
(*json.mutable_struct_value()->mutable_fields())[kField1].set_bool_value(
true);
ExpectWrappedMessage(cel_value, json);
ExpectWrappedMessage(cel_value, json.struct_value());
Any any;
any.PackFrom(json.struct_value());
ExpectWrappedMessage(cel_value, any);
}
TEST_F(CelProtoWrapperTest, WrapFailureStructBadKeyType) {
std::vector<std::pair<CelValue, CelValue>> args = {
{CelValue::CreateInt64(1L), CelValue::CreateBool(true)}};
auto cel_map =
CreateContainerBackedMap(
absl::Span<std::pair<CelValue, CelValue>>(args.data(), args.size()))
.value();
auto cel_value = CelValue::CreateMap(cel_map.get());
Value json;
ExpectNotWrapped(cel_value, json);
}
TEST_F(CelProtoWrapperTest, WrapFailureStructBadValueType) {
const std::string kField1 = "field1";
TestMessage bad_value;
std::vector<std::pair<CelValue, CelValue>> args = {
{CelValue::CreateString(CelValue::StringHolder(&kField1)),
UnwrapMessageToValue(&bad_value, &ProtobufValueFactoryImpl, arena())}};
auto cel_map =
CreateContainerBackedMap(
absl::Span<std::pair<CelValue, CelValue>>(args.data(), args.size()))
.value();
auto cel_value = CelValue::CreateMap(cel_map.get());
Value json;
ExpectNotWrapped(cel_value, json);
}
class TestMap : public CelMapBuilder {
public:
absl::StatusOr<const CelList*> ListKeys() const override {
return absl::UnimplementedError("test");
}
};
TEST_F(CelProtoWrapperTest, WrapFailureStructListKeysUnimplemented) {
const std::string kField1 = "field1";
TestMap map;
ASSERT_OK(map.Add(CelValue::CreateString(CelValue::StringHolder(&kField1)),
CelValue::CreateString(CelValue::StringHolder(&kField1))));
auto cel_value = CelValue::CreateMap(&map);
Value json;
ExpectNotWrapped(cel_value, json);
}
TEST_F(CelProtoWrapperTest, WrapFailureWrongType) {
auto cel_value = CelValue::CreateNull();
std::vector<const google::protobuf::Message*> wrong_types = {
&BoolValue::default_instance(), &BytesValue::default_instance(),
&DoubleValue::default_instance(), &Duration::default_instance(),
&FloatValue::default_instance(), &Int32Value::default_instance(),
&Int64Value::default_instance(), &ListValue::default_instance(),
&StringValue::default_instance(), &Struct::default_instance(),
&Timestamp::default_instance(), &UInt32Value::default_instance(),
&UInt64Value::default_instance(),
};
for (const auto* wrong_type : wrong_types) {
ExpectNotWrapped(cel_value, *wrong_type);
}
}
TEST_F(CelProtoWrapperTest, WrapFailureErrorToAny) {
auto cel_value = CreateNoSuchFieldError(arena(), "error_field");
ExpectNotWrapped(cel_value, Any::default_instance());
}
TEST_F(CelProtoWrapperTest, DebugString) {
google::protobuf::Empty e;
EXPECT_EQ(UnwrapMessageToValue(&e, &ProtobufValueFactoryImpl, arena())
.DebugString(),
"Message: opaque");
ListValue list_value;
list_value.add_values()->set_bool_value(true);
list_value.add_values()->set_number_value(1.0);
list_value.add_values()->set_string_value("test");
CelValue value =
UnwrapMessageToValue(&list_value, &ProtobufValueFactoryImpl, arena());
EXPECT_EQ(value.DebugString(),
"CelList: [bool: 1, double: 1.000000, string: test]");
Struct value_struct;
auto& value1 = (*value_struct.mutable_fields())["a"];
value1.set_bool_value(true);
auto& value2 = (*value_struct.mutable_fields())["b"];
value2.set_number_value(1.0);
auto& value3 = (*value_struct.mutable_fields())["c"];
value3.set_string_value("test");
value =
UnwrapMessageToValue(&value_struct, &ProtobufValueFactoryImpl, arena());
EXPECT_THAT(
value.DebugString(),
testing::AllOf(testing::StartsWith("CelMap: {"),
testing::HasSubstr("<string: a>: <bool: 1>"),
testing::HasSubstr("<string: b>: <double: 1.0"),
testing::HasSubstr("<string: c>: <string: test>")));
}
}
} | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/eval/public/structs/cel_proto_wrap_util.cc | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/eval/public/structs/cel_proto_wrap_util_test.cc | 4552db5798fb0853b131b783d8875794334fae7f |
c8c785cb-0a1c-4856-9184-3546aad84b13 | cpp | google/arolla | switch_index | arolla/util/switch_index.h | arolla/util/switch_index_test.cc | #ifndef AROLLA_UTIL_SWITCH_INDEX_H_
#define AROLLA_UTIL_SWITCH_INDEX_H_
#include <cassert>
#include <type_traits>
#include <utility>
#include "absl/base/attributes.h"
namespace arolla {
#define CASE_1(k) \
case (k): \
return std::forward<Callback>(callback)(std::integral_constant<int, (k)>())
#define CASE_4(k) \
CASE_1(k); \
CASE_1(k + 1); \
CASE_1(k + 2); \
CASE_1(k + 3)
#define CASE_16(k) \
CASE_4(k); \
CASE_4(k + 4); \
CASE_4(k + 8); \
CASE_4(k + 12)
template <typename Callback>
auto switch_index_32(int n, Callback&& callback) {
assert(0 <= n && n < 32);
switch (n) {
CASE_16(0);
CASE_16(16);
}
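  // Every valid n (0..31) returns from the switch above; this trailing return
  // satisfies the compiler's return-path analysis and gives out-of-range n a
  // defined result in release builds.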
return std::forward<Callback>(callback)(std::integral_constant<int, 31>());
}
template <typename Callback>
auto switch_index_64(int n, Callback&& callback) {
assert(0 <= n && n < 64);
switch (n) {
CASE_16(0);
CASE_16(16);
CASE_16(32);
CASE_16(48);
}
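  // As in switch_index_32, this is unreachable for valid n.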
return std::forward<Callback>(callback)(std::integral_constant<int, 63>());
}
template <int N, typename Callback>
inline ABSL_ATTRIBUTE_ALWAYS_INLINE auto switch_index(int n,
Callback&& callback) {
static_assert(N == 32 || N == 64);
if constexpr (N == 32) {
return switch_index_32(n, std::forward<Callback>(callback));
} else {
return switch_index_64(n, std::forward<Callback>(callback));
}
}
#undef CASE_16
#undef CASE_4
#undef CASE_1
}
#endif | #include "arolla/util/switch_index.h"
#include <string>
#include "gtest/gtest.h"
namespace arolla::testing {
namespace {
template <int N>
void test_switch_index() {
for (int i = 0; i < N; ++i) {
EXPECT_EQ(std::to_string(i), switch_index<N>(i, [i](auto arg) {
constexpr int constexpr_i = arg();
EXPECT_EQ(i, constexpr_i);
return std::to_string(constexpr_i);
}));
}
}
TEST(SwitchIndex, switch_index_32) { test_switch_index<32>(); }
TEST(SwitchIndex, switch_index_64) { test_switch_index<64>(); }
}
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/util/switch_index.h | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/util/switch_index_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
2f2dd5f2-a8cd-4856-b362-5cee75f05ca3 | cpp | tensorflow/tensorflow | message | tensorflow/lite/testing/message.cc | tensorflow/lite/testing/message_test.cc | #include "tensorflow/lite/testing/message.h"
#include <stack>
#include <string>
#include "tensorflow/lite/testing/tokenize.h"
namespace tflite {
namespace testing {
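// Builds a Message tree from the token stream produced by Tokenize(). The
// grammar is simple: `name {` opens a child message, a `key : value` pair
// sets a field, and `}` closes the current message.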
class MessageStack : public TokenProcessor {
public:
explicit MessageStack(Message* first_node) {
nodes_.push(first_node);
valid_ = true;
}
void ConsumeToken(std::string* token) override {
if (!valid_) return;
Message* current_node = nodes_.top();
if (*token == "{") {
if (previous_token_.empty()) {
valid_ = false;
return;
}
nodes_.push(current_node ? current_node->AddChild(previous_token_)
: nullptr);
previous_token_.clear();
} else if (*token == "}") {
if (nodes_.size() == 1 || !previous_token_.empty()) {
valid_ = false;
return;
}
if (current_node) {
current_node->Finish();
}
nodes_.pop();
} else if (*token == ":") {
if (previous_token_.empty()) {
valid_ = false;
return;
}
} else {
if (previous_token_.empty()) {
previous_token_.swap(*token);
} else {
if (current_node) {
current_node->SetField(previous_token_, *token);
}
previous_token_.clear();
}
}
}
bool valid() const { return valid_; }
private:
std::stack<Message*> nodes_;
std::string previous_token_;
bool valid_;
};
bool Message::Read(std::istream* input, Message* message) {
MessageStack stack(message);
Tokenize(input, &stack);
return stack.valid();
}
}
} | #include "tensorflow/lite/testing/message.h"
#include <map>
#include <string>
#include <gtest/gtest.h>
namespace tflite {
namespace testing {
namespace {
class TestMessage : public Message {
public:
TestMessage() {}
explicit TestMessage(const std::string& text_to_parse) {
std::stringstream ss(text_to_parse);
finished_ = Message::Read(&ss, this);
}
void SetField(const std::string& name, const std::string& value) override {
fields_[name] = value;
}
Message* AddChild(const std::string& name) override {
TestMessage* m = new TestMessage;
m->name_ = name;
return Store(m);
}
void Finish() override { finished_ = true; }
int NumChildren() const { return Children().size(); }
const TestMessage* GetChild(int i) const {
return dynamic_cast<TestMessage*>(Children()[i].get());
}
int NumFields() const { return fields_.size(); }
const std::string& GetField(const std::string& key) const {
return fields_.at(key);
}
const std::string& name() const { return name_; }
bool finished() const { return finished_; }
protected:
std::string name_;
std::map<std::string, std::string> fields_;
bool finished_ = false;
};
TEST(MessageTest, Simple) {
TestMessage message("x{a:1 b:2} y{} z{c:3} d:4");
ASSERT_TRUE(message.finished());
ASSERT_EQ(message.NumFields(), 1);
EXPECT_EQ(message.GetField("d"), "4");
ASSERT_EQ(message.NumChildren(), 3);
auto* x = message.GetChild(0);
EXPECT_EQ(x->name(), "x");
ASSERT_EQ(x->NumFields(), 2);
EXPECT_EQ(x->GetField("a"), "1");
EXPECT_EQ(x->GetField("b"), "2");
auto* y = message.GetChild(1);
EXPECT_EQ(y->name(), "y");
ASSERT_EQ(y->NumFields(), 0);
auto* z = message.GetChild(2);
EXPECT_EQ(z->name(), "z");
ASSERT_EQ(z->NumFields(), 1);
EXPECT_EQ(z->GetField("c"), "3");
}
TEST(MessageTest, Unnamed) {
TestMessage message("x{c:3} {} y{d:4}");
ASSERT_FALSE(message.finished());
EXPECT_EQ(message.NumChildren(), 1);
}
TEST(MessageTest, TooManyBraces) {
TestMessage message("x{c:3} } y{d:4}");
ASSERT_FALSE(message.finished());
EXPECT_EQ(message.NumChildren(), 1);
}
TEST(MessageTest, LeftoverToken) {
TestMessage message("x{c:3} z{test} y{d:4}");
ASSERT_FALSE(message.finished());
EXPECT_EQ(message.NumChildren(), 2);
}
TEST(MessageTest, MissingKey) {
TestMessage message("x{c:3} z{:test} y{d:4}");
ASSERT_FALSE(message.finished());
EXPECT_EQ(message.NumChildren(), 2);
}
TEST(MessageTest, MissingValue) {
TestMessage message("x{c:3} z{test:} y{d:4}");
ASSERT_FALSE(message.finished());
EXPECT_EQ(message.NumChildren(), 2);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/testing/message.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/testing/message_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
75518bb5-a390-42b6-b985-bea3835768f4 | cpp | google/cel-cpp | comprehension_slots | eval/eval/comprehension_slots.h | eval/eval/comprehension_slots_test.cc | #ifndef THIRD_PARTY_CEL_CPP_EVAL_EVAL_COMPREHENSION_SLOTS_H_
#define THIRD_PARTY_CEL_CPP_EVAL_EVAL_COMPREHENSION_SLOTS_H_
#include <cstddef>
#include <utility>
#include <vector>
#include "absl/base/no_destructor.h"
#include "absl/log/absl_check.h"
#include "absl/types/optional.h"
#include "common/value.h"
#include "eval/eval/attribute_trail.h"
namespace google::api::expr::runtime {
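// Storage for comprehension loop variables during evaluation. Each slot
// tracks the bound value together with its attribute trail; slots that have
// not been set read back as nullptr from Get().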
class ComprehensionSlots {
public:
struct Slot {
cel::Value value;
AttributeTrail attribute;
};
static ComprehensionSlots& GetEmptyInstance() {
static absl::NoDestructor<ComprehensionSlots> instance(0);
return *instance;
}
explicit ComprehensionSlots(size_t size) : size_(size), slots_(size) {}
ComprehensionSlots(const ComprehensionSlots&) = delete;
ComprehensionSlots& operator=(const ComprehensionSlots&) = delete;
ComprehensionSlots(ComprehensionSlots&&) = default;
ComprehensionSlots& operator=(ComprehensionSlots&&) = default;
Slot* Get(size_t index) {
ABSL_DCHECK_LT(index, slots_.size());
auto& slot = slots_[index];
if (!slot.has_value()) return nullptr;
return &slot.value();
}
void Reset() {
slots_.clear();
slots_.resize(size_);
}
void ClearSlot(size_t index) {
ABSL_DCHECK_LT(index, slots_.size());
slots_[index] = absl::nullopt;
}
void Set(size_t index) {
ABSL_DCHECK_LT(index, slots_.size());
slots_[index].emplace();
}
void Set(size_t index, cel::Value value) {
Set(index, std::move(value), AttributeTrail());
}
void Set(size_t index, cel::Value value, AttributeTrail attribute) {
ABSL_DCHECK_LT(index, slots_.size());
slots_[index] = Slot{std::move(value), std::move(attribute)};
}
size_t size() const { return slots_.size(); }
private:
size_t size_;
std::vector<absl::optional<Slot>> slots_;
};
}
#endif | #include "eval/eval/comprehension_slots.h"
#include "base/attribute.h"
#include "base/type_provider.h"
#include "common/memory.h"
#include "common/type.h"
#include "common/type_factory.h"
#include "common/type_manager.h"
#include "common/value.h"
#include "common/value_manager.h"
#include "common/values/legacy_value_manager.h"
#include "eval/eval/attribute_trail.h"
#include "internal/testing.h"
namespace google::api::expr::runtime {
using ::cel::Attribute;
using ::absl_testing::IsOkAndHolds;
using ::cel::MemoryManagerRef;
using ::cel::StringValue;
using ::cel::TypeFactory;
using ::cel::TypeManager;
using ::cel::TypeProvider;
using ::cel::Value;
using ::cel::ValueManager;
using ::testing::Truly;
TEST(ComprehensionSlots, Basic) {
cel::common_internal::LegacyValueManager factory(
MemoryManagerRef::ReferenceCounting(), TypeProvider::Builtin());
ComprehensionSlots slots(4);
ComprehensionSlots::Slot* unset = slots.Get(0);
EXPECT_EQ(unset, nullptr);
slots.Set(0, factory.CreateUncheckedStringValue("abcd"),
AttributeTrail(Attribute("fake_attr")));
auto* slot0 = slots.Get(0);
ASSERT_TRUE(slot0 != nullptr);
EXPECT_THAT(slot0->value, Truly([](const Value& v) {
return v.Is<StringValue>() &&
v.GetString().ToString() == "abcd";
}))
<< "value is 'abcd'";
EXPECT_THAT(slot0->attribute.attribute().AsString(),
IsOkAndHolds("fake_attr"));
slots.ClearSlot(0);
EXPECT_EQ(slots.Get(0), nullptr);
slots.Set(3, factory.CreateUncheckedStringValue("abcd"),
AttributeTrail(Attribute("fake_attr")));
auto* slot3 = slots.Get(3);
ASSERT_TRUE(slot3 != nullptr);
EXPECT_THAT(slot3->value, Truly([](const Value& v) {
return v.Is<StringValue>() &&
v.GetString().ToString() == "abcd";
}))
<< "value is 'abcd'";
slots.Reset();
slot0 = slots.Get(0);
EXPECT_TRUE(slot0 == nullptr);
slot3 = slots.Get(3);
EXPECT_TRUE(slot3 == nullptr);
}
} | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/eval/eval/comprehension_slots.h | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/eval/eval/comprehension_slots_test.cc | 4552db5798fb0853b131b783d8875794334fae7f |
8b3d1a79-9e0e-4afa-b82c-826ddee12d52 | cpp | tensorflow/tensorflow | xla_launch_util | tensorflow/compiler/jit/xla_launch_util.cc | tensorflow/compiler/jit/xla_launch_util_test.cc | #include "tensorflow/compiler/jit/xla_launch_util.h"
#include <cstdint>
#include <memory>
#include <optional>
#include <set>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/cleanup/cleanup.h"
#include "absl/container/flat_hash_set.h"
#include "absl/status/status.h"
#include "absl/types/span.h"
#include "tensorflow/compiler/jit/pjrt_tensor_buffer.h"
#include "tensorflow/compiler/jit/pjrt_tensor_buffer_util.h"
#include "tensorflow/compiler/jit/variable_info.h"
#include "tensorflow/compiler/jit/variable_info_util.h"
#include "tensorflow/compiler/tf2xla/const_analysis.h"
#include "tensorflow/compiler/tf2xla/shape_util.h"
#include "tensorflow/compiler/tf2xla/xla_compiler.h"
#include "xla/client/local_client.h"
#include "xla/pjrt/pjrt_client.h"
#include "xla/pjrt/pjrt_common.h"
#include "xla/pjrt/pjrt_future.h"
#include "xla/pjrt/pjrt_stream_executor_client.h"
#include "xla/pjrt/tracked_device_buffer.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/stream_executor/platform_manager.h"
#include "xla/tsl/framework/device_id_utils.h"
#include "xla/tsl/framework/serving_device_selector_policies.h"
#include "tensorflow/core/common_runtime/dma_helper.h"
#include "tensorflow/core/common_runtime/gpu/gpu_serving_device_selector.h"
#include "tensorflow/core/common_runtime/gpu_device_context.h"
#include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/resource_mgr.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/refcount.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/statusor.h"
#include "tensorflow/core/tfrt/common/async_value_tensor.h"
#include "tensorflow/core/util/stream_executor_util.h"
#include "tsl/platform/status.h"
#include "tsl/platform/statusor.h"
namespace tensorflow {
namespace {
using xla::ScopedShapedBuffer;
using xla::ShapedBuffer;
se::Platform::Id XlaPlatformInfoFromDevice(DeviceBase* device_base) {
auto device = static_cast<Device*>(device_base);
se::Platform::Id platform_id = nullptr;
if (device->device_type() == DEVICE_CPU) {
platform_id = se::host::kHostPlatformId;
}
return platform_id;
}
absl::flat_hash_map<int, int> CreateVariableLookup(
const std::vector<VariableInfo>& variables) {
absl::flat_hash_map<int, int> variable_lookup;
for (int i = 0; i < variables.size(); i++) {
variable_lookup[variables[i].index()] = i;
}
return variable_lookup;
}
}
std::vector<const Tensor*> InputsFromContext(OpKernelContext* ctx) {
std::vector<const Tensor*> inputs;
inputs.reserve(ctx->num_inputs());
for (int input_idx = 0; input_idx < ctx->num_inputs(); input_idx++) {
inputs.push_back(&ctx->input(input_idx));
}
return inputs;
}
absl::StatusOr<std::vector<int>> GetConstantInputIndicesFromContext(
OpKernelContext* ctx) {
std::vector<int> constant_input_indices;
TF_RETURN_IF_ERROR(GetCompileTimeConstInputs(
&ctx->op_kernel(), &constant_input_indices, ctx->function_library()));
if (!absl::c_all_of(constant_input_indices, [&](int idx) {
return ctx->input_memory_type(idx) == HOST_MEMORY;
})) {
return errors::Internal("Unexpected device placement for a constant input");
}
return constant_input_indices;
}
XlaComputationLaunchContext::XlaComputationLaunchContext(
xla::LocalClient* client, se::DeviceMemoryAllocator* xla_allocator,
int device_ordinal, bool allocate_xla_tensors, bool use_multiple_streams)
: client_(client),
xla_allocator_(xla_allocator),
allocate_xla_tensors_(allocate_xla_tensors),
use_multiple_streams_(use_multiple_streams),
device_ordinal_(device_ordinal) {
if (use_multiple_streams_) {
CHECK(allocate_xla_tensors_) << "To use multiple streams correctly we must "
"be allocating XLA tensors!";
}
}
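// Installs `buffer` at `index` of the execution input, transferring ownership
// to XLA when the buffer is being donated.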
static void PopulateExecutionInputBuffer(xla::ExecutionInput& execution_input,
xla::ShapeIndex index,
se::DeviceMemoryBase buffer,
bool donate_buffer, int device_ordinal,
se::DeviceMemoryAllocator* allocator) {
xla::MaybeOwningDeviceMemory* in_buffer =
execution_input.MutableBuffer(index);
if (donate_buffer) {
*in_buffer = se::OwningDeviceMemory(buffer, device_ordinal, allocator);
} else {
*in_buffer = buffer;
}
}
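// Builds the xla::ExecutionInput list from kernel inputs and resource
// variable snapshots, donating input buffers to XLA where it is safe to do so.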
absl::StatusOr<std::vector<xla::ExecutionInput>>
XlaComputationLaunchContext::PopulateInputs(
OpKernelContext* ctx,
const XlaCompiler::CompilationResult* compilation_result,
const std::map<int, const Tensor*>& resource_vars,
int missing_ctx_input_prefix,
const xla::HloInputOutputAliasConfig& input_output_alias) {
std::vector<xla::ExecutionInput> arguments;
arguments.reserve(compilation_result->xla_input_shapes.size());
for (int i = 0; i < compilation_result->xla_input_shapes.size(); ++i) {
int arg_num = compilation_result->input_mapping[i];
CHECK_GE(arg_num, missing_ctx_input_prefix);
const xla::Shape& device_shape = compilation_result->xla_input_shapes[i];
const xla::Shape& host_shape =
xla::ShapeUtil::DeviceShapeToHostShape(device_shape);
auto resource_var_it = resource_vars.find(arg_num);
bool is_resource_variable = resource_var_it != resource_vars.end();
bool is_updated_resource_variable =
is_resource_variable &&
absl::c_any_of(compilation_result->resource_updates,
[&](const XlaCompiler::ResourceUpdate& update) {
return update.input_index == arg_num &&
update.modified;
});
const Tensor* t = is_resource_variable
? resource_var_it->second
: &(ctx->input(arg_num - missing_ctx_input_prefix));
CHECK(t);
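    // A buffer may be donated only when this kernel holds the sole reference,
    // the computation updates the variable, and the compiler recorded an
    // input/output alias for the parameter.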
bool donate_buffer =
t->RefCountIsOne() && is_updated_resource_variable &&
input_output_alias.ParameterHasAlias(i, xla::ShapeIndex{});
VLOG(3) << "Processing input: " << i
<< "; is_resource_variable=" << is_resource_variable
<< "; is_updated_resource_variable=" << is_updated_resource_variable
<< "; donate_buffer=" << donate_buffer;
if (use_multiple_streams_) {
CHECK(ctx->op_device_context() && ctx->op_device_context()->stream())
<< "Must have a stream available when using XLA tensors!";
XlaTensor* xla_tensor = XlaTensor::FromTensor(t);
CHECK(xla_tensor);
xla_tensor->WaitForDefinitionEventOnStream(
ctx->op_device_context()->stream());
}
arguments.emplace_back(device_shape, host_shape);
xla::ExecutionInput& execution_input = arguments.back();
se::DeviceMemoryBase dmem = XlaTensor::DeviceMemoryFromTensor(*t);
PopulateExecutionInputBuffer(execution_input, xla::ShapeIndex{}, dmem,
donate_buffer, device_ordinal_,
xla_allocator_);
}
return std::move(arguments);
}
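// Wraps existing device memory in a Tensor without copying the underlying
// buffer.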
static Tensor MakeTensor(DataType dtype, const TensorShape& shape,
se::DeviceMemoryBase buffer, Allocator* allocator) {
size_t expected_size = shape.num_elements() * DataTypeSize(dtype);
auto* tensor_buffer = new XlaTensorBuffer(buffer.opaque(), expected_size,
buffer.size(), allocator);
Tensor t(dtype, shape, tensor_buffer);
tensor_buffer->Unref();
return t;
}
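// Produces the output Tensor for `output_num`: reuses an aliased input tensor
// when the buffers match, allocates an XlaTensor when requested, or otherwise
// adopts the device buffer directly.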
static absl::StatusOr<Tensor> GetOrCreateTensorForOutput(
xla::ScopedShapedBuffer& output, int output_num, OpKernelContext* ctx,
int missing_ctx_input_prefix,
const xla::HloInputOutputAliasConfig& input_output_alias,
absl::Span<const int> input_mapping,
const std::map<int, const Tensor*>& resource_vars_snapshots,
DataType output_dtype, const TensorShape& output_shape,
Allocator* output_allocator, bool allocate_xla_tensors, se::Stream* stream,
bool use_multiple_streams, std::shared_ptr<se::Event> definition_event) {
xla::ShapeIndex output_index = input_output_alias.shape().IsTuple()
? xla::ShapeIndex({output_num})
: xla::ShapeIndex({});
CHECK(input_output_alias.shape().IsTuple() || output_num == 0);
if (std::optional<xla::HloInputOutputAliasConfig::Alias> alias =
input_output_alias.GetAliasedParameter(output_index)) {
VLOG(3) << "Found alias: " << alias->ToString();
int tf_param =
input_mapping[alias->parameter_number] - missing_ctx_input_prefix;
const Tensor input_tensor =
ctx->input(tf_param).dtype() != DT_RESOURCE
? ctx->input(tf_param)
: *resource_vars_snapshots.at(missing_ctx_input_prefix + tf_param);
se::DeviceMemoryBase input_buffer =
XlaTensor::DeviceMemoryFromTensor(input_tensor);
se::DeviceMemoryBase output_buffer = output.buffer({output_num});
if (input_buffer.opaque() == output_buffer.opaque()) {
output.set_buffer(se::OwningDeviceMemory(), {output_num});
return input_tensor;
}
}
if (allocate_xla_tensors) {
Tensor output_tensor;
TF_RETURN_IF_ERROR(
ctx->allocate_temp(output_dtype, output_shape, &output_tensor));
if (output_tensor.TotalBytes() > 0) {
XlaTensor* xla_tensor = XlaTensor::FromTensor(&output_tensor);
TF_RET_CHECK(xla_tensor);
xla_tensor->set_shaped_buffer(output.TakeSubTree({output_num}));
if (use_multiple_streams) {
xla_tensor->ResetDefinitionEvent(definition_event, stream);
}
}
return output_tensor;
}
se::DeviceMemoryBase output_buffer = output.buffer({output_num});
Tensor output_tensor =
MakeTensor(output_dtype, output_shape, output_buffer, output_allocator);
output.set_buffer(se::OwningDeviceMemory(), {output_num});
return output_tensor;
}
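// Publishes a compile-time constant as kernel output `output_num`, copying it
// to the device when the output is expected to live there.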
Status SetOutputForConstant(
OpKernelContext* ctx, bool requires_copy_to_device,
const XlaCompiler::CompilationResult* compilation_result, int output_num) {
CHECK(compilation_result->outputs[output_num].is_constant);
const Tensor& const_tensor =
compilation_result->outputs[output_num].constant_value;
Tensor* output_tensor;
if (requires_copy_to_device && const_tensor.TotalBytes() > 0) {
VLOG(1) << "Constant output tensor on device";
TF_RETURN_IF_ERROR(
ctx->allocate_output(output_num, const_tensor.shape(), &output_tensor));
Device* device = dynamic_cast<Device*>(ctx->device());
if (device == nullptr) {
return errors::Internal("DeviceBase was not a Device.");
}
ctx->op_device_context()->CopyCPUTensorToDevice(
&const_tensor, device, output_tensor,
[&](Status status) { TF_CHECK_OK(status); });
if (device->device_type() == DEVICE_GPU) {
auto* gpu_device_context =
static_cast<GPUDeviceContext*>(ctx->op_device_context());
TF_RETURN_IF_ERROR(gpu_device_context->stream()->WaitFor(
gpu_device_context->host_to_device_stream()));
}
} else {
ctx->set_output(output_num, const_tensor);
output_tensor = ctx->mutable_output(output_num);
}
return absl::OkStatus();
}
static absl::StatusOr<Var*> GetOrCreateResourceVar(
OpKernelContext* ctx, const ResourceHandle& handle,
const XlaCompiler::ResourceUpdate& write) {
Var* variable = nullptr;
TF_RETURN_IF_ERROR(
LookupOrCreateResource<Var>(ctx, handle, &variable, [&write](Var** ptr) {
*ptr = new Var(write.type);
return absl::OkStatus();
}));
return variable;
}
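// Collects VariableInfo entries (creating resource variables on demand) for
// every resource update produced by the compiled computation.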
absl::StatusOr<std::vector<VariableInfo>> GatherVariableInfo(
OpKernelContext* ctx,
const XlaCompiler::CompilationResult& compilation_result,
int missing_ctx_input_prefix) {
std::vector<VariableInfo> out;
out.reserve(compilation_result.resource_updates.size());
for (int i = 0; i < compilation_result.resource_updates.size(); ++i) {
const XlaCompiler::ResourceUpdate& write =
compilation_result.resource_updates[i];
int actual_input_index = write.input_index - missing_ctx_input_prefix;
if (actual_input_index < 0 || actual_input_index >= ctx->num_inputs()) {
return errors::Internal("Invalid input index for variable write.");
}
const ResourceHandle handle = HandleFromInput(ctx, actual_input_index);
TF_ASSIGN_OR_RETURN(Var * variable,
GetOrCreateResourceVar(ctx, handle, write));
out.emplace_back(actual_input_index, handle.name(), variable,
handle.definition_stack_trace());
}
return std::move(out);
}
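// Copies or aliases the computation's outputs into the kernel context and
// applies resource variable updates.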
Status XlaComputationLaunchContext::PopulateOutputs(
OpKernelContext* ctx,
const XlaCompiler::CompilationResult* compilation_result,
ScopedShapedBuffer output, int missing_ctx_input_prefix,
absl::Span<VariableInfo> variable_infos,
const xla::HloInputOutputAliasConfig& input_output_alias,
const std::map<int, const Tensor*>& resource_vars) {
se::Stream* stream =
ctx->op_device_context() ? ctx->op_device_context()->stream() : nullptr;
Allocator* allocator = ctx->device()->GetAllocator({});
VLOG(2) << "Result tuple shape: " << output.on_host_shape().DebugString();
VLOG(2) << "Result tuple shape (on device): "
<< output.on_device_shape().DebugString();
CHECK_EQ(ctx->num_outputs(), compilation_result->outputs.size());
if (!output.on_host_shape().IsTuple()) {
ShapedBuffer nontuple_buffer = output.release();
ShapedBuffer buffer(
xla::ShapeUtil::MakeTupleShape({nontuple_buffer.on_host_shape()}),
xla::ShapeUtil::MakeTupleShape({nontuple_buffer.on_device_shape()}),
output.device_ordinal());
buffer.buffers().CopySubtreeFrom(nontuple_buffer.buffers(),
                                     /*source_base_index=*/{},
                                     /*target_base_index=*/{0});
output = ScopedShapedBuffer(std::move(buffer), output.memory_allocator());
}
std::shared_ptr<se::Event> definition_event;
if (use_multiple_streams_ && stream) {
TF_ASSIGN_OR_RETURN(definition_event, stream->parent()->CreateEvent());
TF_RETURN_IF_ERROR(stream->RecordEvent(definition_event.get()));
}
for (const XlaOutputDescription& descr : compilation_result->outputs) {
if (descr.type == DT_VARIANT) {
return errors::Unimplemented(
"Support for TensorList crossing the XLA/TF boundary "
"is not implemented");
}
}
std::vector<TensorShape> output_tensor_shapes;
output_tensor_shapes.reserve(ctx->num_outputs());
if (output.on_host_shape().is_dynamic()) {
const se::Platform* platform = nullptr;
if (stream != nullptr) {
platform = stream->parent()->GetPlatform();
} else {
TF_ASSIGN_OR_RETURN(platform,
se::PlatformManager::PlatformWithId(
XlaPlatformInfoFromDevice(ctx->device())));
}
TF_ASSIGN_OR_RETURN(auto transfer_manager,
xla::TransferManager::GetForPlatform(platform));
xla::Shape output_device_shape = output.on_device_shape();
TF_RETURN_IF_ERROR(transfer_manager->ReadDynamicShapes(
stream, &output, &output_device_shape));
output.set_shapes(output_device_shape, output_device_shape);
for (int i = 0; i < ctx->num_outputs(); ++i) {
const xla::Shape& subshape =
xla::ShapeUtil::GetSubshape(output_device_shape, {i});
TensorShape shape;
TF_RETURN_IF_ERROR(XLAShapeToTensorShape(subshape, &shape));
output_tensor_shapes.push_back(shape);
}
} else {
for (int i = 0; i < ctx->num_outputs(); ++i) {
output_tensor_shapes.push_back(compilation_result->outputs[i].shape);
}
}
int output_num = 0;
for (int i = 0, end = ctx->num_outputs(); i < end; ++i) {
const TensorShape& shape = output_tensor_shapes[i];
const DataType& type = compilation_result->outputs[i].type;
VLOG(2) << "Populating output for retval " << i << " shape "
<< shape.DebugString() << " type " << DataTypeString(type);
if (compilation_result->outputs[i].is_constant) {
TF_RETURN_IF_ERROR(SetOutputForConstant(
          ctx, /*requires_copy_to_device=*/stream != nullptr,
compilation_result, i));
} else if (type == DT_RESOURCE) {
int input_index =
compilation_result->outputs[i].input_index - missing_ctx_input_prefix;
TF_RET_CHECK(input_index >= 0 && input_index < ctx->num_inputs())
<< "Invalid input for outputs " << i << ": " << input_index;
ctx->set_output(i, ctx->input(input_index));
} else {
TF_ASSIGN_OR_RETURN(
Tensor output_tensor,
GetOrCreateTensorForOutput(
output, output_num, ctx, missing_ctx_input_prefix,
input_output_alias, compilation_result->input_mapping,
resource_vars, ctx->expected_output_dtype(i), shape, allocator,
allocate_xla_tensors_, stream, use_multiple_streams_,
definition_event));
ctx->set_output(i, output_tensor);
++output_num;
}
}
absl::flat_hash_map<int, int> variable_info_lookup;
for (int i = 0; i < variable_infos.size(); i++) {
variable_info_lookup.emplace(variable_infos[i].index(), i);
}
for (int i = 0, end = compilation_result->resource_updates.size(); i < end;
++i) {
const XlaCompiler::ResourceUpdate& write =
compilation_result->resource_updates[i];
int actual_input_index = write.input_index - missing_ctx_input_prefix;
CHECK_GE(actual_input_index, 0);
CHECK_LT(actual_input_index, ctx->num_inputs());
Var* var = variable_infos[variable_info_lookup[actual_input_index]].var();
CHECK(var);
VLOG(2) << "Updating variable #" << i
<< " at input index: " << actual_input_index << " with shape "
<< write.shape.DebugString() << "; variable tensor has shape: "
<< var->tensor()->shape().DebugString();
if (var->is_initialized && var->tensor()->dtype() != write.type) {
return errors::Internal("Mismatched type in variable write");
}
TF_ASSIGN_OR_RETURN(
Tensor output_tensor,
GetOrCreateTensorForOutput(output, output_num, ctx,
missing_ctx_input_prefix, input_output_alias,
compilation_result->input_mapping,
resource_vars, write.type, write.shape,
allocator, allocate_xla_tensors_, stream,
use_multiple_streams_, definition_event));
var->is_initialized |= write.modified;
*var->tensor() = output_tensor;
++output_num;
}
return absl::OkStatus();
}
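// Translates kernel inputs and resource variables into XlaCompiler::Argument
// descriptors, materializing compile-time constants on the host where
// required.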
absl::StatusOr<std::vector<XlaCompiler::Argument>>
XlaComputationLaunchContext::BuildXlaCompilerArguments(
absl::Span<int const> must_be_constant_idxs,
absl::Span<const Tensor* const> inputs,
absl::Span<VariableInfo const> variable_args, Device* device) {
if (!must_be_constant_idxs.empty() &&
!absl::c_is_sorted(must_be_constant_idxs)) {
return absl::InvalidArgumentError("must_be_constant_idxs is not sorted");
}
VLOG(2) << "Must be const args: {"
<< absl::StrJoin(must_be_constant_idxs, ",") << "} out of "
<< inputs.size() << " args";
std::vector<XlaCompiler::Argument> out;
out.resize(inputs.size());
DeviceContext* device_context = nullptr;
if (device != nullptr) {
TF_RETURN_IF_ERROR(device->TryGetDeviceContext(&device_context));
bool using_default_context = false;
auto cleanup = absl::MakeCleanup([&] {
if (device_context != nullptr && !using_default_context) {
device_context->Unref();
}
});
if (device_context == nullptr) {
using_default_context = true;
auto* dev_info = device->tensorflow_accelerator_device_info();
if (dev_info) device_context = dev_info->default_context;
}
}
absl::flat_hash_map<int, const VariableInfo*> variable_info_lookup;
TF_CHECK_OK(CreateVariableInfoLookup(variable_args, variable_info_lookup));
for (int64_t input_num = 0; input_num < inputs.size(); ++input_num) {
const Tensor* input = inputs[input_num];
XlaCompiler::Argument& arg = out[input_num];
if (variable_info_lookup.count(input_num) && device != nullptr) {
TF_RET_CHECK(input->dtype() == DT_RESOURCE);
const VariableInfo& variable = *variable_info_lookup[input_num];
arg.name = std::string(variable.name());
arg.kind = XlaCompiler::Argument::kResource;
arg.resource_kind = XlaResource::kVariable;
arg.definition_stack_trace = variable.definition_stack_trace();
if (variable.var() && variable.var()->is_initialized) {
const Tensor* value = variable.var()->tensor();
arg.type = value->dtype();
arg.shape = value->shape();
arg.initialized = true;
} else {
arg.initialized = false;
arg.type = DT_INVALID;
arg.shape = TensorShape();
}
if (absl::c_binary_search(must_be_constant_idxs, input_num)) {
TF_RET_CHECK(variable.var() && variable.var()->is_initialized);
const Tensor* value = variable.var()->tensor();
Tensor value_on_host(value->dtype(), value->shape());
if (!device_context) {
value_on_host = *value;
} else {
TF_RETURN_IF_ERROR(device_context->CopyDeviceTensorToCPUSync(
value, "", device, &value_on_host));
}
arg.kind = XlaCompiler::Argument::kConstantResource;
arg.constant_value = value_on_host;
}
} else if (absl::c_binary_search(must_be_constant_idxs, input_num)) {
arg.kind = XlaCompiler::Argument::kConstant;
arg.type = input->dtype();
arg.shape = input->shape();
arg.constant_value = *input;
} else {
TF_RET_CHECK(input->dtype() != DT_RESOURCE);
if (input->NumElements() > 0) {
arg.kind = XlaCompiler::Argument::kParameter;
} else {
arg.kind = XlaCompiler::Argument::kConstant;
arg.constant_value = *input;
}
arg.type = input->dtype();
arg.shape = input->shape();
}
}
return out;
}
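// Converts the mapped kernel inputs into PjRtBuffer arguments. When
// use_pjrt_tensor_buffer is set, tensors that do not already carry a
// PjRtTensorBuffer are wrapped in a PjRtStreamExecutorBuffer that aliases the
// tensor's memory.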
Status PreparePjRtExecutableArguments(
int num_missing_prefix_ctx_inputs, const std::vector<int>& input_mapping,
const std::vector<const Tensor*>& inputs,
const absl::flat_hash_map<int, const Tensor*>& variable_snapshots,
xla::PjRtClient* pjrt_client, xla::PjRtDevice* pjrt_device,
bool use_pjrt_tensor_buffer, std::vector<xla::PjRtBuffer*>* args,
std::vector<std::unique_ptr<xla::PjRtBuffer>>* owned_args,
absl::flat_hash_set<int>* non_donatable_input_indices) {
for (auto arg_num : input_mapping) {
const Tensor* tensor;
if (auto it = variable_snapshots.find(arg_num);
it != variable_snapshots.end()) {
tensor = it->second;
} else {
tensor = inputs[arg_num - num_missing_prefix_ctx_inputs];
}
AsyncValueTensor* av_tensor = AsyncValueTensor::FromTensor(tensor);
if (use_pjrt_tensor_buffer) {
if (av_tensor != nullptr) {
return absl::InvalidArgumentError(
"If use_pjrt_tensor_buffer is set, the input tensor should not "
"contain an AsyncValueTensor.");
}
const PjRtTensorBuffer* pjrt_tensor_buffer =
dynamic_cast<const PjRtTensorBuffer*>(DMAHelper::buffer(tensor));
if (pjrt_tensor_buffer != nullptr) {
args->push_back(pjrt_tensor_buffer->pjrt_buffer());
} else {
auto dmem = se::DeviceMemoryBase(
const_cast<char*>(tensor->tensor_data().data()),
tensor->tensor_data().size());
absl::Span<const std::shared_ptr<xla::BufferSequencingEvent>>
definition_events;
auto device_buffer = std::make_shared<xla::TrackedDeviceBuffer>(
nullptr, pjrt_device,
std::initializer_list<se::DeviceMemoryBase>{dmem},
definition_events, []() {});
xla::Shape device_shape;
TF_RETURN_IF_ERROR(TensorShapeToXLAShape(
tensor->dtype(), tensor->shape(), &device_shape));
std::unique_ptr<xla::PjRtBuffer> pjrt_buffer =
std::make_unique<xla::PjRtStreamExecutorBuffer>(
device_shape, std::move(device_buffer), pjrt_client,
pjrt_device,
pjrt_device->default_memory_space().value_or(nullptr));
owned_args->push_back(std::move(pjrt_buffer));
args->push_back(owned_args->back().get());
}
} else {
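      // Without PjRtTensorBuffer support, inputs are expected to carry their
      // device memory as AsyncValueTensors.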
if (av_tensor->GetBuffer() == nullptr) {
CHECK_EQ(tensor->NumElements(), 0);
continue;
}
args->push_back(av_tensor->GetBuffer().get());
}
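    // Buffers shared with other consumers must not be donated, since donation
    // would invalidate the remaining references.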
if (!tensor->RefCountIsOne()) {
non_donatable_input_indices->insert(args->size() - 1);
}
}
return absl::OkStatus();
}
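// Writes the executable's outputs back into the kernel context: constants and
// resource handles are forwarded, dense outputs adopt the returned
// PjRtBuffers, and resource variable updates are applied at the end.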
Status PopulateCtxOutputsFromPjRtExecutableOutputs(
int num_missing_prefix_ctx_inputs, const std::vector<const Tensor*>& inputs,
const std::vector<VariableInfo>& variables,
const XlaCompiler::CompilationResult& compilation_result,
const bool use_pjrt_tensor_buffer,
std::vector<std::unique_ptr<xla::PjRtBuffer>>& executable_outputs,
OpKernelContext* ctx) {
int output_num = 0;
for (int i = 0, end = ctx->num_outputs(); i < end; ++i) {
const DataType& type = compilation_result.outputs[i].type;
VLOG(2) << "Populating output for retval " << i << " type "
<< DataTypeString(type);
if (type == DT_VARIANT) {
return absl::UnimplementedError(
"Support for TensorList crossing the XLA/TF boundary "
"is not implemented");
}
if (compilation_result.outputs[i].is_constant) {
bool requires_copy_to_device = GetDeviceType(ctx) != DEVICE_CPU;
TF_RETURN_IF_ERROR(SetOutputForConstant(ctx, requires_copy_to_device,
&compilation_result, i));
} else if (type == DT_RESOURCE) {
int input_index = compilation_result.outputs[i].input_index -
num_missing_prefix_ctx_inputs;
TF_RET_CHECK(input_index >= 0 && input_index < ctx->num_inputs())
<< "Invalid input for outputs " << i << ": " << input_index;
ctx->set_output(i, *inputs[input_index]);
} else {
xla::PjRtBuffer* output_buffer = executable_outputs[output_num].get();
if (output_buffer->IsTuple()) {
return absl::InvalidArgumentError(
"Tuple PJRT buffer output is not supported.");
}
absl::Span<const int64_t> dims;
std::optional<std::vector<int64_t>> logical_dims_storage;
if (output_buffer->has_dynamic_dimensions()) {
TF_ASSIGN_OR_RETURN(std::vector<int64_t> logical_dims,
output_buffer->logical_dimensions());
logical_dims_storage.emplace(std::move(logical_dims));
dims = *logical_dims_storage;
} else {
dims = output_buffer->dimensions();
}
TensorShape tensor_shape;
for (int i = 0; i < dims.size(); ++i) {
TF_RETURN_IF_ERROR(tensor_shape.AddDimWithStatus(dims[i]));
}
if (use_pjrt_tensor_buffer) {
TF_ASSIGN_OR_RETURN(
Tensor output_tensor,
MakeTensorFromPjRtBuffer(
type, tensor_shape, std::move(executable_outputs[output_num])));
ctx->set_output(i, output_tensor);
} else {
Tensor* output_tensor;
TF_RETURN_IF_ERROR(
ctx->allocate_output(i, tensor_shape, &output_tensor));
auto output_avt = AsyncValueTensor::FromTensor(output_tensor);
output_avt->SetBuffer(std::move(executable_outputs[output_num]));
}
++output_num;
}
}
const auto& variable_lookup = CreateVariableLookup(variables);
for (int i = 0; i < compilation_result.resource_updates.size(); ++i) {
const XlaCompiler::ResourceUpdate& write =
compilation_result.resource_updates[i];
int actual_input_index = write.input_index - num_missing_prefix_ctx_inputs;
CHECK_GE(actual_input_index, 0);
CHECK_LT(actual_input_index, ctx->num_inputs());
auto it = variable_lookup.find(actual_input_index);
if (it == variable_lookup.end()) {
continue;
}
Var* var = variables[it->second].var();
CHECK(var);
VLOG(2) << "Updating variable #" << i
<< " at input index: " << actual_input_index << " with shape "
<< write.shape.DebugString() << "; variable tensor has shape: "
<< var->tensor()->shape().DebugString();
if (var->is_initialized && var->tensor()->dtype() != write.type) {
return errors::Internal("Mismatched type in variable write");
}
if (use_pjrt_tensor_buffer) {
TF_RETURN_IF_ERROR(PjRtTensorBufferUtil::UpdateOrMakeTensorWithPjRtBuffer(
write.type, write.shape, std::move(executable_outputs[output_num]),
var->tensor()));
} else {
TF_RETURN_IF_ERROR(
ctx->allocate_temp(write.type, write.shape, var->tensor()));
AsyncValueTensor::FromTensor(var->tensor())
->SetBuffer(std::move(executable_outputs[output_num]));
}
var->is_initialized |= write.modified;
++output_num;
}
return absl::OkStatus();
}
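// Assembles the PJRT ExecuteOptions shared by the launch paths; results are
// untupled and shape checking is relaxed on GPU.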
xla::ExecuteOptions GetPjRtExecuteOptions(
const DeviceType& device_type,
absl::flat_hash_set<int> non_donatable_input_indices) {
xla::ExecuteOptions options;
options.arguments_are_tupled = false;
options.untuple_result = true;
options.launch_id = 1;
if (device_type == DEVICE_GPU) {
options.strict_shape_checking = false;
}
options.use_major_to_minor_data_layout_for_callbacks = true;
options.non_donatable_input_indices = std::move(non_donatable_input_indices);
return options;
}
DeviceType GetDeviceType(OpKernelContext* ctx) {
auto* device =
tensorflow::down_cast<Device*>(ctx->device()->UnderlyingDevice());
return DeviceType(device->device_type());
}
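// Convenience overload that snapshots the given variables before delegating
// to the full RunPjRtExecutable below.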
Status RunPjRtExecutable(
const std::vector<const Tensor*>& inputs,
const std::vector<VariableInfo>& variables,
const XlaCompiler::CompilationResult& compilation_result,
xla::PjRtClient* pjrt_client, xla::PjRtLoadedExecutable* executable,
OpKernelContext* ctx) {
absl::flat_hash_map<int, const Tensor*> variable_snapshots;
for (int i = 0; i < variables.size(); i++) {
variable_snapshots[variables[i].index()] = variables[i].var()->tensor();
}
  return RunPjRtExecutable(/*num_missing_prefix_ctx_inputs=*/0, inputs,
variable_snapshots, variables, compilation_result,
pjrt_client, executable, ctx);
}
Status RunPjRtExecutable(
int num_missing_prefix_ctx_inputs, const std::vector<const Tensor*>& inputs,
const absl::flat_hash_map<int, const Tensor*>& variable_snapshots,
const std::vector<VariableInfo>& updated_variables,
const XlaCompiler::CompilationResult& compilation_result,
xla::PjRtClient* pjrt_client, xla::PjRtLoadedExecutable* executable,
OpKernelContext* ctx) {
const bool use_pjrt_tensor_buffer = ctx->device()
->tensorflow_accelerator_device_info()
->use_pjrt_tensor_buffer;
const DeviceType& device_type = GetDeviceType(ctx);
const int pjrt_device_id =
tsl::GetDeviceIdFromDeviceParsedName(ctx->device()->parsed_name());
TF_ASSIGN_OR_RETURN(xla::PjRtDevice * device,
pjrt_client->LookupAddressableDevice(
xla::PjRtLocalDeviceId(pjrt_device_id)));
gpu::GpuServingDeviceSelectorResource* device_selector_resource = nullptr;
if (device_type == DEVICE_GPU) {
auto rm = ctx->resource_manager();
TF_RETURN_IF_ERROR(rm->LookupOrCreate<
gpu::GpuServingDeviceSelectorResource>(
rm->default_container(), gpu::kGpuServingDeviceSelectorResourceName,
&device_selector_resource,
[&](gpu::GpuServingDeviceSelectorResource** device_selector_resource) {
*device_selector_resource = new gpu::GpuServingDeviceSelectorResource(
pjrt_client->addressable_device_count(),
std::make_unique<tsl::RoundRobinPolicy>());
return absl::OkStatus();
}));
core::ScopedUnref device_selector_resource_ref(device_selector_resource);
TF_ASSIGN_OR_RETURN(absl::string_view fingerprint,
executable->FingerprintExecutable());
device_selector_resource->selector()->Enqueue(pjrt_device_id, fingerprint);
}
TF_ASSIGN_OR_RETURN(
std::vector<std::unique_ptr<xla::PjRtBuffer>> execute_outputs,
RunPjRtExecutable(num_missing_prefix_ctx_inputs, inputs,
variable_snapshots, updated_variables, device_type,
use_pjrt_tensor_buffer, compilation_result, device,
pjrt_client, executable));
if (device_selector_resource != nullptr) {
    device_selector_resource->selector()->Completed(pjrt_device_id,
                                                    /*had_error=*/false);
}
TF_RETURN_IF_ERROR(PopulateCtxOutputsFromPjRtExecutableOutputs(
num_missing_prefix_ctx_inputs, inputs, updated_variables,
compilation_result, use_pjrt_tensor_buffer, execute_outputs, ctx));
return absl::OkStatus();
}
absl::StatusOr<std::vector<std::unique_ptr<xla::PjRtBuffer>>> RunPjRtExecutable(
int num_missing_prefix_ctx_inputs, const std::vector<const Tensor*>& inputs,
const absl::flat_hash_map<int, const Tensor*>& variable_snapshots,
const std::vector<VariableInfo>& updated_variables,
const DeviceType& device_type, bool use_pjrt_tensor_buffer,
const XlaCompiler::CompilationResult& compilation_result,
xla::PjRtDevice* device, xla::PjRtClient* pjrt_client,
xla::PjRtLoadedExecutable* executable) {
std::vector<xla::PjRtBuffer*> executable_args;
executable_args.reserve(compilation_result.input_mapping.size());
std::vector<std::unique_ptr<xla::PjRtBuffer>> owned_executable_args;
absl::flat_hash_set<int> non_donatable_input_indices;
TF_RETURN_IF_ERROR(PreparePjRtExecutableArguments(
num_missing_prefix_ctx_inputs, compilation_result.input_mapping, inputs,
variable_snapshots, pjrt_client, device, use_pjrt_tensor_buffer,
&executable_args, &owned_executable_args, &non_donatable_input_indices));
std::vector<std::unique_ptr<xla::PjRtBuffer>> execute_outputs;
std::optional<xla::PjRtFuture<>> future;
if (executable->num_replicas() != 1 || executable->num_partitions() != 1) {
TF_ASSIGN_OR_RETURN(
execute_outputs,
executable->ExecuteSharded(
executable_args, device,
GetPjRtExecuteOptions(device_type,
std::move(non_donatable_input_indices)),
future));
} else {
TF_ASSIGN_OR_RETURN(
execute_outputs,
executable->ExecutePortable(
executable_args, device,
GetPjRtExecuteOptions(device_type,
std::move(non_donatable_input_indices)),
future));
}
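  // Keep any wrapper buffers alive until execution finishes; the callback
  // exists only to extend their lifetime.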
if (!owned_executable_args.empty() && future.has_value()) {
future->OnReady([owned_executable_args =
std::move(owned_executable_args)](Status s) {});
}
return execute_outputs;
}
} | #include "tensorflow/compiler/jit/xla_launch_util.h"
#include <cstdint>
#include <memory>
#include <utility>
#include <vector>
#include <gtest/gtest.h>
#include "absl/container/flat_hash_set.h"
#include "tensorflow/compiler/jit/device_compiler.h"
#include "tensorflow/compiler/jit/flags.h"
#include "tensorflow/compiler/jit/pjrt_device_compiler_client.h"
#include "tensorflow/compiler/jit/variable_info.h"
#include "tensorflow/compiler/jit/variable_info_util.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
#include "xla/pjrt/pjrt_client.h"
#include "xla/pjrt/pjrt_common.h"
#include "xla/pjrt/tfrt_cpu_pjrt_client.h"
#include "xla/tests/literal_test_util.h"
#include "xla/tsl/framework/allocator.h"
#include "xla/tsl/framework/device_id_utils.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/device.h"
#include "tensorflow/core/framework/fake_input.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/kernels/ops_testutil.h"
#include "tensorflow/core/platform/refcount.h"
#include "tensorflow/core/tfrt/common/create_pjrt_client_util.h"
#include "tensorflow/core/tfrt/common/pjrt_util.h"
#include "tsl/platform/status.h"
#include "tsl/platform/statusor.h"
namespace tensorflow {
namespace {
using PjRtDeviceCompiler =
DeviceCompiler<xla::PjRtLoadedExecutable, xla::PjRtClient>;
using PjRtDeviceExecutablePersistor =
DeviceExecutablePersistor<xla::PjRtLoadedExecutable, xla::PjRtClient>;
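// Builds the variable-index -> tensor snapshot map that the launch utilities
// consume.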
absl::flat_hash_map<int, const Tensor*> GetVariableSnapshots(
const std::vector<VariableInfo>& variables) {
absl::flat_hash_map<int, const Tensor*> variable_snapshots;
for (int i = 0; i < variables.size(); i++) {
variable_snapshots[variables[i].index()] = variables[i].var()->tensor();
}
return variable_snapshots;
}
class PjRtExecutionUtilTest : public OpsTestBase {
public:
PjRtExecutionUtilTest() {
auto& rollout_config = GetXlaOpsCommonFlags()->tf_xla_use_device_api;
rollout_config.enabled_for_xla_launch_ = true;
rollout_config.enabled_for_compile_on_demand_ = true;
GetXlaDeviceFlags()->tf_xla_enable_xla_devices = true;
auto device_type = DeviceType(DEVICE_XLA_CPU);
rollout_config.AllowForDeviceInXlaLaunch(device_type);
rollout_config.AllowForDeviceInXlaCompileOnDemand(device_type);
auto jit_device_type = DeviceType(DEVICE_CPU_XLA_JIT);
auto device =
DeviceFactory::NewDevice(device_type.type_string(), SessionOptions(),
"/job:localhost/replica:0/task:0");
device_ = device.get();
SetDevice(device_type, std::move(device));
TF_CHECK_OK(SetPjRtClientInTFGlobalResourceManager(
device_type,
        xla::GetTfrtCpuClient(/*asynchronous=*/true, /*cpu_device_count=*/1)
.value()));
TF_CHECK_OK(device_->TryGetDeviceContext(&device_context_));
AllocatorAttributes host_alloc_attr;
host_alloc_attr.set_on_host(true);
host_allocator_ = device_->GetAllocator(host_alloc_attr);
AllocatorAttributes device_alloc_attr;
device_alloc_attr.set_on_host(false);
device_allocator_ = device_->GetAllocator(device_alloc_attr);
auto pjrt_client_or = GetOrCreatePjRtClient(device_type_);
TF_CHECK_OK(pjrt_client_or.status());
pjrt_client_ = pjrt_client_or.value();
device_compiler_ = new PjRtDeviceCompiler(
std::make_unique<PjRtDeviceExecutablePersistor>(
PjRtDeviceExecutablePersistor::Config(), jit_device_type),
std::make_unique<PjRtDeviceCompilerClient>(pjrt_client_));
profiler_ = new DeviceCompilationProfiler();
compiler_options_.device_type = jit_device_type;
compiler_options_.client = nullptr;
compiler_options_.flib_def = flib_def_.get();
}
~PjRtExecutionUtilTest() override {
for (const auto& tensor : tensors_) {
delete tensor;
}
tensors_.clear();
device_context_->Unref();
core::ScopedUnref device_compiler_ref(device_compiler_);
core::ScopedUnref profiler_ref(profiler_);
}
template <typename T>
Tensor* CreateHostTensor(const TensorShape& shape,
const gtl::ArraySlice<T> data) {
Tensor* host_tensor =
new Tensor(host_allocator_, DataTypeToEnum<T>::v(), shape);
test::FillValues<T>(host_tensor, data);
tensors_.push_back(host_tensor);
return host_tensor;
}
template <typename T>
Tensor* CreateDeviceTensor(const TensorShape& shape,
const gtl::ArraySlice<T> data) {
Tensor* host_tensor = CreateHostTensor<T>(shape, data);
Tensor* device_tensor =
new Tensor(device_allocator_, DataTypeToEnum<T>::v(), shape);
TF_EXPECT_OK(device_context_->CopyCPUTensorToDeviceSync(
host_tensor, device_, device_tensor));
tensors_.push_back(device_tensor);
return device_tensor;
}
Tensor* GetOutput(int output_index) {
CHECK_LT(output_index, context_->num_outputs());
Tensor* device_tensor = context_->mutable_output(output_index);
managed_outputs_.resize(context_->num_outputs());
if (managed_outputs_[output_index]) {
return managed_outputs_[output_index];
}
Tensor* host_tensor = new Tensor(host_allocator_, device_tensor->dtype(),
device_tensor->shape());
TF_EXPECT_OK(device_context_->CopyDeviceTensorToCPUSync(
device_tensor, "", device_, host_tensor));
managed_outputs_[output_index] = host_tensor;
return host_tensor;
}
void CompileToExecutable(const std::vector<XlaCompiler::Argument>& args,
const XlaCompiler::CompilationResult** result,
xla::PjRtLoadedExecutable** executable,
XlaCompiler::CompileOptions compile_options = {}) {
TF_EXPECT_OK(device_compiler_->CompileSingleOpIfNeeded(
compiler_options_, args, compile_options, context_.get(), profiler_,
result, executable));
}
absl::StatusOr<std::vector<std::unique_ptr<xla::PjRtBuffer>>> RunExecutable(
const std::vector<const Tensor*>& inputs,
const std::vector<VariableInfo>& variables,
const XlaCompiler::CompilationResult* result,
xla::PjRtLoadedExecutable* executable) {
TF_ASSIGN_OR_RETURN(auto pjrt_device,
pjrt_client_->LookupAddressableDevice(
xla::PjRtLocalDeviceId(device_->parsed_name().id)));
std::vector<xla::PjRtBuffer*> executable_args;
executable_args.reserve(result->input_mapping.size());
absl::flat_hash_set<int> non_donatable_input_indices;
TF_EXPECT_OK(PreparePjRtExecutableArguments(
        /*num_missing_prefix_ctx_inputs=*/0, result->input_mapping, inputs,
        GetVariableSnapshots(variables), /*pjrt_client=*/nullptr,
        /*pjrt_device=*/nullptr, /*use_pjrt_tensor_buffer=*/false,
        &executable_args, /*owned_args=*/{}, &non_donatable_input_indices));
xla::ExecuteOptions exe_options;
exe_options.arguments_are_tupled = false;
exe_options.untuple_result = true;
return executable->ExecutePortable(executable_args, pjrt_device,
exe_options);
}
template <typename T>
Var* CreateVariable(const string& name, const TensorShape& shape,
const gtl::ArraySlice<T> data) {
Tensor* init_var_value = CreateDeviceTensor<T>(shape, data);
Var* var = new Var(DataTypeToEnum<T>::v());
*var->tensor() = *init_var_value;
var->is_initialized = true;
return var;
}
template <typename T>
void AddVariableInput(const string& name, const TensorShape& shape,
const gtl::ArraySlice<T> data) {
Var* var = CreateVariable<T>(name, shape, data);
ResourceMgr* rm = device_->resource_manager();
TF_ASSERT_OK(rm->Create(rm->default_container(), name, var));
ResourceHandle handle;
handle.set_device(device_->name());
handle.set_container(rm->default_container());
handle.set_name(name);
TypeIndex type_index = TypeIndex::Make<Var>();
handle.set_hash_code(type_index.hash_code());
handle.set_maybe_type_name(type_index.name());
Tensor* input = new Tensor(host_allocator_, DT_RESOURCE, TensorShape({}));
input->scalar<ResourceHandle>()() = handle;
tensors_.push_back(input);
inputs_.push_back({nullptr, input});
}
protected:
DeviceContext* device_context_;
Allocator* host_allocator_;
Allocator* device_allocator_;
XlaCompiler::Options compiler_options_;
xla::PjRtClient* pjrt_client_;
PjRtDeviceCompiler* device_compiler_;
DeviceCompilationProfiler* profiler_;
};
TEST_F(PjRtExecutionUtilTest, PreparePjRtExecutableArguments) {
std::vector<const Tensor*> inputs;
inputs.push_back(CreateDeviceTensor<int32_t>(TensorShape({1, 3}), {0, 0, 0}));
inputs.push_back(CreateDeviceTensor<int32_t>(TensorShape({1, 3}), {1, 2, 3}));
inputs.push_back(CreateDeviceTensor<int32_t>(TensorShape({1, 3}), {4, 5, 6}));
int num_missing_prefix_ctx_inputs = 2;
std::vector<int> input_mapping{3, 4};
std::vector<VariableInfo> variables;
std::vector<xla::PjRtBuffer*> exec_args;
exec_args.reserve(input_mapping.size());
absl::flat_hash_set<int> non_donatable_input_indices;
TF_EXPECT_OK(PreparePjRtExecutableArguments(
num_missing_prefix_ctx_inputs, input_mapping, inputs,
      GetVariableSnapshots(variables),
      /*pjrt_client=*/nullptr, /*pjrt_device=*/nullptr,
      /*use_pjrt_tensor_buffer=*/false, &exec_args,
      /*owned_args=*/{}, &non_donatable_input_indices));
EXPECT_EQ(exec_args.size(), 2);
std::shared_ptr<xla::Literal> literal1 = *exec_args[0]->ToLiteralSync();
EXPECT_TRUE(xla::LiteralTestUtil::Equal(
*literal1, xla::LiteralUtil::CreateR2<int32_t>({{1, 2, 3}})));
std::shared_ptr<xla::Literal> literal2 = *exec_args[1]->ToLiteralSync();
EXPECT_TRUE(xla::LiteralTestUtil::Equal(
*literal2, xla::LiteralUtil::CreateR2<int32_t>({{4, 5, 6}})));
}
TEST_F(PjRtExecutionUtilTest, PreparePjRtExecutableArgumentsVariableInputs) {
std::vector<VariableInfo> variables;
Var* var1 = CreateVariable<int32>("v1", TensorShape({1, 2}), {1, 2});
variables.emplace_back(3, "v1", var1);
Var* var2 = CreateVariable<int32>("v2", TensorShape({1, 2}), {3, 4});
variables.emplace_back(4, "v2", var2);
std::vector<const Tensor*> inputs;
inputs.push_back(CreateDeviceTensor<int32_t>(TensorShape({1, 3}), {0, 0, 0}));
int num_missing_prefix_ctx_inputs = 2;
std::vector<int> input_mapping{3, 4};
std::vector<xla::PjRtBuffer*> exec_args;
exec_args.reserve(input_mapping.size());
absl::flat_hash_set<int> non_donatable_input_indices;
TF_EXPECT_OK(PreparePjRtExecutableArguments(
num_missing_prefix_ctx_inputs, input_mapping, inputs,
GetVariableSnapshots(variables),
nullptr, nullptr,
false, &exec_args,
{}, &non_donatable_input_indices));
EXPECT_EQ(exec_args.size(), 2);
std::shared_ptr<xla::Literal> literal1 = *exec_args[0]->ToLiteralSync();
EXPECT_TRUE(xla::LiteralTestUtil::Equal(
*literal1, xla::LiteralUtil::CreateR2<int32_t>({{1, 2}})));
std::shared_ptr<xla::Literal> literal2 = *exec_args[1]->ToLiteralSync();
EXPECT_TRUE(xla::LiteralTestUtil::Equal(
*literal2, xla::LiteralUtil::CreateR2<int32_t>({{3, 4}})));
}
TEST_F(PjRtExecutionUtilTest, PopulateCtxOutputs) {
XlaOpRegistry::RegisterCompilationKernels();
TF_EXPECT_OK(NodeDefBuilder("AddV2", "AddV2")
.Input(FakeInput(DT_INT32))
.Input(FakeInput(DT_INT32))
.Attr("T", DT_INT32)
.Device("/job:localhost/replica:0/task:0/device:XLA_CPU:0")
.Finalize(node_def()));
TF_EXPECT_OK(InitOp());
Tensor* a = CreateDeviceTensor<int32>(TensorShape({1, 3}), {1, 2, 3});
Tensor* b = CreateDeviceTensor<int32>(TensorShape({1, 3}), {4, 5, 6});
inputs_.push_back({nullptr, a});
inputs_.push_back({nullptr, b});
CreateContext();
std::vector<XlaCompiler::Argument> args(2);
args[0].kind = XlaCompiler::Argument::kParameter;
args[0].type = DT_INT32;
args[0].shape = TensorShape({1, 3});
args[1].kind = XlaCompiler::Argument::kParameter;
args[1].type = DT_INT32;
args[1].shape = TensorShape({1, 3});
const XlaCompiler::CompilationResult* result;
xla::PjRtLoadedExecutable* executable;
CompileToExecutable(args, &result, &executable);
std::vector<const Tensor*> inputs;
inputs.push_back(a);
inputs.push_back(b);
TF_ASSERT_OK_AND_ASSIGN(auto execute_outputs,
RunExecutable(inputs, {}, result, executable));
TF_EXPECT_OK(PopulateCtxOutputsFromPjRtExecutableOutputs(
0, inputs, {}, *result,
false, execute_outputs, context_.get()));
Tensor* expected = CreateHostTensor<int32>(TensorShape({1, 3}), {5, 7, 9});
test::ExpectTensorEqual<int32>(*expected, *GetOutput(0));
}
TEST_F(PjRtExecutionUtilTest, PopulateCtxOutputsDynamicShape) {
XlaOpRegistry::RegisterCompilationKernels();
TF_EXPECT_OK(NodeDefBuilder("testWhere", "Where")
.Input(FakeInput(DT_FLOAT))
.Attr("T", DT_FLOAT)
.Device("/job:localhost/replica:0/task:0/device:XLA_CPU:0")
.Finalize(node_def()));
TF_EXPECT_OK(InitOp());
Tensor* a =
CreateDeviceTensor<float>(TensorShape({2, 3}), {0., 1., 1., 0., 0., 0.});
inputs_.push_back({nullptr, a});
CreateContext();
std::vector<XlaCompiler::Argument> args(1);
args[0].kind = XlaCompiler::Argument::kParameter;
args[0].type = DT_FLOAT;
args[0].shape = TensorShape({2, 3});
const XlaCompiler::CompilationResult* result;
xla::PjRtLoadedExecutable* executable;
CompileToExecutable(args, &result, &executable);
std::vector<const Tensor*> inputs;
inputs.push_back(a);
TF_ASSERT_OK_AND_ASSIGN(auto execute_outputs,
RunExecutable(inputs, {}, result, executable));
TF_EXPECT_OK(PopulateCtxOutputsFromPjRtExecutableOutputs(
0, inputs, {}, *result,
false, execute_outputs, context_.get()));
Tensor* expected = CreateHostTensor<int64>(TensorShape({2, 2}), {0, 1, 0, 2});
test::ExpectTensorEqual<int64>(*expected, *GetOutput(0));
}
TEST_F(PjRtExecutionUtilTest, PopulateCtxOutputsVariableInputs) {
XlaOpRegistry::RegisterCompilationKernels();
TF_EXPECT_OK(NodeDefBuilder("AddV2", "AddV2")
.Input(FakeInput(DT_INT32))
.Input(FakeInput(DT_INT32))
.Attr("T", DT_INT32)
.Device("/job:localhost/replica:0/task:0/device:XLA_CPU:0")
.Finalize(node_def()));
TF_EXPECT_OK(InitOp());
AddVariableInput<int32>("var1", TensorShape({1, 2}), {1, 2});
AddVariableInput<int32>("var2", TensorShape({1, 2}), {3, 4});
CreateContext();
std::vector<XlaCompiler::Argument> args(2);
args[0].kind = XlaCompiler::Argument::kParameter;
args[0].initialized = true;
args[0].type = DT_INT32;
args[0].shape = TensorShape({1, 2});
args[1].kind = XlaCompiler::Argument::kParameter;
args[1].initialized = true;
args[1].type = DT_INT32;
args[1].shape = TensorShape({1, 2});
const XlaCompiler::CompilationResult* result;
xla::PjRtLoadedExecutable* executable;
CompileToExecutable(args, &result, &executable);
std::vector<const Tensor*> inputs = InputsFromContext(context_.get());
std::vector<int> variables_indices =
GetResourceVariableIndicesFromContext(context_.get());
std::vector<VariableInfo> variables;
variables.reserve(variables_indices.size());
TF_ASSERT_OK(GetVariableInfosFromInputs(context_->resource_manager(),
context_->device(), inputs,
variables_indices, &variables));
TF_ASSERT_OK_AND_ASSIGN(auto execute_outputs,
RunExecutable(inputs, variables, result, executable));
TF_EXPECT_OK(PopulateCtxOutputsFromPjRtExecutableOutputs(
0, inputs, variables, *result,
false, execute_outputs, context_.get()));
Tensor* expected = CreateHostTensor<int32>(TensorShape({1, 2}), {4, 6});
test::ExpectTensorEqual<int32>(*expected, *GetOutput(0));
}
TEST_F(PjRtExecutionUtilTest, PopulateCtxOutputsResourceUpdates) {
XlaOpRegistry::RegisterCompilationKernels();
TF_EXPECT_OK(NodeDefBuilder("AssignAddVariableOp", "AssignAddVariableOp")
.Input(FakeInput(DT_RESOURCE))
.Input(FakeInput(DT_INT32))
.Attr("dtype", DT_INT32)
.Device("/job:localhost/replica:0/task:0/device:XLA_CPU:0")
.Finalize(node_def()));
TF_EXPECT_OK(InitOp());
AddVariableInput<int32>("var", TensorShape({1, 3}), {1, 2, 3});
Tensor* a = CreateDeviceTensor<int32>(TensorShape({1, 3}), {2, 2, 2});
inputs_.push_back({nullptr, a});
CreateContext();
std::vector<const Tensor*> inputs = InputsFromContext(context_.get());
std::vector<int> variables_indices =
GetResourceVariableIndicesFromContext(context_.get());
std::vector<VariableInfo> variables;
variables.reserve(variables_indices.size());
TF_ASSERT_OK(GetVariableInfosFromInputs(context_->resource_manager(),
context_->device(), inputs,
variables_indices, &variables));
TF_ASSERT_OK_AND_ASSIGN(std::vector<int> constant_input_indices,
GetConstantInputIndicesFromContext(context_.get()));
TF_ASSERT_OK(LockVariables(absl::MakeSpan(variables)));
TF_ASSERT_OK_AND_ASSIGN(
std::vector<XlaCompiler::Argument> args,
XlaComputationLaunchContext::BuildXlaCompilerArguments(
constant_input_indices, inputs, variables,
static_cast<Device*>(context_->device())));
const XlaCompiler::CompilationResult* result;
xla::PjRtLoadedExecutable* executable;
CompileToExecutable(args, &result, &executable);
TF_ASSERT_OK_AND_ASSIGN(auto execute_outputs,
RunExecutable(inputs, variables, result, executable));
TF_EXPECT_OK(PopulateCtxOutputsFromPjRtExecutableOutputs(
0, inputs, variables, *result,
false, execute_outputs, context_.get()));
EXPECT_EQ(context_->num_outputs(), 0);
ResourceMgr* rm = device_->resource_manager();
Var* var = nullptr;
TF_ASSERT_OK(rm->Lookup(rm->default_container(), "var", &var));
core::ScopedUnref var_ref(var);
Tensor* device_tensor = var->tensor();
Tensor* host_tensor = new Tensor(host_allocator_, device_tensor->dtype(),
device_tensor->shape());
tensors_.push_back(host_tensor);
TF_ASSERT_OK(device_context_->CopyDeviceTensorToCPUSync(
device_tensor, "", device_, host_tensor));
Tensor* expected = CreateHostTensor<int32>(TensorShape({1, 3}), {3, 4, 5});
test::ExpectTensorEqual<int32>(*expected, *host_tensor);
}
TEST(XlaLaunchUtilTest, GetPjRtExecuteOptions) {
xla::ExecuteOptions options =
GetPjRtExecuteOptions(DeviceType(DEVICE_GPU), {});
EXPECT_FALSE(options.arguments_are_tupled);
EXPECT_TRUE(options.untuple_result);
EXPECT_FALSE(options.strict_shape_checking);
EXPECT_TRUE(options.use_major_to_minor_data_layout_for_callbacks);
}
TEST_F(PjRtExecutionUtilTest, RunPjRtExecutable) {
XlaOpRegistry::RegisterCompilationKernels();
TF_EXPECT_OK(NodeDefBuilder("AddV2", "AddV2")
.Input(FakeInput(DT_INT32))
.Input(FakeInput(DT_INT32))
.Attr("T", DT_INT32)
.Device("/job:localhost/replica:0/task:0/device:XLA_CPU:0")
.Finalize(node_def()));
TF_EXPECT_OK(InitOp());
AddVariableInput<int32>("var1", TensorShape({1, 2}), {1, 2});
AddVariableInput<int32>("var2", TensorShape({1, 2}), {3, 4});
CreateContext();
std::vector<XlaCompiler::Argument> args(2);
args[0].kind = XlaCompiler::Argument::kParameter;
args[0].initialized = true;
args[0].type = DT_INT32;
args[0].shape = TensorShape({1, 2});
args[1].kind = XlaCompiler::Argument::kParameter;
args[1].initialized = true;
args[1].type = DT_INT32;
args[1].shape = TensorShape({1, 2});
const XlaCompiler::CompilationResult* result;
xla::PjRtLoadedExecutable* executable;
CompileToExecutable(args, &result, &executable);
std::vector<const Tensor*> inputs = InputsFromContext(context_.get());
std::vector<int> variables_indices =
GetResourceVariableIndicesFromContext(context_.get());
std::vector<VariableInfo> variables;
variables.reserve(variables_indices.size());
TF_ASSERT_OK(GetVariableInfosFromInputs(context_->resource_manager(),
context_->device(), inputs,
variables_indices, &variables));
TF_ASSERT_OK(RunPjRtExecutable(inputs, variables, *result, pjrt_client_,
executable, context_.get()));
Tensor* expected = CreateHostTensor<int32>(TensorShape({1, 2}), {4, 6});
test::ExpectTensorEqual<int32>(*expected, *GetOutput(0));
}
TEST_F(PjRtExecutionUtilTest,
RunPjRtExecutableWithVariableSnapshotsAndMissingInputs) {
XlaOpRegistry::RegisterCompilationKernels();
TF_EXPECT_OK(NodeDefBuilder("Fill", "Fill")
.Input(FakeInput(DT_INT32))
.Input(FakeInput(DT_INT32))
.Attr("index_type", DT_INT32)
.Attr("T", DT_INT32)
.Device("/job:localhost/replica:0/task:0/device:XLA_CPU:0")
.Finalize(node_def()));
TF_EXPECT_OK(InitOp());
Tensor* dims = CreateHostTensor<int32>(TensorShape({1}), {2});
Tensor* value = CreateDeviceTensor<int32>(TensorShape(), {1});
inputs_.push_back({nullptr, dims});
inputs_.push_back({nullptr, value});
CreateContext();
TF_ASSERT_OK_AND_ASSIGN(std::vector<int> constant_input_indices,
GetConstantInputIndicesFromContext(context_.get()));
EXPECT_EQ(constant_input_indices.size(), 1);
std::vector<const Tensor*> inputs = InputsFromContext(context_.get());
std::vector<int> variables_indices =
GetResourceVariableIndicesFromContext(context_.get());
absl::flat_hash_map<int, const Tensor*> variable_snapshots;
const XlaCompiler::CompilationResult* result;
xla::PjRtLoadedExecutable* executable;
{
std::vector<VariableInfo> variables;
variables.reserve(variables_indices.size());
TF_ASSERT_OK(GetVariableInfosFromInputs(context_->resource_manager(),
context_->device(), inputs,
variables_indices, &variables));
TF_ASSERT_OK(LockVariables(absl::MakeSpan(variables)));
variable_snapshots = GetVariableSnapshots(variables);
TF_ASSERT_OK_AND_ASSIGN(
std::vector<XlaCompiler::Argument> args,
XlaComputationLaunchContext::BuildXlaCompilerArguments(
constant_input_indices, inputs, variables,
static_cast<Device*>(context_->device())));
CompileToExecutable(args, &result, &executable);
}
inputs = {inputs.begin() + constant_input_indices.size(), inputs.end()};
{
TF_ASSERT_OK_AND_ASSIGN(std::vector<VariableInfo> updated_variables,
GatherVariableInfo(context_.get(), *result,
constant_input_indices.size()));
TF_ASSERT_OK(LockVariables(absl::MakeSpan(updated_variables)));
TF_ASSERT_OK(RunPjRtExecutable(
constant_input_indices.size(), inputs, variable_snapshots,
updated_variables, *result, pjrt_client_, executable, context_.get()));
}
Tensor* expected = CreateHostTensor<int32>(TensorShape({2}), {1, 1});
test::ExpectTensorEqual<int32>(*expected, *GetOutput(0));
}
TEST_F(PjRtExecutionUtilTest, RunPjRtExecutableWithoutCtx) {
XlaOpRegistry::RegisterCompilationKernels();
TF_ASSERT_OK(NodeDefBuilder("AddV2", "AddV2")
.Input(FakeInput(DT_INT32))
.Input(FakeInput(DT_INT32))
.Attr("T", DT_INT32)
.Device("/job:localhost/replica:0/task:0/device:XLA_CPU:0")
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
AddVariableInput<int32>("var1", TensorShape({1, 2}), {1, 2});
AddVariableInput<int32>("var2", TensorShape({1, 2}), {3, 4});
CreateContext();
std::vector<XlaCompiler::Argument> args(2);
args[0].kind = XlaCompiler::Argument::kParameter;
args[0].initialized = true;
args[0].type = DT_INT32;
args[0].shape = TensorShape({1, 2});
args[1].kind = XlaCompiler::Argument::kParameter;
args[1].initialized = true;
args[1].type = DT_INT32;
args[1].shape = TensorShape({1, 2});
const XlaCompiler::CompilationResult* result;
xla::PjRtLoadedExecutable* executable;
CompileToExecutable(args, &result, &executable);
std::vector<const Tensor*> inputs = InputsFromContext(context_.get());
std::vector<int> variables_indices =
GetResourceVariableIndicesFromContext(context_.get());
std::vector<VariableInfo> variables;
variables.reserve(variables_indices.size());
TF_ASSERT_OK(GetVariableInfosFromInputs(context_->resource_manager(),
context_->device(), inputs,
variables_indices, &variables));
const bool use_pjrt_tensor_buffer = context_->device()
->tensorflow_accelerator_device_info()
->use_pjrt_tensor_buffer;
const DeviceType& device_type = GetDeviceType(context_.get());
const int pjrt_device_id =
tsl::GetDeviceIdFromDeviceParsedName(context_->device()->parsed_name());
TF_ASSERT_OK_AND_ASSIGN(xla::PjRtDevice * pjrt_device,
pjrt_client_->LookupAddressableDevice(
xla::PjRtLocalDeviceId(pjrt_device_id)));
absl::flat_hash_map<int, const Tensor*> variable_snapshots;
for (int i = 0; i < variables.size(); i++) {
variable_snapshots[variables[i].index()] = variables[i].var()->tensor();
}
TF_ASSERT_OK_AND_ASSIGN(
std::vector<std::unique_ptr<xla::PjRtBuffer>> execute_outputs,
RunPjRtExecutable(0, inputs,
variable_snapshots, variables, device_type,
use_pjrt_tensor_buffer, *result, pjrt_device,
pjrt_client_, executable));
for (const auto& output : execute_outputs) {
TF_ASSERT_OK(output->GetReadyFuture().Await());
}
ASSERT_EQ(execute_outputs.size(), 1);
std::shared_ptr<xla::Literal> literal = *execute_outputs[0]->ToLiteralSync();
EXPECT_TRUE(xla::LiteralTestUtil::Equal(
*literal, xla::LiteralUtil::CreateR2<int32_t>({{4, 6}})));
}
}
}  // namespace tensorflow
// Source: https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/jit/xla_launch_util.cc
// Test:   https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/jit/xla_launch_util_test.cc
// File: arolla/qexpr/operators/math/batch_arithmetic.h (google/arolla)
#ifndef AROLLA_QEXPR_OPERATORS_MATH_BATCH_ARITHMETIC_H_
#define AROLLA_QEXPR_OPERATORS_MATH_BATCH_ARITHMETIC_H_
#include <type_traits>
#include "absl/log/check.h"
#include "absl/types/span.h"
#include "Eigen/Core"
namespace arolla {
namespace batch_arithmetic_internal {
template <typename T>
using DynamicEigenVector = Eigen::Array<T, 1, Eigen::Dynamic, Eigen::RowMajor>;
template <typename T>
using DynamicEigenVectorView = Eigen::Map<const DynamicEigenVector<T>>;
template <typename T>
using DynamicMutableEigenVectorView = Eigen::Map<DynamicEigenVector<T>>;
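// Applies OP element-wise over three equally sized spans via Eigen. The data
// may be reinterpreted as InternalT (same size and floating-pointness as T,
// enforced by the static_assert below) for the actual computation.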
template <typename T, class OP, typename InternalT = T>
struct BinaryEigenOperation {
void operator()(absl::Span<T> result, absl::Span<const T> a,
absl::Span<const T> b) const {
auto size = a.size();
DCHECK_EQ(size, b.size());
DCHECK_EQ(size, result.size());
static_assert(
sizeof(T) == sizeof(InternalT) &&
std::is_floating_point_v<T> == std::is_floating_point_v<InternalT>,
"Incorrect InternalT");
const auto* a_data = reinterpret_cast<const InternalT*>(a.data());
const auto* b_data = reinterpret_cast<const InternalT*>(b.data());
auto* result_data = reinterpret_cast<InternalT*>(result.data());
DynamicEigenVectorView<InternalT> eigen_a(a_data, size);
DynamicEigenVectorView<InternalT> eigen_b(b_data, size);
DynamicMutableEigenVectorView<InternalT> eigen_result(result_data, size);
OP::Apply(eigen_a, eigen_b, &eigen_result);
}
};
struct ProdOp {
template <typename T, typename RT>
static void Apply(const T& a, const T& b, RT* c) {
*c = a * b;
}
};
struct AddOp {
template <typename T, typename RT>
static void Apply(const T& a, const T& b, RT* c) {
*c = a + b;
}
};
struct SubOp {
template <typename T, typename RT>
static void Apply(const T& a, const T& b, RT* c) {
*c = a - b;
}
};
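// Maps integral T to its unsigned counterpart so arithmetic wraps around on
// overflow instead of invoking undefined behavior; floating-point types pass
// through unchanged.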
template <typename T>
static auto MakeUnsignedIfIntegralFn() {
if constexpr (std::is_integral_v<T>) {
return std::make_unsigned_t<T>();
} else {
return T();
}
}
template <typename T>
using UnsignedIfIntegral = decltype(MakeUnsignedIfIntegralFn<T>());
}
template <typename T>
using BatchAdd = batch_arithmetic_internal::BinaryEigenOperation<
T, batch_arithmetic_internal::AddOp,
batch_arithmetic_internal::UnsignedIfIntegral<T>>;
template <typename T>
using BatchSub = batch_arithmetic_internal::BinaryEigenOperation<
T, batch_arithmetic_internal::SubOp,
batch_arithmetic_internal::UnsignedIfIntegral<T>>;
template <typename T>
using BatchProd = batch_arithmetic_internal::BinaryEigenOperation<
T, batch_arithmetic_internal::ProdOp,
batch_arithmetic_internal::UnsignedIfIntegral<T>>;
template <typename T>
T BatchAggSum(absl::Span<const T> data) {
batch_arithmetic_internal::DynamicEigenVectorView<T> e_data(data.data(),
data.size());
return e_data.sum();
}
}
#endif  // AROLLA_QEXPR_OPERATORS_MATH_BATCH_ARITHMETIC_H_

// Test: arolla/qexpr/operators/math/batch_arithmetic_test.cc
#include "arolla/qexpr/operators/math/batch_arithmetic.h"
#include <cstdint>
#include <vector>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/types/span.h"
namespace arolla {
namespace {
TEST(BatchArithmetic, BatchAdd) {
std::vector<float> arg1{1., 3., 2.};
std::vector<float> arg2{3.5, 1.5, 2.};
std::vector<float> res(3);
BatchAdd<float>()(absl::Span<float>(res), arg1, arg2);
EXPECT_THAT(res, testing::ElementsAre(4.5, 4.5, 4.0));
}
TEST(BatchArithmetic, BatchSub) {
std::vector<int64_t> arg1{1, 3, 2};
std::vector<int64_t> arg2{3, 1, 2};
std::vector<int64_t> res(3);
BatchSub<int64_t>()(absl::Span<int64_t>(res), arg1, arg2);
EXPECT_THAT(res, testing::ElementsAre(-2, 2, 0));
}
TEST(BatchArithmetic, BatchProd) {
std::vector<double> arg1{1., 3., 2.};
std::vector<double> arg2{3.5, 1.5, 2.};
std::vector<double> res(3);
BatchProd<double>()(absl::Span<double>(res), arg1, arg2);
EXPECT_THAT(res, testing::ElementsAre(3.5, 4.5, 4.0));
}
TEST(BatchArithmetic, AggSum) {
std::vector<float> arg{1., 3., 2.};
EXPECT_EQ(BatchAggSum<float>(arg), 6.);
}
}
}  // namespace arolla
// Source: https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/qexpr/operators/math/batch_arithmetic.h
// Test:   https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/qexpr/operators/math/batch_arithmetic_test.cc
// File: tensorflow/core/summary/summary_db_writer.cc (tensorflow/tensorflow)
#include "tensorflow/core/summary/summary_db_writer.h"
#include <deque>
#include "tensorflow/core/summary/summary_converter.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/register_types.h"
#include "tensorflow/core/framework/summary.pb.h"
#include "tensorflow/core/lib/core/stringpiece.h"
#include "tensorflow/core/lib/db/sqlite.h"
#include "tensorflow/core/lib/random/random.h"
#include "tensorflow/core/util/event.pb.h"
#define CALL_SUPPORTED_TYPES(m) \
TF_CALL_tstring(m) \
TF_CALL_half(m) \
TF_CALL_float(m) \
TF_CALL_double(m) \
TF_CALL_complex64(m) \
TF_CALL_complex128(m) \
TF_CALL_int8(m) \
TF_CALL_int16(m) \
TF_CALL_int32(m) \
TF_CALL_int64(m) \
TF_CALL_uint8(m) \
TF_CALL_uint16(m) \
TF_CALL_uint32(m) \
TF_CALL_uint64(m)
namespace tensorflow {
namespace {
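// Random IDs are drawn from progressively wider ranges ("tiers": 23, 31, and
// 47 bits); starting small keeps IDs short, and a collision bumps the
// allocator to the next tier.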
const uint64 kIdTiers[] = {
0x7fffffULL,
0x7fffffffULL,
0x7fffffffffffULL,
};
const int kMaxIdTier = sizeof(kIdTiers) / sizeof(uint64) - 1;
const int kIdCollisionDelayMicros = 10;
const int kMaxIdCollisions = 21;
const int64_t kAbsent = 0LL;
const char* kScalarPluginName = "scalars";
const char* kImagePluginName = "images";
const char* kAudioPluginName = "audio";
const char* kHistogramPluginName = "histograms";
const int64_t kReserveMinBytes = 32;
const double kReserveMultiplier = 1.5;
const int64_t kPreallocateRows = 1000;
const uint64 kFlushBytes = 1024 * 1024;
double DoubleTime(uint64 micros) {
return static_cast<double>(micros) / 1.0e6;
}
string StringifyShape(const TensorShape& shape) {
string result;
bool first = true;
for (const auto& dim : shape) {
if (first) {
first = false;
} else {
strings::StrAppend(&result, ",");
}
strings::StrAppend(&result, dim.size);
}
return result;
}
Status CheckSupportedType(const Tensor& t) {
#define CASE(T) \
case DataTypeToEnum<T>::value: \
break;
switch (t.dtype()) {
CALL_SUPPORTED_TYPES(CASE)
default:
return errors::Unimplemented(DataTypeString(t.dtype()),
" tensors unsupported on platform");
}
return absl::OkStatus();
#undef CASE
}
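// Copies the first element of t into a new scalar tensor; unsupported dtypes
// degrade to a float NaN scalar rather than failing.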
Tensor AsScalar(const Tensor& t) {
Tensor t2{t.dtype(), {}};
#define CASE(T) \
case DataTypeToEnum<T>::value: \
t2.scalar<T>()() = t.flat<T>()(0); \
break;
switch (t.dtype()) {
CALL_SUPPORTED_TYPES(CASE)
default:
t2 = {DT_FLOAT, {}};
t2.scalar<float>()() = NAN;
break;
}
return t2;
#undef CASE
}
void PatchPluginName(SummaryMetadata* metadata, const char* name) {
if (metadata->plugin_data().plugin_name().empty()) {
metadata->mutable_plugin_data()->set_plugin_name(name);
}
}
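// Inserts or replaces the markdown description row attached to the given ID.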
Status SetDescription(Sqlite* db, int64_t id, const StringPiece& markdown) {
const char* sql = R"sql(
INSERT OR REPLACE INTO Descriptions (id, description) VALUES (?, ?)
)sql";
SqliteStatement insert_desc;
TF_RETURN_IF_ERROR(db->Prepare(sql, &insert_desc));
insert_desc.BindInt(1, id);
insert_desc.BindText(2, markdown);
return insert_desc.StepAndReset();
}
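// Hands out probabilistically unique IDs by inserting random candidates into
// the Ids table and retrying with exponential backoff when a candidate
// collides.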
class IdAllocator {
public:
IdAllocator(Env* env, Sqlite* db) : env_{env}, db_{db} {
DCHECK(env_ != nullptr);
DCHECK(db_ != nullptr);
}
Status CreateNewId(int64_t* id) TF_LOCKS_EXCLUDED(mu_) {
mutex_lock lock(mu_);
Status s;
SqliteStatement stmt;
TF_RETURN_IF_ERROR(db_->Prepare("INSERT INTO Ids (id) VALUES (?)", &stmt));
for (int i = 0; i < kMaxIdCollisions; ++i) {
int64_t tid = MakeRandomId();
stmt.BindInt(1, tid);
s = stmt.StepAndReset();
if (s.ok()) {
*id = tid;
break;
}
if (s.code() != error::INVALID_ARGUMENT) break;
if (tier_ < kMaxIdTier) {
LOG(INFO) << "IdAllocator collision at tier " << tier_ << " (of "
<< kMaxIdTier << ") so auto-adjusting to a higher tier";
++tier_;
} else {
LOG(WARNING) << "IdAllocator (attempt #" << i << ") "
<< "resulted in a collision at the highest tier; this "
"is problematic if it happens often; you can try "
"pruning the Ids table; you can also file a bug "
"asking for the ID space to be increased; otherwise "
"writes will gradually slow down over time until they "
"become impossible";
}
env_->SleepForMicroseconds((1 << i) * kIdCollisionDelayMicros);
}
return s;
}
private:
int64_t MakeRandomId() TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
int64_t id = static_cast<int64_t>(random::New64() & kIdTiers[tier_]);
if (id == kAbsent) ++id;
return id;
}
mutex mu_;
Env* const env_;
Sqlite* const db_;
int tier_ TF_GUARDED_BY(mu_) = 0;
IdAllocator(const IdAllocator&) = delete;
void operator=(const IdAllocator&) = delete;
};
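// Persists a GraphDef as rows in the Graphs, Nodes, and NodeInputs tables,
// committing the surrounding transaction periodically to bound memory use.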
class GraphWriter {
public:
static Status Save(Sqlite* db, SqliteTransaction* txn, IdAllocator* ids,
GraphDef* graph, uint64 now, int64_t run_id,
int64_t* graph_id)
SQLITE_EXCLUSIVE_TRANSACTIONS_REQUIRED(*db) {
TF_RETURN_IF_ERROR(ids->CreateNewId(graph_id));
GraphWriter saver{db, txn, graph, now, *graph_id};
saver.MapNameToNodeId();
TF_RETURN_WITH_CONTEXT_IF_ERROR(saver.SaveNodeInputs(), "SaveNodeInputs");
TF_RETURN_WITH_CONTEXT_IF_ERROR(saver.SaveNodes(), "SaveNodes");
TF_RETURN_WITH_CONTEXT_IF_ERROR(saver.SaveGraph(run_id), "SaveGraph");
return absl::OkStatus();
}
private:
GraphWriter(Sqlite* db, SqliteTransaction* txn, GraphDef* graph, uint64 now,
int64_t graph_id)
: db_(db), txn_(txn), graph_(graph), now_(now), graph_id_(graph_id) {}
void MapNameToNodeId() {
size_t node_count = static_cast<size_t>(graph_->node_size());
name_copies_.reserve(node_count);
name_to_node_id_.reserve(node_count);
for (int node_id = 0; node_id < graph_->node_size(); ++node_id) {
name_copies_.emplace_back(graph_->node(node_id).name());
name_to_node_id_.emplace(name_copies_.back(), node_id);
}
}
Status SaveNodeInputs() {
const char* sql = R"sql(
INSERT INTO NodeInputs (
graph_id,
node_id,
idx,
input_node_id,
input_node_idx,
is_control
) VALUES (?, ?, ?, ?, ?, ?)
)sql";
SqliteStatement insert;
TF_RETURN_IF_ERROR(db_->Prepare(sql, &insert));
for (int node_id = 0; node_id < graph_->node_size(); ++node_id) {
const NodeDef& node = graph_->node(node_id);
for (int idx = 0; idx < node.input_size(); ++idx) {
StringPiece name = node.input(idx);
int64_t input_node_id;
int64_t input_node_idx = 0;
int64_t is_control = 0;
size_t i = name.rfind(':');
if (i != StringPiece::npos) {
if (!strings::safe_strto64(name.substr(i + 1, name.size() - i - 1),
&input_node_idx)) {
return errors::DataLoss("Bad NodeDef.input: ", name);
}
name.remove_suffix(name.size() - i);
}
if (!name.empty() && name[0] == '^') {
name.remove_prefix(1);
is_control = 1;
}
auto e = name_to_node_id_.find(name);
if (e == name_to_node_id_.end()) {
return errors::DataLoss("Could not find node: ", name);
}
input_node_id = e->second;
insert.BindInt(1, graph_id_);
insert.BindInt(2, node_id);
insert.BindInt(3, idx);
insert.BindInt(4, input_node_id);
insert.BindInt(5, input_node_idx);
insert.BindInt(6, is_control);
unflushed_bytes_ += insert.size();
TF_RETURN_WITH_CONTEXT_IF_ERROR(insert.StepAndReset(), node.name(),
" -> ", name);
TF_RETURN_IF_ERROR(MaybeFlush());
}
}
return absl::OkStatus();
}
Status SaveNodes() {
const char* sql = R"sql(
INSERT INTO Nodes (
graph_id,
node_id,
node_name,
op,
device,
node_def)
VALUES (?, ?, ?, ?, ?, ?)
)sql";
SqliteStatement insert;
TF_RETURN_IF_ERROR(db_->Prepare(sql, &insert));
for (int node_id = 0; node_id < graph_->node_size(); ++node_id) {
NodeDef* node = graph_->mutable_node(node_id);
insert.BindInt(1, graph_id_);
insert.BindInt(2, node_id);
insert.BindText(3, node->name());
insert.BindText(4, node->op());
insert.BindText(5, node->device());
node->clear_name();
node->clear_op();
node->clear_device();
node->clear_input();
string node_def;
if (node->SerializeToString(&node_def)) {
insert.BindBlobUnsafe(6, node_def);
}
unflushed_bytes_ += insert.size();
TF_RETURN_WITH_CONTEXT_IF_ERROR(insert.StepAndReset(), node->name());
TF_RETURN_IF_ERROR(MaybeFlush());
}
return absl::OkStatus();
}
Status SaveGraph(int64_t run_id) {
const char* sql = R"sql(
INSERT OR REPLACE INTO Graphs (
run_id,
graph_id,
inserted_time,
graph_def
) VALUES (?, ?, ?, ?)
)sql";
SqliteStatement insert;
TF_RETURN_IF_ERROR(db_->Prepare(sql, &insert));
if (run_id != kAbsent) insert.BindInt(1, run_id);
insert.BindInt(2, graph_id_);
insert.BindDouble(3, DoubleTime(now_));
graph_->clear_node();
string graph_def;
if (graph_->SerializeToString(&graph_def)) {
insert.BindBlobUnsafe(4, graph_def);
}
return insert.StepAndReset();
}
Status MaybeFlush() {
if (unflushed_bytes_ >= kFlushBytes) {
TF_RETURN_WITH_CONTEXT_IF_ERROR(txn_->Commit(), "flushing ",
unflushed_bytes_, " bytes");
unflushed_bytes_ = 0;
}
return absl::OkStatus();
}
Sqlite* const db_;
SqliteTransaction* const txn_;
uint64 unflushed_bytes_ = 0;
GraphDef* const graph_;
const uint64 now_;
const int64_t graph_id_;
std::vector<string> name_copies_;
std::unordered_map<StringPiece, int64_t, StringPieceHasher> name_to_node_id_;
GraphWriter(const GraphWriter&) = delete;
void operator=(const GraphWriter&) = delete;
};
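// Lazily creates the User, Experiment, Run, and Tag rows for this writer and
// memoizes their IDs so subsequent lookups stay in memory.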
class RunMetadata {
public:
RunMetadata(IdAllocator* ids, const string& experiment_name,
const string& run_name, const string& user_name)
: ids_{ids},
experiment_name_{experiment_name},
run_name_{run_name},
user_name_{user_name} {
DCHECK(ids_ != nullptr);
}
const string& experiment_name() { return experiment_name_; }
const string& run_name() { return run_name_; }
const string& user_name() { return user_name_; }
int64_t run_id() TF_LOCKS_EXCLUDED(mu_) {
mutex_lock lock(mu_);
return run_id_;
}
Status SetGraph(Sqlite* db, uint64 now, double computed_time,
std::unique_ptr<GraphDef> g) SQLITE_TRANSACTIONS_EXCLUDED(*db)
TF_LOCKS_EXCLUDED(mu_) {
int64_t run_id;
{
mutex_lock lock(mu_);
TF_RETURN_IF_ERROR(InitializeRun(db, now, computed_time));
run_id = run_id_;
}
int64_t graph_id;
SqliteTransaction txn(*db);
TF_RETURN_IF_ERROR(
GraphWriter::Save(db, &txn, ids_, g.get(), now, run_id, &graph_id));
return txn.Commit();
}
Status GetTagId(Sqlite* db, uint64 now, double computed_time,
const string& tag_name, int64_t* tag_id,
const SummaryMetadata& metadata) TF_LOCKS_EXCLUDED(mu_) {
mutex_lock lock(mu_);
TF_RETURN_IF_ERROR(InitializeRun(db, now, computed_time));
auto e = tag_ids_.find(tag_name);
if (e != tag_ids_.end()) {
*tag_id = e->second;
return absl::OkStatus();
}
TF_RETURN_IF_ERROR(ids_->CreateNewId(tag_id));
tag_ids_[tag_name] = *tag_id;
TF_RETURN_IF_ERROR(
SetDescription(db, *tag_id, metadata.summary_description()));
const char* sql = R"sql(
INSERT INTO Tags (
run_id,
tag_id,
tag_name,
inserted_time,
display_name,
plugin_name,
plugin_data
) VALUES (
:run_id,
:tag_id,
:tag_name,
:inserted_time,
:display_name,
:plugin_name,
:plugin_data
)
)sql";
SqliteStatement insert;
TF_RETURN_IF_ERROR(db->Prepare(sql, &insert));
if (run_id_ != kAbsent) insert.BindInt(":run_id", run_id_);
insert.BindInt(":tag_id", *tag_id);
insert.BindTextUnsafe(":tag_name", tag_name);
insert.BindDouble(":inserted_time", DoubleTime(now));
insert.BindTextUnsafe(":display_name", metadata.display_name());
insert.BindTextUnsafe(":plugin_name", metadata.plugin_data().plugin_name());
insert.BindBlobUnsafe(":plugin_data", metadata.plugin_data().content());
return insert.StepAndReset();
}
private:
Status InitializeUser(Sqlite* db, uint64 now)
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
if (user_id_ != kAbsent || user_name_.empty()) return absl::OkStatus();
const char* get_sql = R"sql(
SELECT user_id FROM Users WHERE user_name = ?
)sql";
SqliteStatement get;
TF_RETURN_IF_ERROR(db->Prepare(get_sql, &get));
get.BindText(1, user_name_);
bool is_done;
TF_RETURN_IF_ERROR(get.Step(&is_done));
if (!is_done) {
user_id_ = get.ColumnInt(0);
return absl::OkStatus();
}
TF_RETURN_IF_ERROR(ids_->CreateNewId(&user_id_));
const char* insert_sql = R"sql(
INSERT INTO Users (
user_id,
user_name,
inserted_time
) VALUES (?, ?, ?)
)sql";
SqliteStatement insert;
TF_RETURN_IF_ERROR(db->Prepare(insert_sql, &insert));
insert.BindInt(1, user_id_);
insert.BindText(2, user_name_);
insert.BindDouble(3, DoubleTime(now));
TF_RETURN_IF_ERROR(insert.StepAndReset());
return absl::OkStatus();
}
Status InitializeExperiment(Sqlite* db, uint64 now, double computed_time)
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
if (experiment_name_.empty()) return absl::OkStatus();
if (experiment_id_ == kAbsent) {
TF_RETURN_IF_ERROR(InitializeUser(db, now));
const char* get_sql = R"sql(
SELECT
experiment_id,
started_time
FROM
Experiments
WHERE
user_id IS ?
AND experiment_name = ?
)sql";
SqliteStatement get;
TF_RETURN_IF_ERROR(db->Prepare(get_sql, &get));
if (user_id_ != kAbsent) get.BindInt(1, user_id_);
get.BindText(2, experiment_name_);
bool is_done;
TF_RETURN_IF_ERROR(get.Step(&is_done));
if (!is_done) {
experiment_id_ = get.ColumnInt(0);
experiment_started_time_ = get.ColumnInt(1);
} else {
TF_RETURN_IF_ERROR(ids_->CreateNewId(&experiment_id_));
experiment_started_time_ = computed_time;
const char* insert_sql = R"sql(
INSERT INTO Experiments (
user_id,
experiment_id,
experiment_name,
inserted_time,
started_time,
is_watching
) VALUES (?, ?, ?, ?, ?, ?)
)sql";
SqliteStatement insert;
TF_RETURN_IF_ERROR(db->Prepare(insert_sql, &insert));
if (user_id_ != kAbsent) insert.BindInt(1, user_id_);
insert.BindInt(2, experiment_id_);
insert.BindText(3, experiment_name_);
insert.BindDouble(4, DoubleTime(now));
insert.BindDouble(5, computed_time);
insert.BindInt(6, 0);
TF_RETURN_IF_ERROR(insert.StepAndReset());
}
}
if (computed_time < experiment_started_time_) {
experiment_started_time_ = computed_time;
const char* update_sql = R"sql(
UPDATE
Experiments
SET
started_time = ?
WHERE
experiment_id = ?
)sql";
SqliteStatement update;
TF_RETURN_IF_ERROR(db->Prepare(update_sql, &update));
update.BindDouble(1, computed_time);
update.BindInt(2, experiment_id_);
TF_RETURN_IF_ERROR(update.StepAndReset());
}
return absl::OkStatus();
}
Status InitializeRun(Sqlite* db, uint64 now, double computed_time)
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
if (run_name_.empty()) return absl::OkStatus();
TF_RETURN_IF_ERROR(InitializeExperiment(db, now, computed_time));
if (run_id_ == kAbsent) {
TF_RETURN_IF_ERROR(ids_->CreateNewId(&run_id_));
run_started_time_ = computed_time;
const char* insert_sql = R"sql(
INSERT OR REPLACE INTO Runs (
experiment_id,
run_id,
run_name,
inserted_time,
started_time
) VALUES (?, ?, ?, ?, ?)
)sql";
SqliteStatement insert;
TF_RETURN_IF_ERROR(db->Prepare(insert_sql, &insert));
if (experiment_id_ != kAbsent) insert.BindInt(1, experiment_id_);
insert.BindInt(2, run_id_);
insert.BindText(3, run_name_);
insert.BindDouble(4, DoubleTime(now));
insert.BindDouble(5, computed_time);
TF_RETURN_IF_ERROR(insert.StepAndReset());
}
if (computed_time < run_started_time_) {
run_started_time_ = computed_time;
const char* update_sql = R"sql(
UPDATE
Runs
SET
started_time = ?
WHERE
run_id = ?
)sql";
SqliteStatement update;
TF_RETURN_IF_ERROR(db->Prepare(update_sql, &update));
update.BindDouble(1, computed_time);
update.BindInt(2, run_id_);
TF_RETURN_IF_ERROR(update.StepAndReset());
}
return absl::OkStatus();
}
mutex mu_;
IdAllocator* const ids_;
const string experiment_name_;
const string run_name_;
const string user_name_;
int64_t experiment_id_ TF_GUARDED_BY(mu_) = kAbsent;
int64_t run_id_ TF_GUARDED_BY(mu_) = kAbsent;
int64_t user_id_ TF_GUARDED_BY(mu_) = kAbsent;
double experiment_started_time_ TF_GUARDED_BY(mu_) = 0.0;
double run_started_time_ TF_GUARDED_BY(mu_) = 0.0;
std::unordered_map<string, int64_t> tag_ids_ TF_GUARDED_BY(mu_);
RunMetadata(const RunMetadata&) = delete;
void operator=(const RunMetadata&) = delete;
};
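// Writes the points of a single time series (one tag) into rows preallocated
// in the Tensors table.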
class SeriesWriter {
public:
SeriesWriter(int64_t series, RunMetadata* meta)
: series_{series}, meta_{meta} {
DCHECK(series_ > 0);
}
Status Append(Sqlite* db, int64_t step, uint64 now, double computed_time,
const Tensor& t) SQLITE_TRANSACTIONS_EXCLUDED(*db)
TF_LOCKS_EXCLUDED(mu_) {
mutex_lock lock(mu_);
if (rowids_.empty()) {
Status s = Reserve(db, t);
if (!s.ok()) {
rowids_.clear();
return s;
}
}
int64_t rowid = rowids_.front();
Status s = Write(db, rowid, step, computed_time, t);
if (s.ok()) {
++count_;
}
rowids_.pop_front();
return s;
}
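// Cleans up preallocated rows that were never filled in before shutdown.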
Status Finish(Sqlite* db) SQLITE_TRANSACTIONS_EXCLUDED(*db)
TF_LOCKS_EXCLUDED(mu_) {
mutex_lock lock(mu_);
if (!rowids_.empty()) {
SqliteTransaction txn(*db);
const char* sql = R"sql(
DELETE FROM Tensors WHERE rowid = ?
)sql";
SqliteStatement deleter;
TF_RETURN_IF_ERROR(db->Prepare(sql, &deleter));
for (size_t i = count_; i < rowids_.size(); ++i) {
deleter.BindInt(1, rowids_.front());
TF_RETURN_IF_ERROR(deleter.StepAndReset());
rowids_.pop_front();
}
TF_RETURN_IF_ERROR(txn.Commit());
rowids_.clear();
}
return absl::OkStatus();
}
private:
Status Write(Sqlite* db, int64_t rowid, int64_t step, double computed_time,
const Tensor& t) SQLITE_TRANSACTIONS_EXCLUDED(*db) {
if (t.dtype() == DT_STRING) {
if (t.dims() == 0) {
return Update(db, step, computed_time, t, t.scalar<tstring>()(), rowid);
} else {
SqliteTransaction txn(*db);
TF_RETURN_IF_ERROR(
Update(db, step, computed_time, t, StringPiece(), rowid));
TF_RETURN_IF_ERROR(UpdateNdString(db, t, rowid));
return txn.Commit();
}
} else {
return Update(db, step, computed_time, t, t.tensor_data(), rowid);
}
}
Status Update(Sqlite* db, int64_t step, double computed_time, const Tensor& t,
const StringPiece& data, int64_t rowid) {
const char* sql = R"sql(
UPDATE OR REPLACE
Tensors
SET
step = ?,
computed_time = ?,
dtype = ?,
shape = ?,
data = ?
WHERE
rowid = ?
)sql";
SqliteStatement stmt;
TF_RETURN_IF_ERROR(db->Prepare(sql, &stmt));
stmt.BindInt(1, step);
stmt.BindDouble(2, computed_time);
stmt.BindInt(3, t.dtype());
stmt.BindText(4, StringifyShape(t.shape()));
stmt.BindBlobUnsafe(5, data);
stmt.BindInt(6, rowid);
TF_RETURN_IF_ERROR(stmt.StepAndReset());
return absl::OkStatus();
}
Status UpdateNdString(Sqlite* db, const Tensor& t, int64_t tensor_rowid)
SQLITE_EXCLUSIVE_TRANSACTIONS_REQUIRED(*db) {
DCHECK_EQ(t.dtype(), DT_STRING);
DCHECK_GT(t.dims(), 0);
const char* deleter_sql = R"sql(
DELETE FROM TensorStrings WHERE tensor_rowid = ?
)sql";
SqliteStatement deleter;
TF_RETURN_IF_ERROR(db->Prepare(deleter_sql, &deleter));
deleter.BindInt(1, tensor_rowid);
TF_RETURN_WITH_CONTEXT_IF_ERROR(deleter.StepAndReset(), tensor_rowid);
const char* inserter_sql = R"sql(
INSERT INTO TensorStrings (
tensor_rowid,
idx,
data
) VALUES (?, ?, ?)
)sql";
SqliteStatement inserter;
TF_RETURN_IF_ERROR(db->Prepare(inserter_sql, &inserter));
auto flat = t.flat<tstring>();
for (int64_t i = 0; i < flat.size(); ++i) {
inserter.BindInt(1, tensor_rowid);
inserter.BindInt(2, i);
inserter.BindBlobUnsafe(3, flat(i));
TF_RETURN_WITH_CONTEXT_IF_ERROR(inserter.StepAndReset(), "i=", i);
}
return absl::OkStatus();
}
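// Preallocates kPreallocateRows zero-filled rows, each sized at roughly
// kReserveMultiplier times the first value (with a small floor), so that
// appends become in-place updates instead of inserts.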
Status Reserve(Sqlite* db, const Tensor& t) SQLITE_TRANSACTIONS_EXCLUDED(*db)
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
SqliteTransaction txn(*db);
unflushed_bytes_ = 0;
if (t.dtype() == DT_STRING) {
if (t.dims() == 0) {
TF_RETURN_IF_ERROR(ReserveData(db, &txn, t.scalar<tstring>()().size()));
} else {
TF_RETURN_IF_ERROR(ReserveTensors(db, &txn, kReserveMinBytes));
}
} else {
TF_RETURN_IF_ERROR(ReserveData(db, &txn, t.tensor_data().size()));
}
return txn.Commit();
}
Status ReserveData(Sqlite* db, SqliteTransaction* txn, size_t size)
SQLITE_EXCLUSIVE_TRANSACTIONS_REQUIRED(*db)
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
int64_t space =
static_cast<int64_t>(static_cast<double>(size) * kReserveMultiplier);
if (space < kReserveMinBytes) space = kReserveMinBytes;
return ReserveTensors(db, txn, space);
}
Status ReserveTensors(Sqlite* db, SqliteTransaction* txn,
int64_t reserved_bytes)
SQLITE_EXCLUSIVE_TRANSACTIONS_REQUIRED(*db)
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
const char* sql = R"sql(
INSERT INTO Tensors (
series,
data
) VALUES (?, ZEROBLOB(?))
)sql";
SqliteStatement insert;
TF_RETURN_IF_ERROR(db->Prepare(sql, &insert));
for (int64_t i = 0; i < kPreallocateRows; ++i) {
insert.BindInt(1, series_);
insert.BindInt(2, reserved_bytes);
TF_RETURN_WITH_CONTEXT_IF_ERROR(insert.StepAndReset(), "i=", i);
rowids_.push_back(db->last_insert_rowid());
unflushed_bytes_ += reserved_bytes;
TF_RETURN_IF_ERROR(MaybeFlush(db, txn));
}
return absl::OkStatus();
}
Status MaybeFlush(Sqlite* db, SqliteTransaction* txn)
SQLITE_EXCLUSIVE_TRANSACTIONS_REQUIRED(*db)
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
if (unflushed_bytes_ >= kFlushBytes) {
TF_RETURN_WITH_CONTEXT_IF_ERROR(txn->Commit(), "flushing ",
unflushed_bytes_, " bytes");
unflushed_bytes_ = 0;
}
return absl::OkStatus();
}
mutex mu_;
const int64_t series_;
RunMetadata* const meta_;
uint64 count_ TF_GUARDED_BY(mu_) = 0;
std::deque<int64_t> rowids_ TF_GUARDED_BY(mu_);
uint64 unflushed_bytes_ TF_GUARDED_BY(mu_) = 0;
SeriesWriter(const SeriesWriter&) = delete;
void operator=(const SeriesWriter&) = delete;
};
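// Fans appends out to one SeriesWriter per tag within a single run.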
class RunWriter {
public:
explicit RunWriter(RunMetadata* meta) : meta_{meta} {}
Status Append(Sqlite* db, int64_t tag_id, int64_t step, uint64 now,
double computed_time, const Tensor& t)
SQLITE_TRANSACTIONS_EXCLUDED(*db) TF_LOCKS_EXCLUDED(mu_) {
SeriesWriter* writer = GetSeriesWriter(tag_id);
return writer->Append(db, step, now, computed_time, t);
}
Status Finish(Sqlite* db) SQLITE_TRANSACTIONS_EXCLUDED(*db)
TF_LOCKS_EXCLUDED(mu_) {
mutex_lock lock(mu_);
if (series_writers_.empty()) return absl::OkStatus();
for (auto i = series_writers_.begin(); i != series_writers_.end(); ++i) {
if (!i->second) continue;
TF_RETURN_WITH_CONTEXT_IF_ERROR(i->second->Finish(db),
"finish tag_id=", i->first);
i->second.reset();
}
return absl::OkStatus();
}
private:
SeriesWriter* GetSeriesWriter(int64_t tag_id) TF_LOCKS_EXCLUDED(mu_) {
mutex_lock sl(mu_);
auto spot = series_writers_.find(tag_id);
if (spot == series_writers_.end()) {
SeriesWriter* writer = new SeriesWriter(tag_id, meta_);
series_writers_[tag_id].reset(writer);
return writer;
} else {
return spot->second.get();
}
}
mutex mu_;
RunMetadata* const meta_;
std::unordered_map<int64_t, std::unique_ptr<SeriesWriter>> series_writers_
TF_GUARDED_BY(mu_);
RunWriter(const RunWriter&) = delete;
void operator=(const RunWriter&) = delete;
};
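// SummaryWriterInterface backed by SQLite: tensors are written directly,
// while legacy Event protos (scalars, histograms, images, audio, graphs) are
// migrated into the tensor-based schema.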
class SummaryDbWriter : public SummaryWriterInterface {
public:
SummaryDbWriter(Env* env, Sqlite* db, const string& experiment_name,
const string& run_name, const string& user_name)
: SummaryWriterInterface(),
env_{env},
db_{db},
ids_{env_, db_},
meta_{&ids_, experiment_name, run_name, user_name},
run_{&meta_} {
DCHECK(env_ != nullptr);
db_->Ref();
}
~SummaryDbWriter() override {
core::ScopedUnref unref(db_);
Status s = run_.Finish(db_);
if (!s.ok()) {
LOG(ERROR) << s;
}
int64_t run_id = meta_.run_id();
if (run_id == kAbsent) return;
const char* sql = R"sql(
UPDATE Runs SET finished_time = ? WHERE run_id = ?
)sql";
SqliteStatement update;
s = db_->Prepare(sql, &update);
if (s.ok()) {
update.BindDouble(1, DoubleTime(env_->NowMicros()));
update.BindInt(2, run_id);
s = update.StepAndReset();
}
if (!s.ok()) {
LOG(ERROR) << "Failed to set Runs[" << run_id << "].finish_time: " << s;
}
}
Status Flush() override { return absl::OkStatus(); }
Status WriteTensor(int64_t global_step, Tensor t, const string& tag,
const string& serialized_metadata) override {
TF_RETURN_IF_ERROR(CheckSupportedType(t));
SummaryMetadata metadata;
if (!metadata.ParseFromString(serialized_metadata)) {
return errors::InvalidArgument("Bad serialized_metadata");
}
return Write(global_step, t, tag, metadata);
}
Status WriteScalar(int64_t global_step, Tensor t,
const string& tag) override {
TF_RETURN_IF_ERROR(CheckSupportedType(t));
SummaryMetadata metadata;
PatchPluginName(&metadata, kScalarPluginName);
return Write(global_step, AsScalar(t), tag, metadata);
}
Status WriteGraph(int64_t global_step, std::unique_ptr<GraphDef> g) override {
uint64 now = env_->NowMicros();
return meta_.SetGraph(db_, now, DoubleTime(now), std::move(g));
}
Status WriteEvent(std::unique_ptr<Event> e) override {
return MigrateEvent(std::move(e));
}
Status WriteHistogram(int64_t global_step, Tensor t,
const string& tag) override {
uint64 now = env_->NowMicros();
std::unique_ptr<Event> e{new Event};
e->set_step(global_step);
e->set_wall_time(DoubleTime(now));
TF_RETURN_IF_ERROR(
AddTensorAsHistogramToSummary(t, tag, e->mutable_summary()));
return MigrateEvent(std::move(e));
}
Status WriteImage(int64_t global_step, Tensor t, const string& tag,
int max_images, Tensor bad_color) override {
uint64 now = env_->NowMicros();
std::unique_ptr<Event> e{new Event};
e->set_step(global_step);
e->set_wall_time(DoubleTime(now));
TF_RETURN_IF_ERROR(AddTensorAsImageToSummary(t, tag, max_images, bad_color,
e->mutable_summary()));
return MigrateEvent(std::move(e));
}
Status WriteAudio(int64_t global_step, Tensor t, const string& tag,
int max_outputs, float sample_rate) override {
uint64 now = env_->NowMicros();
std::unique_ptr<Event> e{new Event};
e->set_step(global_step);
e->set_wall_time(DoubleTime(now));
TF_RETURN_IF_ERROR(AddTensorAsAudioToSummary(
t, tag, max_outputs, sample_rate, e->mutable_summary()));
return MigrateEvent(std::move(e));
}
string DebugString() const override { return "SummaryDbWriter"; }
private:
Status Write(int64_t step, const Tensor& t, const string& tag,
const SummaryMetadata& metadata) {
uint64 now = env_->NowMicros();
double computed_time = DoubleTime(now);
int64_t tag_id;
TF_RETURN_IF_ERROR(
meta_.GetTagId(db_, now, computed_time, tag, &tag_id, metadata));
TF_RETURN_WITH_CONTEXT_IF_ERROR(
run_.Append(db_, tag_id, step, now, computed_time, t),
meta_.user_name(), "/", meta_.experiment_name(), "/", meta_.run_name(),
"/", tag, "@", step);
return absl::OkStatus();
}
Status MigrateEvent(std::unique_ptr<Event> e) {
switch (e->what_case()) {
case Event::WhatCase::kSummary: {
uint64 now = env_->NowMicros();
auto summaries = e->mutable_summary();
for (int i = 0; i < summaries->value_size(); ++i) {
Summary::Value* value = summaries->mutable_value(i);
TF_RETURN_WITH_CONTEXT_IF_ERROR(
MigrateSummary(e.get(), value, now), meta_.user_name(), "/",
meta_.experiment_name(), "/", meta_.run_name(), "/", value->tag(),
"@", e->step());
}
break;
}
case Event::WhatCase::kGraphDef:
TF_RETURN_WITH_CONTEXT_IF_ERROR(
MigrateGraph(e.get(), e->graph_def()), meta_.user_name(), "/",
meta_.experiment_name(), "/", meta_.run_name(), "/__graph__@",
e->step());
break;
default:
break;
}
return absl::OkStatus();
}
Status MigrateGraph(const Event* e, const string& graph_def) {
uint64 now = env_->NowMicros();
std::unique_ptr<GraphDef> graph{new GraphDef};
if (!ParseProtoUnlimited(graph.get(), graph_def)) {
return errors::InvalidArgument("bad proto");
}
return meta_.SetGraph(db_, now, e->wall_time(), std::move(graph));
}
Status MigrateSummary(const Event* e, Summary::Value* s, uint64 now) {
switch (s->value_case()) {
case Summary::Value::ValueCase::kTensor:
TF_RETURN_WITH_CONTEXT_IF_ERROR(MigrateTensor(e, s, now), "tensor");
break;
case Summary::Value::ValueCase::kSimpleValue:
TF_RETURN_WITH_CONTEXT_IF_ERROR(MigrateScalar(e, s, now), "scalar");
break;
case Summary::Value::ValueCase::kHisto:
TF_RETURN_WITH_CONTEXT_IF_ERROR(MigrateHistogram(e, s, now), "histo");
break;
case Summary::Value::ValueCase::kImage:
TF_RETURN_WITH_CONTEXT_IF_ERROR(MigrateImage(e, s, now), "image");
break;
case Summary::Value::ValueCase::kAudio:
TF_RETURN_WITH_CONTEXT_IF_ERROR(MigrateAudio(e, s, now), "audio");
break;
default:
break;
}
return absl::OkStatus();
}
Status MigrateTensor(const Event* e, Summary::Value* s, uint64 now) {
Tensor t;
if (!t.FromProto(s->tensor())) return errors::InvalidArgument("bad proto");
TF_RETURN_IF_ERROR(CheckSupportedType(t));
int64_t tag_id;
TF_RETURN_IF_ERROR(meta_.GetTagId(db_, now, e->wall_time(), s->tag(),
&tag_id, s->metadata()));
return run_.Append(db_, tag_id, e->step(), now, e->wall_time(), t);
}
Status MigrateScalar(const Event* e, Summary::Value* s, uint64 now) {
Tensor t{DT_FLOAT, {}};
t.scalar<float>()() = s->simple_value();
int64_t tag_id;
PatchPluginName(s->mutable_metadata(), kScalarPluginName);
TF_RETURN_IF_ERROR(meta_.GetTagId(db_, now, e->wall_time(), s->tag(),
&tag_id, s->metadata()));
return run_.Append(db_, tag_id, e->step(), now, e->wall_time(), t);
}
Status MigrateHistogram(const Event* e, Summary::Value* s, uint64 now) {
const HistogramProto& histo = s->histo();
int k = histo.bucket_size();
if (k != histo.bucket_limit_size()) {
return errors::InvalidArgument("size mismatch");
}
Tensor t{DT_DOUBLE, {k, 3}};
auto data = t.flat<double>();
for (int i = 0, j = 0; i < k; ++i) {
double left_edge = (i == 0) ? std::numeric_limits<double>::min()
: histo.bucket_limit(i - 1);
data(j++) = left_edge;
data(j++) = histo.bucket_limit(i);
data(j++) = histo.bucket(i);
}
int64_t tag_id;
PatchPluginName(s->mutable_metadata(), kHistogramPluginName);
TF_RETURN_IF_ERROR(meta_.GetTagId(db_, now, e->wall_time(), s->tag(),
&tag_id, s->metadata()));
return run_.Append(db_, tag_id, e->step(), now, e->wall_time(), t);
}
Status MigrateImage(const Event* e, Summary::Value* s, uint64 now) {
Tensor t{DT_STRING, {3}};
auto img = s->mutable_image();
t.flat<tstring>()(0) = strings::StrCat(img->width());
t.flat<tstring>()(1) = strings::StrCat(img->height());
t.flat<tstring>()(2) = std::move(*img->mutable_encoded_image_string());
int64_t tag_id;
PatchPluginName(s->mutable_metadata(), kImagePluginName);
TF_RETURN_IF_ERROR(meta_.GetTagId(db_, now, e->wall_time(), s->tag(),
&tag_id, s->metadata()));
return run_.Append(db_, tag_id, e->step(), now, e->wall_time(), t);
}
Status MigrateAudio(const Event* e, Summary::Value* s, uint64 now) {
Tensor t{DT_STRING, {1, 2}};
auto wav = s->mutable_audio();
t.flat<tstring>()(0) = std::move(*wav->mutable_encoded_audio_string());
t.flat<tstring>()(1) = "";
int64_t tag_id;
PatchPluginName(s->mutable_metadata(), kAudioPluginName);
TF_RETURN_IF_ERROR(meta_.GetTagId(db_, now, e->wall_time(), s->tag(),
&tag_id, s->metadata()));
return run_.Append(db_, tag_id, e->step(), now, e->wall_time(), t);
}
Env* const env_;
Sqlite* const db_;
IdAllocator ids_;
RunMetadata meta_;
RunWriter run_;
};
}
Status CreateSummaryDbWriter(Sqlite* db, const string& experiment_name,
const string& run_name, const string& user_name,
Env* env, SummaryWriterInterface** result) {
*result = new SummaryDbWriter(env, db, experiment_name, run_name, user_name);
return absl::OkStatus();
}
}  // namespace tensorflow

// Test: tensorflow/core/summary/summary_db_writer_test.cc
#include "tensorflow/core/summary/summary_db_writer.h"
#include "tensorflow/core/summary/schema.h"
#include "tensorflow/core/framework/function.pb.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/summary.pb.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/db/sqlite.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/util/event.pb.h"
namespace tensorflow {
namespace {
Tensor MakeScalarInt64(int64_t x) {
Tensor t(DT_INT64, TensorShape({}));
t.scalar<int64_t>()() = x;
return t;
}
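// Deterministic clock: time advances only when the test calls AdvanceByMillis.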
class FakeClockEnv : public EnvWrapper {
public:
FakeClockEnv() : EnvWrapper(Env::Default()), current_millis_(0) {}
void AdvanceByMillis(const uint64 millis) { current_millis_ += millis; }
uint64 NowMicros() const override { return current_millis_ * 1000; }
uint64 NowSeconds() const override { return current_millis_ / 1000; }
private:
uint64 current_millis_;
};
class SummaryDbWriterTest : public ::testing::Test {
protected:
void SetUp() override {
TF_ASSERT_OK(Sqlite::Open(":memory:", SQLITE_OPEN_READWRITE, &db_));
TF_ASSERT_OK(SetupTensorboardSqliteDb(db_));
}
void TearDown() override {
if (writer_ != nullptr) {
writer_->Unref();
writer_ = nullptr;
}
db_->Unref();
db_ = nullptr;
}
int64_t QueryInt(const string& sql) {
SqliteStatement stmt = db_->PrepareOrDie(sql);
bool is_done;
Status s = stmt.Step(&is_done);
if (!s.ok() || is_done) {
LOG(ERROR) << s << " due to " << sql;
return -1;
}
return stmt.ColumnInt(0);
}
double QueryDouble(const string& sql) {
SqliteStatement stmt = db_->PrepareOrDie(sql);
bool is_done;
Status s = stmt.Step(&is_done);
if (!s.ok() || is_done) {
LOG(ERROR) << s << " due to " << sql;
return -1;
}
return stmt.ColumnDouble(0);
}
string QueryString(const string& sql) {
SqliteStatement stmt = db_->PrepareOrDie(sql);
bool is_done;
Status s = stmt.Step(&is_done);
if (!s.ok() || is_done) {
LOG(ERROR) << s << " due to " << sql;
return "MISSINGNO";
}
return stmt.ColumnString(0);
}
FakeClockEnv env_;
Sqlite* db_ = nullptr;
SummaryWriterInterface* writer_ = nullptr;
};
TEST_F(SummaryDbWriterTest, WriteHistogram_VerifyTensorValues) {
TF_ASSERT_OK(CreateSummaryDbWriter(db_, "histtest", "test1", "user1", &env_,
&writer_));
int step = 0;
std::unique_ptr<Event> e{new Event};
e->set_step(step);
e->set_wall_time(123);
Summary::Value* s = e->mutable_summary()->add_value();
s->set_tag("normal/myhisto");
double dummy_value = 10.123;
HistogramProto* proto = s->mutable_histo();
proto->Clear();
proto->set_min(dummy_value);
proto->set_max(dummy_value);
proto->set_num(dummy_value);
proto->set_sum(dummy_value);
proto->set_sum_squares(dummy_value);
int size = 3;
double bucket_limits[] = {-30.5, -10.5, -5.5};
double bucket[] = {-10, 10, 20};
for (int i = 0; i < size; i++) {
proto->add_bucket_limit(bucket_limits[i]);
proto->add_bucket(bucket[i]);
}
TF_ASSERT_OK(writer_->WriteEvent(std::move(e)));
TF_ASSERT_OK(writer_->Flush());
writer_->Unref();
writer_ = nullptr;
string result = QueryString("SELECT data FROM Tensors");
const double* val = reinterpret_cast<const double*>(result.data());
double histarray[] = {std::numeric_limits<double>::min(),
-30.5,
-10,
-30.5,
-10.5,
10,
-10.5,
-5.5,
20};
int histarray_size = 9;
for (int i = 0; i < histarray_size; i++) {
EXPECT_EQ(histarray[i], val[i]);
}
}
TEST_F(SummaryDbWriterTest, NothingWritten_NoRowsCreated) {
TF_ASSERT_OK(CreateSummaryDbWriter(db_, "mad-science", "train", "jart", &env_,
&writer_));
TF_ASSERT_OK(writer_->Flush());
writer_->Unref();
writer_ = nullptr;
EXPECT_EQ(0LL, QueryInt("SELECT COUNT(*) FROM Ids"));
EXPECT_EQ(0LL, QueryInt("SELECT COUNT(*) FROM Users"));
EXPECT_EQ(0LL, QueryInt("SELECT COUNT(*) FROM Experiments"));
EXPECT_EQ(0LL, QueryInt("SELECT COUNT(*) FROM Runs"));
EXPECT_EQ(0LL, QueryInt("SELECT COUNT(*) FROM Tags"));
EXPECT_EQ(0LL, QueryInt("SELECT COUNT(*) FROM Tensors"));
}
TEST_F(SummaryDbWriterTest, TensorsWritten_RowsGetInitialized) {
SummaryMetadata metadata;
metadata.set_display_name("display_name");
metadata.set_summary_description("description");
metadata.mutable_plugin_data()->set_plugin_name("plugin_name");
metadata.mutable_plugin_data()->set_content("plugin_data");
SummaryMetadata metadata_nope;
metadata_nope.set_display_name("nope");
metadata_nope.set_summary_description("nope");
metadata_nope.mutable_plugin_data()->set_plugin_name("nope");
metadata_nope.mutable_plugin_data()->set_content("nope");
TF_ASSERT_OK(CreateSummaryDbWriter(db_, "mad-science", "train", "jart", &env_,
&writer_));
env_.AdvanceByMillis(23);
TF_ASSERT_OK(writer_->WriteTensor(1, MakeScalarInt64(123LL), "taggy",
metadata.SerializeAsString()));
env_.AdvanceByMillis(23);
TF_ASSERT_OK(writer_->WriteTensor(2, MakeScalarInt64(314LL), "taggy",
metadata_nope.SerializeAsString()));
TF_ASSERT_OK(writer_->Flush());
ASSERT_EQ(1LL, QueryInt("SELECT COUNT(*) FROM Users"));
ASSERT_EQ(1LL, QueryInt("SELECT COUNT(*) FROM Experiments"));
ASSERT_EQ(1LL, QueryInt("SELECT COUNT(*) FROM Runs"));
ASSERT_EQ(1LL, QueryInt("SELECT COUNT(*) FROM Tags"));
ASSERT_EQ(1000LL, QueryInt("SELECT COUNT(*) FROM Tensors"));
int64_t user_id = QueryInt("SELECT user_id FROM Users");
int64_t experiment_id = QueryInt("SELECT experiment_id FROM Experiments");
int64_t run_id = QueryInt("SELECT run_id FROM Runs");
int64_t tag_id = QueryInt("SELECT tag_id FROM Tags");
EXPECT_LT(0LL, user_id);
EXPECT_LT(0LL, experiment_id);
EXPECT_LT(0LL, run_id);
EXPECT_LT(0LL, tag_id);
EXPECT_EQ("jart", QueryString("SELECT user_name FROM Users"));
EXPECT_EQ(0.023, QueryDouble("SELECT inserted_time FROM Users"));
EXPECT_EQ(user_id, QueryInt("SELECT user_id FROM Experiments"));
EXPECT_EQ("mad-science",
QueryString("SELECT experiment_name FROM Experiments"));
EXPECT_EQ(0.023, QueryDouble("SELECT inserted_time FROM Experiments"));
EXPECT_EQ(experiment_id, QueryInt("SELECT experiment_id FROM Runs"));
EXPECT_EQ("train", QueryString("SELECT run_name FROM Runs"));
EXPECT_EQ(0.023, QueryDouble("SELECT inserted_time FROM Runs"));
EXPECT_EQ(run_id, QueryInt("SELECT run_id FROM Tags"));
EXPECT_EQ("taggy", QueryString("SELECT tag_name FROM Tags"));
EXPECT_EQ(0.023, QueryDouble("SELECT inserted_time FROM Tags"));
EXPECT_EQ("display_name", QueryString("SELECT display_name FROM Tags"));
EXPECT_EQ("plugin_name", QueryString("SELECT plugin_name FROM Tags"));
EXPECT_EQ("plugin_data", QueryString("SELECT plugin_data FROM Tags"));
EXPECT_EQ("description", QueryString("SELECT description FROM Descriptions"));
EXPECT_EQ(tag_id, QueryInt("SELECT series FROM Tensors WHERE step = 1"));
EXPECT_EQ(0.023,
QueryDouble("SELECT computed_time FROM Tensors WHERE step = 1"));
EXPECT_EQ(tag_id, QueryInt("SELECT series FROM Tensors WHERE step = 2"));
EXPECT_EQ(0.046,
QueryDouble("SELECT computed_time FROM Tensors WHERE step = 2"));
}
TEST_F(SummaryDbWriterTest, EmptyParentNames_NoParentsCreated) {
TF_ASSERT_OK(CreateSummaryDbWriter(db_, "", "", "", &env_, &writer_));
TF_ASSERT_OK(writer_->WriteTensor(1, MakeScalarInt64(123LL), "taggy", ""));
TF_ASSERT_OK(writer_->Flush());
ASSERT_EQ(0LL, QueryInt("SELECT COUNT(*) FROM Users"));
ASSERT_EQ(0LL, QueryInt("SELECT COUNT(*) FROM Experiments"));
ASSERT_EQ(0LL, QueryInt("SELECT COUNT(*) FROM Runs"));
ASSERT_EQ(1LL, QueryInt("SELECT COUNT(*) FROM Tags"));
ASSERT_EQ(1000LL, QueryInt("SELECT COUNT(*) FROM Tensors"));
}
TEST_F(SummaryDbWriterTest, WriteEvent_Scalar) {
TF_ASSERT_OK(CreateSummaryDbWriter(db_, "", "", "", &env_, &writer_));
std::unique_ptr<Event> e{new Event};
e->set_step(7);
e->set_wall_time(123.456);
Summary::Value* s = e->mutable_summary()->add_value();
s->set_tag("π");
s->set_simple_value(3.14f);
s = e->mutable_summary()->add_value();
s->set_tag("φ");
s->set_simple_value(1.61f);
TF_ASSERT_OK(writer_->WriteEvent(std::move(e)));
TF_ASSERT_OK(writer_->Flush());
ASSERT_EQ(2LL, QueryInt("SELECT COUNT(*) FROM Tags"));
ASSERT_EQ(2000LL, QueryInt("SELECT COUNT(*) FROM Tensors"));
int64_t tag1_id = QueryInt("SELECT tag_id FROM Tags WHERE tag_name = 'π'");
int64_t tag2_id = QueryInt("SELECT tag_id FROM Tags WHERE tag_name = 'φ'");
EXPECT_GT(tag1_id, 0LL);
EXPECT_GT(tag2_id, 0LL);
EXPECT_EQ(123.456, QueryDouble(strings::StrCat(
"SELECT computed_time FROM Tensors WHERE series = ",
tag1_id, " AND step = 7")));
EXPECT_EQ(123.456, QueryDouble(strings::StrCat(
"SELECT computed_time FROM Tensors WHERE series = ",
tag2_id, " AND step = 7")));
}
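// A serialized GraphDef is stored with its nodes stripped into the Nodes
// and NodeInputs tables; a "^" prefix on an input marks a control edge.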
TEST_F(SummaryDbWriterTest, WriteGraph) {
TF_ASSERT_OK(CreateSummaryDbWriter(db_, "", "R", "", &env_, &writer_));
env_.AdvanceByMillis(23);
GraphDef graph;
graph.mutable_library()->add_gradient()->set_function_name("funk");
NodeDef* node = graph.add_node();
node->set_name("x");
node->set_op("Placeholder");
node = graph.add_node();
node->set_name("y");
node->set_op("Placeholder");
node = graph.add_node();
node->set_name("z");
node->set_op("Love");
node = graph.add_node();
node->set_name("+");
node->set_op("Add");
node->add_input("x");
node->add_input("y");
node->add_input("^z");
node->set_device("tpu/lol");
std::unique_ptr<Event> e{new Event};
graph.SerializeToString(e->mutable_graph_def());
TF_ASSERT_OK(writer_->WriteEvent(std::move(e)));
TF_ASSERT_OK(writer_->Flush());
ASSERT_EQ(1LL, QueryInt("SELECT COUNT(*) FROM Runs"));
ASSERT_EQ(1LL, QueryInt("SELECT COUNT(*) FROM Graphs"));
ASSERT_EQ(4LL, QueryInt("SELECT COUNT(*) FROM Nodes"));
ASSERT_EQ(3LL, QueryInt("SELECT COUNT(*) FROM NodeInputs"));
ASSERT_EQ(QueryInt("SELECT run_id FROM Runs"),
QueryInt("SELECT run_id FROM Graphs"));
int64_t graph_id = QueryInt("SELECT graph_id FROM Graphs");
EXPECT_GT(graph_id, 0LL);
EXPECT_EQ(0.023, QueryDouble("SELECT inserted_time FROM Graphs"));
GraphDef graph2;
graph2.ParseFromString(QueryString("SELECT graph_def FROM Graphs"));
EXPECT_EQ(0, graph2.node_size());
EXPECT_EQ("funk", graph2.library().gradient(0).function_name());
EXPECT_EQ("x", QueryString("SELECT node_name FROM Nodes WHERE node_id = 0"));
EXPECT_EQ("y", QueryString("SELECT node_name FROM Nodes WHERE node_id = 1"));
EXPECT_EQ("z", QueryString("SELECT node_name FROM Nodes WHERE node_id = 2"));
EXPECT_EQ("+", QueryString("SELECT node_name FROM Nodes WHERE node_id = 3"));
EXPECT_EQ("Placeholder",
QueryString("SELECT op FROM Nodes WHERE node_id = 0"));
EXPECT_EQ("Placeholder",
QueryString("SELECT op FROM Nodes WHERE node_id = 1"));
EXPECT_EQ("Love", QueryString("SELECT op FROM Nodes WHERE node_id = 2"));
EXPECT_EQ("Add", QueryString("SELECT op FROM Nodes WHERE node_id = 3"));
EXPECT_EQ("", QueryString("SELECT device FROM Nodes WHERE node_id = 0"));
EXPECT_EQ("", QueryString("SELECT device FROM Nodes WHERE node_id = 1"));
EXPECT_EQ("", QueryString("SELECT device FROM Nodes WHERE node_id = 2"));
EXPECT_EQ("tpu/lol",
QueryString("SELECT device FROM Nodes WHERE node_id = 3"));
EXPECT_EQ(graph_id,
QueryInt("SELECT graph_id FROM NodeInputs WHERE idx = 0"));
EXPECT_EQ(graph_id,
QueryInt("SELECT graph_id FROM NodeInputs WHERE idx = 1"));
EXPECT_EQ(graph_id,
QueryInt("SELECT graph_id FROM NodeInputs WHERE idx = 2"));
EXPECT_EQ(3LL, QueryInt("SELECT node_id FROM NodeInputs WHERE idx = 0"));
EXPECT_EQ(3LL, QueryInt("SELECT node_id FROM NodeInputs WHERE idx = 1"));
EXPECT_EQ(3LL, QueryInt("SELECT node_id FROM NodeInputs WHERE idx = 2"));
EXPECT_EQ(0LL,
QueryInt("SELECT input_node_id FROM NodeInputs WHERE idx = 0"));
EXPECT_EQ(1LL,
QueryInt("SELECT input_node_id FROM NodeInputs WHERE idx = 1"));
EXPECT_EQ(2LL,
QueryInt("SELECT input_node_id FROM NodeInputs WHERE idx = 2"));
EXPECT_EQ(0LL, QueryInt("SELECT is_control FROM NodeInputs WHERE idx = 0"));
EXPECT_EQ(0LL, QueryInt("SELECT is_control FROM NodeInputs WHERE idx = 1"));
EXPECT_EQ(1LL, QueryInt("SELECT is_control FROM NodeInputs WHERE idx = 2"));
}
TEST_F(SummaryDbWriterTest, UsesIdsTable) {
SummaryMetadata metadata;
TF_ASSERT_OK(CreateSummaryDbWriter(db_, "mad-science", "train", "jart", &env_,
&writer_));
env_.AdvanceByMillis(23);
TF_ASSERT_OK(writer_->WriteTensor(1, MakeScalarInt64(123LL), "taggy",
metadata.SerializeAsString()));
TF_ASSERT_OK(writer_->Flush());
ASSERT_EQ(4LL, QueryInt("SELECT COUNT(*) FROM Ids"));
EXPECT_EQ(4LL, QueryInt(strings::StrCat(
"SELECT COUNT(*) FROM Ids WHERE id IN (",
QueryInt("SELECT user_id FROM Users"), ", ",
QueryInt("SELECT experiment_id FROM Experiments"), ", ",
QueryInt("SELECT run_id FROM Runs"), ", ",
QueryInt("SELECT tag_id FROM Tags"), ")")));
}
TEST_F(SummaryDbWriterTest, SetsRunFinishedTime) {
SummaryMetadata metadata;
TF_ASSERT_OK(CreateSummaryDbWriter(db_, "mad-science", "train", "jart", &env_,
&writer_));
env_.AdvanceByMillis(23);
TF_ASSERT_OK(writer_->WriteTensor(1, MakeScalarInt64(123LL), "taggy",
metadata.SerializeAsString()));
TF_ASSERT_OK(writer_->Flush());
ASSERT_EQ(0.023, QueryDouble("SELECT started_time FROM Runs"));
ASSERT_EQ(0.0, QueryDouble("SELECT finished_time FROM Runs"));
env_.AdvanceByMillis(23);
writer_->Unref();
writer_ = nullptr;
ASSERT_EQ(0.023, QueryDouble("SELECT started_time FROM Runs"));
ASSERT_EQ(0.046, QueryDouble("SELECT finished_time FROM Runs"));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/summary/summary_db_writer.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/summary/summary_db_writer_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
2cf421c1-f469-45b8-8be7-7c73f872ea22 | cpp | tensorflow/tensorflow | list_ops_util | tensorflow/lite/kernels/variants/list_ops_util.cc | tensorflow/lite/kernels/variants/list_ops_util_test.cc | #include "tensorflow/lite/kernels/variants/list_ops_util.h"
#include "tensorflow/lite/array.h"
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/variants/tensor_array.h"
namespace tflite {
namespace variants {
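// Interprets an int32 shape tensor as a TfLiteIntArray: a scalar tensor
// yields an empty (unranked) array, while a 1-D tensor of length r is read
// as r dimension sizes.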
IntArrayUniquePtr TensorAsShape(const TfLiteTensor& shape) {
if (shape.dims->size == 0) {
return BuildTfLiteArray({});
}
const int rank = shape.dims->data[0];
const int* begin = reinterpret_cast<const int*>(shape.data.data);
const int* end = begin + rank;
return BuildTfLiteArray(std::vector<int>(begin, end));
}
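// Merges two shape signatures, where an empty array means "unranked" and a
// -1 dimension means "unknown". Unranked defers to the other operand; known
// dimensions must match exactly, and a known dimension refines an unknown
// one. Returns nullptr when the shapes are incompatible (different rank or
// conflicting known dims). For example, merging [-1, 2, 5] with [1, -1, 5]
// yields [1, 2, 5].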
IntArrayUniquePtr MergeShapesOrNull(IntArrayUniquePtr l, IntArrayUniquePtr r) {
if (l == nullptr) {
return r;
}
if (r == nullptr) {
return l;
}
if (l->size == 0) {
return r;
}
if (r->size == 0) {
return l;
}
if (l->size != r->size) {
return nullptr;
}
for (int i = 0; i < r->size; ++i) {
if (l->data[i] == -1) {
l->data[i] = r->data[i];
} else if (r->data[i] != -1 && l->data[i] != r->data[i]) {
return nullptr;
}
}
return l;
}
bool IsShapeFullyDefined(const TfLiteIntArray& shape) {
for (int i = 0; i < shape.size; ++i) {
if (shape.data[i] < 0) {
return false;
}
}
return true;
}
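// Writes the common shape of all set elements in `arr` to `result`, or
// nullptr if no element is set. Fails with kTfLiteError if two set elements
// disagree on shape; unset (nullptr) elements are skipped.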
TfLiteStatus GetShapeIfAllEqual(const TensorArray& arr,
IntArrayUniquePtr& result) {
const TfLiteIntArray* common_shape = nullptr;
for (int i = 0; i < arr.NumElements(); ++i) {
const TfLiteTensor* cur_element = arr.At(i);
if (cur_element == nullptr) {
continue;
}
if (common_shape == nullptr) {
common_shape = cur_element->dims;
continue;
}
if (!TfLiteIntArrayEqual(common_shape, cur_element->dims)) {
return kTfLiteError;
}
}
result = common_shape != nullptr ? BuildTfLiteArray(*common_shape) : nullptr;
return kTfLiteOk;
}
}
} | #include "tensorflow/lite/kernels/variants/list_ops_util.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/array.h"
#include "tensorflow/lite/core/c/c_api_types.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/kernels/variants/tensor_array.h"
#include "tensorflow/lite/util.h"
namespace tflite {
namespace variants {
namespace {
TEST(TensorAsShape, ScalarTensor_ReturnsEmptyIntArray) {
TensorUniquePtr scalar_tensor =
BuildTfLiteTensor(kTfLiteInt32, BuildTfLiteArray({}), kTfLiteDynamic);
IntArrayUniquePtr shape_from_tensor = TensorAsShape(*scalar_tensor);
ASSERT_THAT(shape_from_tensor.get(), DimsAre({}));
}
TEST(TensorAsShape, SingleElementTensor_ReturnsSize1Shape) {
TensorUniquePtr single_el_tensor =
BuildTfLiteTensor(kTfLiteInt32, BuildTfLiteArray({1}), kTfLiteDynamic);
single_el_tensor->data.i32[0] = 10;
IntArrayUniquePtr shape_from_tensor = TensorAsShape(*single_el_tensor);
ASSERT_THAT(shape_from_tensor.get(), DimsAre({10}));
}
TEST(TensorAsShape, OneDMultipleElementShape_ReturnsHighRankedShape) {
TensorUniquePtr one_d_mul_el_tensor =
BuildTfLiteTensor(kTfLiteInt32, BuildTfLiteArray({3}), kTfLiteDynamic);
one_d_mul_el_tensor->data.i32[0] = 10;
one_d_mul_el_tensor->data.i32[1] = 9;
one_d_mul_el_tensor->data.i32[2] = 8;
IntArrayUniquePtr shape_from_tensor = TensorAsShape(*one_d_mul_el_tensor);
ASSERT_THAT(shape_from_tensor.get(), DimsAre({10, 9, 8}));
}
TEST(MergeShapesOrNull, IncompatibleSameRank_ReturnsNull) {
IntArrayUniquePtr l = BuildTfLiteArray({2, 3});
IntArrayUniquePtr r = BuildTfLiteArray({3, 3});
EXPECT_EQ(MergeShapesOrNull(std::move(l), std::move(r)).get(), nullptr);
}
TEST(MergeShapesOrNull, NotSameRank_ReturnsNull) {
IntArrayUniquePtr l = BuildTfLiteArray({1});
IntArrayUniquePtr r = BuildTfLiteArray({1, 2});
EXPECT_EQ(MergeShapesOrNull(std::move(l), std::move(r)).get(), nullptr);
}
TEST(MergeShapesOrNull, MergeShapesOrNullSameRankNENull) {
IntArrayUniquePtr l = BuildTfLiteArray({1});
IntArrayUniquePtr r = BuildTfLiteArray({2});
EXPECT_EQ(MergeShapesOrNull(std::move(l), std::move(r)).get(), nullptr);
}
TEST(MergeShapesOrNull, RankedUnknownLKnownR_ReturnsStatic) {
IntArrayUniquePtr l = BuildTfLiteArray({-1});
IntArrayUniquePtr r = BuildTfLiteArray({2});
EXPECT_THAT(MergeShapesOrNull(std::move(l), std::move(r)).get(),
DimsAre({2}));
}
TEST(MergeShapesOrNull, UnknownRKnownL_ReturnsStatic) {
IntArrayUniquePtr l = BuildTfLiteArray({2});
IntArrayUniquePtr r = BuildTfLiteArray({-1});
EXPECT_THAT(MergeShapesOrNull(std::move(l), std::move(r)).get(),
DimsAre({2}));
}
TEST(MergeShapesOrNull, UnknownBoth_ReturnsUnknown) {
IntArrayUniquePtr l = BuildTfLiteArray({-1});
IntArrayUniquePtr r = BuildTfLiteArray({-1});
EXPECT_THAT(MergeShapesOrNull(std::move(l), std::move(r)).get(),
DimsAre({-1}));
}
TEST(MergeShapesOrNull, RankedUnknownDifferentDims_ConstrainsUnknownDims) {
IntArrayUniquePtr l = BuildTfLiteArray({-1, 2, 5});
IntArrayUniquePtr r = BuildTfLiteArray({1, -1, 5});
EXPECT_THAT(MergeShapesOrNull(std::move(l), std::move(r)).get(),
DimsAre({1, 2, 5}));
}
TEST(MergeShapesOrNull, BothUnranked_ReturnsUnranked) {
IntArrayUniquePtr l = BuildTfLiteArray({});
IntArrayUniquePtr r = BuildTfLiteArray({});
EXPECT_THAT(MergeShapesOrNull(std::move(l), std::move(r)).get(), DimsAre({}));
}
TEST(MergeShapesOrNull, UrankedAndStatic1D_ReturnsStatic1D) {
IntArrayUniquePtr l = BuildTfLiteArray({});
IntArrayUniquePtr r = BuildTfLiteArray({1});
EXPECT_THAT(MergeShapesOrNull(std::move(l), std::move(r)).get(),
DimsAre({1}));
}
TEST(MergeShapesOrNull, UnrankedAndStaticND_ReturnsStaticND) {
IntArrayUniquePtr l = BuildTfLiteArray({});
IntArrayUniquePtr r = BuildTfLiteArray({2, 3});
EXPECT_THAT(MergeShapesOrNull(std::move(l), std::move(r)).get(),
DimsAre({2, 3}));
}
TEST(MergeShapesOrNull, UnrankedAndRankedUnknown_ReturnsRankedUnknown) {
IntArrayUniquePtr l = BuildTfLiteArray({});
IntArrayUniquePtr r = BuildTfLiteArray({-1});
EXPECT_THAT(MergeShapesOrNull(std::move(l), std::move(r)).get(),
DimsAre({-1}));
}
TEST(MergeShapesOrNull, NullInput_ReturnsOther) {
EXPECT_THAT(MergeShapesOrNull(BuildTfLiteArray({3}), nullptr).get(),
DimsAre({3}));
EXPECT_THAT(MergeShapesOrNull(nullptr, BuildTfLiteArray({2})).get(),
DimsAre({2}));
EXPECT_EQ(MergeShapesOrNull(nullptr, nullptr).get(), nullptr);
}
TEST(MergeShapesOrNull, NullInput_ReturnsUnrankedOther) {
EXPECT_THAT(MergeShapesOrNull(BuildTfLiteArray({}), nullptr).get(),
DimsAre({}));
EXPECT_THAT(MergeShapesOrNull(nullptr, BuildTfLiteArray({})).get(),
DimsAre({}));
}
TEST(ElementsSameShape, NoElements_SucceedsWithNullptr) {
TensorArray arr = {kTfLiteInt32, BuildTfLiteArray({})};
arr.Resize(2);
IntArrayUniquePtr res;
ASSERT_EQ(GetShapeIfAllEqual(arr, res), kTfLiteOk);
EXPECT_EQ(res.get(), nullptr);
}
TEST(ElementsSameShape, ZeroSize_SucceedsWithNullptr) {
TensorArray arr = {kTfLiteInt32, BuildTfLiteArray({})};
IntArrayUniquePtr res;
ASSERT_EQ(GetShapeIfAllEqual(arr, res), kTfLiteOk);
EXPECT_EQ(res.get(), nullptr);
}
TEST(ElementsSameShape, OneSize_SucceedsWithShape) {
TensorArray arr = {kTfLiteInt32, BuildTfLiteArray({})};
arr.Resize(1);
arr.Set(0, BuildTfLiteTensor(kTfLiteInt32, BuildTfLiteArray({2}),
kTfLiteDynamic));
IntArrayUniquePtr res;
ASSERT_EQ(GetShapeIfAllEqual(arr, res), kTfLiteOk);
EXPECT_THAT(res.get(), DimsAre({2}));
}
TEST(ElementsSameShape, MultipleElements_AllSet_SucceedsWithShape) {
TensorArray arr = {kTfLiteInt32, BuildTfLiteArray({})};
arr.Resize(2);
arr.Set(0, BuildTfLiteTensor(kTfLiteInt32, BuildTfLiteArray({2}),
kTfLiteDynamic));
arr.Set(1, BuildTfLiteTensor(kTfLiteInt32, BuildTfLiteArray({2}),
kTfLiteDynamic));
IntArrayUniquePtr res;
EXPECT_EQ(GetShapeIfAllEqual(arr, res), kTfLiteOk);
EXPECT_THAT(res.get(), DimsAre({2}));
}
TEST(ElementsSameShape, MultipleElements_SomeSet_SucceedsWithShape) {
TensorArray arr = {kTfLiteInt32, BuildTfLiteArray({})};
arr.Resize(3);
arr.Set(0, BuildTfLiteTensor(kTfLiteInt32, BuildTfLiteArray({2}),
kTfLiteDynamic));
arr.Set(2, BuildTfLiteTensor(kTfLiteInt32, BuildTfLiteArray({2}),
kTfLiteDynamic));
IntArrayUniquePtr res;
EXPECT_EQ(GetShapeIfAllEqual(arr, res), kTfLiteOk);
EXPECT_THAT(res.get(), DimsAre({2}));
}
TEST(ElementsSameShape, MultipleElements_SomeSetNotSameRank_Fails) {
TensorArray arr = {kTfLiteInt32, BuildTfLiteArray({})};
arr.Resize(3);
arr.Set(0, BuildTfLiteTensor(kTfLiteInt32, BuildTfLiteArray({2}),
kTfLiteDynamic));
arr.Set(2, BuildTfLiteTensor(kTfLiteInt32, BuildTfLiteArray({2, 3}),
kTfLiteDynamic));
IntArrayUniquePtr res;
EXPECT_EQ(GetShapeIfAllEqual(arr, res), kTfLiteError);
}
TEST(ElementsSameShape, MultipleElements_SomeSetNotSameDim_Fails) {
TensorArray arr = {kTfLiteInt32, BuildTfLiteArray({})};
arr.Resize(3);
arr.Set(0, BuildTfLiteTensor(kTfLiteInt32, BuildTfLiteArray({2, 2}),
kTfLiteDynamic));
arr.Set(2, BuildTfLiteTensor(kTfLiteInt32, BuildTfLiteArray({2, 3}),
kTfLiteDynamic));
IntArrayUniquePtr res;
EXPECT_EQ(GetShapeIfAllEqual(arr, res), kTfLiteError);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/variants/list_ops_util.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/variants/list_ops_util_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
ce3f61da-4768-4dc2-a05f-104fc015a518 | cpp | tensorflow/tensorflow | comparators | third_party/xla/xla/hlo/builder/lib/comparators.cc | third_party/xla/xla/hlo/builder/lib/comparators_test.cc | #include "xla/hlo/builder/lib/comparators.h"
#include <limits>
#include <optional>
#include <string>
#include <vector>
#include "absl/log/check.h"
#include "absl/strings/str_cat.h"
#include "absl/types/span.h"
#include "xla/hlo/builder/xla_builder.h"
#include "xla/hlo/builder/xla_computation.h"
#include "xla/shape_util.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
namespace xla {
namespace {
using XlaCompareOp = XlaOp (*)(XlaOp, XlaOp, absl::Span<const int64_t>);
XlaComputation CreateScalarComparisonComputation(
const std::string& name, const std::vector<PrimitiveType>& operand_types,
XlaBuilder* builder, XlaCompareOp generator) {
CHECK_NE(operand_types.size(), 0);
std::vector<std::optional<XlaCompareOp>> generators(operand_types.size());
generators[0] = generator;
return CreateScalarComparisonComputation(name, operand_types, generators,
builder);
}
}
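// Builds a scalar comparator over (lhs, rhs) parameter pairs, one pair per
// operand type. Operands with a comparison generator are combined
// lexicographically: each comparison is selected only when all preceding
// compared operands are equal under total order, mirroring a tie-breaking
// sort key.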
XlaComputation CreateScalarComparisonComputation(
const std::string& name, const std::vector<PrimitiveType>& operand_types,
const std::vector<std::optional<XlaCompareOp>>& generators,
XlaBuilder* builder) {
auto b = builder->CreateSubBuilder(name);
if (operand_types.empty()) {
b->ReportError(InvalidArgument("operand_types should not be empty"));
return b->BuildAndNoteError();
}
CHECK_EQ(operand_types.size(), generators.size());
int parameter_count = 0;
int last_generator_index = 0;
std::vector<XlaOp> lhs_params;
std::vector<XlaOp> rhs_params;
for (auto operand_type : operand_types) {
auto scalar_shape = ShapeUtil::MakeShape(operand_type, {});
auto lhs_param = Parameter(b.get(), parameter_count * 2, scalar_shape,
absl::StrCat("p.", parameter_count, ".lhs"));
auto rhs_param = Parameter(b.get(), parameter_count * 2 + 1, scalar_shape,
absl::StrCat("p.", parameter_count, ".rhs"));
lhs_params.emplace_back(lhs_param);
rhs_params.emplace_back(rhs_param);
if (generators[parameter_count].has_value()) {
last_generator_index = parameter_count;
}
parameter_count++;
}
CHECK_NE(parameter_count, 0);
XlaOp result;
XlaOp prev_equal;
for (int i = 0; i < parameter_count; i++) {
if (generators[i].has_value()) {
XlaOp cmp_op = generators[i].value()(lhs_params[i], rhs_params[i], {});
result = prev_equal.valid() ? Select(prev_equal, cmp_op, result) : cmp_op;
if (i != last_generator_index) {
XlaOp eq_op = EqTotalOrder(lhs_params[i], rhs_params[i]);
prev_equal = prev_equal.valid() ? And(prev_equal, eq_op) : eq_op;
}
}
}
CHECK(result.valid());
return b->BuildAndNoteError();
}
XlaComputation CreateScalarLtComputation(
const std::vector<PrimitiveType>& operand_types, XlaBuilder* builder) {
return CreateScalarComparisonComputation("compare-less-than", operand_types,
builder, LtTotalOrder);
}
XlaComputation CreateScalarGtComputation(
const std::vector<PrimitiveType>& operand_types, XlaBuilder* builder) {
return CreateScalarComparisonComputation(
"compare-greater-than", operand_types, builder, GtTotalOrder);
}
} | #include "xla/hlo/builder/lib/comparators.h"
#include <cmath>
#include <limits>
#include <vector>
#include "absl/container/inlined_vector.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/builder/lib/constants.h"
#include "xla/hlo/builder/xla_builder.h"
#include "xla/hlo/builder/xla_computation.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/primitive_util.h"
#include "xla/service/hlo.pb.h"
#include "xla/test.h"
#include "xla/tests/client_library_test_base.h"
#include "xla/tests/test_macros.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/protobuf.h"
namespace xla {
namespace {
class ComparatorsTest : public ClientLibraryTestBase {
public:
ComparatorsTest() : builder_(TestName()) {}
XlaBuilder* builder() { return &builder_; }
private:
XlaBuilder builder_;
};
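// Compares every ordered pair of six constants that ascend strictly under
// total-order semantics (-NaN, -inf, -0, +0, +inf, NaN), so the expected
// result for the pair (i, j) is simply i < j (or i > j for Gt).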
template <
PrimitiveType type,
typename T = typename primitive_util::PrimitiveTypeToNative<type>::type>
void BuildComparatorAndComparisons(ComparatorsTest* test,
bool compare_less_than,
absl::InlinedVector<bool, 10>* expected) {
auto compare = compare_less_than
? CreateScalarLtComputation({type}, test->builder())
: CreateScalarGtComputation({type}, test->builder());
auto negative_nan = ConstantR0<T>(
test->builder(), -T(std::numeric_limits<float>::quiet_NaN()));
auto positive_nan = ConstantR0<T>(test->builder(),
T(std::numeric_limits<float>::quiet_NaN()));
auto negative_zero = ConstantR0<T>(test->builder(), T(-0.));
auto positive_zero = ConstantR0<T>(test->builder(), T(0.));
auto negative_infinity = MinValue(test->builder(), type);
auto positive_infinity = MaxValue(test->builder(), type);
std::vector<XlaOp> all_constants{negative_nan, negative_infinity,
negative_zero, positive_zero,
positive_infinity, positive_nan};
std::vector<XlaOp> all_comparisons;
all_comparisons.reserve(std::pow(all_constants.size(), 2));
for (const XlaOp& lhs_constant : all_constants) {
for (const XlaOp& rhs_constant : all_constants) {
all_comparisons.push_back(Broadcast(
Call(test->builder(), compare, {lhs_constant, rhs_constant}), {1}));
}
}
ConcatInDim(test->builder(), all_comparisons, 0);
expected->clear();
for (int i = 0; i < all_constants.size(); ++i) {
for (int j = 0; j < all_constants.size(); ++j) {
expected->push_back(compare_less_than ? i < j : i > j);
}
}
}
XLA_TEST_F(ComparatorsTest, CompareLtBF16) {
absl::InlinedVector<bool, 10> expected;
BuildComparatorAndComparisons<BF16>(this, true,
&expected);
ComputeAndCompareR1<bool>(builder(), expected, {});
}
XLA_TEST_F(ComparatorsTest, CompareGtBF16) {
absl::InlinedVector<bool, 10> expected;
BuildComparatorAndComparisons<BF16>(this, false,
&expected);
ComputeAndCompareR1<bool>(builder(), expected, {});
}
XLA_TEST_F(ComparatorsTest, CompareLtF16) {
absl::InlinedVector<bool, 10> expected;
BuildComparatorAndComparisons<F16>(this, true,
&expected);
ComputeAndCompareR1<bool>(builder(), expected, {});
}
XLA_TEST_F(ComparatorsTest, CompareGtF16) {
absl::InlinedVector<bool, 10> expected;
BuildComparatorAndComparisons<F16>(this, false,
&expected);
ComputeAndCompareR1<bool>(builder(), expected, {});
}
XLA_TEST_F(ComparatorsTest, CompareLtF32) {
absl::InlinedVector<bool, 10> expected;
BuildComparatorAndComparisons<F32>(this, true,
&expected);
ComputeAndCompareR1<bool>(builder(), expected, {});
}
XLA_TEST_F(ComparatorsTest, CompareGtF32) {
absl::InlinedVector<bool, 10> expected;
BuildComparatorAndComparisons<F32>(this, false,
&expected);
ComputeAndCompareR1<bool>(builder(), expected, {});
}
XLA_TEST_F(ComparatorsTest, CompareLtF64) {
absl::InlinedVector<bool, 10> expected;
BuildComparatorAndComparisons<F64>(this, true,
&expected);
ComputeAndCompareR1<bool>(builder(), expected, {});
}
XLA_TEST_F(ComparatorsTest, CompareGtF64) {
absl::InlinedVector<bool, 10> expected;
BuildComparatorAndComparisons<F64>(this, false,
&expected);
ComputeAndCompareR1<bool>(builder(), expected, {});
}
const auto kCompareStr = HloOpcodeString(xla::HloOpcode::kCompare);
const auto kParameterStr = HloOpcodeString(xla::HloOpcode::kParameter);
const auto kSelectStr = HloOpcodeString(xla::HloOpcode::kSelect);
void ExpectCompareOp(
const xla::HloInstructionProto op, xla::PrimitiveType type,
absl::string_view direction, int parameter0_number, int parameter1_number,
const tsl::protobuf::RepeatedPtrField<xla::HloInstructionProto>& all_ops) {
EXPECT_EQ(op.opcode(), kCompareStr);
EXPECT_EQ(op.comparison_direction(), direction);
const auto& operand0 = all_ops.at(op.operand_ids(0) - 1);
EXPECT_EQ(operand0.opcode(), kParameterStr);
EXPECT_EQ(operand0.parameter_number(), parameter0_number);
EXPECT_EQ(operand0.shape().element_type(), type);
const auto& operand1 = all_ops.at(op.operand_ids(1) - 1);
EXPECT_EQ(operand1.opcode(), kParameterStr);
EXPECT_EQ(operand1.parameter_number(), parameter1_number);
EXPECT_EQ(operand1.shape().element_type(), type);
}
TEST(VariadicComparatorTest, OneOperandOneComparison) {
XlaBuilder builder("test");
XlaComputation comp = CreateScalarComparisonComputation(
"computation", {U16}, {LtTotalOrder}, &builder);
EXPECT_EQ(comp.proto().computations_size(), 1);
EXPECT_EQ(comp.proto().computations(0).program_shape().parameters_size(), 2);
const auto& instr = comp.proto().computations(0).instructions();
const auto& root = instr.at(comp.proto().computations(0).root_id() - 1);
ExpectCompareOp(root, U16, "LT", 0, 1, instr);
}
TEST(VariadicComparatorTest, TwoOperandsOneComparison) {
XlaBuilder builder("test");
XlaComputation comp = CreateScalarComparisonComputation(
"computation", {U16, U32}, {LtTotalOrder, {}}, &builder);
EXPECT_EQ(comp.proto().computations_size(), 1);
EXPECT_EQ(comp.proto().computations(0).program_shape().parameters_size(), 4);
const auto& instr = comp.proto().computations(0).instructions();
const auto& root = instr.at(comp.proto().computations(0).root_id() - 1);
ExpectCompareOp(root, U16, "LT", 0, 1, instr);
}
TEST(VariadicComparatorTest, TwoOperandsTwoComparisons) {
XlaBuilder builder("test");
XlaComputation comp = CreateScalarComparisonComputation(
"computation", {U16, U32}, {LtTotalOrder, LtTotalOrder}, &builder);
EXPECT_EQ(comp.proto().computations_size(), 1);
EXPECT_EQ(comp.proto().computations(0).program_shape().parameters_size(), 4);
const auto& instr = comp.proto().computations(0).instructions();
const auto& root = instr.at(comp.proto().computations(0).root_id() - 1);
EXPECT_EQ(root.opcode(), HloOpcodeString(xla::HloOpcode::kSelect));
ExpectCompareOp(instr.at(root.operand_ids(0) - 1), U16, "EQ", 0, 1, instr);
ExpectCompareOp(instr.at(root.operand_ids(1) - 1), U32, "LT", 2, 3, instr);
ExpectCompareOp(instr.at(root.operand_ids(2) - 1), U16, "LT", 0, 1, instr);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/hlo/builder/lib/comparators.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/hlo/builder/lib/comparators_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
026ad976-bcd8-45e2-a971-4ccda66bdadd | cpp | tensorflow/tensorflow | mkl_quantize_op | tensorflow/core/kernels/mkl/mkl_quantize_op.cc | tensorflow/core/kernels/mkl/mkl_quantize_op_test.cc | #ifdef INTEL_MKL
#define EIGEN_USE_THREADS
#include "dnnl.hpp"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/type_traits.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/graph/mkl_graph_util.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/util/mkl_util.h"
#if defined(DNNL_AARCH64_USE_ACL) && defined(ENABLE_ONEDNN_OPENMP)
#include "tensorflow/core/platform/mutex.h"
#endif
using dnnl::primitive_attr;
using dnnl::prop_kind;
using dnnl::reorder;
using dnnl::stream;
namespace {
enum {
QUANTIZE_MODE_MIN_COMBINED,
QUANTIZE_MODE_MIN_FIRST,
QUANTIZE_MODE_SCALED,
};
enum {
ROUND_HALF_AWAY_FROM_ZERO,
ROUND_HALF_TO_EVEN,
};
}
namespace tensorflow {
#ifndef ENABLE_ONEDNN_V3
#define SET_MKL_LAYOUT(md) SetMklLayout(&md)
#else
#define SET_MKL_LAYOUT(md) SetMklLayout(md)
#endif
typedef Eigen::ThreadPoolDevice CPUDevice;
struct MklReorderWithScaleFwdParams {
memory::dims src_dims;
memory::desc src_md;
memory::desc dst_md;
#ifdef ENABLE_ONEDNN_V3
memory::desc scale_md;
#endif
string dtypes = string("");
struct PostOpParam {
string name;
std::vector<float> param;
};
PostOpParam post_op_params;
#ifndef ENABLE_ONEDNN_V3
MklReorderWithScaleFwdParams(memory::dims src_dims, memory::desc src_md,
memory::desc dst_md)
: src_dims(src_dims), src_md(src_md), dst_md(dst_md) {}
#else
MklReorderWithScaleFwdParams(memory::dims src_dims, memory::desc src_md,
memory::desc dst_md, memory::desc scale_md)
: src_dims(src_dims),
src_md(src_md),
dst_md(dst_md),
scale_md(scale_md) {}
#endif
};
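// Caches a oneDNN reorder primitive that converts between the source and
// destination memory descriptors while applying a single per-tensor scale;
// instances are keyed and reused via MklReorderWithScalePrimitiveFactory.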
class MklReorderWithScalePrimitive : public MklPrimitive {
public:
explicit MklReorderWithScalePrimitive(
const MklReorderWithScaleFwdParams& fwdParams)
: MklPrimitive(engine(engine::kind::cpu, 0)) {
Setup(fwdParams);
}
~MklReorderWithScalePrimitive() {}
std::shared_ptr<primitive> GetPrimitive() { return context_.reorder_prim; }
void Execute(void* src_data, void* dst_data,
#ifdef ENABLE_ONEDNN_V3
void* scale_data,
#endif
std::shared_ptr<stream> reorder_stream) {
#if defined(DNNL_AARCH64_USE_ACL) && defined(ENABLE_ONEDNN_OPENMP)
mutex_lock lock(primitive_execution_mu_);
#endif
#if !defined(ENABLE_ONEDNN_OPENMP) && !defined(ENABLE_ONEDNN_V3)
context_.src_mem->set_data_handle(src_data, *reorder_stream);
context_.dst_mem->set_data_handle(dst_data, *reorder_stream);
#else
context_.src_mem->set_data_handle(src_data);
context_.dst_mem->set_data_handle(dst_data);
#endif
#ifdef ENABLE_ONEDNN_V3
context_.scale_mem->set_data_handle(scale_data);
#endif
context_.reorder_prim->execute(*reorder_stream, context_.prim_args);
context_.src_mem->set_data_handle(DummyData);
context_.dst_mem->set_data_handle(DummyData);
#ifdef ENABLE_ONEDNN_V3
context_.scale_mem->set_data_handle(DummyData);
#endif
}
private:
struct ReorderContext {
std::shared_ptr<dnnl::memory> src_mem;
std::shared_ptr<dnnl::memory> dst_mem;
#ifdef ENABLE_ONEDNN_V3
std::shared_ptr<dnnl::memory> scale_mem;
#endif
std::shared_ptr<reorder::primitive_desc> reorder_pd;
std::shared_ptr<primitive> reorder_prim;
std::shared_ptr<dnnl::stream> reorder_stream;
std::unordered_map<int, dnnl::memory> prim_args;
ReorderContext()
: src_mem(nullptr),
dst_mem(nullptr),
#ifdef ENABLE_ONEDNN_V3
scale_mem(nullptr),
#endif
reorder_pd(nullptr),
reorder_prim(nullptr) {
}
} context_;
void Setup(const MklReorderWithScaleFwdParams& fwdParams) {
context_.src_mem.reset(
new memory(fwdParams.src_md, cpu_engine_, DummyData));
context_.dst_mem.reset(
new memory(fwdParams.dst_md, cpu_engine_, DummyData));
#ifdef ENABLE_ONEDNN_V3
context_.scale_mem.reset(
new memory(fwdParams.scale_md, cpu_engine_, DummyData));
#endif
dnnl::primitive_attr post_ops_attr;
#ifndef ENABLE_ONEDNN_V3
auto const& post_op_params = fwdParams.post_op_params;
DCHECK(post_op_params.name == "scale");
DCHECK_EQ(post_op_params.param.size(), 1);
std::vector<float> scales;
scales.push_back(post_op_params.param[0]);
post_ops_attr.set_output_scales(0, scales);
#else
post_ops_attr.set_scales_mask(DNNL_ARG_SRC, 0 );
#endif
context_.reorder_pd.reset(
new ReorderPd(cpu_engine_, context_.src_mem->get_desc(), cpu_engine_,
context_.dst_mem->get_desc(), post_ops_attr));
context_.reorder_prim.reset(new reorder(*context_.reorder_pd));
context_.prim_args.insert({DNNL_ARG_FROM, *context_.src_mem});
context_.prim_args.insert({DNNL_ARG_TO, *context_.dst_mem});
#ifdef ENABLE_ONEDNN_V3
context_.prim_args.insert(
{DNNL_ARG_ATTR_SCALES | DNNL_ARG_SRC, *context_.scale_mem});
#endif
}
#if defined(DNNL_AARCH64_USE_ACL) && defined(ENABLE_ONEDNN_OPENMP)
mutex primitive_execution_mu_;
#endif
};
template <typename T>
class MklReorderWithScalePrimitiveFactory : public MklPrimitiveFactory<T> {
public:
static MklReorderWithScalePrimitive* Get(
const memory* from, const memory* to,
const MklReorderWithScaleFwdParams& fwdParams) {
auto reorderPrim = static_cast<MklReorderWithScalePrimitive*>(
MklReorderWithScalePrimitiveFactory<T>::GetInstance().GetReorder(
from, to, fwdParams));
if (reorderPrim == nullptr) {
reorderPrim = new MklReorderWithScalePrimitive(fwdParams);
MklReorderWithScalePrimitiveFactory<T>::GetInstance().SetReorder(
from, to, reorderPrim, fwdParams);
}
return reorderPrim;
}
static MklReorderWithScalePrimitiveFactory& GetInstance() {
static MklReorderWithScalePrimitiveFactory instance_;
return instance_;
}
private:
MklReorderWithScalePrimitiveFactory() {}
~MklReorderWithScalePrimitiveFactory() {}
static string CreateKey(const memory* from, const memory* to,
const MklReorderWithScaleFwdParams& fwdParams) {
FactoryKeyCreator key_creator;
key_creator.AddAsKey(MklReorderPrimitiveFactory<T>::CreateKey(from, to));
if (fwdParams.post_op_params.name == "scale") {
DCHECK_EQ(fwdParams.post_op_params.param.size(), 1);
key_creator.AddAsKey(fwdParams.post_op_params.name);
key_creator.AddAsKey(fwdParams.post_op_params.param[0]);
} else {
return string("not_a_key");
}
return key_creator.GetKey();
}
MklPrimitive* GetReorder(const memory* from, const memory* to,
const MklReorderWithScaleFwdParams& fwdParams) {
string key = CreateKey(from, to, fwdParams);
return this->GetOp(key);
}
void SetReorder(const memory* from, const memory* to, MklPrimitive* op,
const MklReorderWithScaleFwdParams& fwdParams) {
string key = CreateKey(from, to, fwdParams);
this->SetOp(key, op);
}
};
template <typename Device, typename T, typename S, bool native_format = false>
class MklQuantizeV2Op : public OpKernel {
public:
explicit MklQuantizeV2Op(OpKernelConstruction* ctx) : OpKernel(ctx) {
string mode_string;
OP_REQUIRES_OK(ctx, ctx->GetAttr("mode", &mode_string));
OP_REQUIRES(ctx,
(mode_string == "MIN_COMBINED" || mode_string == "MIN_FIRST" ||
mode_string == "SCALED"),
absl::InvalidArgumentError(
absl::StrCat("Mode string must be 'MIN_COMBINED', "
"'MIN_FIRST', or 'SCALED', is '",
mode_string, "'")));
if (mode_string == "MIN_COMBINED") {
mode_ = QUANTIZE_MODE_MIN_COMBINED;
} else if (mode_string == "MIN_FIRST") {
mode_ = QUANTIZE_MODE_MIN_FIRST;
} else if (mode_string == "SCALED") {
mode_ = QUANTIZE_MODE_SCALED;
}
string round_mode_string;
OP_REQUIRES_OK(ctx, ctx->GetAttr("round_mode", &round_mode_string));
OP_REQUIRES(
ctx,
(round_mode_string == "HALF_AWAY_FROM_ZERO" ||
round_mode_string == "HALF_TO_EVEN"),
absl::InvalidArgumentError(absl::StrCat("Round mode string must be "
"'HALF_AWAY_FROM_ZERO' or "
"'HALF_TO_EVEN', is '" +
round_mode_string + "'")));
if (round_mode_string == "HALF_AWAY_FROM_ZERO") {
round_mode_ = ROUND_HALF_AWAY_FROM_ZERO;
} else if (round_mode_string == "HALF_TO_EVEN") {
OP_REQUIRES(ctx, mode_string == "SCALED",
absl::InvalidArgumentError(
absl::StrCat("Round mode 'HALF_TO_EVEN' "
"only supported for mode 'SCALED', "
"but mode is '",
mode_string, "'.")));
round_mode_ = ROUND_HALF_TO_EVEN;
}
OP_REQUIRES_OK(ctx, ctx->GetAttr("narrow_range", &narrow_range_));
OP_REQUIRES_OK(ctx, ctx->GetAttr("axis", &axis_));
OP_REQUIRES_OK(
ctx, ctx->GetAttr("ensure_minimum_range", &ensure_minimum_range_));
}
void ComputeScalar(OpKernelContext* ctx, float min_range, float max_range) {
OP_REQUIRES(ctx, (mode_ == QUANTIZE_MODE_MIN_FIRST),
absl::InvalidArgumentError(
"Scalar calculation in MKL is supported only for"
"MIN_FIRST mode for now."));
const Tensor& min_tensor = ctx->input(1);
const Tensor& max_tensor = ctx->input(2);
OP_REQUIRES(
ctx, TensorShapeUtils::IsScalar(min_tensor.shape()),
absl::InvalidArgumentError(absl::StrCat(
"`min_input` must be rank 0 but is rank ", min_tensor.dims())));
OP_REQUIRES(
ctx, TensorShapeUtils::IsScalar(max_tensor.shape()),
absl::InvalidArgumentError(absl::StrCat(
"`max_input` must be rank 0 but is rank ", max_tensor.dims())));
auto cpu_engine = engine(engine::kind::cpu, 0);
const unsigned int src_idx = 0;
const Tensor& src_tensor = MklGetInput(ctx, src_idx);
MklDnnShape output_mkl_shape;
output_mkl_shape.SetMklTensor(false);
Tensor* output_tensor = nullptr;
AllocateOutputSetMklShape(ctx, 0, &output_tensor, src_tensor.shape(),
output_mkl_shape, native_format);
TensorShape min_tf_shape = {};
MklDnnShape min_mkl_shape;
min_mkl_shape.SetMklTensor(false);
Tensor* output_min_tensor = nullptr;
AllocateOutputSetMklShape(ctx, 1, &output_min_tensor, min_tf_shape,
min_mkl_shape, native_format);
TensorShape max_tf_shape = {};
MklDnnShape max_mkl_shape;
max_mkl_shape.SetMklTensor(false);
Tensor* output_max_tensor = nullptr;
AllocateOutputSetMklShape(ctx, 2, &output_max_tensor, max_tf_shape,
max_mkl_shape, native_format);
float scale_factor = 0;
const int number_of_bits = sizeof(T) * 8;
const int64 number_of_steps = static_cast<int64_t>(1) << number_of_bits;
scale_factor = (number_of_steps - 1.0) / (max_range - min_range);
float* src_data = const_cast<float*>(src_tensor.flat<float>().data());
T* out_data = output_tensor->flat<T>().data();
out_data[0] = (src_data[0] - min_range) * scale_factor;
output_min_tensor->scalar<float>()() = min_range;
output_max_tensor->scalar<float>()() = max_range;
return;
}
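// Quantizes the float input to T with oneDNN. SCALED mode multiplies the
// input by scale = target_range / max_abs through a reorder primitive,
// where target_range is (2^bits - 1) for unsigned T and (2^bits - 1) / 2
// for signed T. MIN_FIRST mode realizes q = (x - min_range) * 255 /
// (max_range - min_range) as a binary add of the input and -min_range,
// with the common scale applied via oneDNN scale attributes.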
void Compute(OpKernelContext* ctx) override {
const unsigned int src_idx = 0;
const float input_min_range = ctx->input(1).scalar<float>()();
const float input_max_range = ctx->input(2).scalar<float>()();
float min_range = std::min(0.0f, input_min_range);
float max_range;
OP_REQUIRES(ctx, (input_max_range >= input_min_range),
absl::InvalidArgumentError(
"input_max_range must be larger than input_min_range."));
const float epsilon = std::max(1.0f, std::max(fabsf(input_min_range),
fabsf(input_max_range))) *
ensure_minimum_range_;
max_range = std::max(input_max_range, min_range + epsilon);
max_range = std::max(0.0f, max_range);
auto cpu_engine = engine(engine::kind::cpu, 0);
const Tensor& src_tensor = MklGetInput(ctx, src_idx);
MklDnnShape src_mkl_shape;
GetMklShape(ctx, src_idx, &src_mkl_shape, native_format);
auto src_tf_shape = src_mkl_shape.IsMklTensor() ? src_mkl_shape.GetTfShape()
: src_tensor.shape();
auto src_dims = src_mkl_shape.IsMklTensor()
? src_mkl_shape.GetSizesAsMklDnnDims()
: TFShapeToMklDnnDims(src_tensor.shape());
auto output_dims = src_dims;
memory::format_tag dst_layout_type;
switch (src_tf_shape.dims()) {
case 0:
ComputeScalar(ctx, min_range, max_range);
return;
case 1:
dst_layout_type = memory::format_tag::x;
break;
case 2:
dst_layout_type = memory::format_tag::nc;
break;
case 3:
dst_layout_type = memory::format_tag::tnc;
break;
case 4:
dst_layout_type = memory::format_tag::nhwc;
break;
case 5:
dst_layout_type = memory::format_tag::ndhwc;
break;
default:
OP_REQUIRES_OK(ctx,
absl::AbortedError("Input dims must be <= 5 and >= 1"));
return;
}
MklDnnData<S> src(&cpu_engine);
MklDnnData<T> dst(&cpu_engine);
#ifdef ENABLE_ONEDNN_V3
MklDnnData<float> scale(&cpu_engine);
#endif
auto src_md =
src_mkl_shape.IsMklTensor()
? src_mkl_shape.GetMklLayout()
: memory::desc(src_dims, MklDnnType<S>(), dst_layout_type);
src.SetUsrMem(src_md, &src_tensor);
memory::desc dst_md =
memory::desc(src_dims, MklDnnType<T>(), dst_layout_type);
MklDnnShape output_mkl_shape;
TensorShape output_tf_shape;
if (src_mkl_shape.IsMklTensor()) {
output_mkl_shape.SetMklTensor(true);
output_mkl_shape.SET_MKL_LAYOUT(dst_md);
output_mkl_shape.SetElemType(MklDnnType<T>());
output_mkl_shape.SetTfLayout(src_mkl_shape.GetDimension(),
src_mkl_shape.GetSizesAsMklDnnDims(),
src_mkl_shape.GetTfDataFormat());
output_tf_shape.AddDim(dst_md.get_size() / sizeof(T));
} else {
output_mkl_shape.SetMklTensor(false);
output_tf_shape = MklDnnDimsToTFShape(output_dims);
}
Tensor* output_tensor = nullptr;
AllocateOutputSetMklShape(ctx, 0, &output_tensor, output_tf_shape,
output_mkl_shape, native_format);
dst.SetUsrMem(dst_md, output_tensor);
TensorShape min_tf_shape = {};
MklDnnShape min_mkl_shape;
min_mkl_shape.SetMklTensor(false);
Tensor* output_min_tensor = nullptr;
AllocateOutputSetMklShape(ctx, 1, &output_min_tensor, min_tf_shape,
min_mkl_shape, native_format);
TensorShape max_tf_shape = {};
MklDnnShape max_mkl_shape;
max_mkl_shape.SetMklTensor(false);
Tensor* output_max_tensor = nullptr;
AllocateOutputSetMklShape(ctx, 2, &output_max_tensor, max_tf_shape,
max_mkl_shape, native_format);
Eigen::ThreadPoolInterface* eigen_interface =
EigenThreadPoolFromTfContext(ctx);
tsl::OneDnnThreadPool eigen_tp(eigen_interface,
ThreadPoolUseCallerThread());
float scale_factor = 0;
if (mode_ == QUANTIZE_MODE_SCALED) {
const int num_bits = sizeof(T) * 8;
const float max_abs = std::max(std::abs(min_range), std::abs(max_range));
const bool is_signed = std::is_same<T, qint8>() ||
std::is_same<T, qint16>() ||
std::is_same<T, qint32>();
float target_range;
if (is_signed) {
max_range = max_abs;
min_range = -max_abs;
target_range = static_cast<float>((uint64_t{1} << num_bits) - 1) / 2.;
} else {
max_range = max_abs;
min_range = 0.0;
target_range = static_cast<float>((uint64_t{1} << num_bits) - 1);
}
scale_factor = target_range / max_abs;
#ifdef ENABLE_ONEDNN_V3
auto scale_md =
memory::desc({1}, MklDnnType<float>(), memory::format_tag::x);
MklReorderWithScaleFwdParams fwdParams(src_dims, src_md, dst_md,
scale_md);
Tensor scale_tensor;
OP_REQUIRES_OK(ctx, ctx->allocate_temp(DT_FLOAT, {1}, &scale_tensor));
scale_tensor.flat<float>()(0) = scale_factor;
scale.SetUsrMem(scale_md, &scale_tensor);
#else
MklReorderWithScaleFwdParams fwdParams(src_dims, src_md, dst_md);
#endif
fwdParams.dtypes.append(typeid(S).name());
fwdParams.dtypes.append(typeid(T).name());
fwdParams.post_op_params.name = "scale";
fwdParams.post_op_params.param.push_back(scale_factor);
MklReorderWithScalePrimitive* reorder_prim =
MklReorderWithScalePrimitiveFactory<T>::Get(
src.GetUsrMem(), dst.GetUsrMem(), fwdParams);
std::shared_ptr<stream> cpu_stream;
cpu_stream.reset(CreateStream(&eigen_tp, reorder_prim->GetEngine()));
reorder_prim->Execute(src.GetUsrMemDataHandle(),
dst.GetUsrMemDataHandle(),
#ifdef ENABLE_ONEDNN_V3
scale.GetUsrMemDataHandle(),
#endif
cpu_stream);
} else if (mode_ == QUANTIZE_MODE_MIN_FIRST) {
using namespace dnnl;
std::shared_ptr<stream> cpu_stream;
cpu_stream.reset(CreateStream(&eigen_tp, cpu_engine));
auto shift = static_cast<S>(-min_range);
memory::dims shift_dims(src_tf_shape.dims(), 1);
auto shift_md =
memory::desc(shift_dims, MklDnnType<S>(), dst_layout_type);
memory shift_mem(shift_md, cpu_engine, (void*)(&shift));
primitive_attr attr;
std::vector<float> src_0_scale{255.0f / (max_range - min_range)};
std::vector<float> src_1_scale{255.0f / (max_range - min_range)};
#ifdef ENABLE_ONEDNN_V3
attr.set_scales_mask(DNNL_ARG_SRC_0, 0);
attr.set_scales_mask(DNNL_ARG_SRC_1, 0);
auto binary_pd = binary::primitive_desc(cpu_engine, algorithm::binary_add,
src_md, shift_md, dst_md, attr);
#else
attr.set_scales(DNNL_ARG_SRC_0, 0, src_0_scale);
attr.set_scales(DNNL_ARG_SRC_1, 0, src_1_scale);
auto binary_d =
binary::desc(algorithm::binary_add, src_md, shift_md, dst_md);
auto binary_pd = binary::primitive_desc(binary_d, attr, cpu_engine);
#endif
auto binary_prim = binary(binary_pd);
auto src_0_scale_mem =
memory({{1}, MklDnnType<float>(), memory::format_tag::x}, cpu_engine,
src_0_scale.data());
auto src_1_scale_mem =
memory({{1}, MklDnnType<float>(), memory::format_tag::x}, cpu_engine,
src_1_scale.data());
std::unordered_map<int, memory> net_args{
{DNNL_ARG_SRC_0, *src.GetUsrMem()},
{DNNL_ARG_SRC_1, shift_mem},
{DNNL_ARG_DST, *dst.GetUsrMem()},
#ifdef ENABLE_ONEDNN_V3
{DNNL_ARG_ATTR_SCALES | DNNL_ARG_SRC_0, src_0_scale_mem},
{DNNL_ARG_ATTR_SCALES | DNNL_ARG_SRC_1, src_1_scale_mem}
#endif
};
binary_prim.execute(*cpu_stream, net_args);
} else {
OP_REQUIRES(ctx, false,
absl::UnimplementedError(
"Supported modes are MIN_FIRST and SCALED only."));
}
output_min_tensor->scalar<float>()() = min_range;
output_max_tensor->scalar<float>()() = max_range;
}
private:
float ensure_minimum_range_;
int mode_;
int round_mode_;
int axis_;
bool narrow_range_;
};
#define REGISTER_QUANTIZE(src_type, dst_type) \
REGISTER_KERNEL_BUILDER( \
Name("_MklQuantizeV2") \
.Device(DEVICE_CPU) \
.TypeConstraint<dst_type>("T") \
.Label(mkl_op_registry::kMklQuantizedOpLabel), \
MklQuantizeV2Op<CPUDevice, dst_type, src_type, true>)
REGISTER_QUANTIZE(float, qint8);
REGISTER_QUANTIZE(float, quint8);
#undef SET_MKL_LAYOUT
}
#endif | #if defined(INTEL_MKL)
#include "tensorflow/core/framework/fake_input.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/kernels/ops_testutil.h"
#include "tensorflow/core/kernels/ops_util.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
namespace tensorflow {
class MklQuantizeV2OpTest : public OpsTestBase {};
TEST_F(MklQuantizeV2OpTest, small_uint8) {
TF_ASSERT_OK(NodeDefBuilder("quantize_op", "_MklQuantizeV2")
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Attr("T", DataTypeToEnum<quint8>::v())
.Attr("mode", "SCALED")
.Attr("_kernel", "QuantizedMklOp")
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
AddInputFromArray<float>(TensorShape({8}),
{0.0, 1.0, 1.25, 1.75, 127.0, 255.0, 500.0, 2.0});
AddInputFromArray<float>(TensorShape({}), {0});
AddInputFromArray<float>(TensorShape({}), {255.0f});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_QUINT8, TensorShape({8}));
Tensor expected_min(allocator(), DT_FLOAT, TensorShape({}));
Tensor expected_max(allocator(), DT_FLOAT, TensorShape({}));
test::FillValues<quint8>(&expected, {0, 1, 1, 2, 127, 255, 255, 2});
test::FillValues<float>(&expected_min, {0.0});
test::FillValues<float>(&expected_max, {255.0});
test::ExpectTensorEqual<quint8>(expected, *GetOutput(0));
test::ExpectTensorEqual<float>(expected_min, *GetOutput(1));
test::ExpectTensorEqual<float>(expected_max, *GetOutput(2));
}
TEST_F(MklQuantizeV2OpTest, small_int8) {
TF_ASSERT_OK(NodeDefBuilder("quantize_op", "_MklQuantizeV2")
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Attr("T", DataTypeToEnum<qint8>::v())
.Attr("mode", "SCALED")
.Attr("_kernel", "QuantizedMklOp")
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
AddInputFromArray<float>(TensorShape({8}), {0.0, -1.0, 1.25, -1.75, -24.5,
-255.0, -80.315, 256.0});
AddInputFromArray<float>(TensorShape({}), {-50.0});
AddInputFromArray<float>(TensorShape({}), {127.0});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_QINT8, TensorShape({8}));
Tensor expected_min(allocator(), DT_FLOAT, TensorShape({}));
Tensor expected_max(allocator(), DT_FLOAT, TensorShape({}));
test::FillValues<qint8>(&expected, {0, -1, 1, -2, -25, -128, -81, 127});
test::ExpectTensorEqual<qint8>(expected, *GetOutput(0));
test::FillValues<float>(&expected_min, {-127.0});
test::FillValues<float>(&expected_max, {127.0});
test::ExpectTensorEqual<float>(expected_min, *GetOutput(1));
test::ExpectTensorEqual<float>(expected_max, *GetOutput(2));
}
TEST_F(MklQuantizeV2OpTest, small_minfirst) {
TF_ASSERT_OK(NodeDefBuilder("quantize_op", "_MklQuantizeV2")
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Attr("T", DataTypeToEnum<quint8>::v())
.Attr("mode", "MIN_FIRST")
.Attr("_kernel", "QuantizedMklOp")
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
AddInputFromArray<float>(TensorShape({8}),
{1.0, 1.25, 1.75, 2, 3.15, 127.0, 255.0, 500.0});
AddInputFromArray<float>(TensorShape({}), {0});
AddInputFromArray<float>(TensorShape({}), {255.0f});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_QUINT8, TensorShape({8}));
test::FillValues<quint8>(&expected, {1, 1, 2, 2, 3, 127, 255, 255});
test::ExpectTensorEqual<quint8>(expected, *GetOutput(0));
const float output_min = GetOutput(1)->scalar<float>()();
const float output_max = GetOutput(2)->scalar<float>()();
EXPECT_NEAR(0.0f, output_min, 1e-5f);
EXPECT_NEAR(255.0f, output_max, 1e-5f);
}
TEST_F(MklQuantizeV2OpTest, small_minfirst_uint) {
TF_ASSERT_OK(NodeDefBuilder("quantize_op", "_MklQuantizeV2")
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Attr("T", DataTypeToEnum<quint8>::v())
.Attr("mode", "MIN_FIRST")
.Attr("_kernel", "QuantizedMklOp")
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
AddInputFromArray<float>(TensorShape({8}),
{0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8});
AddInputFromArray<float>(TensorShape({}), {0.1});
AddInputFromArray<float>(TensorShape({}), {0.8});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_QUINT8, TensorShape({8}));
test::FillValues<quint8>(&expected, {32, 64, 96, 128, 159, 191, 223, 255});
test::ExpectTensorEqual<quint8>(expected, *GetOutput(0));
const float output_min = GetOutput(1)->scalar<float>()();
const float output_max = GetOutput(2)->scalar<float>()();
EXPECT_NEAR(0.0f, output_min, 1e-5f);
EXPECT_NEAR(0.8f, output_max, 1e-5f);
}
TEST_F(MklQuantizeV2OpTest, small_minfirst_int) {
TF_ASSERT_OK(NodeDefBuilder("quantize_op", "_MklQuantizeV2")
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Attr("T", DataTypeToEnum<quint8>::v())
.Attr("mode", "MIN_FIRST")
.Attr("_kernel", "QuantizedMklOp")
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
AddInputFromArray<float>(TensorShape({8}),
{-0.1, -0.2, -0.3, -0.4, -0.5, -0.6, -0.7, -0.8});
AddInputFromArray<float>(TensorShape({}), {-0.8});
AddInputFromArray<float>(TensorShape({}), {-0.1});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_QUINT8, TensorShape({8}));
test::FillValues<quint8>(&expected, {223, 191, 159, 128, 96, 64, 32, 0});
test::ExpectTensorEqual<quint8>(expected, *GetOutput(0));
const float output_min = GetOutput(1)->scalar<float>()();
const float output_max = GetOutput(2)->scalar<float>()();
EXPECT_NEAR(-0.8f, output_min, 1e-5f);
EXPECT_NEAR(0.0f, output_max, 1e-5f);
}
}
#endif | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/mkl/mkl_quantize_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/mkl/mkl_quantize_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
d229d95d-83c7-4da2-964e-3b50e74751e5 | cpp | tensorflow/tensorflow | canonicalize_all_gather_for_cse | third_party/xla/xla/service/spmd/canonicalize_all_gather_for_cse.cc | third_party/xla/xla/service/spmd/canonicalize_all_gather_for_cse_test.cc | #include "xla/service/spmd/canonicalize_all_gather_for_cse.h"
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/utils/hlo_query.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
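// Rewrites all-gathers with a single operand whose input is a chain of
// reshapes that merely insert or delete size-1 dimensions: the all-gather
// is moved onto the original data (with the gather dimension remapped
// through the reshapes) and a single reshape restores the old output
// shape, so equivalent all-gathers become syntactically identical for CSE.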
absl::StatusOr<bool> CanonicalizeAllGatherForCSE::RunOnComputation(
HloComputation* comp) {
bool changed = false;
std::vector<HloInstruction*> ordered_hlos = comp->MakeInstructionPostOrder();
for (HloInstruction* hlo : ordered_hlos) {
HloAllGatherInstruction* ag = DynCast<HloAllGatherInstruction>(hlo);
if (!ag || ag->operand_count() > 1) {
continue;
}
HloInstruction* real_data = ag->mutable_operand(0);
while (real_data->ReshapeMerelyInsertsOrDeletes1SizedDimensions()
.has_value()) {
real_data = real_data->mutable_operand(0);
}
if (real_data == ag->operand(0)) {
continue;
}
const int64_t ag_dim = ag->all_gather_dimension();
int64_t new_ag_dim;
if (auto dims = ShapeUtil::ReshapeLeavesDimensionsUnmodified(
ag->operand(0)->shape(), real_data->shape(), {ag_dim})) {
new_ag_dim = dims->at(0);
} else {
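// The reshape erased the gather dimension; recover it by walking the
// target shape until the element count in front of the old gather
// dimension has been divided out.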
int64_t major_elements =
Product(absl::MakeConstSpan(ag->operand(0)->shape().dimensions())
.subspan(0, ag_dim));
new_ag_dim = 0;
while (major_elements > 1) {
major_elements /= real_data->shape().dimensions(new_ag_dim++);
}
}
if (new_ag_dim == real_data->shape().rank()) {
continue;
}
const int64_t all_gather_participants =
ShapeUtil::ElementsIn(ag->shape()) /
ShapeUtil::ElementsIn(ag->operand(0)->shape());
Shape new_ag_shape = real_data->shape();
new_ag_shape.set_dimensions(
new_ag_dim,
all_gather_participants * new_ag_shape.dimensions(new_ag_dim));
std::optional<int64_t> new_channel_id =
ag->channel_id() ? std::make_optional(this->NextChannelId())
: std::nullopt;
HloInstruction* new_ag =
comp->AddInstruction(HloInstruction::CreateAllGather(
new_ag_shape, {real_data}, new_ag_dim,
ag->device_list(), ag->constrain_layout(), new_channel_id,
ag->use_global_device_ids()));
ag->SetupDerivedInstruction(new_ag);
HloInstruction* new_formatting = comp->AddInstruction(
HloInstruction::CreateReshape(ag->shape(), new_ag));
TF_RETURN_IF_ERROR(comp->ReplaceInstruction(ag, new_formatting));
changed = true;
}
return changed;
}
absl::StatusOr<bool> CanonicalizeAllGatherForCSE::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
bool changed = false;
next_channel_id_ = hlo_query::NextChannelId(*module);
for (HloComputation* comp : module->computations(execution_threads)) {
TF_ASSIGN_OR_RETURN(bool comp_changed, RunOnComputation(comp));
changed |= comp_changed;
}
return changed;
}
} | #include "xla/service/spmd/canonicalize_all_gather_for_cse.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/pass/hlo_pass_pipeline.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace spmd {
namespace {
using ::testing::_;
using ::testing::AllOf;
namespace op = xla::testing::opcode_matchers;
class AllGatherCanonicalizeTest : public HloTestBase {
public:
absl::StatusOr<std::unique_ptr<HloModule>> RunPass(
absl::string_view hlo_module) {
TF_ASSIGN_OR_RETURN(auto module, ParseAndReturnVerifiedModule(
hlo_module, GetModuleConfigForTest()));
HloPassPipeline pipeline("all-gather-cse");
pipeline.AddPass<CanonicalizeAllGatherForCSE>();
TF_RETURN_IF_ERROR(pipeline.Run(module.get()).status());
return absl::StatusOr<std::unique_ptr<HloModule>>(std::move(module));
}
absl::Status RunPassOnModule(HloModule* module,
int64_t distance_threshold = 100) {
HloPassPipeline pipeline("all-gather-cse");
pipeline.AddPass<CanonicalizeAllGatherForCSE>();
TF_RETURN_IF_ERROR(pipeline.Run(module).status());
return absl::OkStatus();
}
};
TEST_F(AllGatherCanonicalizeTest, SimpleReshape) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
param0 = s32[8]{0} parameter(0)
resh = s32[1,8]{1,0} reshape(param0)
ROOT ag = s32[2,8]{1,0} all-gather(resh), replica_groups={{0,1}},
dimensions={0}, channel_id=0, use_global_device_ids=true
})";
auto module_status = RunPass(hlo_string);
EXPECT_TRUE(module_status.status().ok());
auto module = std::move(module_status).value();
const HloInstruction* const reshape =
module->entry_computation()->root_instruction();
EXPECT_THAT(reshape,
AllOf(op::Reshape(op::AllGather(_)), op::Shape("s32[2,8]")));
}
TEST_F(AllGatherCanonicalizeTest, MultipleDegenerateReshapes) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
param0 = s32[8]{0} parameter(0)
resh = s32[1,8]{1,0} reshape(param0)
resh2 = s32[1,8,1,1]{3,2,1,0} reshape(resh)
ROOT ag = s32[2,8,1,1]{3,2,1,0} all-gather(resh2), replica_groups={{0,1}},
dimensions={0}, channel_id=0, use_global_device_ids=true
})";
auto module_status = RunPass(hlo_string);
EXPECT_TRUE(module_status.status().ok());
auto module = std::move(module_status).value();
const HloInstruction* const reshape =
module->entry_computation()->root_instruction();
EXPECT_THAT(reshape, op::Reshape(op::AllGather(op::Parameter())));
}
TEST_F(AllGatherCanonicalizeTest, MultipleDegenerateReshapes2) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
param0 = s32[8]{0} parameter(0)
resh = s32[8,1,1]{2,1,0} reshape(param0)
resh2 = s32[1,8,1,1]{3,2,1,0} reshape(resh)
ROOT ag = s32[2,8,1,1]{3,2,1,0} all-gather(resh2), replica_groups={{0,1}},
dimensions={0}, channel_id=0, use_global_device_ids=true
})";
auto module_status = RunPass(hlo_string);
EXPECT_TRUE(module_status.status().ok());
auto module = std::move(module_status).value();
const HloInstruction* const reshape =
module->entry_computation()->root_instruction();
EXPECT_THAT(reshape, op::Reshape(op::AllGather(op::Parameter())));
}
TEST_F(AllGatherCanonicalizeTest, MultipleDegenerateReshapesNoDim0) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
param0 = s32[8]{0} parameter(0)
resh = s32[8,1,1]{2,1,0} reshape(param0)
resh2 = s32[1,8,1,1]{3,2,1,0} reshape(resh)
ROOT ag = s32[1,16,1,1]{3,2,1,0} all-gather(resh2), replica_groups={{0,1}},
dimensions={1}, channel_id=0, use_global_device_ids=true
})";
auto module_status = RunPass(hlo_string);
EXPECT_TRUE(module_status.status().ok());
auto module = std::move(module_status).value();
const HloInstruction* const reshape =
module->entry_computation()->root_instruction();
EXPECT_THAT(reshape, op::Reshape(op::AllGather(op::Parameter())));
}
TEST_F(AllGatherCanonicalizeTest, NonDegenerateReshape) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
param0 = s32[8]{0} parameter(0)
resh = s32[8,1,1]{2,1,0} reshape(param0)
resh2 = s32[1,4,2,1,1]{4,3,2,1,0} reshape(resh)
ROOT ag = s32[2,4,2,1,1]{4,3,2,1,0} all-gather(resh2), replica_groups={{0,1}},
dimensions={0}, channel_id=0, use_global_device_ids=true
})";
auto module_status = RunPass(hlo_string);
EXPECT_TRUE(module_status.status().ok());
auto module = std::move(module_status).value();
const HloInstruction* const reshape =
module->entry_computation()->root_instruction();
EXPECT_THAT(reshape, AllOf(op::AllGather(op::Reshape(op::Reshape(_))),
op::Shape("s32[2,4,2,1,1]")));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/spmd/canonicalize_all_gather_for_cse.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/spmd/canonicalize_all_gather_for_cse_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
541c6011-78dc-41c7-a579-b06864d5f305 | cpp | tensorflow/tensorflow | bfloat16_type | tensorflow/compiler/mlir/quantization/stablehlo/utils/bfloat16_type.cc | tensorflow/compiler/mlir/quantization/stablehlo/utils/bfloat16_type_test.cc | #include "tensorflow/compiler/mlir/quantization/stablehlo/utils/bfloat16_type.h"
#include "mlir/IR/BuiltinTypeInterfaces.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/TypeUtilities.h"
#include "mlir/IR/Types.h"
#include "mlir/Support/LLVM.h"
namespace mlir::quant::stablehlo {
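// A float type is "large" when it is wider than 16 bits (f32, f64, f80);
// shaped types are judged by their element type.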
bool IsLargeFloatType(Type type) {
type = getElementTypeOrSelf(type);
return isa<FloatType>(type) && type.getIntOrFloatBitWidth() > 16;
}
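// Returns `type` with float element types wider than 16 bits replaced by
// bfloat16; shaped types keep their shape, and all other types pass
// through unchanged.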
Type ToBfloat16Type(Type type) {
if (auto shaped = mlir::dyn_cast<ShapedType>(type)) {
const Type elem = shaped.getElementType();
if (IsLargeFloatType(elem)) {
return shaped.clone(BFloat16Type::get(type.getContext()));
}
} else if (IsLargeFloatType(type)) {
return BFloat16Type::get(type.getContext());
}
return type;
}
} | #include "tensorflow/compiler/mlir/quantization/stablehlo/utils/bfloat16_type.h"
#include <memory>
#include <gtest/gtest.h>
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/MLIRContext.h"
#include "tensorflow/compiler/mlir/register_common_dialects.h"
namespace mlir::quant::stablehlo {
namespace {
std::unique_ptr<MLIRContext> CreateContext() {
auto context = std::make_unique<MLIRContext>();
DialectRegistry mlir_registry;
RegisterCommonToolingDialects(mlir_registry);
context->appendDialectRegistry(mlir_registry);
return context;
}
TEST(IsLargeFloatTypeTest, scalars) {
auto context = CreateContext();
EXPECT_FALSE(IsLargeFloatType(Float8E4M3FNType::get(context.get())));
EXPECT_FALSE(IsLargeFloatType(Float16Type::get(context.get())));
EXPECT_FALSE(IsLargeFloatType(BFloat16Type::get(context.get())));
EXPECT_TRUE(IsLargeFloatType(Float32Type::get(context.get())));
EXPECT_TRUE(IsLargeFloatType(Float64Type::get(context.get())));
EXPECT_TRUE(IsLargeFloatType(Float80Type::get(context.get())));
EXPECT_FALSE(IsLargeFloatType(IntegerType::get(context.get(), 8)));
EXPECT_FALSE(IsLargeFloatType(IntegerType::get(context.get(), 16)));
EXPECT_FALSE(IsLargeFloatType(IntegerType::get(context.get(), 32)));
}
TEST(IsLargeFloatTypeTest, tensors) {
auto context = CreateContext();
EXPECT_FALSE(IsLargeFloatType(
RankedTensorType::get({2, 2}, Float8E4M3FNType::get(context.get()))));
EXPECT_FALSE(IsLargeFloatType(
RankedTensorType::get({2, 2}, Float16Type::get(context.get()))));
EXPECT_FALSE(IsLargeFloatType(
RankedTensorType::get({2, 2}, BFloat16Type::get(context.get()))));
EXPECT_TRUE(IsLargeFloatType(
RankedTensorType::get({2, 2}, Float32Type::get(context.get()))));
EXPECT_TRUE(IsLargeFloatType(
RankedTensorType::get({2, 2}, Float64Type::get(context.get()))));
EXPECT_TRUE(IsLargeFloatType(
RankedTensorType::get({2, 2}, Float80Type::get(context.get()))));
EXPECT_FALSE(IsLargeFloatType(
RankedTensorType::get({2, 2}, IntegerType::get(context.get(), 8))));
EXPECT_FALSE(IsLargeFloatType(
RankedTensorType::get({2, 2}, IntegerType::get(context.get(), 16))));
EXPECT_FALSE(IsLargeFloatType(
RankedTensorType::get({2, 2}, IntegerType::get(context.get(), 32))));
}
TEST(ToBfloat16TypeTest, scalars) {
auto context = CreateContext();
EXPECT_EQ(ToBfloat16Type(Float8E4M3FNType::get(context.get())),
Float8E4M3FNType::get(context.get()));
EXPECT_EQ(ToBfloat16Type(Float16Type::get(context.get())),
Float16Type::get(context.get()));
EXPECT_EQ(ToBfloat16Type(BFloat16Type::get(context.get())),
BFloat16Type::get(context.get()));
EXPECT_EQ(ToBfloat16Type(Float32Type::get(context.get())),
BFloat16Type::get(context.get()));
EXPECT_EQ(ToBfloat16Type(Float64Type::get(context.get())),
BFloat16Type::get(context.get()));
EXPECT_EQ(ToBfloat16Type(Float80Type::get(context.get())),
BFloat16Type::get(context.get()));
EXPECT_EQ(ToBfloat16Type(IntegerType::get(context.get(), 8)),
IntegerType::get(context.get(), 8));
EXPECT_EQ(ToBfloat16Type(IntegerType::get(context.get(), 16)),
IntegerType::get(context.get(), 16));
EXPECT_EQ(ToBfloat16Type(IntegerType::get(context.get(), 32)),
IntegerType::get(context.get(), 32));
}
TEST(ToBfloat16TypeTest, tensors) {
auto context = CreateContext();
EXPECT_EQ(
ToBfloat16Type(
RankedTensorType::get({2, 2}, Float8E4M3FNType::get(context.get()))),
RankedTensorType::get({2, 2}, Float8E4M3FNType::get(context.get())));
EXPECT_EQ(ToBfloat16Type(
RankedTensorType::get({2, 2}, Float16Type::get(context.get()))),
RankedTensorType::get({2, 2}, Float16Type::get(context.get())));
EXPECT_EQ(ToBfloat16Type(RankedTensorType::get(
{2, 2}, BFloat16Type::get(context.get()))),
RankedTensorType::get({2, 2}, BFloat16Type::get(context.get())));
EXPECT_EQ(ToBfloat16Type(
RankedTensorType::get({2, 2}, Float32Type::get(context.get()))),
RankedTensorType::get({2, 2}, BFloat16Type::get(context.get())));
EXPECT_EQ(ToBfloat16Type(
RankedTensorType::get({2, 2}, Float64Type::get(context.get()))),
RankedTensorType::get({2, 2}, BFloat16Type::get(context.get())));
EXPECT_EQ(ToBfloat16Type(
RankedTensorType::get({2, 2}, Float80Type::get(context.get()))),
RankedTensorType::get({2, 2}, BFloat16Type::get(context.get())));
EXPECT_EQ(ToBfloat16Type(RankedTensorType::get(
{2, 2}, IntegerType::get(context.get(), 8))),
RankedTensorType::get({2, 2}, IntegerType::get(context.get(), 8)));
EXPECT_EQ(ToBfloat16Type(RankedTensorType::get(
{2, 2}, IntegerType::get(context.get(), 16))),
RankedTensorType::get({2, 2}, IntegerType::get(context.get(), 16)));
EXPECT_EQ(ToBfloat16Type(RankedTensorType::get(
{2, 2}, IntegerType::get(context.get(), 32))),
RankedTensorType::get({2, 2}, IntegerType::get(context.get(), 32)));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/quantization/stablehlo/utils/bfloat16_type.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/quantization/stablehlo/utils/bfloat16_type_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
eebdf9b7-6617-46ed-9659-7b074107777a | cpp | google/quiche | hpack_whole_entry_buffer | quiche/http2/hpack/decoder/hpack_whole_entry_buffer.cc | quiche/http2/hpack/decoder/hpack_whole_entry_buffer_test.cc | #include "quiche/http2/hpack/decoder/hpack_whole_entry_buffer.h"
#include "absl/strings/str_cat.h"
#include "quiche/common/platform/api/quiche_flag_utils.h"
#include "quiche/common/platform/api/quiche_logging.h"
#include "quiche/common/quiche_text_utils.h"
namespace http2 {
HpackWholeEntryBuffer::HpackWholeEntryBuffer(HpackWholeEntryListener* listener,
size_t max_string_size_bytes)
: max_string_size_bytes_(max_string_size_bytes) {
set_listener(listener);
}
HpackWholeEntryBuffer::~HpackWholeEntryBuffer() = default;
void HpackWholeEntryBuffer::set_listener(HpackWholeEntryListener* listener) {
QUICHE_CHECK(listener);
listener_ = listener;
}
void HpackWholeEntryBuffer::set_max_string_size_bytes(
size_t max_string_size_bytes) {
max_string_size_bytes_ = max_string_size_bytes;
}
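// Forces the name and value to own their bytes so the entry remains valid
// after the underlying decode buffer is released.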
void HpackWholeEntryBuffer::BufferStringsIfUnbuffered() {
name_.BufferStringIfUnbuffered();
value_.BufferStringIfUnbuffered();
}
void HpackWholeEntryBuffer::OnIndexedHeader(size_t index) {
QUICHE_DVLOG(2) << "HpackWholeEntryBuffer::OnIndexedHeader: index=" << index;
listener_->OnIndexedHeader(index);
}
void HpackWholeEntryBuffer::OnStartLiteralHeader(HpackEntryType entry_type,
size_t maybe_name_index) {
QUICHE_DVLOG(2) << "HpackWholeEntryBuffer::OnStartLiteralHeader: entry_type="
<< entry_type << ", maybe_name_index=" << maybe_name_index;
entry_type_ = entry_type;
maybe_name_index_ = maybe_name_index;
}
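// Rejects literal names longer than max_string_size_bytes_ up front, before
// any bytes are buffered or Huffman-decoded.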
void HpackWholeEntryBuffer::OnNameStart(bool huffman_encoded, size_t len) {
QUICHE_DVLOG(2) << "HpackWholeEntryBuffer::OnNameStart: huffman_encoded="
<< (huffman_encoded ? "true" : "false") << ", len=" << len;
QUICHE_DCHECK_EQ(maybe_name_index_, 0u);
if (!error_detected_) {
if (len > max_string_size_bytes_) {
QUICHE_DVLOG(1) << "Name length (" << len
<< ") is longer than permitted ("
<< max_string_size_bytes_ << ")";
ReportError(HpackDecodingError::kNameTooLong);
QUICHE_CODE_COUNT_N(decompress_failure_3, 18, 23);
return;
}
name_.OnStart(huffman_encoded, len);
}
}
void HpackWholeEntryBuffer::OnNameData(const char* data, size_t len) {
QUICHE_DVLOG(2) << "HpackWholeEntryBuffer::OnNameData: len=" << len
<< " data:\n"
<< quiche::QuicheTextUtils::HexDump(
absl::string_view(data, len));
QUICHE_DCHECK_EQ(maybe_name_index_, 0u);
if (!error_detected_ && !name_.OnData(data, len)) {
ReportError(HpackDecodingError::kNameHuffmanError);
QUICHE_CODE_COUNT_N(decompress_failure_3, 19, 23);
}
}
void HpackWholeEntryBuffer::OnNameEnd() {
QUICHE_DVLOG(2) << "HpackWholeEntryBuffer::OnNameEnd";
QUICHE_DCHECK_EQ(maybe_name_index_, 0u);
if (!error_detected_ && !name_.OnEnd()) {
ReportError(HpackDecodingError::kNameHuffmanError);
QUICHE_CODE_COUNT_N(decompress_failure_3, 20, 23);
}
}
void HpackWholeEntryBuffer::OnValueStart(bool huffman_encoded, size_t len) {
QUICHE_DVLOG(2) << "HpackWholeEntryBuffer::OnValueStart: huffman_encoded="
<< (huffman_encoded ? "true" : "false") << ", len=" << len;
if (!error_detected_) {
if (len > max_string_size_bytes_) {
QUICHE_DVLOG(1) << "Value length (" << len << ") of ["
<< name_.GetStringIfComplete()
<< "] is longer than permitted ("
<< max_string_size_bytes_ << ")";
ReportError(HpackDecodingError::kValueTooLong);
QUICHE_CODE_COUNT_N(decompress_failure_3, 21, 23);
return;
}
value_.OnStart(huffman_encoded, len);
}
}
void HpackWholeEntryBuffer::OnValueData(const char* data, size_t len) {
QUICHE_DVLOG(2) << "HpackWholeEntryBuffer::OnValueData: len=" << len
<< " data:\n"
<< quiche::QuicheTextUtils::HexDump(
absl::string_view(data, len));
if (!error_detected_ && !value_.OnData(data, len)) {
ReportError(HpackDecodingError::kValueHuffmanError);
QUICHE_CODE_COUNT_N(decompress_failure_3, 22, 23);
}
}
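// The entry is complete: forward either (name index, literal value) or
// (literal name, literal value) to the listener, then reset the buffers.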
void HpackWholeEntryBuffer::OnValueEnd() {
QUICHE_DVLOG(2) << "HpackWholeEntryBuffer::OnValueEnd";
if (error_detected_) {
return;
}
if (!value_.OnEnd()) {
ReportError(HpackDecodingError::kValueHuffmanError);
QUICHE_CODE_COUNT_N(decompress_failure_3, 23, 23);
return;
}
if (maybe_name_index_ == 0) {
listener_->OnLiteralNameAndValue(entry_type_, &name_, &value_);
name_.Reset();
} else {
listener_->OnNameIndexAndLiteralValue(entry_type_, maybe_name_index_,
&value_);
}
value_.Reset();
}
void HpackWholeEntryBuffer::OnDynamicTableSizeUpdate(size_t size) {
QUICHE_DVLOG(2) << "HpackWholeEntryBuffer::OnDynamicTableSizeUpdate: size="
<< size;
listener_->OnDynamicTableSizeUpdate(size);
}
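// Only the first error is reported; afterwards the listener is swapped for
// a no-op so remaining callbacks of the broken block are dropped silently.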
void HpackWholeEntryBuffer::ReportError(HpackDecodingError error) {
if (!error_detected_) {
QUICHE_DVLOG(1) << "HpackWholeEntryBuffer::ReportError: "
<< HpackDecodingErrorToString(error);
error_detected_ = true;
listener_->OnHpackDecodeError(error);
listener_ = HpackWholeEntryNoOpListener::NoOpListener();
}
}
} | #include "quiche/http2/hpack/decoder/hpack_whole_entry_buffer.h"
#include "quiche/common/platform/api/quiche_test.h"
using ::testing::_;
using ::testing::AllOf;
using ::testing::InSequence;
using ::testing::Property;
using ::testing::StrictMock;
namespace http2 {
namespace test {
namespace {
constexpr size_t kMaxStringSize = 20;
class MockHpackWholeEntryListener : public HpackWholeEntryListener {
public:
~MockHpackWholeEntryListener() override = default;
MOCK_METHOD(void, OnIndexedHeader, (size_t index), (override));
MOCK_METHOD(void, OnNameIndexAndLiteralValue,
(HpackEntryType entry_type, size_t name_index,
HpackDecoderStringBuffer* value_buffer),
(override));
MOCK_METHOD(void, OnLiteralNameAndValue,
(HpackEntryType entry_type, HpackDecoderStringBuffer* name_buffer,
HpackDecoderStringBuffer* value_buffer),
(override));
MOCK_METHOD(void, OnDynamicTableSizeUpdate, (size_t size), (override));
MOCK_METHOD(void, OnHpackDecodeError, (HpackDecodingError error), (override));
};
class HpackWholeEntryBufferTest : public quiche::test::QuicheTest {
protected:
HpackWholeEntryBufferTest() : entry_buffer_(&listener_, kMaxStringSize) {}
~HpackWholeEntryBufferTest() override = default;
StrictMock<MockHpackWholeEntryListener> listener_;
HpackWholeEntryBuffer entry_buffer_;
};
TEST_F(HpackWholeEntryBufferTest, OnIndexedHeader) {
{
InSequence seq;
EXPECT_CALL(listener_, OnIndexedHeader(17));
entry_buffer_.OnIndexedHeader(17);
}
{
InSequence seq;
EXPECT_CALL(listener_, OnIndexedHeader(62));
entry_buffer_.OnIndexedHeader(62);
}
{
InSequence seq;
EXPECT_CALL(listener_, OnIndexedHeader(62));
entry_buffer_.OnIndexedHeader(62);
}
{
InSequence seq;
EXPECT_CALL(listener_, OnIndexedHeader(128));
entry_buffer_.OnIndexedHeader(128);
}
StrictMock<MockHpackWholeEntryListener> listener2;
entry_buffer_.set_listener(&listener2);
{
InSequence seq;
EXPECT_CALL(listener2, OnIndexedHeader(100));
entry_buffer_.OnIndexedHeader(100);
}
}
TEST_F(HpackWholeEntryBufferTest, OnDynamicTableSizeUpdate) {
{
InSequence seq;
EXPECT_CALL(listener_, OnDynamicTableSizeUpdate(4096));
entry_buffer_.OnDynamicTableSizeUpdate(4096);
}
{
InSequence seq;
EXPECT_CALL(listener_, OnDynamicTableSizeUpdate(0));
entry_buffer_.OnDynamicTableSizeUpdate(0);
}
{
InSequence seq;
EXPECT_CALL(listener_, OnDynamicTableSizeUpdate(1024));
entry_buffer_.OnDynamicTableSizeUpdate(1024);
}
{
InSequence seq;
EXPECT_CALL(listener_, OnDynamicTableSizeUpdate(1024));
entry_buffer_.OnDynamicTableSizeUpdate(1024);
}
StrictMock<MockHpackWholeEntryListener> listener2;
entry_buffer_.set_listener(&listener2);
{
InSequence seq;
EXPECT_CALL(listener2, OnDynamicTableSizeUpdate(0));
entry_buffer_.OnDynamicTableSizeUpdate(0);
}
}
TEST_F(HpackWholeEntryBufferTest, OnNameIndexAndLiteralValue) {
entry_buffer_.OnStartLiteralHeader(HpackEntryType::kNeverIndexedLiteralHeader,
123);
entry_buffer_.OnValueStart(false, 10);
entry_buffer_.OnValueData("some data.", 10);
entry_buffer_.BufferStringsIfUnbuffered();
EXPECT_CALL(
listener_,
OnNameIndexAndLiteralValue(
HpackEntryType::kNeverIndexedLiteralHeader, 123,
AllOf(Property(&HpackDecoderStringBuffer::str, "some data."),
Property(&HpackDecoderStringBuffer::BufferedLength, 10))));
entry_buffer_.OnValueEnd();
}
TEST_F(HpackWholeEntryBufferTest, OnLiteralNameAndValue) {
entry_buffer_.OnStartLiteralHeader(HpackEntryType::kIndexedLiteralHeader, 0);
entry_buffer_.OnNameStart(false, 9);
entry_buffer_.OnNameData("some-", 5);
entry_buffer_.OnNameData("name", 4);
entry_buffer_.OnNameEnd();
entry_buffer_.OnValueStart(false, 12);
entry_buffer_.OnValueData("Header Value", 12);
EXPECT_CALL(
listener_,
OnLiteralNameAndValue(
HpackEntryType::kIndexedLiteralHeader,
AllOf(Property(&HpackDecoderStringBuffer::str, "some-name"),
Property(&HpackDecoderStringBuffer::BufferedLength, 9)),
AllOf(Property(&HpackDecoderStringBuffer::str, "Header Value"),
Property(&HpackDecoderStringBuffer::BufferedLength, 0))));
entry_buffer_.OnValueEnd();
}
TEST_F(HpackWholeEntryBufferTest, NameTooLong) {
entry_buffer_.OnStartLiteralHeader(HpackEntryType::kIndexedLiteralHeader, 0);
EXPECT_CALL(listener_, OnHpackDecodeError(HpackDecodingError::kNameTooLong));
entry_buffer_.OnNameStart(false, kMaxStringSize + 1);
}
TEST_F(HpackWholeEntryBufferTest, ValueTooLong) {
entry_buffer_.OnStartLiteralHeader(HpackEntryType::kIndexedLiteralHeader, 0);
EXPECT_CALL(listener_, OnHpackDecodeError(HpackDecodingError::kValueTooLong));
entry_buffer_.OnNameStart(false, 4);
entry_buffer_.OnNameData("path", 4);
entry_buffer_.OnNameEnd();
entry_buffer_.OnValueStart(false, kMaxStringSize + 1);
}
TEST_F(HpackWholeEntryBufferTest, ValueTooLongWithoutName) {
entry_buffer_.OnStartLiteralHeader(HpackEntryType::kIndexedLiteralHeader, 1);
EXPECT_CALL(listener_, OnHpackDecodeError(HpackDecodingError::kValueTooLong));
entry_buffer_.OnValueStart(false, kMaxStringSize + 1);
}
TEST_F(HpackWholeEntryBufferTest, NameHuffmanError) {
const char data[] = "\xff\xff\xff";
entry_buffer_.OnStartLiteralHeader(HpackEntryType::kUnindexedLiteralHeader,
0);
entry_buffer_.OnNameStart(true, 4);
entry_buffer_.OnNameData(data, 3);
EXPECT_CALL(listener_,
OnHpackDecodeError(HpackDecodingError::kNameHuffmanError));
entry_buffer_.OnNameData(data, 1);
EXPECT_CALL(listener_, OnDynamicTableSizeUpdate(8096)).Times(0);
entry_buffer_.OnDynamicTableSizeUpdate(8096);
}
TEST_F(HpackWholeEntryBufferTest, ValueHuffmanError) {
const char data[] = "\x00\x00\x00";
entry_buffer_.OnStartLiteralHeader(HpackEntryType::kNeverIndexedLiteralHeader,
61);
entry_buffer_.OnValueStart(true, 3);
entry_buffer_.OnValueData(data, 3);
EXPECT_CALL(listener_,
OnHpackDecodeError(HpackDecodingError::kValueHuffmanError));
entry_buffer_.OnValueEnd();
EXPECT_CALL(listener_, OnIndexedHeader(17)).Times(0);
entry_buffer_.OnIndexedHeader(17);
}
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/http2/hpack/decoder/hpack_whole_entry_buffer.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/http2/hpack/decoder/hpack_whole_entry_buffer_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
95152f6f-c076-41eb-920e-fe5928becd50 | cpp | google/quiche | hpack_decoder_state | quiche/http2/hpack/decoder/hpack_decoder_state.cc | quiche/http2/hpack/decoder/hpack_decoder_state_test.cc | #include "quiche/http2/hpack/decoder/hpack_decoder_state.h"
#include <string>
#include <utility>
#include "quiche/http2/http2_constants.h"
#include "quiche/common/platform/api/quiche_logging.h"
namespace http2 {
namespace {
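// Moves the string out of the buffer when it owns its bytes; otherwise the
// transient view is copied and the buffer reset.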
std::string ExtractString(HpackDecoderStringBuffer* string_buffer) {
if (string_buffer->IsBuffered()) {
return string_buffer->ReleaseString();
} else {
auto result = std::string(string_buffer->str());
string_buffer->Reset();
return result;
}
}
}
HpackDecoderState::HpackDecoderState(HpackDecoderListener* listener)
: listener_(listener),
final_header_table_size_(Http2SettingsInfo::DefaultHeaderTableSize()),
lowest_header_table_size_(final_header_table_size_),
require_dynamic_table_size_update_(false),
allow_dynamic_table_size_update_(true),
saw_dynamic_table_size_update_(false),
error_(HpackDecodingError::kOk) {
QUICHE_CHECK(listener_);
}
HpackDecoderState::~HpackDecoderState() = default;
void HpackDecoderState::ApplyHeaderTableSizeSetting(
uint32_t header_table_size) {
QUICHE_DVLOG(2) << "HpackDecoderState::ApplyHeaderTableSizeSetting("
<< header_table_size << ")";
QUICHE_DCHECK_LE(lowest_header_table_size_, final_header_table_size_);
if (header_table_size < lowest_header_table_size_) {
lowest_header_table_size_ = header_table_size;
}
final_header_table_size_ = header_table_size;
QUICHE_DVLOG(2) << "low water mark: " << lowest_header_table_size_;
QUICHE_DVLOG(2) << "final limit: " << final_header_table_size_;
}
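// Per RFC 7541 Section 4.2, lowering the table size setting obliges the
// encoder to begin the next header block with a dynamic table size update
// no larger than the lowest intervening setting; OnHeaderBlockStart records
// whether such an update is required.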
void HpackDecoderState::OnHeaderBlockStart() {
QUICHE_DVLOG(2) << "HpackDecoderState::OnHeaderBlockStart";
QUICHE_DCHECK(error_ == HpackDecodingError::kOk)
<< HpackDecodingErrorToString(error_);
QUICHE_DCHECK_LE(lowest_header_table_size_, final_header_table_size_);
allow_dynamic_table_size_update_ = true;
saw_dynamic_table_size_update_ = false;
require_dynamic_table_size_update_ =
(lowest_header_table_size_ <
decoder_tables_.current_header_table_size() ||
final_header_table_size_ < decoder_tables_.header_table_size_limit());
QUICHE_DVLOG(2) << "HpackDecoderState::OnHeaderListStart "
<< "require_dynamic_table_size_update_="
<< require_dynamic_table_size_update_;
listener_->OnHeaderListStart();
}
void HpackDecoderState::OnIndexedHeader(size_t index) {
QUICHE_DVLOG(2) << "HpackDecoderState::OnIndexedHeader: " << index;
if (error_ != HpackDecodingError::kOk) {
return;
}
if (require_dynamic_table_size_update_) {
ReportError(HpackDecodingError::kMissingDynamicTableSizeUpdate);
return;
}
allow_dynamic_table_size_update_ = false;
const HpackStringPair* entry = decoder_tables_.Lookup(index);
if (entry != nullptr) {
listener_->OnHeader(entry->name, entry->value);
} else {
ReportError(HpackDecodingError::kInvalidIndex);
}
}
void HpackDecoderState::OnNameIndexAndLiteralValue(
HpackEntryType entry_type, size_t name_index,
HpackDecoderStringBuffer* value_buffer) {
QUICHE_DVLOG(2) << "HpackDecoderState::OnNameIndexAndLiteralValue "
<< entry_type << ", " << name_index << ", "
<< value_buffer->str();
if (error_ != HpackDecodingError::kOk) {
return;
}
if (require_dynamic_table_size_update_) {
ReportError(HpackDecodingError::kMissingDynamicTableSizeUpdate);
return;
}
allow_dynamic_table_size_update_ = false;
const HpackStringPair* entry = decoder_tables_.Lookup(name_index);
if (entry != nullptr) {
std::string value(ExtractString(value_buffer));
listener_->OnHeader(entry->name, value);
if (entry_type == HpackEntryType::kIndexedLiteralHeader) {
decoder_tables_.Insert(entry->name, std::move(value));
}
} else {
ReportError(HpackDecodingError::kInvalidNameIndex);
}
}
void HpackDecoderState::OnLiteralNameAndValue(
HpackEntryType entry_type, HpackDecoderStringBuffer* name_buffer,
HpackDecoderStringBuffer* value_buffer) {
QUICHE_DVLOG(2) << "HpackDecoderState::OnLiteralNameAndValue " << entry_type
<< ", " << name_buffer->str() << ", " << value_buffer->str();
if (error_ != HpackDecodingError::kOk) {
return;
}
if (require_dynamic_table_size_update_) {
ReportError(HpackDecodingError::kMissingDynamicTableSizeUpdate);
return;
}
allow_dynamic_table_size_update_ = false;
std::string name(ExtractString(name_buffer));
std::string value(ExtractString(value_buffer));
listener_->OnHeader(name, value);
if (entry_type == HpackEntryType::kIndexedLiteralHeader) {
decoder_tables_.Insert(std::move(name), std::move(value));
}
}
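// This decoder permits at most two dynamic table size updates, and only
// before the first header field of a block; when an update is required, the
// first one must not exceed the low-water mark of the table size setting.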
void HpackDecoderState::OnDynamicTableSizeUpdate(size_t size_limit) {
QUICHE_DVLOG(2) << "HpackDecoderState::OnDynamicTableSizeUpdate "
<< size_limit << ", required="
<< (require_dynamic_table_size_update_ ? "true" : "false")
<< ", allowed="
<< (allow_dynamic_table_size_update_ ? "true" : "false");
if (error_ != HpackDecodingError::kOk) {
return;
}
QUICHE_DCHECK_LE(lowest_header_table_size_, final_header_table_size_);
if (!allow_dynamic_table_size_update_) {
ReportError(HpackDecodingError::kDynamicTableSizeUpdateNotAllowed);
return;
}
if (require_dynamic_table_size_update_) {
if (size_limit > lowest_header_table_size_) {
ReportError(HpackDecodingError::
kInitialDynamicTableSizeUpdateIsAboveLowWaterMark);
return;
}
require_dynamic_table_size_update_ = false;
} else if (size_limit > final_header_table_size_) {
ReportError(
HpackDecodingError::kDynamicTableSizeUpdateIsAboveAcknowledgedSetting);
return;
}
decoder_tables_.DynamicTableSizeUpdate(size_limit);
if (saw_dynamic_table_size_update_) {
allow_dynamic_table_size_update_ = false;
} else {
saw_dynamic_table_size_update_ = true;
}
lowest_header_table_size_ = final_header_table_size_;
}
void HpackDecoderState::OnHpackDecodeError(HpackDecodingError error) {
QUICHE_DVLOG(2) << "HpackDecoderState::OnHpackDecodeError "
<< HpackDecodingErrorToString(error);
if (error_ == HpackDecodingError::kOk) {
ReportError(error);
}
}
void HpackDecoderState::OnHeaderBlockEnd() {
QUICHE_DVLOG(2) << "HpackDecoderState::OnHeaderBlockEnd";
if (error_ != HpackDecodingError::kOk) {
return;
}
if (require_dynamic_table_size_update_) {
ReportError(HpackDecodingError::kMissingDynamicTableSizeUpdate);
} else {
listener_->OnHeaderListEnd();
}
}
void HpackDecoderState::ReportError(HpackDecodingError error) {
QUICHE_DVLOG(2) << "HpackDecoderState::ReportError is new="
<< (error_ == HpackDecodingError::kOk ? "true" : "false")
<< ", error: " << HpackDecodingErrorToString(error);
if (error_ == HpackDecodingError::kOk) {
listener_->OnHeaderErrorDetected(HpackDecodingErrorToString(error));
error_ = error;
}
}
} | #include "quiche/http2/hpack/decoder/hpack_decoder_state.h"
#include <utility>
#include <vector>
#include "absl/strings/string_view.h"
#include "quiche/http2/hpack/http2_hpack_constants.h"
#include "quiche/http2/http2_constants.h"
#include "quiche/http2/test_tools/verify_macros.h"
#include "quiche/common/platform/api/quiche_logging.h"
#include "quiche/common/platform/api/quiche_test.h"
using ::testing::AssertionResult;
using ::testing::AssertionSuccess;
using ::testing::Eq;
using ::testing::Mock;
using ::testing::StrictMock;
namespace http2 {
namespace test {
class HpackDecoderStatePeer {
public:
static HpackDecoderTables* GetDecoderTables(HpackDecoderState* state) {
return &state->decoder_tables_;
}
};
namespace {
class MockHpackDecoderListener : public HpackDecoderListener {
public:
MOCK_METHOD(void, OnHeaderListStart, (), (override));
MOCK_METHOD(void, OnHeader, (absl::string_view name, absl::string_view value),
(override));
MOCK_METHOD(void, OnHeaderListEnd, (), (override));
MOCK_METHOD(void, OnHeaderErrorDetected, (absl::string_view error_message),
(override));
};
enum StringBacking { UNBUFFERED, BUFFERED };
class HpackDecoderStateTest : public quiche::test::QuicheTest {
protected:
HpackDecoderStateTest() : decoder_state_(&listener_) {}
HpackDecoderTables* GetDecoderTables() {
return HpackDecoderStatePeer::GetDecoderTables(&decoder_state_);
}
const HpackStringPair* Lookup(size_t index) {
return GetDecoderTables()->Lookup(index);
}
size_t current_header_table_size() {
return GetDecoderTables()->current_header_table_size();
}
size_t header_table_size_limit() {
return GetDecoderTables()->header_table_size_limit();
}
void set_header_table_size_limit(size_t size) {
GetDecoderTables()->DynamicTableSizeUpdate(size);
}
void SetStringBuffer(absl::string_view s, StringBacking backing,
HpackDecoderStringBuffer* string_buffer) {
string_buffer->OnStart(false, s.size());
EXPECT_TRUE(string_buffer->OnData(s.data(), s.size()));
EXPECT_TRUE(string_buffer->OnEnd());
if (backing == BUFFERED) {
string_buffer->BufferStringIfUnbuffered();
}
}
void SetName(absl::string_view s, StringBacking backing) {
SetStringBuffer(s, backing, &name_buffer_);
}
void SetValue(absl::string_view s, StringBacking backing) {
SetStringBuffer(s, backing, &value_buffer_);
}
void SendStartAndVerifyCallback() {
EXPECT_CALL(listener_, OnHeaderListStart());
decoder_state_.OnHeaderBlockStart();
Mock::VerifyAndClearExpectations(&listener_);
}
void SendSizeUpdate(size_t size) {
decoder_state_.OnDynamicTableSizeUpdate(size);
Mock::VerifyAndClearExpectations(&listener_);
}
void SendIndexAndVerifyCallback(size_t index,
HpackEntryType ,
absl::string_view expected_name,
absl::string_view expected_value) {
EXPECT_CALL(listener_, OnHeader(Eq(expected_name), Eq(expected_value)));
decoder_state_.OnIndexedHeader(index);
Mock::VerifyAndClearExpectations(&listener_);
}
void SendValueAndVerifyCallback(size_t name_index, HpackEntryType entry_type,
absl::string_view name,
absl::string_view value,
StringBacking value_backing) {
SetValue(value, value_backing);
EXPECT_CALL(listener_, OnHeader(Eq(name), Eq(value)));
decoder_state_.OnNameIndexAndLiteralValue(entry_type, name_index,
&value_buffer_);
Mock::VerifyAndClearExpectations(&listener_);
}
void SendNameAndValueAndVerifyCallback(HpackEntryType entry_type,
absl::string_view name,
StringBacking name_backing,
absl::string_view value,
StringBacking value_backing) {
SetName(name, name_backing);
SetValue(value, value_backing);
EXPECT_CALL(listener_, OnHeader(Eq(name), Eq(value)));
decoder_state_.OnLiteralNameAndValue(entry_type, &name_buffer_,
&value_buffer_);
Mock::VerifyAndClearExpectations(&listener_);
}
void SendEndAndVerifyCallback() {
EXPECT_CALL(listener_, OnHeaderListEnd());
decoder_state_.OnHeaderBlockEnd();
Mock::VerifyAndClearExpectations(&listener_);
}
AssertionResult VerifyEntry(size_t dynamic_index, absl::string_view name,
absl::string_view value) {
const HpackStringPair* entry =
Lookup(dynamic_index + kFirstDynamicTableIndex - 1);
HTTP2_VERIFY_NE(entry, nullptr);
HTTP2_VERIFY_EQ(entry->name, name);
HTTP2_VERIFY_EQ(entry->value, value);
return AssertionSuccess();
}
AssertionResult VerifyNoEntry(size_t dynamic_index) {
const HpackStringPair* entry =
Lookup(dynamic_index + kFirstDynamicTableIndex - 1);
HTTP2_VERIFY_EQ(entry, nullptr);
return AssertionSuccess();
}
AssertionResult VerifyDynamicTableContents(
const std::vector<std::pair<absl::string_view, absl::string_view>>&
entries) {
size_t index = 1;
for (const auto& entry : entries) {
HTTP2_VERIFY_SUCCESS(VerifyEntry(index, entry.first, entry.second));
++index;
}
HTTP2_VERIFY_SUCCESS(VerifyNoEntry(index));
return AssertionSuccess();
}
StrictMock<MockHpackDecoderListener> listener_;
HpackDecoderState decoder_state_;
HpackDecoderStringBuffer name_buffer_, value_buffer_;
};
TEST_F(HpackDecoderStateTest, C3_RequestExamples) {
SendStartAndVerifyCallback();
SendIndexAndVerifyCallback(2, HpackEntryType::kIndexedHeader, ":method",
"GET");
SendIndexAndVerifyCallback(6, HpackEntryType::kIndexedHeader, ":scheme",
"http");
SendIndexAndVerifyCallback(4, HpackEntryType::kIndexedHeader, ":path", "/");
SendValueAndVerifyCallback(1, HpackEntryType::kIndexedLiteralHeader,
":authority", "www.example.com", UNBUFFERED);
SendEndAndVerifyCallback();
ASSERT_TRUE(VerifyDynamicTableContents({{":authority", "www.example.com"}}));
ASSERT_EQ(57u, current_header_table_size());
SendStartAndVerifyCallback();
SendIndexAndVerifyCallback(2, HpackEntryType::kIndexedHeader, ":method",
"GET");
SendIndexAndVerifyCallback(6, HpackEntryType::kIndexedHeader, ":scheme",
"http");
SendIndexAndVerifyCallback(4, HpackEntryType::kIndexedHeader, ":path", "/");
SendIndexAndVerifyCallback(62, HpackEntryType::kIndexedHeader, ":authority",
"www.example.com");
SendValueAndVerifyCallback(24, HpackEntryType::kIndexedLiteralHeader,
"cache-control", "no-cache", UNBUFFERED);
SendEndAndVerifyCallback();
ASSERT_TRUE(VerifyDynamicTableContents(
{{"cache-control", "no-cache"}, {":authority", "www.example.com"}}));
ASSERT_EQ(110u, current_header_table_size());
SendStartAndVerifyCallback();
SendIndexAndVerifyCallback(2, HpackEntryType::kIndexedHeader, ":method",
"GET");
SendIndexAndVerifyCallback(7, HpackEntryType::kIndexedHeader, ":scheme",
"https");
SendIndexAndVerifyCallback(5, HpackEntryType::kIndexedHeader, ":path",
"/index.html");
SendIndexAndVerifyCallback(63, HpackEntryType::kIndexedHeader, ":authority",
"www.example.com");
SendNameAndValueAndVerifyCallback(HpackEntryType::kIndexedLiteralHeader,
"custom-key", UNBUFFERED, "custom-value",
UNBUFFERED);
SendEndAndVerifyCallback();
ASSERT_TRUE(VerifyDynamicTableContents({{"custom-key", "custom-value"},
{"cache-control", "no-cache"},
{":authority", "www.example.com"}}));
ASSERT_EQ(164u, current_header_table_size());
}
TEST_F(HpackDecoderStateTest, C5_ResponseExamples) {
set_header_table_size_limit(256);
SendStartAndVerifyCallback();
SendValueAndVerifyCallback(8, HpackEntryType::kIndexedLiteralHeader,
":status", "302", BUFFERED);
SendValueAndVerifyCallback(24, HpackEntryType::kIndexedLiteralHeader,
"cache-control", "private", UNBUFFERED);
SendValueAndVerifyCallback(33, HpackEntryType::kIndexedLiteralHeader, "date",
"Mon, 21 Oct 2013 20:13:21 GMT", UNBUFFERED);
  SendValueAndVerifyCallback(46, HpackEntryType::kIndexedLiteralHeader,
                             "location", "https://www.example.com",
                             UNBUFFERED);
SendEndAndVerifyCallback();
  ASSERT_TRUE(
      VerifyDynamicTableContents({{"location", "https://www.example.com"},
{"date", "Mon, 21 Oct 2013 20:13:21 GMT"},
{"cache-control", "private"},
{":status", "302"}}));
ASSERT_EQ(222u, current_header_table_size());
SendStartAndVerifyCallback();
SendValueAndVerifyCallback(8, HpackEntryType::kIndexedLiteralHeader,
":status", "307", BUFFERED);
SendIndexAndVerifyCallback(65, HpackEntryType::kIndexedHeader,
"cache-control", "private");
SendIndexAndVerifyCallback(64, HpackEntryType::kIndexedHeader, "date",
"Mon, 21 Oct 2013 20:13:21 GMT");
  SendIndexAndVerifyCallback(63, HpackEntryType::kIndexedHeader, "location",
                             "https://www.example.com");
SendEndAndVerifyCallback();
  ASSERT_TRUE(
      VerifyDynamicTableContents({{":status", "307"},
                                  {"location", "https://www.example.com"},
{"date", "Mon, 21 Oct 2013 20:13:21 GMT"},
{"cache-control", "private"}}));
ASSERT_EQ(222u, current_header_table_size());
SendStartAndVerifyCallback();
SendIndexAndVerifyCallback(8, HpackEntryType::kIndexedHeader, ":status",
"200");
SendIndexAndVerifyCallback(65, HpackEntryType::kIndexedHeader,
"cache-control", "private");
SendValueAndVerifyCallback(33, HpackEntryType::kIndexedLiteralHeader, "date",
"Mon, 21 Oct 2013 20:13:22 GMT", BUFFERED);
  SendIndexAndVerifyCallback(64, HpackEntryType::kIndexedHeader, "location",
                             "https://www.example.com");
SendValueAndVerifyCallback(26, HpackEntryType::kIndexedLiteralHeader,
"content-encoding", "gzip", UNBUFFERED);
SendValueAndVerifyCallback(
55, HpackEntryType::kIndexedLiteralHeader, "set-cookie",
"foo=ASDJKHQKBZXOQWEOPIUAXQWEOIU; max-age=3600; version=1", BUFFERED);
SendEndAndVerifyCallback();
ASSERT_TRUE(VerifyDynamicTableContents(
{{"set-cookie",
"foo=ASDJKHQKBZXOQWEOPIUAXQWEOIU; max-age=3600; version=1"},
{"content-encoding", "gzip"},
{"date", "Mon, 21 Oct 2013 20:13:22 GMT"}}));
ASSERT_EQ(215u, current_header_table_size());
}
TEST_F(HpackDecoderStateTest, OptionalTableSizeChanges) {
SendStartAndVerifyCallback();
EXPECT_EQ(Http2SettingsInfo::DefaultHeaderTableSize(),
header_table_size_limit());
SendSizeUpdate(1024);
EXPECT_EQ(1024u, header_table_size_limit());
SendSizeUpdate(0);
EXPECT_EQ(0u, header_table_size_limit());
EXPECT_CALL(listener_, OnHeaderErrorDetected(
Eq("Dynamic table size update not allowed")));
SendSizeUpdate(0);
}
TEST_F(HpackDecoderStateTest, RequiredTableSizeChangeBeforeHeader) {
EXPECT_EQ(4096u, decoder_state_.GetCurrentHeaderTableSizeSetting());
decoder_state_.ApplyHeaderTableSizeSetting(1024);
decoder_state_.ApplyHeaderTableSizeSetting(2048);
EXPECT_EQ(2048u, decoder_state_.GetCurrentHeaderTableSizeSetting());
SendStartAndVerifyCallback();
EXPECT_EQ(Http2SettingsInfo::DefaultHeaderTableSize(),
header_table_size_limit());
SendSizeUpdate(1024);
EXPECT_EQ(1024u, header_table_size_limit());
SendSizeUpdate(1500);
EXPECT_EQ(1500u, header_table_size_limit());
SendEndAndVerifyCallback();
decoder_state_.ApplyHeaderTableSizeSetting(1024);
EXPECT_EQ(1024u, decoder_state_.GetCurrentHeaderTableSizeSetting());
SendStartAndVerifyCallback();
EXPECT_CALL(listener_,
OnHeaderErrorDetected(Eq("Missing dynamic table size update")));
decoder_state_.OnIndexedHeader(1);
decoder_state_.OnIndexedHeader(1);
decoder_state_.OnDynamicTableSizeUpdate(1);
SetValue("value", UNBUFFERED);
decoder_state_.OnNameIndexAndLiteralValue(
HpackEntryType::kIndexedLiteralHeader, 4, &value_buffer_);
SetName("name", UNBUFFERED);
decoder_state_.OnLiteralNameAndValue(HpackEntryType::kIndexedLiteralHeader,
&name_buffer_, &value_buffer_);
decoder_state_.OnHeaderBlockEnd();
decoder_state_.OnHpackDecodeError(HpackDecodingError::kIndexVarintError);
}
TEST_F(HpackDecoderStateTest, InvalidRequiredSizeUpdate) {
decoder_state_.ApplyHeaderTableSizeSetting(1024);
SendStartAndVerifyCallback();
EXPECT_EQ(Http2SettingsInfo::DefaultHeaderTableSize(),
header_table_size_limit());
EXPECT_CALL(
listener_,
OnHeaderErrorDetected(
Eq("Initial dynamic table size update is above low water mark")));
SendSizeUpdate(2048);
}
TEST_F(HpackDecoderStateTest, RequiredTableSizeChangeBeforeEnd) {
decoder_state_.ApplyHeaderTableSizeSetting(1024);
SendStartAndVerifyCallback();
EXPECT_CALL(listener_,
OnHeaderErrorDetected(Eq("Missing dynamic table size update")));
decoder_state_.OnHeaderBlockEnd();
}
TEST_F(HpackDecoderStateTest, InvalidOptionalSizeUpdate) {
SendStartAndVerifyCallback();
EXPECT_EQ(Http2SettingsInfo::DefaultHeaderTableSize(),
header_table_size_limit());
EXPECT_CALL(listener_,
OnHeaderErrorDetected(Eq(
"Dynamic table size update is above acknowledged setting")));
SendSizeUpdate(Http2SettingsInfo::DefaultHeaderTableSize() + 1);
}
TEST_F(HpackDecoderStateTest, InvalidStaticIndex) {
SendStartAndVerifyCallback();
EXPECT_CALL(listener_,
OnHeaderErrorDetected(
Eq("Invalid index in indexed header field representation")));
decoder_state_.OnIndexedHeader(0);
}
TEST_F(HpackDecoderStateTest, InvalidDynamicIndex) {
SendStartAndVerifyCallback();
EXPECT_CALL(listener_,
OnHeaderErrorDetected(
Eq("Invalid index in indexed header field representation")));
decoder_state_.OnIndexedHeader(kFirstDynamicTableIndex);
}
TEST_F(HpackDecoderStateTest, InvalidNameIndex) {
SendStartAndVerifyCallback();
EXPECT_CALL(listener_,
OnHeaderErrorDetected(Eq("Invalid index in literal header field "
"with indexed name representation")));
SetValue("value", UNBUFFERED);
decoder_state_.OnNameIndexAndLiteralValue(
HpackEntryType::kIndexedLiteralHeader, kFirstDynamicTableIndex,
&value_buffer_);
}
TEST_F(HpackDecoderStateTest, ErrorsSuppressCallbacks) {
SendStartAndVerifyCallback();
EXPECT_CALL(listener_,
OnHeaderErrorDetected(Eq("Name Huffman encoding error")));
decoder_state_.OnHpackDecodeError(HpackDecodingError::kNameHuffmanError);
decoder_state_.OnIndexedHeader(1);
decoder_state_.OnDynamicTableSizeUpdate(1);
SetValue("value", UNBUFFERED);
decoder_state_.OnNameIndexAndLiteralValue(
HpackEntryType::kIndexedLiteralHeader, 4, &value_buffer_);
SetName("name", UNBUFFERED);
decoder_state_.OnLiteralNameAndValue(HpackEntryType::kIndexedLiteralHeader,
&name_buffer_, &value_buffer_);
decoder_state_.OnHeaderBlockEnd();
decoder_state_.OnHpackDecodeError(HpackDecodingError::kIndexVarintError);
}
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/http2/hpack/decoder/hpack_decoder_state.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/http2/hpack/decoder/hpack_decoder_state_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
f69aa326-3804-4aaf-a389-a40eb316c0c6 | cpp | tensorflow/tensorflow | decode_jpeg | tensorflow/lite/experimental/acceleration/mini_benchmark/decode_jpeg.cc | tensorflow/lite/experimental/acceleration/mini_benchmark/decode_jpeg_test.cc | #include <algorithm>
#include <cstddef>
#include <memory>
#include <vector>
#include "flatbuffers/flexbuffers.h"
#include "tensorflow/lite/core/c/c_api_types.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/decode_jpeg_register.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/libjpeg_decoder.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/string_type.h"
#include "tensorflow/lite/string_util.h"
namespace tflite {
namespace acceleration {
namespace decode_jpeg_kernel {
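// Parses the custom op's flexbuffer options (height, width, num_images,
// channels) into a freshly allocated OpData, or returns nullptr if any
// field is missing or malformed.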
void* Init(TfLiteContext* context, const char* buffer, size_t length) {
if (!buffer) {
return nullptr;
}
#define RET_ENSURE(context, condition) \
do { \
if (!(condition)) { \
TF_LITE_KERNEL_LOG((context), "%s:%d %s was not true.", __FILE__, \
__LINE__, #condition); \
return nullptr; \
} \
} while (0)
const uint8_t* buffer_t = reinterpret_cast<const uint8_t*>(buffer);
const flexbuffers::Map m = flexbuffers::GetRoot(buffer_t, length).AsMap();
RET_ENSURE(context, m["height"].IsInt());
RET_ENSURE(context, m["width"].IsInt());
RET_ENSURE(context, m["num_images"].IsInt());
RET_ENSURE(context, m["channels"].IsInt());
OpData* op_data = new OpData();
op_data->height = m["height"].AsInt32();
op_data->width = m["width"].AsInt32();
op_data->num_images = m["num_images"].AsInt32();
op_data->channels = m["channels"].AsInt32();
return op_data;
#undef RET_ENSURE
}
void Free(TfLiteContext* context, void* buffer) {
delete reinterpret_cast<OpData*>(buffer);
}
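// Checks that the string input holds one JPEG per image and resizes the
// uint8 output to {num_images, height, width, channels}.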
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
OpData* op_data = reinterpret_cast<OpData*>(node->user_data);
TF_LITE_ENSURE(context, op_data);
TF_LITE_ENSURE(context, op_data->height > 0);
TF_LITE_ENSURE(context, op_data->width > 0);
TF_LITE_ENSURE(context, op_data->num_images > 0);
TF_LITE_ENSURE(context, op_data->channels == 3 || op_data->channels == 4);
TF_LITE_ENSURE_EQ(context, node->inputs->size, 1);
TF_LITE_ENSURE_EQ(context, node->outputs->size, 1);
const TfLiteTensor* input_buffer;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, 0, &input_buffer));
TfLiteTensor* output_tensor;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, 0, &output_tensor));
TF_LITE_ENSURE_TYPES_EQ(context, input_buffer->type, kTfLiteString);
TF_LITE_ENSURE_TYPES_EQ(context, output_tensor->type, kTfLiteUInt8);
TF_LITE_ENSURE_EQ(context, NumDimensions(input_buffer), 1);
TF_LITE_ENSURE_EQ(context, input_buffer->dims->data[0], op_data->num_images);
TfLiteIntArray* new_dims = TfLiteIntArrayCreate(4);
new_dims->data[0] = op_data->num_images;
new_dims->data[1] = op_data->height;
new_dims->data[2] = op_data->width;
new_dims->data[3] = op_data->channels;
output_tensor->type = kTfLiteUInt8;
TF_LITE_ENSURE_OK(context,
context->ResizeTensor(context, output_tensor, new_dims));
return kTfLiteOk;
}
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
OpData* op_data = reinterpret_cast<OpData*>(node->user_data);
const TfLiteTensor* input_buffer;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, 0, &input_buffer));
TF_LITE_ENSURE(context, input_buffer);
TF_LITE_ENSURE(context, input_buffer->data.raw);
const int channels = op_data->channels;
const int decode_channels = 3;
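  // libjpeg decodes 3-channel RGB; a fourth (alpha) channel, when requested,
  // is synthesized after decoding.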
TfLiteTensor* output_tensor;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, 0, &output_tensor));
unsigned char* output_arr = GetTensorData<unsigned char>(output_tensor);
Status decoder_status;
std::unique_ptr<LibjpegDecoder> decoder =
LibjpegDecoder::Create(decoder_status);
if (decoder_status.code != kTfLiteOk) {
    TF_LITE_KERNEL_LOG(context, "%s", decoder_status.error_message.c_str());
return kTfLiteError;
}
const int kDecodedImageSize =
op_data->width * op_data->height * decode_channels;
const int kOutputImageSize = op_data->width * op_data->height * channels;
int output_array_offset = 0;
for (int img = 0; img < op_data->num_images; ++img) {
tflite::StringRef inputref =
tflite::GetString(input_buffer, img);
unsigned char* decoded = output_arr + output_array_offset;
Status decode_status = decoder->DecodeImage(
inputref, {op_data->height, op_data->width, decode_channels}, decoded,
kDecodedImageSize);
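    // Expand RGB to RGBA in place, walking from the last pixel backwards so
    // unread RGB bytes are never overwritten; alpha is set fully opaque.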
if (channels == 4) {
size_t height = op_data->height;
size_t src_offset = kDecodedImageSize;
size_t dst_offset = kOutputImageSize;
while (height--) {
size_t width = op_data->width;
while (width--) {
src_offset -= decode_channels;
dst_offset -= channels;
std::copy_n(decoded + src_offset, decode_channels,
decoded + dst_offset);
decoded[dst_offset + 3] = 255;
}
}
}
output_array_offset += kOutputImageSize;
if (decode_status.code != kTfLiteOk) {
      TF_LITE_KERNEL_LOG(context, "%s", decode_status.error_message.c_str());
return kTfLiteError;
}
}
return kTfLiteOk;
}
TfLiteRegistration* Register_DECODE_JPEG() {
static TfLiteRegistration r = {
decode_jpeg_kernel::Init, decode_jpeg_kernel::Free,
decode_jpeg_kernel::Prepare, decode_jpeg_kernel::Eval};
return &r;
}
}
}
} | #include <cstdint>
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "flatbuffers/flexbuffers.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/decode_jpeg_register.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/embedded_chessboard_jpeg.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/embedded_test_card_jpeg.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/libjpeg_decoder_test_helper.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace acceleration {
namespace decode_jpeg_kernel {
namespace {
using testing::ElementsAre;
const int kHeight = 300, kWidth = 250, kChannels = 3;
const int kDecodedSize = kHeight * kWidth * kChannels;
class DecodeJPEGOpModel : public SingleOpModel {
public:
DecodeJPEGOpModel(const TensorData& input, const TensorData& output,
int num_images, int height, int width, int channels) {
input_id_ = AddInput(input);
output_id_ = AddOutput(output);
flexbuffers::Builder fbb;
fbb.Map([&] {
fbb.Int("num_images", num_images);
fbb.Int("height", height);
fbb.Int("width", width);
fbb.Int("channels", channels);
});
fbb.Finish();
SetCustomOp("DECODE_JPEG", fbb.GetBuffer(),
tflite::acceleration::decode_jpeg_kernel::Register_DECODE_JPEG);
BuildInterpreter({GetShape(input_id_)});
}
int input_buffer_id() { return input_id_; }
int output_id() { return output_id_; }
std::vector<uint8_t> GetOutput() {
return ExtractVector<uint8_t>(output_id_);
}
std::vector<int> GetOutputShape() { return GetTensorShape(output_id_); }
protected:
int input_id_;
int shapes_id_;
int output_id_;
};
TEST(DecodeJpegTest, TestMultipleJPEGImages) {
std::string chessboard_image(
reinterpret_cast<const char*>(g_tflite_acceleration_chessboard_jpeg),
g_tflite_acceleration_chessboard_jpeg_len);
std::string test_card_image(
reinterpret_cast<const char*>(g_tflite_acceleration_test_card_jpeg),
g_tflite_acceleration_test_card_jpeg_len);
const int kNumImages = 2;
DecodeJPEGOpModel model({TensorType_STRING, {kNumImages}},
{TensorType_UINT8, {}}, kNumImages, kHeight, kWidth,
kChannels);
model.PopulateStringTensor(model.input_buffer_id(),
{chessboard_image, test_card_image});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
ASSERT_THAT(model.GetOutputShape(),
ElementsAre(kNumImages, kHeight, kWidth, kChannels));
std::vector<uint8_t> output_flattened = model.GetOutput();
std::vector<uint8_t> img1(output_flattened.begin(),
output_flattened.begin() + kDecodedSize);
EXPECT_THAT(img1, HasChessboardPatternWithTolerance(12));
std::vector<uint8_t> img2(output_flattened.begin() + kDecodedSize,
output_flattened.end());
EXPECT_THAT(img2, HasRainbowPatternWithTolerance(5));
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/acceleration/mini_benchmark/decode_jpeg.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/acceleration/mini_benchmark/decode_jpeg_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
188d98db-14ee-4116-be46-fa26ccb5f35a | cpp | tensorflow/tensorflow | summary_image_op | tensorflow/core/kernels/summary_image_op.cc | tensorflow/core/kernels/summary_image_op_test.cc | #include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/summary.pb.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/png/png_io.h"
#include "tensorflow/core/platform/logging.h"
namespace tensorflow {
class SummaryImageOp : public OpKernel {
public:
typedef Eigen::Tensor<uint8, 2, Eigen::RowMajor> Uint8Image;
explicit SummaryImageOp(OpKernelConstruction* context) : OpKernel(context) {
int64_t max_images_tmp;
OP_REQUIRES_OK(context, context->GetAttr("max_images", &max_images_tmp));
OP_REQUIRES(context, max_images_tmp < (1LL << 31),
errors::InvalidArgument("max_images must be < 2^31"));
max_images_ = static_cast<int32>(max_images_tmp);
const TensorProto* proto;
OP_REQUIRES_OK(context, context->GetAttr("bad_color", &proto));
OP_REQUIRES_OK(context, context->device()->MakeTensorFromProto(
*proto, AllocatorAttributes(), &bad_color_));
OP_REQUIRES(context, bad_color_.dtype() == DT_UINT8,
errors::InvalidArgument("bad_color must be uint8, got ",
DataTypeString(bad_color_.dtype())));
OP_REQUIRES(
context, TensorShapeUtils::IsVector(bad_color_.shape()),
errors::InvalidArgument("bad_color must be a vector, got shape ",
bad_color_.shape().DebugString()));
}
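  // Encodes up to max_images_ images of a [batch, height, width, depth]
  // tensor as PNG image summaries; non-uint8 inputs are normalized to uint8
  // first via NormalizeFloatImage.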
void Compute(OpKernelContext* c) override {
const Tensor& tags = c->input(0);
const Tensor& tensor = c->input(1);
OP_REQUIRES(c, TensorShapeUtils::IsScalar(tags.shape()),
errors::InvalidArgument("Tags must be a scalar"));
OP_REQUIRES(c,
tensor.dims() == 4 &&
(tensor.dim_size(3) == 1 || tensor.dim_size(3) == 3 ||
tensor.dim_size(3) == 4),
errors::InvalidArgument(
"Tensor must be 4-D with last dim 1, 3, or 4, not ",
tensor.shape().DebugString()));
const string& base_tag = tags.scalar<tstring>()();
OP_REQUIRES(c,
tensor.dim_size(0) < (1LL << 31) &&
tensor.dim_size(1) < (1LL << 31) &&
tensor.dim_size(2) < (1LL << 31) &&
(tensor.dim_size(1) * tensor.dim_size(2)) < (1LL << 29),
errors::InvalidArgument("Tensor too large for summary ",
tensor.shape().DebugString()));
const int batch_size = static_cast<int>(tensor.dim_size(0));
const int h = static_cast<int>(tensor.dim_size(1));
const int w = static_cast<int>(tensor.dim_size(2));
const int hw = h * w;
const int depth = static_cast<int>(tensor.dim_size(3));
OP_REQUIRES(c, hw > 0 && depth > 0,
errors::InvalidArgument(
"input tensor must have non-zero dims. Found: [",
batch_size, ", ", h, ", ", w, ", ", depth, "]."));
Summary s;
if (tensor.dtype() == DT_UINT8) {
auto ith_image = [&tensor, batch_size, hw, depth](int i) {
auto values = tensor.shaped<uint8, 3>({batch_size, hw, depth});
return typename TTypes<uint8>::ConstMatrix(
&values(i, 0, 0), Eigen::DSizes<Eigen::DenseIndex, 2>(hw, depth));
};
OP_REQUIRES_OK(
c, AddImages(base_tag, batch_size, w, h, depth, ith_image, &s));
} else if (tensor.dtype() == DT_HALF) {
NormalizeAndAddImages<Eigen::half>(c, tensor, h, w, hw, depth, batch_size,
base_tag, &s);
} else if (tensor.dtype() == DT_FLOAT) {
NormalizeAndAddImages<float>(c, tensor, h, w, hw, depth, batch_size,
base_tag, &s);
} else {
NormalizeAndAddImages<double>(c, tensor, h, w, hw, depth, batch_size,
base_tag, &s);
}
Tensor* summary_tensor = nullptr;
OP_REQUIRES_OK(c, c->allocate_output(0, TensorShape({}), &summary_tensor));
CHECK(SerializeToTString(s, &summary_tensor->scalar<tstring>()()));
}
template <class T>
void NormalizeAndAddImages(OpKernelContext* c, const Tensor& tensor, int h,
int w, int hw, int depth, int batch_size,
const string& base_tag, Summary* s) {
OP_REQUIRES(c, bad_color_.dim_size(0) >= depth,
errors::InvalidArgument(
"expected depth <= bad_color.size, got depth = ", depth,
", bad_color.size = ", bad_color_.dim_size(0)));
auto bad_color_full = bad_color_.vec<uint8>();
typename TTypes<uint8>::ConstVec bad_color(bad_color_full.data(), depth);
Uint8Image image(hw, depth);
auto ith_image = [&tensor, &image, bad_color, batch_size, hw,
depth](int i) {
auto tensor_eigen = tensor.template shaped<T, 3>({batch_size, hw, depth});
typename TTypes<T>::ConstMatrix values(
&tensor_eigen(i, 0, 0),
Eigen::DSizes<Eigen::DenseIndex, 2>(hw, depth));
NormalizeFloatImage<T>(hw, depth, values, bad_color, &image);
return image;
};
OP_REQUIRES_OK(c,
AddImages(base_tag, batch_size, w, h, depth, ith_image, s));
}
Status AddImages(const string& tag, int batch_size, int w, int h, int depth,
const std::function<Uint8Image(int)>& ith_image,
Summary* s) {
const int N = std::min<int>(max_images_, batch_size);
for (int i = 0; i < N; ++i) {
Summary::Value* v = s->add_value();
if (max_images_ > 1) {
v->set_tag(strings::StrCat(tag, "/image/", i));
} else {
v->set_tag(strings::StrCat(tag, "/image"));
}
auto image = ith_image(i);
Summary::Image* si = v->mutable_image();
si->set_height(h);
si->set_width(w);
si->set_colorspace(depth);
const int channel_bits = 8;
const int compression = -1;
if (!png::WriteImageToBuffer(
image.data(), w, h, w * depth, depth, channel_bits, compression,
si->mutable_encoded_image_string(), nullptr)) {
return errors::Internal("PNG encoding failed");
}
}
return absl::OkStatus();
}
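  // Linearly rescales a float image to uint8: non-negative images map onto
  // [0, 255], while images containing negatives map onto [1, 255] with zero
  // at 128; pixels with NaN/Inf components are replaced by bad_color.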
template <class T>
static void NormalizeFloatImage(int hw, int depth,
typename TTypes<T>::ConstMatrix values,
typename TTypes<uint8>::ConstVec bad_color,
Uint8Image* image) {
if (!image->size()) return;
float image_min = std::numeric_limits<float>::infinity();
float image_max = -image_min;
for (int i = 0; i < hw; i++) {
bool finite = true;
for (int j = 0; j < depth; j++) {
if (!Eigen::numext::isfinite(values(i, j))) {
finite = false;
break;
}
}
if (finite) {
for (int j = 0; j < depth; j++) {
float value(values(i, j));
image_min = std::min(image_min, value);
image_max = std::max(image_max, value);
}
}
}
const float kZeroThreshold = 1e-6;
T scale, offset;
if (image_min < 0) {
float max_val = std::max(std::abs(image_min), std::abs(image_max));
scale = T(max_val < kZeroThreshold ? 0.0f : 127.0f / max_val);
offset = T(128.0f);
} else {
scale = T(image_max < kZeroThreshold ? 0.0f : 255.0f / image_max);
offset = T(0.0f);
}
for (int i = 0; i < hw; i++) {
bool finite = true;
for (int j = 0; j < depth; j++) {
if (!Eigen::numext::isfinite(values(i, j))) {
finite = false;
break;
}
}
if (finite) {
image->chip<0>(i) = (values.template chip<0>(i) * scale + offset)
.template cast<uint8>();
} else {
image->chip<0>(i) = bad_color;
}
}
}
private:
int32 max_images_;
Tensor bad_color_;
};
REGISTER_KERNEL_BUILDER(Name("ImageSummary").Device(DEVICE_CPU),
SummaryImageOp);
} | #include <functional>
#include <memory>
#include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/fake_input.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/summary.pb.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/kernels/ops_testutil.h"
#include "tensorflow/core/kernels/ops_util.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/histogram/histogram.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
static void EXPECT_SummaryMatches(const Summary& actual,
const string& expected_str) {
Summary expected;
CHECK(protobuf::TextFormat::ParseFromString(expected_str, &expected));
EXPECT_EQ(expected.DebugString(), actual.DebugString());
}
class SummaryImageOpTest : public OpsTestBase {
protected:
void MakeOp(int max_images) {
TF_ASSERT_OK(NodeDefBuilder("myop", "ImageSummary")
.Input(FakeInput())
.Input(FakeInput())
.Attr("max_images", max_images)
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
}
void CheckAndRemoveEncodedImages(Summary* summary) {
for (int i = 0; i < summary->value_size(); ++i) {
Summary::Value* value = summary->mutable_value(i);
ASSERT_TRUE(value->has_image()) << "No image for value: " << value->tag();
ASSERT_FALSE(value->image().encoded_image_string().empty())
<< "No encoded_image_string for value: " << value->tag();
if (VLOG_IS_ON(2)) {
TF_CHECK_OK(WriteStringToFile(
Env::Default(), strings::StrCat("/tmp/", value->tag(), ".png"),
value->image().encoded_image_string()));
}
value->mutable_image()->clear_encoded_image_string();
}
}
};
TEST_F(SummaryImageOpTest, ThreeGrayImagesOutOfFive4dInput) {
  MakeOp(3);
AddInputFromArray<tstring>(TensorShape({}), {"tag"});
AddInputFromArray<float>(TensorShape({5, 2, 1, 1}),
{0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0});
TF_ASSERT_OK(RunOpKernel());
Tensor* out_tensor = GetOutput(0);
ASSERT_EQ(0, out_tensor->dims());
Summary summary;
ParseProtoUnlimited(&summary, out_tensor->scalar<tstring>()());
CheckAndRemoveEncodedImages(&summary);
EXPECT_SummaryMatches(summary, R"(
value { tag: 'tag/image/0' image { width: 1 height: 2 colorspace: 1} }
value { tag: 'tag/image/1' image { width: 1 height: 2 colorspace: 1} }
value { tag: 'tag/image/2' image { width: 1 height: 2 colorspace: 1} }
)");
}
TEST_F(SummaryImageOpTest, OneGrayImage4dInput) {
  MakeOp(1);
AddInputFromArray<tstring>(TensorShape({}), {"tag"});
  AddInputFromArray<float>(TensorShape({5, 2, 1, 1}),
{0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0});
TF_ASSERT_OK(RunOpKernel());
Tensor* out_tensor = GetOutput(0);
ASSERT_EQ(0, out_tensor->dims());
Summary summary;
ParseProtoUnlimited(&summary, out_tensor->scalar<tstring>()());
CheckAndRemoveEncodedImages(&summary);
EXPECT_SummaryMatches(summary, R"(
value { tag: 'tag/image' image { width: 1 height: 2 colorspace: 1} })");
}
TEST_F(SummaryImageOpTest, OneColorImage4dInput) {
  MakeOp(1);
AddInputFromArray<tstring>(TensorShape({}), {"tag"});
AddInputFromArray<float>(
      TensorShape({1, 5, 2, 3}),
{
1.0f, 0.1f, 0.2f,
1.0f, 0.3f, 0.4f,
0.0f, 1.0f, 0.0f,
0.0f, 1.0f, 0.0f,
0.0f, 0.0f, 1.0f,
0.0f, 0.0f, 1.0f,
1.0f, 1.0f, 0.0f,
1.0f, 0.0f, 1.0f,
1.0f, 1.0f, 0.0f,
1.0f, 0.0f, 1.0f,
});
TF_ASSERT_OK(RunOpKernel());
Tensor* out_tensor = GetOutput(0);
ASSERT_EQ(0, out_tensor->dims());
Summary summary;
ParseProtoUnlimited(&summary, out_tensor->scalar<tstring>()());
CheckAndRemoveEncodedImages(&summary);
EXPECT_SummaryMatches(summary, R"(
value { tag: 'tag/image' image { width: 2 height: 5 colorspace: 3} })");
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/summary_image_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/summary_image_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
1d31f4c3-4a06-4301-8ccf-985a4a4f853b | cpp | google/quiche | tcp_cubic_sender_bytes | quiche/quic/core/congestion_control/tcp_cubic_sender_bytes.cc | quiche/quic/core/congestion_control/tcp_cubic_sender_bytes_test.cc | #include "quiche/quic/core/congestion_control/tcp_cubic_sender_bytes.h"
#include <algorithm>
#include <cstdint>
#include <string>
#include "quiche/quic/core/congestion_control/prr_sender.h"
#include "quiche/quic/core/congestion_control/rtt_stats.h"
#include "quiche/quic/core/crypto/crypto_protocol.h"
#include "quiche/quic/core/quic_constants.h"
#include "quiche/quic/platform/api/quic_bug_tracker.h"
#include "quiche/quic/platform/api/quic_flags.h"
#include "quiche/quic/platform/api/quic_logging.h"
namespace quic {
namespace {
const QuicByteCount kMaxBurstBytes = 3 * kDefaultTCPMSS;
const float kRenoBeta = 0.7f;
const QuicByteCount kDefaultMinimumCongestionWindow = 2 * kDefaultTCPMSS;
}
TcpCubicSenderBytes::TcpCubicSenderBytes(
const QuicClock* clock, const RttStats* rtt_stats, bool reno,
QuicPacketCount initial_tcp_congestion_window,
QuicPacketCount max_congestion_window, QuicConnectionStats* stats)
: rtt_stats_(rtt_stats),
stats_(stats),
reno_(reno),
num_connections_(kDefaultNumConnections),
min4_mode_(false),
last_cutback_exited_slowstart_(false),
slow_start_large_reduction_(false),
no_prr_(false),
cubic_(clock),
num_acked_packets_(0),
congestion_window_(initial_tcp_congestion_window * kDefaultTCPMSS),
min_congestion_window_(kDefaultMinimumCongestionWindow),
max_congestion_window_(max_congestion_window * kDefaultTCPMSS),
slowstart_threshold_(max_congestion_window * kDefaultTCPMSS),
initial_tcp_congestion_window_(initial_tcp_congestion_window *
kDefaultTCPMSS),
initial_max_tcp_congestion_window_(max_congestion_window *
kDefaultTCPMSS),
min_slow_start_exit_window_(min_congestion_window_) {}
TcpCubicSenderBytes::~TcpCubicSenderBytes() {}
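// Server-side connection options: kMIN4 drops the minimum window to one
// packet (with a 4-packet in-flight allowance in CanSend), kSSLR enables
// large slow-start loss reductions, and kNPRR disables PRR in recovery.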
void TcpCubicSenderBytes::SetFromConfig(const QuicConfig& config,
Perspective perspective) {
if (perspective == Perspective::IS_SERVER &&
config.HasReceivedConnectionOptions()) {
if (ContainsQuicTag(config.ReceivedConnectionOptions(), kMIN4)) {
min4_mode_ = true;
SetMinCongestionWindowInPackets(1);
}
if (ContainsQuicTag(config.ReceivedConnectionOptions(), kSSLR)) {
slow_start_large_reduction_ = true;
}
if (ContainsQuicTag(config.ReceivedConnectionOptions(), kNPRR)) {
no_prr_ = true;
}
}
}
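// Seeds the congestion window from an externally supplied bandwidth/RTT
// estimate (e.g. bandwidth resumption); a no-op if either value is zero.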
void TcpCubicSenderBytes::AdjustNetworkParameters(const NetworkParams& params) {
if (params.bandwidth.IsZero() || params.rtt.IsZero()) {
return;
}
SetCongestionWindowFromBandwidthAndRtt(params.bandwidth, params.rtt);
}
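// Backoff factor when emulating N connections: (N - 1 + 0.7) / N, so a
// single connection cuts to 0.7 and the cut becomes shallower as N grows.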
float TcpCubicSenderBytes::RenoBeta() const {
return (num_connections_ - 1 + kRenoBeta) / num_connections_;
}
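// Applies losses before acks, so a cutback in this event places the sender
// in recovery before the acked packets are processed.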
void TcpCubicSenderBytes::OnCongestionEvent(
bool rtt_updated, QuicByteCount prior_in_flight, QuicTime event_time,
const AckedPacketVector& acked_packets,
const LostPacketVector& lost_packets, QuicPacketCount /*num_packets_acked*/,
QuicPacketCount /*num_packets_lost*/) {
if (rtt_updated && InSlowStart() &&
hybrid_slow_start_.ShouldExitSlowStart(
rtt_stats_->latest_rtt(), rtt_stats_->min_rtt(),
GetCongestionWindow() / kDefaultTCPMSS)) {
ExitSlowstart();
}
for (const LostPacket& lost_packet : lost_packets) {
OnPacketLost(lost_packet.packet_number, lost_packet.bytes_lost,
prior_in_flight);
}
for (const AckedPacket& acked_packet : acked_packets) {
OnPacketAcked(acked_packet.packet_number, acked_packet.bytes_acked,
prior_in_flight, event_time);
}
}
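// In recovery, acks feed PRR instead of growing the congestion window.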
void TcpCubicSenderBytes::OnPacketAcked(QuicPacketNumber acked_packet_number,
QuicByteCount acked_bytes,
QuicByteCount prior_in_flight,
QuicTime event_time) {
largest_acked_packet_number_.UpdateMax(acked_packet_number);
if (InRecovery()) {
if (!no_prr_) {
prr_.OnPacketAcked(acked_bytes);
}
return;
}
MaybeIncreaseCwnd(acked_packet_number, acked_bytes, prior_in_flight,
event_time);
if (InSlowStart()) {
hybrid_slow_start_.OnPacketAcked(acked_packet_number);
}
}
void TcpCubicSenderBytes::OnPacketSent(
QuicTime /*sent_time*/, QuicByteCount /*bytes_in_flight*/,
QuicPacketNumber packet_number, QuicByteCount bytes,
HasRetransmittableData is_retransmittable) {
if (InSlowStart()) {
++(stats_->slowstart_packets_sent);
}
if (is_retransmittable != HAS_RETRANSMITTABLE_DATA) {
return;
}
if (InRecovery()) {
prr_.OnPacketSent(bytes);
}
QUICHE_DCHECK(!largest_sent_packet_number_.IsInitialized() ||
largest_sent_packet_number_ < packet_number);
largest_sent_packet_number_ = packet_number;
hybrid_slow_start_.OnPacketSent(packet_number);
}
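// PRR gates sending during recovery (unless NPRR); otherwise sending is
// allowed while bytes in flight are below the window, or below 4 MSS in
// MIN4 mode.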
bool TcpCubicSenderBytes::CanSend(QuicByteCount bytes_in_flight) {
if (!no_prr_ && InRecovery()) {
return prr_.CanSend(GetCongestionWindow(), bytes_in_flight,
GetSlowStartThreshold());
}
if (GetCongestionWindow() > bytes_in_flight) {
return true;
}
if (min4_mode_ && bytes_in_flight < 4 * kDefaultTCPMSS) {
return true;
}
return false;
}
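// Paces at cwnd/SRTT scaled by 2 in slow start and 1.25 in congestion
// avoidance (1 during recovery when PRR is disabled).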
QuicBandwidth TcpCubicSenderBytes::PacingRate(
QuicByteCount /*bytes_in_flight*/) const {
QuicTime::Delta srtt = rtt_stats_->SmoothedOrInitialRtt();
const QuicBandwidth bandwidth =
QuicBandwidth::FromBytesAndTimeDelta(GetCongestionWindow(), srtt);
return bandwidth * (InSlowStart() ? 2 : (no_prr_ && InRecovery() ? 1 : 1.25));
}
QuicBandwidth TcpCubicSenderBytes::BandwidthEstimate() const {
QuicTime::Delta srtt = rtt_stats_->smoothed_rtt();
if (srtt.IsZero()) {
return QuicBandwidth::Zero();
}
return QuicBandwidth::FromBytesAndTimeDelta(GetCongestionWindow(), srtt);
}
bool TcpCubicSenderBytes::InSlowStart() const {
return GetCongestionWindow() < GetSlowStartThreshold();
}
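// The sender is cwnd-limited when the window is full, more than half used
// while in slow start, or within one burst (kMaxBurstBytes) of full.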
bool TcpCubicSenderBytes::IsCwndLimited(QuicByteCount bytes_in_flight) const {
const QuicByteCount congestion_window = GetCongestionWindow();
if (bytes_in_flight >= congestion_window) {
return true;
}
const QuicByteCount available_bytes = congestion_window - bytes_in_flight;
const bool slow_start_limited =
InSlowStart() && bytes_in_flight > congestion_window / 2;
return slow_start_limited || available_bytes <= kMaxBurstBytes;
}
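// In recovery while the newest ack is for a packet sent at or before the
// last window cutback.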
bool TcpCubicSenderBytes::InRecovery() const {
return largest_acked_packet_number_.IsInitialized() &&
largest_sent_at_last_cutback_.IsInitialized() &&
largest_acked_packet_number_ <= largest_sent_at_last_cutback_;
}
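// An RTO ends any recovery epoch; the window is only collapsed if packets
// were actually retransmitted.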
void TcpCubicSenderBytes::OnRetransmissionTimeout(bool packets_retransmitted) {
largest_sent_at_last_cutback_.Clear();
if (!packets_retransmitted) {
return;
}
hybrid_slow_start_.Restart();
HandleRetransmissionTimeout();
}
std::string TcpCubicSenderBytes::GetDebugState() const { return ""; }
void TcpCubicSenderBytes::OnApplicationLimited(
QuicByteCount /*bytes_in_flight*/) {}
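// Clamps the bandwidth * RTT product to [min_congestion_window_,
// kMaxResumptionCongestionWindow packets].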
void TcpCubicSenderBytes::SetCongestionWindowFromBandwidthAndRtt(
QuicBandwidth bandwidth, QuicTime::Delta rtt) {
QuicByteCount new_congestion_window = bandwidth.ToBytesPerPeriod(rtt);
congestion_window_ =
std::max(min_congestion_window_,
std::min(new_congestion_window,
kMaxResumptionCongestionWindow * kDefaultTCPMSS));
}
void TcpCubicSenderBytes::SetInitialCongestionWindowInPackets(
QuicPacketCount congestion_window) {
congestion_window_ = congestion_window * kDefaultTCPMSS;
}
void TcpCubicSenderBytes::SetMinCongestionWindowInPackets(
QuicPacketCount congestion_window) {
min_congestion_window_ = congestion_window * kDefaultTCPMSS;
}
void TcpCubicSenderBytes::SetNumEmulatedConnections(int num_connections) {
num_connections_ = std::max(1, num_connections);
cubic_.SetNumConnections(num_connections_);
}
void TcpCubicSenderBytes::ExitSlowstart() {
slowstart_threshold_ = congestion_window_;
}
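// At most one cutback per round trip: losses of packets sent before the
// last cutback are ignored, except that SSLR keeps shrinking the window in
// slow start. Otherwise reduce by lost bytes (SSLR), RenoBeta(), or the
// cubic function, and exit slow start.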
void TcpCubicSenderBytes::OnPacketLost(QuicPacketNumber packet_number,
QuicByteCount lost_bytes,
QuicByteCount prior_in_flight) {
if (largest_sent_at_last_cutback_.IsInitialized() &&
packet_number <= largest_sent_at_last_cutback_) {
if (last_cutback_exited_slowstart_) {
++stats_->slowstart_packets_lost;
stats_->slowstart_bytes_lost += lost_bytes;
if (slow_start_large_reduction_) {
congestion_window_ = std::max(congestion_window_ - lost_bytes,
min_slow_start_exit_window_);
slowstart_threshold_ = congestion_window_;
}
}
QUIC_DVLOG(1) << "Ignoring loss for largest_missing:" << packet_number
<< " because it was sent prior to the last CWND cutback.";
return;
}
++stats_->tcp_loss_events;
last_cutback_exited_slowstart_ = InSlowStart();
if (InSlowStart()) {
++stats_->slowstart_packets_lost;
}
if (!no_prr_) {
prr_.OnPacketLost(prior_in_flight);
}
if (slow_start_large_reduction_ && InSlowStart()) {
QUICHE_DCHECK_LT(kDefaultTCPMSS, congestion_window_);
if (congestion_window_ >= 2 * initial_tcp_congestion_window_) {
min_slow_start_exit_window_ = congestion_window_ / 2;
}
congestion_window_ = congestion_window_ - kDefaultTCPMSS;
} else if (reno_) {
congestion_window_ = congestion_window_ * RenoBeta();
} else {
congestion_window_ =
cubic_.CongestionWindowAfterPacketLoss(congestion_window_);
}
if (congestion_window_ < min_congestion_window_) {
congestion_window_ = min_congestion_window_;
}
slowstart_threshold_ = congestion_window_;
largest_sent_at_last_cutback_ = largest_sent_packet_number_;
num_acked_packets_ = 0;
QUIC_DVLOG(1) << "Incoming loss; congestion window: " << congestion_window_
<< " slowstart threshold: " << slowstart_threshold_;
}
QuicByteCount TcpCubicSenderBytes::GetCongestionWindow() const {
return congestion_window_;
}
QuicByteCount TcpCubicSenderBytes::GetSlowStartThreshold() const {
return slowstart_threshold_;
}
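// Grows the window only when cwnd-limited: one MSS per ack in slow start,
// one MSS per (connection-scaled) window of acks for Reno, or the cubic
// function otherwise.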
void TcpCubicSenderBytes::MaybeIncreaseCwnd(
QuicPacketNumber /*acked_packet_number*/, QuicByteCount acked_bytes,
QuicByteCount prior_in_flight, QuicTime event_time) {
QUIC_BUG_IF(quic_bug_10439_1, InRecovery())
<< "Never increase the CWND during recovery.";
if (!IsCwndLimited(prior_in_flight)) {
cubic_.OnApplicationLimited();
return;
}
if (congestion_window_ >= max_congestion_window_) {
return;
}
if (InSlowStart()) {
congestion_window_ += kDefaultTCPMSS;
QUIC_DVLOG(1) << "Slow start; congestion window: " << congestion_window_
<< " slowstart threshold: " << slowstart_threshold_;
return;
}
if (reno_) {
++num_acked_packets_;
if (num_acked_packets_ * num_connections_ >=
congestion_window_ / kDefaultTCPMSS) {
congestion_window_ += kDefaultTCPMSS;
num_acked_packets_ = 0;
}
QUIC_DVLOG(1) << "Reno; congestion window: " << congestion_window_
<< " slowstart threshold: " << slowstart_threshold_
<< " congestion window count: " << num_acked_packets_;
} else {
congestion_window_ = std::min(
max_congestion_window_,
cubic_.CongestionWindowAfterAck(acked_bytes, congestion_window_,
rtt_stats_->min_rtt(), event_time));
QUIC_DVLOG(1) << "Cubic; congestion window: " << congestion_window_
<< " slowstart threshold: " << slowstart_threshold_;
}
}
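// On RTO, halve the slow start threshold and drop the window to its minimum.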
void TcpCubicSenderBytes::HandleRetransmissionTimeout() {
cubic_.ResetCubicState();
slowstart_threshold_ = congestion_window_ / 2;
congestion_window_ = min_congestion_window_;
}
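// A path change invalidates all congestion state, so reset to initial values.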
void TcpCubicSenderBytes::OnConnectionMigration() {
hybrid_slow_start_.Restart();
prr_ = PrrSender();
largest_sent_packet_number_.Clear();
largest_acked_packet_number_.Clear();
largest_sent_at_last_cutback_.Clear();
last_cutback_exited_slowstart_ = false;
cubic_.ResetCubicState();
num_acked_packets_ = 0;
congestion_window_ = initial_tcp_congestion_window_;
max_congestion_window_ = initial_max_tcp_congestion_window_;
slowstart_threshold_ = initial_max_tcp_congestion_window_;
}
CongestionControlType TcpCubicSenderBytes::GetCongestionControlType() const {
return reno_ ? kRenoBytes : kCubicBytes;
}
} | #include "quiche/quic/core/congestion_control/tcp_cubic_sender_bytes.h"
#include <algorithm>
#include <cstdint>
#include <memory>
#include <utility>
#include "quiche/quic/core/congestion_control/rtt_stats.h"
#include "quiche/quic/core/congestion_control/send_algorithm_interface.h"
#include "quiche/quic/core/crypto/crypto_protocol.h"
#include "quiche/quic/core/quic_packets.h"
#include "quiche/quic/core/quic_utils.h"
#include "quiche/quic/platform/api/quic_logging.h"
#include "quiche/quic/platform/api/quic_test.h"
#include "quiche/quic/test_tools/mock_clock.h"
#include "quiche/quic/test_tools/quic_config_peer.h"
namespace quic {
namespace test {
const uint32_t kInitialCongestionWindowPackets = 10;
const uint32_t kMaxCongestionWindowPackets = 200;
const uint32_t kDefaultWindowTCP =
kInitialCongestionWindowPackets * kDefaultTCPMSS;
const float kRenoBeta = 0.7f;
class TcpCubicSenderBytesPeer : public TcpCubicSenderBytes {
public:
TcpCubicSenderBytesPeer(const QuicClock* clock, bool reno)
: TcpCubicSenderBytes(clock, &rtt_stats_, reno,
kInitialCongestionWindowPackets,
kMaxCongestionWindowPackets, &stats_) {}
const HybridSlowStart& hybrid_slow_start() const {
return hybrid_slow_start_;
}
float GetRenoBeta() const { return RenoBeta(); }
RttStats rtt_stats_;
QuicConnectionStats stats_;
};
class TcpCubicSenderBytesTest : public QuicTest {
protected:
TcpCubicSenderBytesTest()
: one_ms_(QuicTime::Delta::FromMilliseconds(1)),
sender_(new TcpCubicSenderBytesPeer(&clock_, true)),
packet_number_(1),
acked_packet_number_(0),
bytes_in_flight_(0) {}
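// Sends full-MSS packets until CanSend() returns false and returns the
// count; the length overload ignores its argument and sends kDefaultTCPMSS.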
int SendAvailableSendWindow() {
return SendAvailableSendWindow(kDefaultTCPMSS);
}
int SendAvailableSendWindow(QuicPacketLength /*packet_length*/) {
int packets_sent = 0;
bool can_send = sender_->CanSend(bytes_in_flight_);
while (can_send) {
sender_->OnPacketSent(clock_.Now(), bytes_in_flight_,
QuicPacketNumber(packet_number_++), kDefaultTCPMSS,
HAS_RETRANSMITTABLE_DATA);
++packets_sent;
bytes_in_flight_ += kDefaultTCPMSS;
can_send = sender_->CanSend(bytes_in_flight_);
}
return packets_sent;
}
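// Records a 60ms RTT sample, then acks n full-MSS packets in a single
// congestion event and advances the clock by 1ms.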
void AckNPackets(int n) {
sender_->rtt_stats_.UpdateRtt(QuicTime::Delta::FromMilliseconds(60),
QuicTime::Delta::Zero(), clock_.Now());
AckedPacketVector acked_packets;
LostPacketVector lost_packets;
for (int i = 0; i < n; ++i) {
++acked_packet_number_;
acked_packets.push_back(
AckedPacket(QuicPacketNumber(acked_packet_number_), kDefaultTCPMSS,
QuicTime::Zero()));
}
sender_->OnCongestionEvent(true, bytes_in_flight_, clock_.Now(),
acked_packets, lost_packets, 0, 0);
bytes_in_flight_ -= n * kDefaultTCPMSS;
clock_.AdvanceTime(one_ms_);
}
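// Reports n lost packets of the given length in one congestion event,
// without any acks or RTT update.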
void LoseNPackets(int n) { LoseNPackets(n, kDefaultTCPMSS); }
void LoseNPackets(int n, QuicPacketLength packet_length) {
AckedPacketVector acked_packets;
LostPacketVector lost_packets;
for (int i = 0; i < n; ++i) {
++acked_packet_number_;
lost_packets.push_back(
LostPacket(QuicPacketNumber(acked_packet_number_), packet_length));
}
sender_->OnCongestionEvent(false, bytes_in_flight_, clock_.Now(),
acked_packets, lost_packets, 0, 0);
bytes_in_flight_ -= n * packet_length;
}
void LosePacket(uint64_t packet_number) {
AckedPacketVector acked_packets;
LostPacketVector lost_packets;
lost_packets.push_back(
LostPacket(QuicPacketNumber(packet_number), kDefaultTCPMSS));
sender_->OnCongestionEvent(false, bytes_in_flight_, clock_.Now(),
acked_packets, lost_packets, 0, 0);
bytes_in_flight_ -= kDefaultTCPMSS;
}
const QuicTime::Delta one_ms_;
MockClock clock_;
std::unique_ptr<TcpCubicSenderBytesPeer> sender_;
uint64_t packet_number_;
uint64_t acked_packet_number_;
QuicByteCount bytes_in_flight_;
};
TEST_F(TcpCubicSenderBytesTest, SimpleSender) {
EXPECT_EQ(kDefaultWindowTCP, sender_->GetCongestionWindow());
EXPECT_TRUE(sender_->CanSend(0));
EXPECT_TRUE(sender_->CanSend(0));
EXPECT_EQ(kDefaultWindowTCP, sender_->GetCongestionWindow());
SendAvailableSendWindow();
EXPECT_FALSE(sender_->CanSend(sender_->GetCongestionWindow()));
}
TEST_F(TcpCubicSenderBytesTest, ApplicationLimitedSlowStart) {
const int kNumberOfAcks = 5;
EXPECT_TRUE(sender_->CanSend(0));
EXPECT_TRUE(sender_->CanSend(0));
SendAvailableSendWindow();
for (int i = 0; i < kNumberOfAcks; ++i) {
AckNPackets(2);
}
QuicByteCount bytes_to_send = sender_->GetCongestionWindow();
EXPECT_EQ(kDefaultWindowTCP + kDefaultTCPMSS * 2 * 2, bytes_to_send);
}
TEST_F(TcpCubicSenderBytesTest, ExponentialSlowStart) {
const int kNumberOfAcks = 20;
EXPECT_TRUE(sender_->CanSend(0));
EXPECT_EQ(QuicBandwidth::Zero(), sender_->BandwidthEstimate());
EXPECT_TRUE(sender_->CanSend(0));
for (int i = 0; i < kNumberOfAcks; ++i) {
SendAvailableSendWindow();
AckNPackets(2);
}
const QuicByteCount cwnd = sender_->GetCongestionWindow();
EXPECT_EQ(kDefaultWindowTCP + kDefaultTCPMSS * 2 * kNumberOfAcks, cwnd);
EXPECT_EQ(QuicBandwidth::FromBytesAndTimeDelta(
cwnd, sender_->rtt_stats_.smoothed_rtt()),
sender_->BandwidthEstimate());
}
TEST_F(TcpCubicSenderBytesTest, SlowStartPacketLoss) {
sender_->SetNumEmulatedConnections(1);
const int kNumberOfAcks = 10;
for (int i = 0; i < kNumberOfAcks; ++i) {
SendAvailableSendWindow();
AckNPackets(2);
}
SendAvailableSendWindow();
QuicByteCount expected_send_window =
kDefaultWindowTCP + (kDefaultTCPMSS * 2 * kNumberOfAcks);
EXPECT_EQ(expected_send_window, sender_->GetCongestionWindow());
LoseNPackets(1);
size_t packets_in_recovery_window = expected_send_window / kDefaultTCPMSS;
expected_send_window *= kRenoBeta;
EXPECT_EQ(expected_send_window, sender_->GetCongestionWindow());
size_t number_of_packets_in_window = expected_send_window / kDefaultTCPMSS;
QUIC_DLOG(INFO) << "number_packets: " << number_of_packets_in_window;
AckNPackets(packets_in_recovery_window);
SendAvailableSendWindow();
EXPECT_EQ(expected_send_window, sender_->GetCongestionWindow());
AckNPackets(number_of_packets_in_window - 2);
SendAvailableSendWindow();
EXPECT_EQ(expected_send_window, sender_->GetCongestionWindow());
AckNPackets(1);
expected_send_window += kDefaultTCPMSS;
EXPECT_EQ(expected_send_window, sender_->GetCongestionWindow());
EXPECT_TRUE(sender_->hybrid_slow_start().started());
sender_->OnRetransmissionTimeout(true);
EXPECT_FALSE(sender_->hybrid_slow_start().started());
}
TEST_F(TcpCubicSenderBytesTest, SlowStartPacketLossWithLargeReduction) {
QuicConfig config;
QuicTagVector options;
options.push_back(kSSLR);
QuicConfigPeer::SetReceivedConnectionOptions(&config, options);
sender_->SetFromConfig(config, Perspective::IS_SERVER);
sender_->SetNumEmulatedConnections(1);
const int kNumberOfAcks = (kDefaultWindowTCP / (2 * kDefaultTCPMSS)) - 1;
for (int i = 0; i < kNumberOfAcks; ++i) {
SendAvailableSendWindow();
AckNPackets(2);
}
SendAvailableSendWindow();
QuicByteCount expected_send_window =
kDefaultWindowTCP + (kDefaultTCPMSS * 2 * kNumberOfAcks);
EXPECT_EQ(expected_send_window, sender_->GetCongestionWindow());
LoseNPackets(1);
expected_send_window -= kDefaultTCPMSS;
EXPECT_EQ(expected_send_window, sender_->GetCongestionWindow());
LoseNPackets(5);
expected_send_window -= 5 * kDefaultTCPMSS;
EXPECT_EQ(expected_send_window, sender_->GetCongestionWindow());
LoseNPackets(10);
expected_send_window -= 10 * kDefaultTCPMSS;
EXPECT_EQ(expected_send_window, sender_->GetCongestionWindow());
size_t packets_in_recovery_window = expected_send_window / kDefaultTCPMSS;
size_t number_of_packets_in_window = expected_send_window / kDefaultTCPMSS;
QUIC_DLOG(INFO) << "number_packets: " << number_of_packets_in_window;
AckNPackets(packets_in_recovery_window);
SendAvailableSendWindow();
EXPECT_EQ(expected_send_window, sender_->GetCongestionWindow());
AckNPackets(number_of_packets_in_window - 1);
SendAvailableSendWindow();
EXPECT_EQ(expected_send_window, sender_->GetCongestionWindow());
AckNPackets(1);
expected_send_window += kDefaultTCPMSS;
EXPECT_EQ(expected_send_window, sender_->GetCongestionWindow());
EXPECT_TRUE(sender_->hybrid_slow_start().started());
sender_->OnRetransmissionTimeout(true);
EXPECT_FALSE(sender_->hybrid_slow_start().started());
}
TEST_F(TcpCubicSenderBytesTest, SlowStartHalfPacketLossWithLargeReduction) {
QuicConfig config;
QuicTagVector options;
options.push_back(kSSLR);
QuicConfigPeer::SetReceivedConnectionOptions(&config, options);
sender_->SetFromConfig(config, Perspective::IS_SERVER);
sender_->SetNumEmulatedConnections(1);
const int kNumberOfAcks = 10;
for (int i = 0; i < kNumberOfAcks; ++i) {
SendAvailableSendWindow(kDefaultTCPMSS / 2);
AckNPackets(2);
}
SendAvailableSendWindow(kDefaultTCPMSS / 2);
QuicByteCount expected_send_window =
kDefaultWindowTCP + (kDefaultTCPMSS * 2 * kNumberOfAcks);
EXPECT_EQ(expected_send_window, sender_->GetCongestionWindow());
LoseNPackets(1);
expected_send_window -= kDefaultTCPMSS;
EXPECT_EQ(expected_send_window, sender_->GetCongestionWindow());
LoseNPackets(10, kDefaultTCPMSS / 2);
expected_send_window -= 5 * kDefaultTCPMSS;
EXPECT_EQ(expected_send_window, sender_->GetCongestionWindow());
}
TEST_F(TcpCubicSenderBytesTest, SlowStartPacketLossWithMaxHalfReduction) {
QuicConfig config;
QuicTagVector options;
options.push_back(kSSLR);
QuicConfigPeer::SetReceivedConnectionOptions(&config, options);
sender_->SetFromConfig(config, Perspective::IS_SERVER);
sender_->SetNumEmulatedConnections(1);
const int kNumberOfAcks = kInitialCongestionWindowPackets / 2;
for (int i = 0; i < kNumberOfAcks; ++i) {
SendAvailableSendWindow();
AckNPackets(2);
}
SendAvailableSendWindow();
QuicByteCount expected_send_window =
kDefaultWindowTCP + (kDefaultTCPMSS * 2 * kNumberOfAcks);
EXPECT_EQ(expected_send_window, sender_->GetCongestionWindow());
LoseNPackets(1);
expected_send_window -= kDefaultTCPMSS;
EXPECT_EQ(expected_send_window, sender_->GetCongestionWindow());
LoseNPackets(kNumberOfAcks * 2);
expected_send_window -= (kNumberOfAcks * 2 - 1) * kDefaultTCPMSS;
EXPECT_EQ(expected_send_window, sender_->GetCongestionWindow());
LoseNPackets(5);
EXPECT_EQ(expected_send_window, sender_->GetCongestionWindow());
}
TEST_F(TcpCubicSenderBytesTest, NoPRRWhenLessThanOnePacketInFlight) {
SendAvailableSendWindow();
LoseNPackets(kInitialCongestionWindowPackets - 1);
AckNPackets(1);
EXPECT_EQ(2, SendAvailableSendWindow());
EXPECT_TRUE(sender_->CanSend(0));
}
TEST_F(TcpCubicSenderBytesTest, SlowStartPacketLossPRR) {
sender_->SetNumEmulatedConnections(1);
const int kNumberOfAcks = 5;
for (int i = 0; i < kNumberOfAcks; ++i) {
SendAvailableSendWindow();
AckNPackets(2);
}
SendAvailableSendWindow();
QuicByteCount expected_send_window =
kDefaultWindowTCP + (kDefaultTCPMSS * 2 * kNumberOfAcks);
EXPECT_EQ(expected_send_window, sender_->GetCongestionWindow());
LoseNPackets(1);
size_t send_window_before_loss = expected_send_window;
expected_send_window *= kRenoBeta;
EXPECT_EQ(expected_send_window, sender_->GetCongestionWindow());
size_t remaining_packets_in_recovery =
send_window_before_loss / kDefaultTCPMSS - 2;
for (size_t i = 0; i < remaining_packets_in_recovery; ++i) {
AckNPackets(1);
SendAvailableSendWindow();
EXPECT_EQ(expected_send_window, sender_->GetCongestionWindow());
}
size_t number_of_packets_in_window = expected_send_window / kDefaultTCPMSS;
for (size_t i = 0; i < number_of_packets_in_window; ++i) {
AckNPackets(1);
EXPECT_EQ(1, SendAvailableSendWindow());
EXPECT_EQ(expected_send_window, sender_->GetCongestionWindow());
}
AckNPackets(1);
expected_send_window += kDefaultTCPMSS;
EXPECT_EQ(expected_send_window, sender_->GetCongestionWindow());
}
TEST_F(TcpCubicSenderBytesTest, SlowStartBurstPacketLossPRR) {
sender_->SetNumEmulatedConnections(1);
const int kNumberOfAcks = 10;
for (int i = 0; i < kNumberOfAcks; ++i) {
SendAvailableSendWindow();
AckNPackets(2);
}
SendAvailableSendWindow();
QuicByteCount expected_send_window =
kDefaultWindowTCP + (kDefaultTCPMSS * 2 * kNumberOfAcks);
EXPECT_EQ(expected_send_window, sender_->GetCongestionWindow());
size_t send_window_after_loss = kRenoBeta * expected_send_window;
size_t num_packets_to_lose =
(expected_send_window - send_window_after_loss) / kDefaultTCPMSS + 1;
LoseNPackets(num_packets_to_lose);
EXPECT_TRUE(sender_->CanSend(bytes_in_flight_));
AckNPackets(1);
expected_send_window *= kRenoBeta;
EXPECT_EQ(expected_send_window, sender_->GetCongestionWindow());
EXPECT_EQ(2, SendAvailableSendWindow());
LoseNPackets(1);
AckNPackets(1);
EXPECT_EQ(2, SendAvailableSendWindow());
LoseNPackets(1);
AckNPackets(1);
EXPECT_EQ(2, SendAvailableSendWindow());
for (int i = 0; i < kNumberOfAcks; ++i) {
AckNPackets(1);
EXPECT_EQ(1, SendAvailableSendWindow());
}
}
TEST_F(TcpCubicSenderBytesTest, RTOCongestionWindow) {
EXPECT_EQ(kDefaultWindowTCP, sender_->GetCongestionWindow());
sender_->OnRetransmissionTimeout(true);
EXPECT_EQ(2 * kDefaultTCPMSS, sender_->GetCongestionWindow());
EXPECT_EQ(5u * kDefaultTCPMSS, sender_->GetSlowStartThreshold());
}
TEST_F(TcpCubicSenderBytesTest, RTOCongestionWindowNoRetransmission) {
EXPECT_EQ(kDefaultWindowTCP, sender_->GetCongestionWindow());
sender_->OnRetransmissionTimeout(false);
EXPECT_EQ(kDefaultWindowTCP, sender_->GetCongestionWindow());
}
TEST_F(TcpCubicSenderBytesTest, TcpCubicResetEpochOnQuiescence) {
const int kMaxCongestionWindow = 50;
const QuicByteCount kMaxCongestionWindowBytes =
kMaxCongestionWindow * kDefaultTCPMSS;
int num_sent = SendAvailableSendWindow();
QuicByteCount saved_cwnd = sender_->GetCongestionWindow();
LoseNPackets(1);
EXPECT_GT(saved_cwnd, sender_->GetCongestionWindow());
for (int i = 1; i < num_sent; ++i) {
AckNPackets(1);
}
EXPECT_EQ(0u, bytes_in_flight_);
saved_cwnd = sender_->GetCongestionWindow();
num_sent = SendAvailableSendWindow();
for (int i = 0; i < num_sent; ++i) {
AckNPackets(1);
}
EXPECT_LT(saved_cwnd, sender_->GetCongestionWindow());
EXPECT_GT(kMaxCongestionWindowBytes, sender_->GetCongestionWindow());
EXPECT_EQ(0u, bytes_in_flight_);
clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(100000));
saved_cwnd = sender_->GetCongestionWindow();
SendAvailableSendWindow();
AckNPackets(1);
EXPECT_NEAR(saved_cwnd, sender_->GetCongestionWindow(), kDefaultTCPMSS);
EXPECT_GT(kMaxCongestionWindowBytes, sender_->GetCongestionWindow());
}
TEST_F(TcpCubicSenderBytesTest, MultipleLossesInOneWindow) {
SendAvailableSendWindow();
const QuicByteCount initial_window = sender_->GetCongestionWindow();
LosePacket(acked_packet_number_ + 1);
const QuicByteCount post_loss_window = sender_->GetCongestionWindow();
EXPECT_GT(initial_window, post_loss_window);
LosePacket(acked_packet_number_ + 3);
EXPECT_EQ(post_loss_window, sender_->GetCongestionWindow());
LosePacket(packet_number_ - 1);
EXPECT_EQ(post_loss_window, sender_->GetCongestionWindow());
LosePacket(packet_number_);
EXPECT_GT(post_loss_window, sender_->GetCongestionWindow());
}
TEST_F(TcpCubicSenderBytesTest, ConfigureMaxInitialWindow) {
QuicConfig config;
QuicTagVector options;
options.push_back(kIW10);
QuicConfigPeer::SetReceivedConnectionOptions(&config, options);
sender_->SetFromConfig(config, Perspective::IS_SERVER);
EXPECT_EQ(10u * kDefaultTCPMSS, sender_->GetCongestionWindow());
}
TEST_F(TcpCubicSenderBytesTest, SetInitialCongestionWindow) {
EXPECT_NE(3u * kDefaultTCPMSS, sender_->GetCongestionWindow());
sender_->SetInitialCongestionWindowInPackets(3);
EXPECT_EQ(3u * kDefaultTCPMSS, sender_->GetCongestionWindow());
}
TEST_F(TcpCubicSenderBytesTest, 2ConnectionCongestionAvoidanceAtEndOfRecovery) {
sender_->SetNumEmulatedConnections(2);
const int kNumberOfAcks = 5;
for (int i = 0; i < kNumberOfAcks; ++i) {
SendAvailableSendWindow();
AckNPackets(2);
}
SendAvailableSendWindow();
QuicByteCount expected_send_window =
kDefaultWindowTCP + (kDefaultTCPMSS * 2 * kNumberOfAcks);
EXPECT_EQ(expected_send_window, sender_->GetCongestionWindow());
LoseNPackets(1);
expected_send_window = expected_send_window * sender_->GetRenoBeta();
EXPECT_EQ(expected_send_window, sender_->GetCongestionWindow());
for (int i = 0; i < 10; ++i) {
SendAvailableSendWindow();
EXPECT_TRUE(sender_->InRecovery());
AckNPackets(2);
EXPECT_EQ(expected_send_window, sender_->GetCongestionWindow());
}
EXPECT_FALSE(sender_->InRecovery());
size_t packets_in_send_window = expected_send_window / kDefaultTCPMSS;
SendAvailableSendWindow();
AckNPackets(packets_in_send_window / 2 - 2);
EXPECT_EQ(expected_send_window, sender_->GetCongestionWindow());
SendAvailableSendWindow();
AckNPackets(2);
expected_send_window += kDefaultTCPMSS;
packets_in_send_window += 1;
EXPECT_EQ(expected_send_window, sender_->GetCongestionWindow());
SendAvailableSendWindow();
AckNPackets(packets_in_send_window / 2 - 1);
EXPECT_EQ(expected_send_window, sender_->GetCongestionWindow());
SendAvailableSendWindow();
AckNPackets(2);
expected_send_window += kDefaultTCPMSS;
EXPECT_EQ(expected_send_window, sender_->GetCongestionWindow());
}
TEST_F(TcpCubicSenderBytesTest, 1ConnectionCongestionAvoidanceAtEndOfRecovery) {
sender_->SetNumEmulatedConnections(1);
const int kNumberOfAcks = 5;
for (int i = 0; i < kNumberOfAcks; ++i) {
SendAvailableSendWindow();
AckNPackets(2);
}
SendAvailableSendWindow();
QuicByteCount expected_send_window =
kDefaultWindowTCP + (kDefaultTCPMSS * 2 * kNumberOfAcks);
EXPECT_EQ(expected_send_window, sender_->GetCongestionWindow());
LoseNPackets(1);
expected_send_window *= kRenoBeta;
EXPECT_EQ(expected_send_window, sender_->GetCongestionWindow());
for (int i = 0; i < 10; ++i) {
SendAvailableSendWindow();
EXPECT_TRUE(sender_->InRecovery());
AckNPackets(2);
EXPECT_EQ(expected_send_window, sender_->GetCongestionWindow());
}
EXPECT_FALSE(sender_->InRecovery());
for (uint64_t i = 0; i < expected_send_window / kDefaultTCPMSS - 2; i += 2) {
SendAvailableSendWindow();
AckNPackets(2);
EXPECT_EQ(expected_send_window, sender_->GetCongestionWindow());
}
SendAvailableSendWindow();
AckNPackets(2);
expected_send_window += kDefaultTCPMSS;
EXPECT_EQ(expected_send_window, sender_->GetCongestionWindow());
}
TEST_F(TcpCubicSenderBytesTest, BandwidthResumption) {
const QuicPacketCount kNumberOfPackets = 123;
const QuicBandwidth kBandwidthEstimate =
QuicBandwidth::FromBytesPerSecond(kNumberOfPackets * kDefaultTCPMSS);
const QuicTime::Delta kRttEstimate = QuicTime::Delta::FromSeconds(1);
SendAlgorithmInterface::NetworkParams network_param;
network_param.bandwidth = kBandwidthEstimate;
network_param.rtt = kRttEstimate;
sender_->AdjustNetworkParameters(network_param);
EXPECT_EQ(kNumberOfPackets * kDefaultTCPMSS, sender_->GetCongestionWindow());
SendAlgorithmInterface::NetworkParams network_param_no_bandwidth;
network_param_no_bandwidth.bandwidth = QuicBandwidth::Zero();
network_param_no_bandwidth.rtt = kRttEstimate;
sender_->AdjustNetworkParameters(network_param_no_bandwidth);
EXPECT_EQ(kNumberOfPackets * kDefaultTCPMSS, sender_->GetCongestionWindow());
const QuicBandwidth kUnreasonableBandwidth =
QuicBandwidth::FromBytesPerSecond((kMaxResumptionCongestionWindow + 1) *
kDefaultTCPMSS);
SendAlgorithmInterface::NetworkParams network_param_large_bandwidth;
network_param_large_bandwidth.bandwidth = kUnreasonableBandwidth;
network_param_large_bandwidth.rtt = QuicTime::Delta::FromSeconds(1);
sender_->AdjustNetworkParameters(network_param_large_bandwidth);
EXPECT_EQ(kMaxResumptionCongestionWindow * kDefaultTCPMSS,
sender_->GetCongestionWindow());
}
TEST_F(TcpCubicSenderBytesTest, PaceBelowCWND) {
QuicConfig config;
QuicTagVector options;
options.push_back(kMIN4);
QuicConfigPeer::SetReceivedConnectionOptions(&config, options);
sender_->SetFromConfig(config, Perspective::IS_SERVER);
sender_->OnRetransmissionTimeout(true);
EXPECT_EQ(kDefaultTCPMSS, sender_->GetCongestionWindow());
EXPECT_TRUE(sender_->CanSend(kDefaultTCPMSS));
EXPECT_TRUE(sender_->CanSend(2 * kDefaultTCPMSS));
EXPECT_TRUE(sender_->CanSend(3 * kDefaultTCPMSS));
EXPECT_FALSE(sender_->CanSend(4 * kDefaultTCPMSS));
}
TEST_F(TcpCubicSenderBytesTest, NoPRR) {
QuicTime::Delta rtt = QuicTime::Delta::FromMilliseconds(100);
sender_->rtt_stats_.UpdateRtt(rtt, QuicTime::Delta::Zero(), QuicTime::Zero());
sender_->SetNumEmulatedConnections(1);
QuicTagVector options;
options.push_back(kNPRR);
QuicConfig config;
QuicConfigPeer::SetReceivedConnectionOptions(&config, options);
sender_->SetFromConfig(config, Perspective::IS_SERVER);
SendAvailableSendWindow();
LoseNPackets(9);
AckNPackets(1);
EXPECT_EQ(kRenoBeta * kDefaultWindowTCP, sender_->GetCongestionWindow());
const QuicPacketCount window_in_packets =
kRenoBeta * kDefaultWindowTCP / kDefaultTCPMSS;
const QuicBandwidth expected_pacing_rate =
QuicBandwidth::FromBytesAndTimeDelta(kRenoBeta * kDefaultWindowTCP,
sender_->rtt_stats_.smoothed_rtt());
EXPECT_EQ(expected_pacing_rate, sender_->PacingRate(0));
EXPECT_EQ(window_in_packets,
static_cast<uint64_t>(SendAvailableSendWindow()));
EXPECT_EQ(expected_pacing_rate,
sender_->PacingRate(kRenoBeta * kDefaultWindowTCP));
}
TEST_F(TcpCubicSenderBytesTest, ResetAfterConnectionMigration) {
sender_->SetNumEmulatedConnections(1);
const int kNumberOfAcks = 10;
for (int i = 0; i < kNumberOfAcks; ++i) {
SendAvailableSendWindow();
AckNPackets(2);
}
SendAvailableSendWindow();
QuicByteCount expected_send_window =
kDefaultWindowTCP + (kDefaultTCPMSS * 2 * kNumberOfAcks);
EXPECT_EQ(expected_send_window, sender_->GetCongestionWindow());
LoseNPackets(1);
expected_send_window *= kRenoBeta;
EXPECT_EQ(expected_send_window, sender_->GetCongestionWindow());
EXPECT_EQ(expected_send_window, sender_->GetSlowStartThreshold());
sender_->OnConnectionMigration();
EXPECT_EQ(kDefaultWindowTCP, sender_->GetCongestionWindow());
EXPECT_EQ(kMaxCongestionWindowPackets * kDefaultTCPMSS,
sender_->GetSlowStartThreshold());
EXPECT_FALSE(sender_->hybrid_slow_start().started());
}
TEST_F(TcpCubicSenderBytesTest, DefaultMaxCwnd) {
RttStats rtt_stats;
QuicConnectionStats stats;
std::unique_ptr<SendAlgorithmInterface> sender(SendAlgorithmInterface::Create(
&clock_, &rtt_stats, nullptr, kCubicBytes,
QuicRandom::GetInstance(), &stats, kInitialCongestionWindow, nullptr));
AckedPacketVector acked_packets;
LostPacketVector missing_packets;
QuicPacketCount max_congestion_window =
GetQuicFlag(quic_max_congestion_window);
for (uint64_t i = 1; i < max_congestion_window; ++i) {
acked_packets.clear();
acked_packets.push_back(
AckedPacket(QuicPacketNumber(i), 1350, QuicTime::Zero()));
sender->OnCongestionEvent(true, sender->GetCongestionWindow(), clock_.Now(),
acked_packets, missing_packets, 0, 0);
}
EXPECT_EQ(max_congestion_window,
sender->GetCongestionWindow() / kDefaultTCPMSS);
}
TEST_F(TcpCubicSenderBytesTest, LimitCwndIncreaseInCongestionAvoidance) {
sender_ = std::make_unique<TcpCubicSenderBytesPeer>(&clock_, false);
int num_sent = SendAvailableSendWindow();
QuicByteCount saved_cwnd = sender_->GetCongestionWindow();
LoseNPackets(1);
EXPECT_GT(saved_cwnd, sender_->GetCongestionWindow());
for (int i = 1; i < num_sent; ++i) {
AckNPackets(1);
}
EXPECT_EQ(0u, bytes_in_flight_);
saved_cwnd = sender_->GetCongestionWindow();
num_sent = SendAvailableSendWindow();
while (sender_->GetCongestionWindow() == saved_cwnd) {
AckNPackets(1);
SendAvailableSendWindow();
}
EXPECT_GE(bytes_in_flight_, sender_->GetCongestionWindow());
saved_cwnd = sender_->GetCongestionWindow();
clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(2000));
AckNPackets(2);
EXPECT_EQ(saved_cwnd + kDefaultTCPMSS, sender_->GetCongestionWindow());
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/congestion_control/tcp_cubic_sender_bytes.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/congestion_control/tcp_cubic_sender_bytes_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |