| ID | Language | Repository Name | File Name | File Path in Repository | File Path for Unit Test |
| --- | --- | --- | --- | --- | --- |
| adb8539d-0c2b-41c4-b736-da697e10d787 | cpp | tensorflow/tensorflow | gpu_compiler | third_party/xla/xla/service/gpu/gpu_compiler.cc | third_party/xla/xla/service/gpu/gpu_compiler_test.cc |

Code:

#include "xla/service/gpu/gpu_compiler.h"
#include <algorithm>
#include <array>
#include <cstdint>
#include <functional>
#include <memory>
#include <new>
#include <optional>
#include <string>
#include <string_view>
#include <utility>
#include <variant>
#include <vector>
#include "absl/base/call_once.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "absl/types/variant.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/AsmParser/Parser.h"
#include "llvm/Bitcode/BitcodeReader.h"
#include "llvm/Bitcode/BitcodeWriter.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/DiagnosticPrinter.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Verifier.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/Transforms/Utils/SplitModule.h"
#include "mlir/IR/Diagnostics.h"
#include "mlir/IR/DialectRegistry.h"
#include "mlir/Support/LLVM.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_module_group.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/ir/hlo_schedule.h"
#include "xla/hlo/pass/hlo_pass_fix.h"
#include "xla/hlo/pass/hlo_pass_pipeline.h"
#include "xla/maybe_owning.h"
#include "xla/service/algebraic_simplifier.h"
#include "xla/service/all_gather_broadcast_reorder.h"
#include "xla/service/all_gather_combiner.h"
#include "xla/service/all_reduce_combiner.h"
#include "xla/service/all_reduce_contiguous.h"
#include "xla/service/all_reduce_folder.h"
#include "xla/service/all_reduce_promotion.h"
#include "xla/service/all_reduce_reassociate.h"
#include "xla/service/async_collective_creator.h"
#include "xla/service/batched_gather_scatter_normalizer.h"
#include "xla/service/batchnorm_expander.h"
#include "xla/service/bitcast_dtypes_expander.h"
#include "xla/service/broadcast_canonicalizer.h"
#include "xla/service/buffer_assignment.h"
#include "xla/service/call_inliner.h"
#include "xla/service/collective_permute_decomposer.h"
#include "xla/service/collective_pipeliner.h"
#include "xla/service/collective_quantizer.h"
#include "xla/service/collectives_schedule_linearizer.h"
#include "xla/service/comparison_expander.h"
#include "xla/service/compiler.h"
#include "xla/service/conditional_canonicalizer.h"
#include "xla/service/conditional_simplifier.h"
#include "xla/service/convert_memory_placement_to_internal_annotations.h"
#include "xla/service/convert_mover.h"
#include "xla/service/convolution_4d_expander.h"
#include "xla/service/convolution_pred_expander.h"
#include "xla/service/copy_insertion.h"
#include "xla/service/cpu_gpu_shape_verifier.h"
#include "xla/service/dot_decomposer.h"
#include "xla/service/dot_merger.h"
#include "xla/service/dump.h"
#include "xla/service/dynamic_dimension_inference.h"
#include "xla/service/dynamic_dimension_simplifier.h"
#include "xla/service/dynamic_index_splitter.h"
#include "xla/service/dynamic_padder.h"
#include "xla/service/eigh_expander.h"
#include "xla/service/executable.h"
#include "xla/service/export_hlo.h"
#include "xla/service/flatten_call_graph.h"
#include "xla/service/float_normalization.h"
#include "xla/service/float_support.h"
#include "xla/service/gather_expander.h"
#include "xla/service/gather_simplifier.h"
#include "xla/service/gpu/autotuning/autotuner_util.h"
#include "xla/service/gpu/autotuning/custom_kernel_fusion_autotuner.h"
#include "xla/service/gpu/compile_module_to_llvm_ir.h"
#include "xla/service/gpu/conv_layout_normalization.h"
#include "xla/service/gpu/cublas_cudnn.h"
#include "xla/service/gpu/execution_stream_assignment.h"
#include "xla/service/gpu/fusion_pipeline.h"
#include "xla/service/gpu/fusions/triton/triton_support.h"
#include "xla/service/gpu/gpu_executable.h"
#include "xla/service/gpu/gpu_float_support.h"
#include "xla/service/gpu/gpu_hlo_schedule.h"
#include "xla/service/gpu/gpu_latency_hiding_scheduler.h"
#include "xla/service/gpu/gpu_p2p_pipeliner.h"
#include "xla/service/gpu/gpu_spmd_pipeline.h"
#include "xla/service/gpu/hlo_fusion_stats.h"
#include "xla/service/gpu/ir_emission_utils.h"
#include "xla/service/gpu/ir_emitter_context.h"
#include "xla/service/gpu/ir_emitter_unnested.h"
#include "xla/service/gpu/kernel_reuse_cache.h"
#include "xla/service/gpu/matmul_utils.h"
#include "xla/service/gpu/metrics.h"
#include "xla/service/gpu/model/gpu_cost_model_stats_collection.h"
#include "xla/service/gpu/model/gpu_hlo_cost_analysis.h"
#include "xla/service/gpu/prepare_hlo_for_ir_emitting_pipeline.h"
#include "xla/service/gpu/reduction_utils.h"
#include "xla/service/gpu/runtime_intrinsics.h"
#include "xla/service/gpu/stream_executor_util.h"
#include "xla/service/gpu/transforms/algebraic_simplifier.h"
#include "xla/service/gpu/transforms/algorithm_checker.h"
#include "xla/service/gpu/transforms/all_gather_optimizer.h"
#include "xla/service/gpu/transforms/all_reduce_blueconnect.h"
#include "xla/service/gpu/transforms/all_reduce_splitter.h"
#include "xla/service/gpu/transforms/async_collective_annotator.h"
#include "xla/service/gpu/transforms/async_wrapper.h"
#include "xla/service/gpu/transforms/collective_permute_cycle_decomposer.h"
#include "xla/service/gpu/transforms/collective_permute_valid_iteration_annotator.h"
#include "xla/service/gpu/transforms/command_buffer_scheduling.h"
#include "xla/service/gpu/transforms/conv_rewriter.h"
#include "xla/service/gpu/transforms/convert_async_collectives_to_sync.h"
#include "xla/service/gpu/transforms/cudnn_custom_call_converter.h"
#include "xla/service/gpu/transforms/custom_kernel_fusion_rewriter.h"
#include "xla/service/gpu/transforms/dot_dimension_sorter.h"
#include "xla/service/gpu/transforms/dot_operand_converter.h"
#include "xla/service/gpu/transforms/double_buffer_loop_unrolling.h"
#include "xla/service/gpu/transforms/dynamic_slice_fusion_rewriter.h"
#include "xla/service/gpu/transforms/fusion_block_level_rewriter.h"
#include "xla/service/gpu/transforms/fusion_wrapper.h"
#include "xla/service/gpu/transforms/gemm_broadcast_folding_rewriter.h"
#include "xla/service/gpu/transforms/gemm_fusion.h"
#include "xla/service/gpu/transforms/gemm_rewriter.h"
#include "xla/service/gpu/transforms/gemv_rewriter.h"
#include "xla/service/gpu/transforms/layout_assignment.h"
#include "xla/service/gpu/transforms/move_copy_to_users.h"
#include "xla/service/gpu/transforms/pipelined_p2p_rewriter.h"
#include "xla/service/gpu/transforms/reduce_scatter_creator.h"
#include "xla/service/gpu/transforms/reduction_degenerate_dim_remover.h"
#include "xla/service/gpu/transforms/reduction_dimension_grouper.h"
#include "xla/service/gpu/transforms/reduction_layout_normalizer.h"
#include "xla/service/gpu/transforms/reduction_splitter.h"
#include "xla/service/gpu/transforms/rename_fusions.h"
#include "xla/service/gpu/transforms/sanitize_constant_names.h"
#include "xla/service/gpu/transforms/scatter_expander.h"
#include "xla/service/gpu/transforms/scatter_slice_simplifier.h"
#include "xla/service/gpu/transforms/softmax_rewriter_triton.h"
#include "xla/service/gpu/transforms/stream_attribute_annotator.h"
#include "xla/service/gpu/transforms/stream_attribute_async_wrapper.h"
#include "xla/service/gpu/transforms/topk_specializer.h"
#include "xla/service/gpu/transforms/topk_splitter.h"
#include "xla/service/gpu/transforms/transpose_dimension_grouper.h"
#include "xla/service/gpu/transforms/tree_reduction_rewriter.h"
#include "xla/service/gpu/transforms/triton_fusion_numerics_verifier.h"
#include "xla/service/gpu/transforms/windowed_einsum_handler.h"
#include "xla/service/hlo.pb.h"
#include "xla/service/hlo_computation_deduplicator.h"
#include "xla/service/hlo_constant_folding.h"
#include "xla/service/hlo_cost_analysis.h"
#include "xla/service/hlo_cse.h"
#include "xla/service/hlo_dataflow_analysis.h"
#include "xla/service/hlo_dce.h"
#include "xla/service/hlo_module_config.h"
#include "xla/service/hlo_rematerialization.h"
#include "xla/service/hlo_verifier.h"
#include "xla/service/host_memory_transfer_asyncifier.h"
#include "xla/service/host_offload_legalize.h"
#include "xla/service/host_offloader.h"
#include "xla/service/layout_assignment.h"
#include "xla/service/layout_normalization.h"
#include "xla/service/llvm_ir/llvm_util.h"
#include "xla/service/logistic_expander.h"
#include "xla/service/operand_upcaster.h"
#include "xla/service/optimization_barrier_expander.h"
#include "xla/service/optimize_input_output_buffer_alias.h"
#include "xla/service/qr_expander.h"
#include "xla/service/real_imag_expander.h"
#include "xla/service/reduce_decomposer.h"
#include "xla/service/reduce_scatter_combiner.h"
#include "xla/service/reduce_scatter_reassociate.h"
#include "xla/service/reduce_window_rewriter.h"
#include "xla/service/reshape_decomposer.h"
#include "xla/service/reshape_mover.h"
#include "xla/service/result_caster.h"
#include "xla/service/rng_bit_generator_expander.h"
#include "xla/service/rng_expander.h"
#include "xla/service/scatter_expander.h"
#include "xla/service/scatter_simplifier.h"
#include "xla/service/sharding_remover.h"
#include "xla/service/simplify_fp_conversions.h"
#include "xla/service/slice_sinker.h"
#include "xla/service/slow_operation_alarm.h"
#include "xla/service/sort_simplifier.h"
#include "xla/service/stable_sort_expander.h"
#include "xla/service/stochastic_convert_decomposer.h"
#include "xla/service/sub_byte_normalization.h"
#include "xla/service/topk_rewriter.h"
#include "xla/service/transpose_folding.h"
#include "xla/service/tuple_simplifier.h"
#include "xla/service/while_loop_all_reduce_code_motion.h"
#include "xla/service/while_loop_constant_sinking.h"
#include "xla/service/while_loop_simplifier.h"
#include "xla/service/while_loop_trip_count_annotator.h"
#include "xla/service/zero_sized_hlo_elimination.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/stream_executor/device_description.h"
#include "xla/stream_executor/device_description.pb.h"
#include "xla/stream_executor/dnn.h"
#include "xla/stream_executor/platform.h"
#include "xla/stream_executor/platform_manager.h"
#include "xla/stream_executor/semantic_version.h"
#include "xla/stream_executor/stream_executor.h"
#include "xla/util.h"
#include "xla/xla.pb.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/blocking_counter.h"
#include "tsl/platform/casts.h"
#include "tsl/platform/cpu_info.h"
#include "tsl/platform/env.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/numbers.h"
#include "tsl/platform/path.h"
#include "tsl/platform/protobuf.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/threadpool.h"
#include "tsl/profiler/lib/scoped_annotation.h"
#include "tsl/profiler/lib/traceme.h"
#ifdef PLATFORM_GOOGLE
#include "xla/hlo/experimental/auto_sharding/auto_sharding.h"
#endif
namespace xla {
namespace gpu {
namespace {
using MaybeOwningThreadPool = MaybeOwning<tsl::thread::ThreadPool>;
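// Returns a thread pool for parallel compilation. A `parallelism` of 0 falls
// back to `default_thread_pool` (or an owned pool of `default_parallelism`
// threads), 1 disables threading, and any larger value creates an owned pool
// of that size.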
MaybeOwningThreadPool CreateMaybeOwningThreadPool(
int parallelism, tsl::thread::ThreadPool* default_thread_pool,
int default_parallelism) {
CHECK_GE(parallelism, 0);
CHECK_GE(default_parallelism, 1);
CHECK(default_thread_pool == nullptr ||
default_thread_pool->CurrentThreadId() == -1);
auto create_thread_pool = [&](int num_threads) {
CHECK_GE(num_threads, 1);
return std::make_unique<tsl::thread::ThreadPool>(tsl::Env::Default(), "",
num_threads);
};
switch (parallelism) {
case 0:
if (default_thread_pool == nullptr && default_parallelism > 1) {
return MaybeOwningThreadPool(create_thread_pool(default_parallelism));
}
return MaybeOwningThreadPool(default_thread_pool);
case 1:
return MaybeOwningThreadPool(nullptr);
default:
return MaybeOwningThreadPool(create_thread_pool(parallelism));
}
}
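// Builds an AutotuneConfig from the attached StreamExecutor when one is
// available, otherwise from the deviceless target description.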
absl::StatusOr<AutotuneConfig> GetAutotuneConfig(
se::StreamExecutor* stream_exec, const DebugOptions& debug_options,
const GpuCompiler::CompileOptions& options,
const Compiler::TargetConfig& gpu_target_config) {
if (stream_exec) {
return AutotuneConfig{DeviceConfig{stream_exec, options.device_allocator},
debug_options};
}
return AutotuneConfig{DevicelessConfig{gpu_target_config.device_description},
debug_options};
}
se::GpuComputeCapability GetGpuVersion(const se::StreamExecutor* stream_exec) {
return stream_exec->GetDeviceDescription().gpu_compute_capability();
}
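// AOT compilation result that stores the optimized HLO module plus the
// serialized buffer assignment, asm text, binary, and DNN compiled graphs so
// a GpuExecutable can be reconstructed later without recompiling the device
// code.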
class GpuThunkAotCompilationResult : public AotCompilationResult {
public:
static absl::StatusOr<std::unique_ptr<GpuThunkAotCompilationResult>>
FromModule(const HloModule* hlo_module,
const BufferAssignment* buffer_assignment,
std::string_view asm_text, absl::Span<const uint8_t> binary,
const BinaryMap& dnn_compiled_graphs) {
CompilationResultProto proto;
*proto.mutable_hlo_module_with_config() = hlo_module->ToProtoWithConfig();
*proto.mutable_buffer_assignment() = buffer_assignment->ToProto();
proto.set_asm_text(std::string(asm_text));
proto.set_binary(binary.data(), binary.size());
proto.mutable_dnn_compiled_graphs()->insert(dnn_compiled_graphs.cbegin(),
dnn_compiled_graphs.cend());
return std::unique_ptr<GpuThunkAotCompilationResult>(
new GpuThunkAotCompilationResult(hlo_module->Clone(),
std::move(proto)));
}
static absl::StatusOr<std::unique_ptr<GpuThunkAotCompilationResult>>
FromString(const std::string& serialized) {
CompilationResultProto proto;
if (!proto.ParseFromString(serialized)) {
return Internal(
"Failed to parse serialized GpuThunkAotCompilationResult.");
}
TF_ASSIGN_OR_RETURN(
std::unique_ptr<HloModule> module,
HloModule::CreateFromProtoWithConfig(proto.hlo_module_with_config()));
return std::unique_ptr<GpuThunkAotCompilationResult>(
new GpuThunkAotCompilationResult(std::move(module), std::move(proto)));
}
absl::StatusOr<std::string> SerializeAsString() const override {
return proto_.SerializeAsString();
}
absl::StatusOr<std::unique_ptr<Executable>> LoadExecutable(
Compiler* compiler, const se::StreamExecutor* stream_exec) const override;
const HloModule* optimized_module() const override { return module_.get(); }
std::unique_ptr<HloModule> consume_optimized_module() override {
return std::move(module_);
}
private:
GpuThunkAotCompilationResult(std::unique_ptr<HloModule> module,
CompilationResultProto proto)
: module_(std::move(module)), proto_(std::move(proto)) {}
std::unique_ptr<HloModule> module_;
CompilationResultProto proto_;
};
}  // namespace
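// Rebuilds a GpuExecutable from the serialized proto: restores the HLO module
// and buffer assignment, re-emits thunks with IrEmitterUnnested, and reuses
// the stored asm text and binary as the device code.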
absl::StatusOr<std::unique_ptr<Executable>>
GpuThunkAotCompilationResult::LoadExecutable(
Compiler* compiler, const se::StreamExecutor* stream_exec) const {
TF_ASSIGN_OR_RETURN(
std::unique_ptr<HloModule> hlo_module,
HloModule::CreateFromProtoWithConfig(proto_.hlo_module_with_config()));
TF_ASSIGN_OR_RETURN(
std::unique_ptr<BufferAssignment> buffer_assignment,
BufferAssignment::FromProto(proto_.buffer_assignment(), hlo_module.get(),
compiler->BufferSizeBytesFunction(),
nullptr));
ExecutionStreamAssignment execution_stream_assignment(hlo_module.get());
std::vector<uint8_t> binary(proto_.binary().begin(), proto_.binary().end());
TF_ASSIGN_OR_RETURN(
se::Platform * platform,
se::PlatformManager::PlatformWithId(compiler->PlatformId()));
std::string platform_name = platform->Name();
const se::DeviceDescription& gpu_device_info =
stream_exec->GetDeviceDescription();
mlir::DialectRegistry registry;
auto mlir_context = std::make_unique<mlir::MLIRContext>(registry);
llvm::LLVMContext llvm_context;
auto* gpu_compiler = dynamic_cast<GpuCompiler*>(compiler);
if (gpu_compiler == nullptr) {
return Internal("Compiler is not a GpuCompiler.");
}
auto llvm_module = std::make_unique<llvm::Module>("", llvm_context);
llvm_module->setTargetTriple(gpu_compiler->target_triple());
llvm_module->setDataLayout(gpu_compiler->data_layout());
IrEmitterContext ir_emitter_context(
hlo_module.get(), buffer_assignment.get(), &execution_stream_assignment,
platform_name, gpu_device_info, mlir_context.get(), llvm_module.get(),
nullptr,
false);
absl::string_view cache_file_path =
hlo_module->config().debug_options().xla_gpu_kernel_cache_file();
if (!cache_file_path.empty() &&
hlo_module->config()
.debug_options()
.xla_gpu_enable_llvm_module_compilation_parallelism()) {
TF_RETURN_IF_ERROR(LoadCache(ir_emitter_context, cache_file_path));
}
auto ir_emitter = IrEmitterUnnested::Create(&ir_emitter_context);
TF_RETURN_IF_ERROR(
ir_emitter->EmitHloComputation(hlo_module->entry_computation()));
std::vector<GpuExecutable::ConstantInfo> constants =
std::move(ir_emitter_context.constants());
TF_ASSIGN_OR_RETURN(auto output_info,
GetOutputInfo(*hlo_module, *buffer_assignment));
const Shape& output_shape = hlo_module->result_shape();
int64_t debug_buffer_assignment_show_max =
hlo_module->config()
.debug_options()
.xla_debug_buffer_assignment_show_max();
TF_ASSIGN_OR_RETURN(
std::unique_ptr<GpuExecutable> executable,
GpuExecutable::Create(GpuExecutable::Params{
proto_.asm_text(),
binary,
BinaryMap(proto_.dnn_compiled_graphs().cbegin(),
proto_.dnn_compiled_graphs().cend()),
gpu_device_info.gpu_compute_capability(),
ir_emitter->ConsumeThunkSequence(),
std::move(constants),
std::move(output_info),
std::move(hlo_module->name()),
std::move(output_shape),
std::nullopt,
std::move(buffer_assignment),
debug_buffer_assignment_show_max,
std::move(hlo_module),
true}));
return executable;
}
GpuCompiler::GpuCompiler(se::Platform::Id platform_id,
const char* target_triple, const char* data_layout)
: platform_id_(platform_id),
target_triple_(target_triple),
data_layout_(data_layout),
pointer_size_(llvm::DataLayout(data_layout)
.getPointerSize(0)) {}
namespace {
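// Appends an HloVerifier invariant checker to the pipeline; with `debug_only`
// set, the checker is only registered for debug builds.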
void AddHloVerifier(HloPassPipeline* pipeline,
bool verify_unique_channel_ids = false,
HloVerifierOpts&& opts = {}, bool debug_only = false) {
opts.verify_unique_channel_ids = verify_unique_channel_ids;
std::unique_ptr<TargetVerifierMetadata> verifier_metadata =
std::make_unique<CpuGpuVerifierMetadata>(std::move(opts));
if (debug_only) {
pipeline->AddInvariantCheckerDebug<HloVerifier>(
std::move(verifier_metadata), "hlo verifier (debug)");
} else {
pipeline->AddInvariantChecker<HloVerifier>(std::move(verifier_metadata),
"hlo verifier");
}
}
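// Warns if the module is already scheduled, since running the optimization
// pipeline again on a scheduled module is usually unintended.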
void CheckNotScheduled(HloModule* hlo_module) {
if (hlo_module->has_schedule() &&
!hlo_module->config().debug_options().xla_disable_all_hlo_passes()) {
LOG(WARNING) << "\nThe current HLO module " << hlo_module->name()
<< " is scheduled and optimized. \n"
<< "It is not expected to run optimization passes again.\n"
"Use a test method like RunAndCompareNoHloPasses() or "
<< "the xla_disable_all_hlo_passes flag.";
}
}
void LogDebugOptions(HloModule* hlo_module) {
XLA_VLOG_LINES(
1, absl::StrFormat("GpuCompilationEnvironment of hlo_module %s:\n%s",
hlo_module->name(),
hlo_module->config().debug_options().DebugString()));
}
AlgebraicSimplifierOptions LayoutInsensitiveAlgebraicSimplifierOptions(
const HloModuleConfig& hlo_module_config,
const Compiler::TargetConfig& gpu_target_config,
AlgebraicSimplifierOptions opts_from_compiler) {
AlgebraicSimplifierOptions layout_insensitive_algsimp_opts =
opts_from_compiler;
layout_insensitive_algsimp_opts.set_conv_is_lowerable_callback(
ConvRewriter::ConvIsLowerable);
layout_insensitive_algsimp_opts.set_enable_dot_strength_reduction(
hlo_module_config.debug_options()
.xla_gpu_enable_dot_strength_reduction());
layout_insensitive_algsimp_opts.set_supports_non_canonical_dots(false);
layout_insensitive_algsimp_opts.set_minmax_propagate_nan(
!hlo_module_config.debug_options().xla_gpu_enable_fast_min_max());
layout_insensitive_algsimp_opts
.set_unconditionally_simplify_reduce_of_transpose_or_reshape(true);
if (gpu_target_config.platform_name == "ROCM") {
layout_insensitive_algsimp_opts.set_enable_conv_operand_swap(false);
}
layout_insensitive_algsimp_opts
.set_enable_unconditional_reduce_of_concat_replacement(false);
return layout_insensitive_algsimp_opts;
}
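// Passes that run before SPMD partitioning: custom-call conversion, inlining,
// and TopK decomposition/rewriting so the partitioner sees canonical HLO.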
absl::Status RunPreSPMDPartitionerPasses(HloModule* hlo_module) {
HloPassPipeline pre_spmd_pipeline("pre-spmd-partitioner");
pre_spmd_pipeline.AddPass<BatchedGatherScatterNormalizer>();
pre_spmd_pipeline.AddPass<CuDnnCustomCallConverter>();
pre_spmd_pipeline.AddPass<ConvertMemoryPlacementToInternalAnnotations>();
pre_spmd_pipeline.AddPass<CallInliner>();
pre_spmd_pipeline.AddPass<ZeroSizedHloElimination>();
pre_spmd_pipeline.AddPass<ConditionalCanonicalizer>();
pre_spmd_pipeline.AddPass<TopkDecomposer>([&](const HloInstruction* instr) {
return instr->opcode() == HloOpcode::kTopK;
});
pre_spmd_pipeline.AddPass<TopkRewriter>(
[](const HloSortInstruction*, int64_t) { return true; });
return pre_spmd_pipeline.Run(hlo_module).status();
}
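// Runs SPMD partitioning when num_partitions > 1 (with optional auto-sharding
// on internal builds); otherwise removes sharding annotations.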
absl::Status RunSPMDPasses(
HloModule* hlo_module, const Compiler::TargetConfig& gpu_target_config,
const AlgebraicSimplifierOptions& layout_insensitive_algsimp_opts) {
bool auto_sharding = hlo_module->config().use_auto_spmd_partitioning();
#ifndef PLATFORM_GOOGLE
if (auto_sharding) {
LOG(ERROR) << "GPU autosharding is not yet available in open source.";
}
#endif
const int64_t num_partitions = hlo_module->config().num_partitions();
if (num_partitions > 1) {
if (!hlo_module->config().use_spmd_partitioning()) {
return InvalidArgument(
"num_partitions=%d but SPMD partitioning not enabled.",
num_partitions);
}
HloPassPipeline spmd_pipeline("spmd-partitioner");
AddSPMDPasses(
hlo_module, layout_insensitive_algsimp_opts,
gpu_target_config.device_description.gpu_compute_capability(),
spmd_pipeline,
#ifdef PLATFORM_GOOGLE
[&](HloPassPipeline& pipeline) {
if (auto_sharding) {
AutoShardingOption option;
option.enable = true;
if (!hlo_module->config()
.auto_spmd_partitioning_mesh_shape()
.empty()) {
option.device_mesh_shape =
hlo_module->config().auto_spmd_partitioning_mesh_shape();
} else {
option.device_mesh_shape = {
gpu_target_config.device_description.core_count(), 1};
}
if (!hlo_module->config()
.auto_spmd_partitioning_mesh_ids()
.empty()) {
option.device_mesh_ids =
hlo_module->config().auto_spmd_partitioning_mesh_ids();
}
option.memory_budget_per_device =
hlo_module->config()
.debug_options()
.xla_gpu_auto_spmd_partitioning_memory_budget_gb() *
1024 * 1024 * 1024;
option.memory_budget_ratio =
hlo_module->config()
.debug_options()
.xla_gpu_auto_spmd_partitioning_memory_budget_ratio();
spmd_pipeline.AddPass<AutoSharding>(option);
}
});
#else
std::nullopt);
#endif
if (hlo_module->config()
.debug_options()
.xla_gpu_unsafe_pipelined_loop_annotator()) {
spmd_pipeline.AddPass<WhileLoopTripCountAnnotator>();
spmd_pipeline.AddPass<CollectivePermuteValidIterationAnnotator>();
}
return spmd_pipeline.Run(hlo_module).status();
} else {
HloPassPipeline sharding_removal_pipeline("sharding-removal");
sharding_removal_pipeline.AddPass<ShardingRemover>();
sharding_removal_pipeline.AddPass<HloDCE>();
return sharding_removal_pipeline.Run(hlo_module).status();
}
}
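// Main layout-insensitive optimization pipeline: expanders, dynamic padding,
// and two fixed-point simplification sub-pipelines.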
absl::Status RunOptimizationPasses(
HloModule* hlo_module, const Compiler::TargetConfig& gpu_target_config,
const AlgebraicSimplifierOptions& layout_insensitive_algsimp_opts) {
const DebugOptions& debug_options = hlo_module->config().debug_options();
HloPassPipeline pipeline("optimization");
AddHloVerifier(&pipeline,
!debug_options.xla_experimental_ignore_channel_id());
if (debug_options.xla_gpu_multi_streamed_windowed_einsum()) {
pipeline.AddPass<WindowedEinsumHandler>();
}
pipeline.AddPass<TopKSplitter>();
pipeline.AddPass<TopkSpecializer>();
pipeline.AddPass<TopkDecomposer>();
HloPredicate upcaster_filter = [&](const HloInstruction* instr) {
const auto* cuda_cc = std::get_if<se::CudaComputeCapability>(
&gpu_target_config.device_description.gpu_compute_capability());
if (cuda_cc != nullptr &&
!cuda_cc->IsAtLeast(se::CudaComputeCapability::VOLTA)) {
return true;
}
return !gpu::IsMatrixMultiplication(*instr);
};
pipeline.AddPass<DotDimensionSorter>();
pipeline.AddPass<DotDecomposer>();
pipeline.AddPass<ResultCaster>(upcaster_filter);
pipeline.AddPass<OperandUpcaster>(upcaster_filter);
pipeline.AddPass<DotOperandConverter>();
pipeline.AddPass<SubByteNormalization>(
SubByteNormalization::SET_ELEMENT_SIZE);
pipeline.AddPass<RngExpander>();
pipeline.AddPass<RngBitGeneratorExpander>(RandomAlgorithm::RNG_PHILOX);
pipeline.AddPass<ComparisonExpander>(std::array{std::make_pair(BF16, F32)});
pipeline.AddPass<ZeroSizedHloElimination>();
if (RequireDeterminism(hlo_module->config())) {
pipeline.AddPass<ScatterExpander>(
ScatterExpander::kEliminateIndeterministicScatters);
}
pipeline.AddPass<GpuScatterExpander>();
pipeline.AddPass<QrExpander>();
pipeline.AddPass<EighExpander>();
pipeline.AddPass<DynamicIndexSplitter>();
pipeline.AddPass<CallInliner>();
pipeline.AddPass<StochasticConvertDecomposer>();
pipeline.AddPass<Convolution4DExpander>();
pipeline.AddPass<ConvolutionPredExpander>();
pipeline.AddPass<StableSortExpander>();
pipeline.AddPass<BatchNormExpander>(
/*rewrite_training_op=*/true,
/*rewrite_inference_op=*/true,
/*rewrite_grad_op=*/true);
pipeline.AddPass<LogisticExpander>();
pipeline.AddPass<ConditionalCanonicalizer>();
pipeline.AddPass<DynamicDimensionSimplifier>();
if (debug_options.xla_reduce_window_rewrite_base_length() != 0) {
pipeline.AddPass<HloPassFix<ReduceWindowRewriter>>(
debug_options.xla_reduce_window_rewrite_base_length());
}
DynamicPadderOptions dynamic_padder_options;
switch (debug_options.xla_gpu_shape_checks()) {
case DebugOptions::IGNORE:
dynamic_padder_options.shape_check_mode =
DynamicDimensionInference::ShapeCheckMode::kIgnore;
break;
case DebugOptions::RUNTIME: {
dynamic_padder_options.shape_check_mode =
DynamicDimensionInference::ShapeCheckMode::kRuntime;
dynamic_padder_options.assertion_generator = [&](HloInstruction* inst) {
auto created = Cast<HloCustomCallInstruction>(
inst->parent()->AddInstruction(HloInstruction::CreateCustomCall(
ShapeUtil::MakeTokenShape(), {inst}, kXlaGpuAssertCustomCallTag,
"Buffers have different size at runtime",
API_VERSION_STATUS_RETURNING)));
created->set_custom_call_has_side_effect(true);
};
break;
}
case DebugOptions::COMPILE_TIME:
dynamic_padder_options.shape_check_mode =
DynamicDimensionInference::ShapeCheckMode::kCompileTime;
break;
default:
LOG(FATAL) << "Unreachable";
}
pipeline.AddPass<DynamicPadder>(dynamic_padder_options);
se::GpuComputeCapability gpu_version =
gpu_target_config.device_description.gpu_compute_capability();
[&, &pipeline =
pipeline.AddPass<HloPassFix<HloPassPipeline>>("simplification")] {
AddHloVerifier(&pipeline,
!debug_options.xla_experimental_ignore_channel_id(),
HloVerifierOpts{}, true);
pipeline.AddPass<ZeroSizedHloElimination>();
pipeline.AddPass<GatherSimplifier>();
pipeline.AddPass<GatherExpander>(GatherExpander::kEliminateSimpleGathers);
pipeline.AddPass<ScatterSimplifier>();
pipeline.AddPass<ScatterExpander>(
ScatterExpander::kEliminateSimpleScatters);
pipeline.AddPass<ScatterSliceSimplifier>();
pipeline.AddPass<GpuAlgebraicSimplifier>(layout_insensitive_algsimp_opts,
gpu_version);
pipeline.AddPass<BitcastDtypesExpander>();
pipeline.AddPass<DotDimensionSorter>();
pipeline.AddPass<DotDecomposer>();
pipeline.AddPass<DotMerger>(
int64_t{debug_options.xla_gpu_dot_merger_threshold_mb()} << 20);
pipeline.AddPass<SortSimplifier>();
pipeline.AddPass<TupleSimplifier>();
pipeline.AddPass<WhileLoopConstantSinking>();
pipeline.AddPass<WhileLoopSimplifier>();
pipeline.AddPass<SliceSinker>();
ReshapeMoverOptions reshape_mover_options;
reshape_mover_options.reshape_of_1d_broadcast_is_cheap = true;
pipeline.AddPass<ReshapeMover>(reshape_mover_options);
pipeline.AddPass<HloConstantFolding>();
pipeline.AddPass<ConditionalSimplifier>();
pipeline.AddPass<RealImagExpander>();
pipeline.AddPass<TransposeFolding>(CanFoldTransposeOperandIntoDot);
pipeline.AddPass<HloCSE>(false);
pipeline.AddPass<HloDCE>();
}();
[&, &pipeline =
pipeline.AddPass<HloPassFix<HloPassPipeline>>("simplification-2")] {
pipeline.AddPass<ConvertMover>();
pipeline.AddPass<GpuAlgebraicSimplifier>(layout_insensitive_algsimp_opts,
gpu_version);
}();
pipeline.AddPass<HloComputationDeduplicator>(
/*mark_fusion_duplications=*/false);
return pipeline.Run(hlo_module).status();
}
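// Adds CollectivePipeliner passes for all-reduce (forward), all-gather
// (backward), and reduce-scatter (forward), each gated by the corresponding
// xla_gpu_enable_pipelined_* flag.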
absl::Status AddCollectivePipelinerPasses(
const DebugOptions& debug_options, HloPassPipeline& collectives_pipeline) {
if (debug_options.xla_gpu_enable_pipelined_collectives() ||
debug_options.xla_gpu_enable_pipelined_all_reduce()) {
CollectivePipeliner::Config config{
0,
INT64_MAX,
true,
false,
true,
CollectivePipeliner::PipeliningDirection::kForward,
HloPredicateIsOp<HloOpcode::kAllReduce>,
HloPredicateTrue,
HloPredicateFalse};
collectives_pipeline.AddPass<CollectivePipeliner>(config);
}
if (debug_options.xla_gpu_enable_pipelined_collectives() ||
debug_options.xla_gpu_enable_pipelined_all_gather()) {
CollectivePipeliner::Config config{
0,
INT64_MAX,
true,
false,
true,
CollectivePipeliner::PipeliningDirection::kBackward,
HloPredicateIsOp<HloOpcode::kAllGather>,
HloPredicateTrue,
HloPredicateFalse,
HloPredicateFalse,
false,
std::nullopt,
std::nullopt,
true,
};
collectives_pipeline.AddPass<CollectivePipeliner>(config);
}
if (debug_options.xla_gpu_enable_pipelined_collectives() ||
debug_options.xla_gpu_enable_pipelined_reduce_scatter()) {
CollectivePipeliner::Config config{
0,
INT64_MAX,
true,
false,
true,
CollectivePipeliner::PipeliningDirection::kForward,
HloPredicateIsOp<HloOpcode::kReduceScatter>,
HloPredicateTrue,
HloPredicateFalse};
collectives_pipeline.AddPass<CollectivePipeliner>(config);
}
return absl::OkStatus();
}
absl::Status RunPostLayoutCollectivePipelinerPasses(HloModule* hlo_module) {
const DebugOptions& debug_options = hlo_module->config().debug_options();
HloPassPipeline collectives_pipeline("collective-pipeliner-optimizations");
if (debug_options.xla_gpu_run_post_layout_collective_pipeliner()) {
TF_RETURN_IF_ERROR(
AddCollectivePipelinerPasses(debug_options, collectives_pipeline));
collectives_pipeline.AddPass<WhileLoopTripCountAnnotator>();
collectives_pipeline.AddPass<FlattenCallGraph>();
}
return collectives_pipeline.Run(hlo_module).status();
}
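// Collective optimizations: folding, reassociation, reduce-scatter creation,
// collective-permute decomposition, and optional P2P pipelining.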
absl::Status RunCollectiveOptimizationPasses(
HloModule* hlo_module,
const AlgebraicSimplifierOptions& layout_insensitive_algsimp_opts,
se::GpuComputeCapability gpu_version) {
const DebugOptions& debug_options = hlo_module->config().debug_options();
HloPassPipeline collectives_pipeline("collective-optimizations");
collectives_pipeline.AddPass<AllReduceFolder>();
collectives_pipeline.AddPass<AllReduceSplitter>();
collectives_pipeline.AddPass<AllGatherOptimizer>();
collectives_pipeline.AddPass<AllReduceReassociate>(
debug_options.xla_gpu_enable_reassociation_for_converted_ar());
collectives_pipeline.AddPass<ReduceScatterReassociate>();
collectives_pipeline.AddPass<WhileLoopAllReduceCodeMotion>(
debug_options
.xla_gpu_enable_while_loop_reduce_scatter_code_motion());
if (!debug_options.xla_gpu_run_post_layout_collective_pipeliner()) {
TF_RETURN_IF_ERROR(
AddCollectivePipelinerPasses(debug_options, collectives_pipeline));
}
collectives_pipeline.AddPass<ReduceScatterCreator>();
collectives_pipeline.AddPass<CollectivePermuteCycleDecomposer>(
hlo_module->config()
.debug_options()
.xla_gpu_collective_permute_decomposer_threshold());
collectives_pipeline.AddPass<CollectivePermuteDecomposer>(
hlo_module->config()
.debug_options()
.xla_gpu_collective_permute_decomposer_threshold());
if (hlo_module->config()
.debug_options()
.xla_gpu_enable_pipelined_collectives() ||
hlo_module->config().debug_options().xla_gpu_enable_pipelined_p2p()) {
AddP2PPipeliner(collectives_pipeline);
}
collectives_pipeline.AddPass<GpuAlgebraicSimplifier>(
layout_insensitive_algsimp_opts, gpu_version);
collectives_pipeline.AddPass<AllGatherBroadcastReorder>();
const std::pair<PrimitiveType, PrimitiveType> ar_promoted_types[] = {
{U16, U32}, {S16, S32}};
collectives_pipeline.AddPass<AllReducePromotion>(ar_promoted_types);
collectives_pipeline.AddPass<HloDCE>();
collectives_pipeline.AddPass<CollectiveQuantizer>();
collectives_pipeline.AddPass<HloDCE>();
collectives_pipeline.AddPass<WhileLoopTripCountAnnotator>();
return collectives_pipeline.Run(hlo_module).status();
}
absl::Status RunLayoutAssignmentPasses(HloModule* hlo_module,
se::GpuComputeCapability gpu_version,
se::dnn::VersionInfo dnn_version) {
HloPassPipeline pipeline("layout assignment");
pipeline.AddPass<FlattenCallGraph>();
ChannelLayoutConstraints layout_constraints;
pipeline.AddPass<GpuLayoutAssignment>(
hlo_module->mutable_entry_computation_layout(), gpu_version, dnn_version,
&layout_constraints);
pipeline.AddPass<SubByteNormalization>(
SubByteNormalization::SET_ELEMENT_SIZE);
pipeline.AddPass<OptimizeInputOutputBufferAlias>(true);
pipeline.AddPass<HostOffloadLegalize>(
static_cast<int64_t>(stream_executor::MemoryType::kHost),
true);
return pipeline.Run(hlo_module).status();
}
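// Runs the vertical and horizontal fusion pipelines, optionally collecting
// cost-model statistics and dumping fusion stats at VLOG(2).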
absl::Status RunFusionPasses(HloModule* hlo_module,
const Compiler::TargetConfig& gpu_target_config,
tsl::thread::ThreadPool* thread_pool,
HloCostAnalysis::ShapeSizeFunction shape_size_fn) {
const se::DeviceDescription& gpu_device_info =
gpu_target_config.device_description;
TF_RETURN_IF_ERROR(FusionPipeline(hlo_module->config().debug_options(),
shape_size_fn, thread_pool, gpu_device_info)
.Run(hlo_module)
.status());
if (hlo_module->config().debug_options().xla_gpu_collect_cost_model_stats()) {
GpuHloCostAnalysis::Options cost_analysis_options{
shape_size_fn,
{},
{},
true};
HloPassPipeline post_fusion_analysis("post_fusion_analysis");
post_fusion_analysis.AddPass<GpuCostModelStatsCollection>(
gpu_device_info, cost_analysis_options);
TF_RETURN_IF_ERROR(post_fusion_analysis.Run(hlo_module).status());
}
TF_RETURN_IF_ERROR(
HorizontalFusionPipeline(gpu_device_info).Run(hlo_module).status());
if (VLOG_IS_ON(2)) {
HloFusionStatsVisitor stats;
TF_RETURN_IF_ERROR(hlo_module->entry_computation()->Accept(&stats));
VLOG(2) << stats.ToString();
}
return absl::OkStatus();
}
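// Picks a while-loop unrolling strategy (double buffering, full unroll, or
// auto) from the debug options and adds the corresponding passes.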
void AddDoubleBufferingPasses(const DebugOptions& opts,
HloPassPipeline& pipeline) {
std::optional<DoubleBufferLoopUnrolling::UnrollStrategy> unroll_strategy =
std::nullopt;
if (opts.xla_gpu_enable_while_loop_double_buffering()) {
unroll_strategy = DoubleBufferLoopUnrolling::UnrollStrategy::kDoubleBuffer;
}
if (opts.xla_gpu_enable_while_loop_unrolling() ==
DebugOptions::WHILE_LOOP_UNROLLING_DOUBLE_BUFFER) {
unroll_strategy = DoubleBufferLoopUnrolling::UnrollStrategy::kDoubleBuffer;
}
if (opts.xla_gpu_enable_while_loop_unrolling() ==
DebugOptions::WHILE_LOOP_UNROLLING_FULL_UNROLL) {
LOG_IF(WARNING, unroll_strategy != std::nullopt)
<< "Overriding double buffering set via "
"`xla_gpu_enable_while_loop_double_buffering` flag.";
unroll_strategy = DoubleBufferLoopUnrolling::UnrollStrategy::kFullUnroll;
}
if (opts.xla_gpu_enable_while_loop_unrolling() ==
DebugOptions::WHILE_LOOP_UNROLLING_AUTO_UNROLL &&
opts.xla_gpu_enable_heuristic_pass_configuration() &&
!opts.xla_gpu_enable_while_loop_double_buffering()) {
unroll_strategy = DoubleBufferLoopUnrolling::UnrollStrategy::kAuto;
}
if (unroll_strategy != std::nullopt) {
pipeline.AddPass<WhileLoopSimplifier>();
pipeline.AddPass<DoubleBufferLoopUnrolling>(*unroll_strategy);
pipeline.AddPass<TupleSimplifier>();
pipeline.AddPass<HloDCE>();
}
}
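// Post-fusion passes: collective combiners, custom-kernel replacements,
// optional BlueConnect all-reduce decomposition, and loop double buffering.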
absl::Status RunPostFusionPasses(
HloModule* hlo_module,
std::function<absl::Status(HloPassPipeline*, const DebugOptions&)>
add_custom_kernel_replacement_passes) {
const DebugOptions& opts = hlo_module->config().debug_options();
HloPassPipeline pipeline("post-fusion optimization");
pipeline.AddPass<RenameFusions>();
pipeline.AddPass<AllGatherCombiner>(
opts.xla_gpu_all_gather_combine_threshold_bytes(),
256,
opts.xla_gpu_enable_all_gather_combine_by_dim());
pipeline.AddPass<AllReduceCombiner>(
opts.xla_gpu_all_reduce_combine_threshold_bytes(),
256);
pipeline.AddPass<ReduceScatterCombiner>(
opts.xla_gpu_reduce_scatter_combine_threshold_bytes(),
256,
opts.xla_gpu_enable_reduce_scatter_combine_by_dim());
pipeline.AddPass<AllReduceContiguous>();
TF_RETURN_IF_ERROR(add_custom_kernel_replacement_passes(&pipeline, opts));
int32_t blueconnect_num_devices_per_host =
hlo_module->config()
.debug_options()
.xla_gpu_all_reduce_blueconnect_num_devices_per_host();
if (blueconnect_num_devices_per_host > 0) {
pipeline.AddPass<AllReduceBlueConnect>(blueconnect_num_devices_per_host);
}
AddDoubleBufferingPasses(opts, pipeline);
return pipeline.Run(hlo_module).status();
}
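// Converts collectives to async form and annotates which ones may remain
// asynchronous, honoring xla_gpu_disable_async_collectives.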
absl::Status RunPostFusionCollectiveOptimizationPasses(HloModule* hlo_module) {
HloPassPipeline pipeline("post-fusion-collectives optimization");
AsyncCollectiveCreator::CollectiveCreatorConfig config;
config.convert_all_reduce = HloPredicateTrue;
config.convert_collective_broadcast = HloPredicateTrue;
config.convert_collective_permute = HloPredicateTrue;
config.convert_all_gather = HloPredicateTrue;
config.convert_reduce_scatter = HloPredicateTrue;
config.convert_all_to_all = HloPredicateTrue;
pipeline.AddPass<AsyncCollectiveCreator>(std::move(config));
absl::flat_hash_set<DebugOptions::CollectiveOpType> disabled_async_ops;
for (auto collective_op_type : hlo_module->config()
.debug_options()
.xla_gpu_disable_async_collectives()) {
disabled_async_ops.insert(
static_cast<DebugOptions::CollectiveOpType>(collective_op_type));
}
auto convert_to_async = [&disabled_async_ops](const HloInstruction* inst) {
switch (inst->opcode()) {
case HloOpcode::kAllReduceStart:
return !disabled_async_ops.contains(DebugOptions::ALLREDUCE);
case HloOpcode::kCollectivePermuteStart:
return !disabled_async_ops.contains(DebugOptions::COLLECTIVEPERMUTE);
case HloOpcode::kAllGatherStart:
return !disabled_async_ops.contains(DebugOptions::ALLGATHER);
case HloOpcode::kAsyncStart: {
auto async_inst = Cast<HloAsyncInstruction>(inst);
switch (async_inst->async_wrapped_opcode()) {
case HloOpcode::kCollectiveBroadcast:
return !disabled_async_ops.contains(
DebugOptions::COLLECTIVEBROADCAST);
case HloOpcode::kReduceScatter:
return !disabled_async_ops.contains(DebugOptions::REDUCESCATTER);
case HloOpcode::kAllToAll:
return !disabled_async_ops.contains(DebugOptions::ALLTOALL);
default:
return false;
}
}
default:
return false;
}
};
pipeline.AddPass<AsyncCollectiveAnnotator>(convert_to_async);
return pipeline.Run(hlo_module).status();
}
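// Layout-sensitive algebraic simplification after fusion, plus stream
// annotation for multi-streamed windowed einsum.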
absl::Status RunPostFusionSimplificationPasses(
HloModule* hlo_module,
const AlgebraicSimplifierOptions& layout_insensitive_algsimp_opts,
se::GpuComputeCapability gpu_version) {
HloPassPipeline pipeline("post-fusion-simplification-pipeline optimization");
AlgebraicSimplifierOptions options = layout_insensitive_algsimp_opts;
options.set_is_layout_sensitive(true);
pipeline.AddPass<GpuAlgebraicSimplifier>(options, gpu_version);
pipeline.AddPass<HloComputationDeduplicator>(
/*mark_fusion_duplications=*/true);
if (hlo_module->config()
.debug_options()
.xla_gpu_multi_streamed_windowed_einsum()) {
pipeline.AddPass<StreamAttributeAnnotator>();
pipeline.AddPass<StreamAttributeAsyncWrapper>();
}
return pipeline.Run(hlo_module).status();
}
absl::Status RunPostFusionVerificationPasses(
HloModule* hlo_module, se::StreamExecutor* stream_exec,
const GpuCompiler::CompileOptions& options,
const Compiler::TargetConfig& gpu_target_config) {
HloPassPipeline pipeline("post-fusion-verification-pipeline optimization");
if (hlo_module->config()
.debug_options()
.xla_gpu_verify_triton_fusion_numerics()) {
TF_ASSIGN_OR_RETURN(
AutotuneConfig autotune_config,
GetAutotuneConfig(stream_exec, hlo_module->config().debug_options(),
options, gpu_target_config));
pipeline.AddPass<TritonFusionNumericsVerifier>(autotune_config);
}
return pipeline.Run(hlo_module).status();
}
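// Layout normalization: reshape decomposition, copy movement, layout
// normalization for custom calls, and a fixed-point simplifier run.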
absl::Status RunLayoutNormalizationPasses(
HloModule* hlo_module, const se::GpuComputeCapability& gpu_version) {
HloPassPipeline layout_normalization_pipeline("layout normalization");
const DebugOptions& debug_options = hlo_module->config().debug_options();
AlgebraicSimplifierOptions opts =
GpuCompiler::GetAlgebraicSimplifierOptions(hlo_module->config());
opts.set_supports_non_canonical_dots(false);
opts.set_is_layout_sensitive(true);
opts.set_enable_conv_operand_swap(false);
opts.set_minmax_propagate_nan(!debug_options.xla_gpu_enable_fast_min_max());
opts.set_enable_unconditional_reduce_of_concat_replacement(false);
layout_normalization_pipeline.AddPass<ReshapeDecomposer>();
layout_normalization_pipeline.AddPass<HloPassFix<MoveCopyToUsers>>();
layout_normalization_pipeline.AddPass<LayoutNormalization>(
&NormalizeLayoutForGpuCustomCalls);
layout_normalization_pipeline.AddPass<HloPassFix<GpuAlgebraicSimplifier>>(
opts, gpu_version);
layout_normalization_pipeline.AddPass<BroadcastCanonicalizer>();
layout_normalization_pipeline.AddPass<ScatterSimplifier>();
return layout_normalization_pipeline.Run(hlo_module).status();
}
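// Wraps cuBLAS GEMMs and Triton dot fusions into async ops when
// xla_gpu_async_dot is enabled.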
absl::Status RunAsyncDotPasses(HloModule* hlo_module) {
HloPassPipeline pipeline("async-wrapper");
const DebugOptions& debug_options = hlo_module->config().debug_options();
if (debug_options.xla_gpu_async_dot()) {
pipeline.AddPass<AsyncWrapper>([](HloInstruction* instruction) {
if (IsCublasGemm(*instruction)) {
return true;
}
if (instruction->called_computations().size() == 1 &&
IsTritonFusedComputation(
*instruction->called_computations().front())) {
return true;
}
return false;
});
}
return pipeline.Run(hlo_module).status();
}
absl::Status RunDynamicSliceFusionPasses(HloModule* hlo_module,
se::Platform::Id platform_id) {
if (hlo_module->config()
.debug_options()
.xla_gpu_enable_dynamic_slice_fusion()) {
HloPassPipeline pipeline("dynamic-slice");
TF_ASSIGN_OR_RETURN(se::Platform * platform,
se::PlatformManager::PlatformWithId(platform_id));
pipeline.AddPass<DynamicSliceFusionRewriter>(platform->Name());
TF_RETURN_IF_ERROR(pipeline.Run(hlo_module).status());
}
return absl::OkStatus();
}
}  // namespace
absl::Status GpuCompiler::RunCollectiveScheduleLinearizerPasses(
HloModule* hlo_module, se::StreamExecutor* stream_exec) {
HloPassPipeline pipeline("collective-schedule-linearizer");
pipeline.AddPass<CollectivesScheduleLinearizer>(
[this, stream_exec](const HloModule* module) {
return RequiresCollectiveScheduleLinearizer(module, stream_exec);
});
return pipeline.Run(hlo_module).status();
}
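// Top-level HLO optimization driver: runs the SPMD, optimization, collective,
// layout, fusion, and post-fusion pipelines defined above, in that order.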
absl::Status GpuCompiler::OptimizeHloModule(
HloModule* hlo_module, se::StreamExecutor* stream_exec,
const CompileOptions& options, const TargetConfig& gpu_target_config) {
tsl::profiler::TraceMe traceme("GpuCompiler::OptimizeHloModule");
CheckNotScheduled(hlo_module);
LogDebugOptions(hlo_module);
MaybeOwningThreadPool thread_pool = CreateMaybeOwningThreadPool(
hlo_module->config()
.debug_options()
.xla_gpu_force_compilation_parallelism(),
options.thread_pool,
tsl::port::MaxParallelism());
AlgebraicSimplifierOptions layout_insensitive_algsimp_opts =
LayoutInsensitiveAlgebraicSimplifierOptions(
hlo_module->config(), gpu_target_config,
GetAlgebraicSimplifierOptions(hlo_module->config()));
TF_RETURN_IF_ERROR(RunPreSPMDPartitionerPasses(hlo_module));
TF_RETURN_IF_ERROR(RunSPMDPasses(hlo_module, gpu_target_config,
layout_insensitive_algsimp_opts));
TF_RETURN_IF_ERROR(RunOptimizationPasses(hlo_module, gpu_target_config,
layout_insensitive_algsimp_opts));
se::GpuComputeCapability gpu_version =
gpu_target_config.device_description.gpu_compute_capability();
TF_RETURN_IF_ERROR(RunCollectiveOptimizationPasses(
hlo_module, layout_insensitive_algsimp_opts, gpu_version));
se::dnn::VersionInfo dnn_version = gpu_target_config.dnn_version_info;
if (stream_exec != nullptr) {
gpu_version = GetGpuVersion(stream_exec);
TF_ASSIGN_OR_RETURN(dnn_version, GetDnnVersionInfo(stream_exec));
}
TF_RETURN_IF_ERROR(OptimizeHloConvolutionCanonicalization(
hlo_module, gpu_version, dnn_version, options.device_allocator,
gpu_target_config.device_description.runtime_version()));
TF_RETURN_IF_ERROR(
RunLayoutAssignmentPasses(hlo_module, gpu_version, dnn_version));
TF_RETURN_IF_ERROR(RunLayoutNormalizationPasses(hlo_module, gpu_version));
TF_RETURN_IF_ERROR(OptimizeHloPostLayoutAssignment(
hlo_module, stream_exec, options, gpu_target_config,
thread_pool.get_mutable()));
TF_RETURN_IF_ERROR(RunPostLayoutCollectivePipelinerPasses(hlo_module));
TF_RETURN_IF_ERROR(RunDynamicSliceFusionPasses(hlo_module, PlatformId()));
TF_RETURN_IF_ERROR(RunFusionPasses(hlo_module, gpu_target_config,
thread_pool.get_mutable(),
ShapeSizeBytesFunction()));
TF_RETURN_IF_ERROR(RunPostFusionPasses(
hlo_module,
[this](HloPassPipeline* pipeline, const DebugOptions& debug_options) {
return AddCustomKernelReplacementPasses(pipeline, debug_options);
}));
TF_RETURN_IF_ERROR(RunPostFusionCollectiveOptimizationPasses(hlo_module));
TF_RETURN_IF_ERROR(RunPostFusionSimplificationPasses(
hlo_module, layout_insensitive_algsimp_opts, gpu_version));
TF_RETURN_IF_ERROR(RunPostFusionVerificationPasses(
hlo_module, stream_exec, options, gpu_target_config));
TF_RETURN_IF_ERROR(
RunCollectiveScheduleLinearizerPasses(hlo_module, stream_exec));
TF_RETURN_IF_ERROR(RunAsyncDotPasses(hlo_module));
return absl::OkStatus();
}
AlgebraicSimplifierOptions GpuCompiler::GetAlgebraicSimplifierOptions(
const HloModuleConfig& config) {
AlgebraicSimplifierOptions opts;
opts.set_enable_dot_strength_reduction(
config.debug_options().xla_gpu_enable_dot_strength_reduction());
return opts;
}
absl::Status GpuCompiler::PrepareHloModuleForIrEmitting(HloModule* hlo_module) {
return PrepareHloModuleForIrEmittingPipeline(*hlo_module, GetCanShareBuffer())
.Run(hlo_module)
.status();
}
namespace {
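// Adds GemmRewriter twice: once for FP8 GEMMs and once for all other dtypes.
// Bias fusion is disabled when dots are wrapped as async ops.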
void AddGemmRewriterPasses(HloPassPipeline& pipeline,
const DebugOptions& debug_options,
const se::GpuComputeCapability gpu_version,
const se::SemanticVersion& toolkit_version) {
GemmRewriterOptions::BiasMode bias_mode =
GemmRewriterOptions::BiasMode::kBias;
if (debug_options.xla_gpu_async_dot()) {
bias_mode = GemmRewriterOptions::BiasMode::kNoBias;
}
pipeline.AddPass<GemmRewriter>(
gpu_version, toolkit_version,
GemmRewriterOptions{GemmRewriterOptions::DType::kFp8Only, bias_mode});
pipeline.AddPass<GemmRewriter>(
gpu_version, toolkit_version,
GemmRewriterOptions{GemmRewriterOptions::DType::kNonFp8Only, bias_mode});
}
}  // namespace
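// Post-layout-assignment optimizations: float normalization, GEMM/Triton
// rewrites, autotuning, reduction canonicalization, and a final
// layout-sensitive verification in optimized builds.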
absl::Status GpuCompiler::OptimizeHloPostLayoutAssignment(
HloModule* hlo_module, se::StreamExecutor* stream_exec,
const CompileOptions& options, const TargetConfig& gpu_target_config,
tsl::thread::ThreadPool* thread_pool) {
const DebugOptions& debug_options = hlo_module->config().debug_options();
const se::GpuComputeCapability gpu_version =
gpu_target_config.device_description.gpu_compute_capability();
const AlgebraicSimplifierOptions simplifier_options = [&] {
AlgebraicSimplifierOptions opts =
GetAlgebraicSimplifierOptions(hlo_module->config());
opts.set_supports_non_canonical_dots(false);
opts.set_is_layout_sensitive(true);
opts.set_enable_conv_operand_swap(false);
opts.set_minmax_propagate_nan(!debug_options.xla_gpu_enable_fast_min_max());
opts.set_enable_unconditional_reduce_of_concat_replacement(false);
return opts;
}();
TF_ASSIGN_OR_RETURN(AutotuneConfig autotune_config,
GetAutotuneConfig(stream_exec, debug_options, options,
gpu_target_config));
const GpuFloatSupport bf16_support(gpu_version, BF16);
const GpuFloatSupport f8e5m2_support(gpu_version, F8E5M2, F16);
const GpuFloatSupport f8e4m3_support(gpu_version, F8E4M3, F16);
const GpuFloatSupport f8e4m3fn_support(gpu_version, F8E4M3FN, F16);
const FloatSupport f8e4m3b11fnuz_support(F8E4M3B11FNUZ, F16);
const GpuFloatSupport f8e5m2fnuz_support(gpu_version, F8E5M2FNUZ, F16);
const GpuFloatSupport f8e4m3fnuz_support(gpu_version, F8E4M3FNUZ, F16);
const GpuFloatSupport f8e3m4_support(gpu_version, F8E3M4, F16);
auto add_float_normalization = [&](HloPassPipeline& pipeline) {
auto& sub_pipeline =
pipeline.AddPass<HloPassPipeline>("float_normalization");
sub_pipeline.AddPass<FloatNormalization>(&bf16_support);
sub_pipeline.AddPass<FloatNormalization>(&f8e5m2_support);
sub_pipeline.AddPass<FloatNormalization>(&f8e4m3_support);
sub_pipeline.AddPass<FloatNormalization>(&f8e4m3fn_support);
sub_pipeline.AddPass<FloatNormalization>(&f8e4m3b11fnuz_support);
sub_pipeline.AddPass<FloatNormalization>(&f8e5m2fnuz_support);
sub_pipeline.AddPass<FloatNormalization>(&f8e4m3fnuz_support);
sub_pipeline.AddPass<FloatNormalization>(&f8e3m4_support);
if (debug_options.xla_allow_excess_precision()) {
sub_pipeline.AddPass<SimplifyFPConversions>();
}
};
{
HloPassPipeline pipeline("hlo normalization");
pipeline.AddPass<HloPassFix<GpuAlgebraicSimplifier>>(simplifier_options,
gpu_version);
pipeline.AddPass<TransposeFolding>(CanFoldTransposeOperandIntoDot,
TransposeFolding::NeverFoldTranspose);
pipeline.AddPass<ReshapeDecomposer>();
pipeline.AddPass<ReduceDecomposer>([&](const HloInstruction* r) {
return IsReductionFromOrToContiguousDimensions(*r);
});
if (debug_options.xla_gpu_enable_custom_fusions()) {
pipeline.AddPass<SimplifyFPConversions>();
pipeline.AddPass<CustomKernelFusionRewriter>(
&gpu_target_config.device_description);
pipeline.AddPass<CustomKernelFusionAutotuner>(autotune_config);
}
se::GpuComputeCapability gpu_version =
gpu_target_config.device_description.gpu_compute_capability();
pipeline.AddPass<AlgorithmChecker>(gpu_version);
const auto* cuda_cc = std::get_if<se::CudaComputeCapability>(&gpu_version);
const auto* rocm_cc = std::get_if<se::RocmComputeCapability>(&gpu_version);
if (debug_options.xla_gpu_enable_triton_gemm() &&
(cuda_cc != nullptr &&
cuda_cc->IsAtLeast(se::CudaComputeCapability::AMPERE))) {
pipeline.AddPass<GemvRewriter>();
pipeline.AddPass<GemmFusion>(gpu_version);
} else if (cuda_cc != nullptr &&
cuda_cc->major == se::CudaComputeCapability::VOLTA) {
pipeline.AddPass<SimplifyFPConversions>();
pipeline.AddPass<CustomKernelFusionRewriter>(
&gpu_target_config.device_description);
pipeline.AddPass<CustomKernelFusionAutotuner>(autotune_config);
}
AddGemmRewriterPasses(
pipeline, debug_options, gpu_version,
gpu_target_config.device_description.runtime_version());
pipeline.AddPass<GemmBroadcastFoldingRewriter>();
pipeline.AddPass<LayoutNormalization>(&NormalizeLayoutForGpuCustomCalls);
pipeline.AddPass<HloPassFix<GpuAlgebraicSimplifier>>(simplifier_options,
gpu_version);
pipeline.AddPass<ScatterSimplifier>();
pipeline.AddPass<BroadcastCanonicalizer>();
pipeline.AddPass<TransposeDimensionGrouper>();
pipeline.AddPass<ReductionDegenerateDimRemover>();
pipeline.AddPass<ReductionLayoutNormalizer>();
if (debug_options
.xla_gpu_experimental_enable_triton_softmax_priority_fusion() &&
((cuda_cc != nullptr &&
cuda_cc->IsAtLeast(se::CudaComputeCapability::AMPERE)) ||
rocm_cc != nullptr)) {
add_float_normalization(pipeline);
pipeline.AddPass<HloPassFix<GpuAlgebraicSimplifier>>(simplifier_options,
gpu_version);
pipeline.AddPass<HloCSE>(true);
pipeline.AddPass<HloConstantFolding>();
pipeline.AddPass<HloDCE>();
pipeline.AddPass<SoftmaxRewriterTriton>(
gpu_target_config.device_description, ShapeSizeBytesFunction(),
true);
}
pipeline.AddPass<ReductionDimensionGrouper>();
bool ignore_small_reduce_dims =
!debug_options.xla_gpu_enable_priority_fusion();
pipeline.AddPass<HloPassFix<ReductionSplitter>>(ignore_small_reduce_dims);
pipeline.AddPass<HloPassFix<TreeReductionRewriter>>(gpu_version);
pipeline.AddPass<SubByteNormalization>(
SubByteNormalization::SET_ELEMENT_SIZE);
TF_RETURN_IF_ERROR(pipeline.Run(hlo_module).status());
}
HloPassPipeline pipeline("post-layout_assignment");
AddHloVerifier(&pipeline, !debug_options.xla_experimental_ignore_channel_id(),
HloVerifierOpts{}
.MakeLayoutSensitive()
.WithInstructionCanChangeLayout(
LayoutAssignment::InstructionCanChangeLayout)
.VerifyBroadcastDimensionsOrder()
.VerifyReshapeIsBitcast(),
true);
add_float_normalization(pipeline);
TF_RETURN_IF_ERROR(AddGemmFusionAutotuningPasses(
&pipeline, hlo_module, autotune_config, thread_pool,
options.key_value_store,
gpu_target_config.device_description.runtime_version()));
pipeline.AddPass<CallInliner>();
AddGemmRewriterPasses(pipeline, debug_options, gpu_version,
gpu_target_config.device_description.runtime_version());
pipeline.AddPass<GemmBroadcastFoldingRewriter>();
pipeline.AddPass<HostOffloader>(
static_cast<int64_t>(stream_executor::MemoryType::kHost));
TF_RETURN_IF_ERROR(
AddConvAndGemmAutotuningPasses(&pipeline, gpu_version, options,
hlo_module, autotune_config, thread_pool));
add_float_normalization(pipeline);
pipeline.AddPass<TupleSimplifier>();
pipeline.AddPass<HloPassFix<GpuAlgebraicSimplifier>>(simplifier_options,
gpu_version);
if (debug_options.xla_allow_excess_precision()) {
pipeline.AddPass<SimplifyFPConversions>();
}
pipeline.AddPass<HloCSE>(true);
pipeline.AddPass<HostMemoryTransferAsyncifier>(
static_cast<int64_t>(stream_executor::MemoryType::kHost));
#ifdef NDEBUG
HloVerifierOpts opts = HloVerifierOpts{}
.MakeLayoutSensitive()
.WithInstructionCanChangeLayout(
LayoutAssignment::InstructionCanChangeLayout)
.VerifyBroadcastDimensionsOrder()
.VerifyReshapeIsBitcast();
opts.verify_unique_channel_ids =
!debug_options.xla_experimental_ignore_channel_id();
pipeline.AddPass<HloVerifier>(
std::make_unique<DefaultVerifierMetadata>(std::move(opts)),
"end-of-post-layout_assignment");
#endif
TF_RETURN_IF_ERROR(pipeline.Run(hlo_module).status());
return absl::OkStatus();
}
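// Resolves the compilation target: an explicitly provided TargetConfig, a
// GpuTargetConfigProto read from --xla_gpu_target_config_filename, or the
// attached StreamExecutor, in that order of preference.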
absl::StatusOr<Compiler::TargetConfig> GpuCompiler::GetTargetConfig(
const Compiler::CompileOptions& options, const DebugOptions& debug_opts,
se::StreamExecutor* executor) {
if (options.target_config.has_value()) {
return *options.target_config;
}
if (!debug_opts.xla_gpu_target_config_filename().empty()) {
std::string gpu_target_config_string;
TF_RETURN_IF_ERROR(tsl::ReadFileToString(
tsl::Env::Default(), debug_opts.xla_gpu_target_config_filename(),
&gpu_target_config_string));
stream_executor::GpuTargetConfigProto gpu_target_config_proto;
if (!tsl::protobuf::TextFormat::ParseFromString(gpu_target_config_string,
&gpu_target_config_proto)) {
return absl::FailedPreconditionError(
"Failed to parse GpuTargetConfigProto");
}
return Compiler::TargetConfig{gpu_target_config_proto};
}
if (executor) {
Compiler::TargetConfig target_config = Compiler::TargetConfig{executor};
int64_t device_memory_size =
target_config.device_description.device_memory_size();
if (device_memory_size == -1) {
return absl::FailedPreconditionError(
"When running on an NVIDIA simulation device, you must use "
"--xla_gpu_target_config_filename to pass in target information. "
"The target config from StreamExecutor is inaccurate.");
}
return target_config;
}
return absl::InternalError(
"Either GPU has to be attached, or --xla_gpu_target_config_filename "
"has to be specified to specify the target to compile for.");
}
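// Public entry point: loads autotune results, resolves the target config,
// runs OptimizeHloModule and IR-emitting preparation, then serializes
// autotune results and optional debug dumps.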
absl::StatusOr<std::unique_ptr<HloModule>> GpuCompiler::RunHloPasses(
std::unique_ptr<HloModule> module, se::StreamExecutor* stream_exec,
const CompileOptions& options) {
const DebugOptions debug_opts = module->config().debug_options();
TF_RETURN_IF_ERROR(LoadAutotuneResultsFromFile(debug_opts));
bool is_deviceless = options.target_config.has_value() ||
!debug_opts.xla_gpu_target_config_filename().empty();
TF_ASSIGN_OR_RETURN(TargetConfig gpu_target_config,
GetTargetConfig(options, debug_opts, stream_exec));
const std::optional<std::string> unoptimized_fingerprint =
MaybeUploadUnoptimizedGpuSymbols(module.get(),
gpu_target_config.ToProto());
XLA_SCOPED_LOGGING_TIMER_IF(
absl::StrCat("GpuCompiler::RunHloPasses for ", module->name()),
!options.is_autotuning_compilation);
uint64_t start_usecs = tsl::Env::Default()->NowMicros();
tsl::profiler::TraceMe activity(
[&] { return absl::StrCat("HLO Transforms:", module->name()); },
tsl::profiler::TraceMeLevel::kInfo);
TF_RETURN_IF_ERROR(OptimizeHloModule(module.get(),
is_deviceless ? nullptr : stream_exec,
options, gpu_target_config));
TF_RETURN_IF_ERROR(PrepareHloModuleForIrEmitting(module.get()));
if (module->config()
.debug_options()
.xla_gpu_experimental_enable_fusion_block_level_rewriter()) {
HloPassPipeline pipeline("fusion-block-level-rewriter-pipeline");
pipeline.AddPass<FusionBlockLevelRewriter>(
gpu_target_config.device_description, ShapeSizeBytesFunction());
TF_RETURN_IF_ERROR(pipeline.Run(module.get()).status());
}
uint64_t end_usecs = tsl::Env::Default()->NowMicros();
RecordHloPassesDuration(end_usecs - start_usecs);
DumpHloModuleMetadataIfEnabled({module.get()});
AutotuneResults autotune_results;
TF_ASSIGN_OR_RETURN(
AutotuneConfig autotune_config,
GetAutotuneConfig(stream_exec, debug_opts, options, gpu_target_config));
if (!is_deviceless) {
TF_RETURN_IF_ERROR(
AutotunerUtil::SerializeAutotuneResults(&autotune_results));
TF_RETURN_IF_ERROR(SerializeAutotuneResultsToFile(debug_opts));
}
const std::optional<std::string> optimized_fingerprint =
MaybeUploadOptimizedGpuSymbols(module.get(), autotune_results);
if (unoptimized_fingerprint.has_value() &&
optimized_fingerprint.has_value()) {
MaybeUploadGpuSymbolMapping(*unoptimized_fingerprint,
*optimized_fingerprint);
}
if (DumpingEnabledForHloModule(*module)) {
TF_ASSIGN_OR_RETURN(
std::string autotune_results,
        AutotunerUtil::SerializeAutotuneResults(/*as_textproto=*/true));
DumpToFileInDirOrStdout(*module, "", "autotune_results.pbtxt",
autotune_results);
}
return std::move(module);
}
namespace {
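// Runs copy insertion after the module has been scheduled. The schedule is
// temporarily cleared so special-case copies can be inserted, then restored
// and updated to cover the newly added instructions.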
absl::Status RunPostSchedulingCopyInsertion(
HloModule* module,
const HloDataflowAnalysis::CanShareBuffer& can_share_buffer) {
constexpr int64_t kRegionBasedLiveRangeAnalysisLimit = -1;
const int64_t kUseRegionBasedLiveRangeAnalysis =
module->config()
.debug_options()
.xla_gpu_copy_insertion_use_region_analysis()
? kRegionBasedLiveRangeAnalysisLimit
: 0;
CopyInsertion copy_insertion(can_share_buffer,
kUseRegionBasedLiveRangeAnalysis);
TF_RETURN_IF_ERROR(copy_insertion.RemoveUnnecessaryCopies(module));
HloSchedule saved_schedule = module->schedule();
module->clear_schedule();
TF_RETURN_IF_ERROR(
copy_insertion.CopyInsertion::AddSpecialCaseCopies(module));
TF_RETURN_IF_ERROR(HloDCE().Run(module).status());
TF_RETURN_IF_ERROR(saved_schedule.Update());
TF_RETURN_IF_ERROR(module->set_schedule(std::move(saved_schedule)));
return absl::OkStatus();
}
}
using OutputInfoMap =
absl::flat_hash_map<ShapeIndex, GpuExecutable::OutputInfo>;
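// LLVM diagnostic handler that routes backend diagnostics to VLOG(5) instead
// of printing them to stderr.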
static void NullDiagnosticHandler(const llvm::DiagnosticInfo* diag_info,
void* context) {
std::string error_string;
llvm::raw_string_ostream string_printer(error_string);
llvm::DiagnosticPrinterRawOStream diagnostic_printer(string_printer);
diag_info->print(diagnostic_printer);
VLOG(5) << error_string;
}
namespace {
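// Clones an llvm::Module into a different LLVMContext by round-tripping it
// through bitcode, so that each compilation thread can own its module in its
// own context.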
std::unique_ptr<llvm::Module> CopyToContext(const llvm::Module& module,
llvm::LLVMContext& context) {
llvm::SmallString<0> bitcode;
llvm::raw_svector_ostream bitcode_ostream(bitcode);
llvm::WriteBitcodeToFile(module, bitcode_ostream);
llvm::Expected<std::unique_ptr<llvm::Module>> new_module =
llvm::parseBitcodeFile(
llvm::MemoryBufferRef(llvm::StringRef(bitcode.data(), bitcode.size()),
"split_module"),
context);
CHECK(new_module) << "Failed to parse bitcode "
<< llvm::toString(new_module.takeError());
return std::move(new_module.get());
}
}
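// Verifies the LLVM IR and compiles a single LLVM module to a GPU binary via
// CompileTargetBinary, dumping the IR and generated assembly when dumping is
// enabled for the module.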
absl::StatusOr<GpuCompiler::BackendCompileResult>
GpuCompiler::CompileSingleModule(const HloModuleConfig& module_config,
se::GpuComputeCapability gpu_version,
const HloModule* debug_module,
llvm::Module* llvm_module, bool relocatable,
const CompileOptions& options,
std::optional<int> shard_number) {
{
XLA_SCOPED_LOGGING_TIMER_IF(
absl::StrCat(
"GpuCompiler::RunBackend - Running LLVM verifier for ",
(debug_module != nullptr ? debug_module->name() : "(unknown)")),
VLOG_IS_ON(4) && !options.is_autotuning_compilation);
llvm_module->getContext().setDiagnosticHandlerCallBack(
NullDiagnosticHandler, nullptr);
std::string err;
llvm::raw_string_ostream err_stream(err);
TF_RET_CHECK(!llvm::verifyModule(*llvm_module, &err_stream))
<< "Invalid LLVM IR before optimizations:\n"
<< err_stream.str()
<< "\nThis probably indicates a bug in the HLO -> LLVM IR "
"lowering. Rerun with --xla_dump_to to get the IR"
<< (debug_module
? absl::StrCat(" and looks for files with name containing: *",
FilenameFor(*debug_module, "", ""), "*")
: ".");
}
TF_ASSIGN_OR_RETURN(
BackendCompileResult result,
CompileTargetBinary(module_config, llvm_module, gpu_version, relocatable,
debug_module, options));
const bool should_dump = DumpingEnabledForHloModule(
debug_module ? debug_module->name() : "", module_config.debug_options());
if (should_dump) {
if (debug_module) {
llvm_ir::DumpIrIfEnabled(
*debug_module, *llvm_module,
          /*optimized=*/true,
shard_number.has_value() ? std::to_string(*shard_number) : "");
} else {
LOG(ERROR) << "Dumping is not implemented since the file name cannot be "
"inferred. Please implement (potentially MLIR) module -> "
"filename heuristic.";
}
}
if (user_post_optimization_hook_) {
user_post_optimization_hook_(*llvm_module);
}
if (should_dump) {
absl::string_view ptx = result.asm_text;
if (debug_module) {
DumpToFileInDirOrStdout(*debug_module, "",
shard_number.has_value()
? (std::to_string(*shard_number) + ".ptx")
: "ptx",
ptx);
} else {
LOG(ERROR) << "Dumping is not implemented since the file name cannot be "
"inferred. Please implement (potentially MLIR) module -> "
"filename heuristic.";
}
}
return result;
}
namespace {
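// CountFunctions counts externally visible function definitions in a module;
// this is used to decide how many shards to split the LLVM module into.
// SingleFunctionName returns the name of the only such function, or "" if
// there is more than one.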
int CountFunctions(const llvm::Module& module) {
int num_functions = 0;
for (const llvm::Function& func : module.functions()) {
if (!func.isDeclaration() &&
func.getLinkage() == llvm::GlobalValue::LinkageTypes::ExternalLinkage) {
++num_functions;
}
}
return num_functions;
}
std::string SingleFunctionName(const llvm::Module& module) {
std::string name;
for (const llvm::Function& func : module.functions()) {
if (!func.isDeclaration() &&
func.getLinkage() == llvm::GlobalValue::LinkageTypes::ExternalLinkage) {
if (name.empty()) {
name = func.getName().str();
} else {
return "";
}
}
}
return name;
}
}
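// Splits the LLVM module into per-kernel shards, compiles them (in parallel
// when a thread pool is available), optionally reuses binaries from the
// on-disk kernel cache, and links everything into a single GPU binary.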
absl::StatusOr<GpuCompiler::BackendCompileResult> GpuCompiler::CompileAndLink(
const HloModuleConfig& module_config,
CompileModuleResults& compile_module_results,
se::GpuComputeCapability gpu_version, se::StreamExecutor* stream_exec,
const CompileOptions& options, const HloModule* debug_module) {
llvm::Module* llvm_module = &*compile_module_results.llvm_module;
bool force_module_split =
module_config.debug_options().xla_llvm_force_inline_before_split();
if (force_module_split) {
for (llvm::Function& func : llvm_module->functions()) {
if (func.getNumUses() > 0 && !func.isDeclaration()) {
VLOG(4) << absl::StrFormat("Inlining function %s with %d users.\n",
func.getName().str(), func.getNumUses());
std::vector<llvm::CallInst*> calls_to_inline;
for (auto* user : func.users()) {
if (auto* call = llvm::dyn_cast<llvm::CallInst>(user)) {
calls_to_inline.push_back(call);
}
}
for (auto* call_to_inline : calls_to_inline) {
llvm::InlineFunctionInfo inline_function_info;
if (!llvm::InlineFunction(*call_to_inline, inline_function_info)
.isSuccess()) {
            return absl::InternalError("Cannot inline function " +
                                       func.getName().str());
          }
}
}
}
}
llvm::DenseMap<llvm::StringRef, llvm::Constant*> const_initializer_map;
llvm::Module& module_with_constants =
(compile_module_results.llvm_module_constants == nullptr)
? *llvm_module
: *compile_module_results.llvm_module_constants;
for (llvm::GlobalVariable& gv : module_with_constants.globals()) {
if (gv.hasName() && gv.isConstant() && gv.hasInitializer() &&
gv.hasExternalLinkage()) {
llvm::Constant* initializer = gv.getInitializer();
unsigned int num_elements = 0;
if (auto* caz =
llvm::dyn_cast<llvm::ConstantAggregateZero>(initializer)) {
num_elements = caz->getElementCount().getFixedValue();
} else if (auto* cds = llvm::dyn_cast<llvm::ConstantDataSequential>(
initializer)) {
num_elements = cds->getNumElements();
}
if (num_elements > 0) {
const_initializer_map[gv.getName()] = initializer;
}
}
}
llvm_ir::DumpIrIfEnabled(*debug_module, *llvm_module,
                           /*optimized=*/false, "inlined");
absl::string_view cache_path =
module_config.debug_options().xla_gpu_kernel_cache_file();
const bool use_cache = !cache_path.empty();
struct NamedModule {
std::string name;
std::unique_ptr<llvm::Module> module;
};
std::vector<NamedModule> llvm_modules;
MaybeOwningThreadPool thread_pool = CreateMaybeOwningThreadPool(
module_config.debug_options()
.xla_gpu_force_compilation_parallelism(),
options.thread_pool,
      /*default_parallelism=*/1);
int num_modules = CountFunctions(*llvm_module);
if (thread_pool.get() != nullptr && !use_cache) {
num_modules = std::max(1, std::min(thread_pool->NumThreads(), num_modules));
}
if (compile_module_results.llvm_module_constants != nullptr) {
llvm_modules.reserve(num_modules + 1);
llvm_modules.push_back(
{"", std::move(compile_module_results.llvm_module_constants)});
} else {
llvm_modules.reserve(num_modules);
}
int single_function_module_count = 0;
llvm::SplitModule(
*llvm_module, num_modules,
[&](std::unique_ptr<llvm::Module> module) {
for (llvm::GlobalVariable& gv : module->globals()) {
if (gv.hasName() && gv.isConstant() && !gv.hasInitializer() &&
const_initializer_map.count(gv.getName()) != 0) {
gv.setInitializer(const_initializer_map[gv.getName()]);
gv.setLinkage(llvm::GlobalValue::InternalLinkage);
}
}
const std::string name = SingleFunctionName(*module);
if (!name.empty()) {
++single_function_module_count;
}
llvm_modules.push_back({name, std::move(module)});
},
      /*PreserveLocals=*/true, /*RoundRobin=*/true);
VLOG(2) << "Single-function cacheable modules: "
<< single_function_module_count << " / " << llvm_modules.size();
struct NamedCompileResult {
std::string name;
absl::StatusOr<BackendCompileResult> result;
};
std::vector<NamedCompileResult> compile_results(llvm_modules.size());
if (thread_pool.get() != nullptr) {
tsl::BlockingCounter counter(llvm_modules.size());
for (int i = 0; i < llvm_modules.size(); ++i) {
thread_pool.get_mutable()->Schedule(
[&compile_results, i, &llvm_modules, &counter, this, &module_config,
&gpu_version, &debug_module, &options] {
llvm::LLVMContext new_context;
std::unique_ptr<llvm::Module> new_module =
CopyToContext(*llvm_modules.at(i).module, new_context);
compile_results.at(i) = {
llvm_modules.at(i).name,
CompileSingleModule(module_config, gpu_version, debug_module,
new_module.get(),
                                  /*relocatable=*/true, options,
                                  /*shard_number=*/i)};
counter.DecrementCount();
});
}
counter.Wait();
} else {
for (int i = 0; i < llvm_modules.size(); ++i) {
compile_results.at(i) = {
llvm_modules.at(i).name,
CompileSingleModule(module_config, gpu_version, debug_module,
&*llvm_modules.at(i).module,
                            /*relocatable=*/true, options,
                            /*shard_number=*/i)};
}
}
std::string ptx_snippets;
std::vector<std::vector<uint8_t>> binaries_to_link;
binaries_to_link.reserve(compile_results.size());
std::vector<KernelReuseCache::NamedBinary> binaries_to_cache;
binaries_to_cache.reserve(single_function_module_count);
for (const auto& [name, maybe_result] : compile_results) {
TF_ASSIGN_OR_RETURN(auto result, maybe_result);
if (result.binary.empty()) {
continue;
}
ptx_snippets += result.asm_text;
ptx_snippets += "\n";
binaries_to_link.push_back(result.binary);
if (!name.empty()) {
binaries_to_cache.push_back({name, result.binary});
}
}
if (use_cache) {
std::string resolved_path;
if (!tsl::io::ResolveTestPrefixes(cache_path, resolved_path)) {
return FailedPrecondition("File path can not be resolved: %s",
cache_path);
}
const CompilationCacheProto& current_cache =
compile_module_results.kernel_compilation_cache;
const bool cache_file_exists =
tsl::Env::Default()->FileExists(resolved_path).ok();
if (cache_file_exists) {
int loaded_kernel_count = 0;
for (const auto& [name, entry] : current_cache.entries()) {
if (llvm_module->getFunction(name) != nullptr) {
VLOG(5) << "Using the just compiled kernel for " << name;
TF_RET_CHECK(entry.binary().empty())
<< name
<< " is a just compiled kernel and is not expected to have a "
"binary yet.";
continue;
}
const uint8_t* binary =
reinterpret_cast<const uint8_t*>(entry.binary().data());
binaries_to_link.push_back(
std::vector<uint8_t>(binary, binary + entry.binary().size()));
VLOG(5) << "Using " << name << " from cache: " << entry.binary().size();
++loaded_kernel_count;
}
VLOG(2) << "Using " << loaded_kernel_count << " / "
<< current_cache.entries_size() << " cached kernels.";
}
if (!binaries_to_cache.empty()) {
TF_RETURN_IF_ERROR(
UpdateDiskKernelCache(resolved_path, cache_file_exists,
current_cache, binaries_to_cache));
}
}
auto maybe_backend_result =
LinkModules(gpu_version, stream_exec, std::move(binaries_to_link),
module_config.debug_options());
if (!maybe_backend_result.ok()) {
LOG(ERROR) << "The CUDA linking API did not work. Please use XLA_FLAGS="
"--xla_gpu_enable_llvm_module_compilation_parallelism=false "
"to bypass it, but expect to get longer compilation time due "
"to the lack of multi-threading. Original error: "
<< maybe_backend_result.status();
return maybe_backend_result.status();
}
VLOG(4) << "Binary size after linking [B]: " << maybe_backend_result->size();
compile_module_results.kernel_compilation_cache.Clear();
return BackendCompileResult{ptx_snippets, std::move(*maybe_backend_result)};
}
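// Schedules the HLO module, lowers it to LLVM IR, and compiles the IR to a
// GPU binary, either shard-by-shard through CompileAndLink or as a single
// module.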
absl::StatusOr<GpuCompiler::CompileResultWithMetadata>
GpuCompiler::CompileToBackendResult(
HloModule* module, llvm::LLVMContext* llvm_context,
se::StreamExecutor* executor, const CompileOptions& options,
const se::DeviceDescription& gpu_device_info) {
tsl::profiler::TraceMe traceme("GpuCompiler::CompileToBackendResult");
TF_RETURN_IF_ERROR(RunPreSchedulingPasses(module, executor));
TF_ASSIGN_OR_RETURN(
ScheduleMetadata schedule_metadata,
ScheduleGpuModule(module, pointer_size_, gpu_device_info));
TF_RETURN_IF_ERROR(RunPostSchedulingPipelines(
module, schedule_metadata.scheduler_mem_limit, gpu_device_info));
TF_ASSIGN_OR_RETURN(se::Platform * platform,
se::PlatformManager::PlatformWithId(PlatformId()));
bool can_use_link_modules = (executor != nullptr);
if (can_use_link_modules) {
TF_ASSIGN_OR_RETURN(can_use_link_modules,
CanUseLinkModules(module->config()));
}
const bool split_modules =
can_use_link_modules &&
module->config()
.debug_options()
.xla_gpu_enable_llvm_module_compilation_parallelism();
const bool use_cache =
split_modules &&
!module->config().debug_options().xla_gpu_kernel_cache_file().empty();
TF_ASSIGN_OR_RETURN(
CompileModuleResults compile_module_results,
CompileModuleToLlvmIr(module, llvm_context, target_triple_, data_layout_,
platform->Name(), platform->id(), gpu_device_info,
GetCanShareBuffer(), BufferSizeBytesFunction(),
use_cache));
if (user_pre_optimization_hook_) {
user_pre_optimization_hook_(*compile_module_results.llvm_module);
if (compile_module_results.llvm_module_constants != nullptr) {
user_pre_optimization_hook_(
*compile_module_results.llvm_module_constants);
}
}
llvm_ir::DumpIrIfEnabled(*module, *compile_module_results.llvm_module,
                           /*optimized=*/false);
if (compile_module_results.llvm_module_constants != nullptr) {
llvm_ir::DumpIrIfEnabled(*module,
*compile_module_results.llvm_module_constants,
                             /*optimized=*/false, "constants");
}
BackendCompileResult backend_result;
if (split_modules) {
TF_ASSIGN_OR_RETURN(backend_result,
CompileAndLink(module->config(), compile_module_results,
gpu_device_info.gpu_compute_capability(),
executor, options, module));
} else {
CHECK(compile_module_results.llvm_module_constants == nullptr);
TF_ASSIGN_OR_RETURN(
backend_result,
CompileSingleModule(module->config(),
gpu_device_info.gpu_compute_capability(), module,
&*compile_module_results.llvm_module,
                            /*relocatable=*/false, options,
                            /*shard_number=*/std::nullopt));
}
RecordXlaDeviceBinarySize(backend_result.binary.size());
if (DumpingEnabledForHloModule(*module)) {
DumpToFileInDirOrStdout(
*module, "", "thunk_sequence.txt",
compile_module_results.executable->ToString(0));
}
return CompileResultWithMetadata{std::move(backend_result),
std::move(compile_module_results)};
}
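// Backend entry point: resolves the target config, runs the cuDNN compiler
// passes when a stream executor is attached, compiles the module to a binary,
// and wraps the result in a GpuExecutable.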
absl::StatusOr<std::unique_ptr<Executable>> GpuCompiler::RunBackend(
std::unique_ptr<HloModule> module, se::StreamExecutor* stream_exec,
const CompileOptions& options) {
tsl::profiler::ScopedAnnotation backend_annotation{[&] {
return absl::StrFormat("XlaCompileBackend:#module=%s,program_id=%d#",
module->name(), module->unique_id());
}};
BinaryMap dnn_compiled_graphs;
if (stream_exec) {
TF_RETURN_IF_ERROR(RunCudnnCompilerPasses(module.get(), stream_exec,
&dnn_compiled_graphs));
}
const DebugOptions& debug_opts = module->config().debug_options();
TF_ASSIGN_OR_RETURN(TargetConfig gpu_target_config,
GetTargetConfig(options, debug_opts, stream_exec));
if (DumpingEnabledForHloModule(*module)) {
std::string textproto;
tsl::protobuf::TextFormat::PrintToString(gpu_target_config.ToProto(),
&textproto);
DumpToFileInDirOrStdout(*module, "", "gpu_target_config.pbtxt", textproto);
}
if (!options.is_autotuning_compilation) {
VLOG(1) << "Starting to compile HLO module " << module->name();
}
XLA_SCOPED_LOGGING_TIMER_IF(
absl::StrCat("GpuCompiler::RunBackend for ", module->name()),
!options.is_autotuning_compilation);
std::string slow_compilation_msg =
absl::StrCat("Compiling module ", module->name());
auto slow_compile_alarm = SlowCompilationAlarm(slow_compilation_msg);
if (options.is_autotuning_compilation) {
if (module->config().debug_options().xla_embed_ir_in_executable()) {
LOG(WARNING) << "Doing autotuning compilations with "
"xla_embed_ir_in_executable wastes memory!";
}
}
llvm::LLVMContext llvm_context;
const se::DeviceDescription& gpu_device_info =
gpu_target_config.device_description;
if (module->config().hlo_profiling_enabled() || VLOG_IS_ON(1)) {
HloCostAnalysis::Options cost_analysis_options{ShapeSizeBytesFunction()};
cost_analysis_options.set_bytes_per_second(
gpu_device_info.memory_bandwidth());
GpuHloCostAnalysis cost_analysis(cost_analysis_options, gpu_device_info);
TF_RETURN_IF_ERROR(module->entry_computation()->Accept(&cost_analysis));
if (!options.is_autotuning_compilation) {
VLOG(1) << "HLO memory read+written: "
<< tsl::strings::HumanReadableNumBytes(
cost_analysis.bytes_accessed());
}
if (module->config().hlo_profiling_enabled()) {
LOG(ERROR) << "--xla_hlo_profile for GPU is unsupported.";
}
}
TF_ASSIGN_OR_RETURN(
CompileResultWithMetadata res,
CompileToBackendResult(module.get(), &llvm_context, stream_exec, options,
gpu_device_info));
if (DumpingEnabledForHloModule(*module)) {
DumpToFileInDirOrStdout(
*module, "", "thunk_sequence.txt",
res.compile_module_results.executable->ToString(0));
}
bool embed_ir_in_executable =
module->config().debug_options().xla_embed_ir_in_executable();
int64_t debug_buffer_assignment_show_max =
module->config().debug_options().xla_debug_buffer_assignment_show_max();
tsl::profiler::ScopedAnnotation annotation([&] {
return absl::StrFormat("XlaCreateGpuExecutable:#module=%s#",
module->name());
});
TF_ASSIGN_OR_RETURN(
auto gpu_executable,
GpuExecutable::Create(GpuExecutable::Params{
(options.is_autotuning_compilation &&
!res.backend_result.binary.empty())
? std::string()
: std::move(res.backend_result.asm_text),
std::move(res.backend_result.binary),
std::move(dnn_compiled_graphs),
gpu_device_info.gpu_compute_capability(),
std::move(res.compile_module_results.executable),
std::move(res.compile_module_results.constants),
std::move(res.compile_module_results.output_info),
std::move(res.compile_module_results.module_name),
std::move(res.compile_module_results.output_shape),
(res.compile_module_results.use_original_allocations
? std::optional<std::vector<BufferAllocation>>()
: std::move(res.compile_module_results.allocations)),
std::move(res.compile_module_results.buffer_assignment),
debug_buffer_assignment_show_max,
options.is_autotuning_compilation
? std::unique_ptr<HloModule>()
: std::move(module),
!options.is_autotuning_compilation}));
if (embed_ir_in_executable) {
std::string ir_module_string_before_opt =
llvm_ir::DumpToString(res.compile_module_results.llvm_module.get());
gpu_executable->set_ir_module_string(ir_module_string_before_opt);
DCHECK_NE("", ir_module_string_before_opt);
}
IncrementCompiledProgramsCount();
if (!options.is_autotuning_compilation && gpu_executable->has_module()) {
auto hlo_proto = std::make_unique<HloProto>();
*hlo_proto->mutable_buffer_assignment() =
gpu_executable->buffer_assignment()->ToProto();
gpu_executable->set_hlo_proto(std::move(hlo_proto));
gpu_executable->set_debug_info(
gpu_executable->buffer_assignment()->GetStats().ToString());
}
return static_cast<std::unique_ptr<Executable>>(std::move(gpu_executable));
}
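// Ahead-of-time compilation: runs the HLO passes on any module that is not
// yet scheduled, then compiles each module and wraps the result in a
// serializable GpuThunkAotCompilationResult.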
absl::StatusOr<std::vector<std::unique_ptr<AotCompilationResult>>>
GpuCompiler::CompileAheadOfTime(std::unique_ptr<HloModuleGroup> module_group,
const AotCompilationOptions& options) {
CHECK_EQ(options.PlatformId(), PlatformId());
std::vector<std::unique_ptr<HloModule>> modules =
module_group->ConsumeModules();
std::vector<std::unique_ptr<HloModule>> optimized_modules;
optimized_modules.reserve(modules.size());
for (std::unique_ptr<HloModule>& module : modules) {
if (!module->has_schedule()) {
tsl::profiler::ScopedAnnotation annotation{[&] {
return absl::StrFormat("XlaCompile:#module=%s,program_id=%d#",
module->name(), module->unique_id());
}};
CompileOptions compile_options;
compile_options.device_allocator = options.device_allocator();
compile_options.target_config = options.target_config();
TF_ASSIGN_OR_RETURN(
std::unique_ptr<HloModule> optimized_module,
RunHloPasses(std::move(module), options.executor(), compile_options));
optimized_modules.push_back(std::move(optimized_module));
} else {
optimized_modules.push_back(std::move(module));
}
}
modules = std::move(optimized_modules);
std::vector<std::unique_ptr<AotCompilationResult>> results;
const std::optional<Compiler::TargetConfig>& target_config =
options.target_config();
CHECK(target_config.has_value() || options.executor() != nullptr);
const se::DeviceDescription& gpu_device_info =
target_config.has_value() ? target_config->device_description
: options.executor()->GetDeviceDescription();
for (const std::unique_ptr<HloModule>& module : modules) {
llvm::LLVMContext llvm_context;
TF_ASSIGN_OR_RETURN(
CompileResultWithMetadata res,
CompileToBackendResult(module.get(), &llvm_context, options.executor(),
{options.device_allocator()}, gpu_device_info));
TF_ASSIGN_OR_RETURN(
results.emplace_back(),
GpuThunkAotCompilationResult::FromModule(
module.get(), res.compile_module_results.buffer_assignment.get(),
res.backend_result.asm_text, res.backend_result.binary,
res.backend_result.dnn_compiled_graphs));
}
return std::move(results);
}
HloCostAnalysis::ShapeSizeFunction GpuCompiler::ShapeSizeBytesFunction() const {
return [pointer_size = pointer_size_](const Shape& shape) {
return GetSizeOfShape(shape, pointer_size);
};
}
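// Exports a compiled GpuExecutable as an AOT compilation result so that it
// can be serialized and later reloaded.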
absl::StatusOr<std::unique_ptr<AotCompilationResult>> GpuCompiler::Export(
Executable* executable) const {
auto* gpu_executable = tensorflow::down_cast<GpuExecutable*>(executable);
if (!gpu_executable) return Internal("GpuExecutable is null");
return GpuThunkAotCompilationResult::FromModule(
&gpu_executable->module(), gpu_executable->buffer_assignment(),
gpu_executable->text(), gpu_executable->binary(),
gpu_executable->dnn_compiled_graphs());
}
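// Wraps not-yet-fused instructions into single-instruction fusions before
// scheduling.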
absl::Status GpuCompiler::RunPreSchedulingPasses(
HloModule* module, se::StreamExecutor* stream_exec) {
HloPassPipeline pipeline("pre-scheduling-passes");
pipeline.AddPass<FusionWrapper>();
return pipeline.Run(module).status();
}
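// Builds the HloCostAnalysis options used by rematerialization; when host
// memory offloading is enabled, flop and transcendental rates are derived
// from the device description.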
HloCostAnalysis::Options CreateHloAnalysisOpts(
const HloModule& module, const se::DeviceDescription& gpu_device_info,
ShapeSizeFn shape_size_fn) {
HloCostAnalysis::Options hlo_cost_analysis_options;
hlo_cost_analysis_options.shape_size = shape_size_fn;
std::optional<HloRematerialization::HostMemoryOffloadConfig>
offloading_config = std::nullopt;
if (module.config().debug_options().xla_gpu_enable_host_memory_offloading()) {
constexpr float kGiga = 1e+9;
constexpr float kFma = 2;
float flops_per_sec = gpu_device_info.core_count() *
gpu_device_info.fpus_per_core() *
gpu_device_info.clock_rate_ghz() * kGiga * kFma;
int64_t host_memory_space_color =
static_cast<int64_t>(se::MemoryType::kHost);
hlo_cost_analysis_options.set_flops_per_second(flops_per_sec);
hlo_cost_analysis_options.set_transcendentals_per_second(flops_per_sec);
offloading_config =
std::make_optional<HloRematerialization::HostMemoryOffloadConfig>(
host_memory_space_color,
gpu_device_info.memory_bandwidth(),
gpu_device_info.memory_bandwidth());
}
return hlo_cost_analysis_options;
}
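// Builds the HloRematerialization options, optionally enabling host memory
// offloading with transfer rates taken from the device's memory bandwidth.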
HloRematerialization::Options CreateRematOpts(
const HloModule& module, const se::DeviceDescription& gpu_device_info,
HloCostAnalysis& hlo_cost_analysis, int64_t scheduler_mem_limit) {
bool enable_offloading =
module.config().debug_options().xla_gpu_enable_host_memory_offloading();
std::optional<HloRematerialization::HostMemoryOffloadConfig>
offloading_config = std::nullopt;
if (enable_offloading) {
int64_t host_memory_space_color =
static_cast<int64_t>(se::MemoryType::kHost);
offloading_config =
std::make_optional<HloRematerialization::HostMemoryOffloadConfig>(
host_memory_space_color,
gpu_device_info.memory_bandwidth(),
gpu_device_info.memory_bandwidth());
}
HloRematerialization::RematerializationModeConfig
      rematerialization_mode_config(/*recompute=*/true, /*compress=*/true,
                                    /*host_offload=*/enable_offloading);
HloRematerialization::Options options(
hlo_cost_analysis, rematerialization_mode_config,
      /*memory_limit_bytes=*/scheduler_mem_limit,
      /*block_size_limit=*/1, /*block_rematerialization_factor=*/1,
      /*min_remat_size=*/0, /*compact_shape_function=*/nullptr,
      /*host_memory_offload_config=*/offloading_config);
return options;
}
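// Post-scheduling pipelines: copy insertion, conversion of async collectives
// back to synchronous form where profitable, rematerialization, fusion
// wrapping, and command buffer scheduling.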
absl::Status GpuCompiler::RunPostSchedulingPipelines(
HloModule* module, int64_t scheduler_mem_limit,
const se::DeviceDescription& gpu_device_info) const {
TF_RETURN_IF_ERROR(
RunPostSchedulingCopyInsertion(module, GetCanShareBuffer()));
HloPassPipeline main_pipeline("post-scheduling-passes");
HloPredicate is_nop =
HloPredicateIsOp<HloOpcode::kParameter, HloOpcode::kConstant,
HloOpcode::kBitcast, HloOpcode::kGetTupleElement>;
{
HloPassPipeline& pipeline =
main_pipeline.AddPass<HloPassPipeline>("async-to-sync-converter");
if (module->config()
.debug_options()
.xla_gpu_enable_pipelined_collectives() ||
module->config().debug_options().xla_gpu_enable_pipelined_p2p()) {
pipeline.AddPass<PipelinedP2PRewriter>();
}
pipeline.AddPass<GpuConvertAsyncCollectivesToSync>(is_nop);
}
HloRematerialization::RematerializationSizes sizes;
HloCostAnalysis::Options hlo_cost_analysis_opts =
CreateHloAnalysisOpts(*module, gpu_device_info, ShapeSizeBytesFunction());
HloCostAnalysis hlo_cost_analysis(hlo_cost_analysis_opts);
HloRematerialization::Options remat_opts = CreateRematOpts(
*module, gpu_device_info, hlo_cost_analysis, scheduler_mem_limit);
{
HloPassPipeline& pipeline =
main_pipeline.AddPass<HloPassPipeline>("remat-pipeline");
pipeline.AddPass<HloRematerialization>(remat_opts, sizes);
pipeline.AddPass<StreamAttributeAnnotator>();
pipeline.AddPass<OptimizationBarrierExpander>();
}
{
HloPassPipeline& pipeline =
main_pipeline.AddPass<HloPassPipeline>("fusion-wrapper");
pipeline.AddPass<FusionWrapper>();
}
{
HloPassPipeline& pipeline =
main_pipeline.AddPass<HloPassPipeline>("command-buffer-scheduling");
pipeline.AddPass<CommandBufferScheduling>(gpu_device_info);
pipeline.AddPass<SanitizeConstantNames>();
}
if (module->config().debug_options().xla_gpu_enable_pgle_accuracy_checker()) {
AddHloVerifier(
&main_pipeline,
module->config().debug_options().xla_experimental_ignore_channel_id(),
HloVerifierOpts{}.VerifyInstructionNameUnchanged());
}
return main_pipeline.Run(module).status();
}
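// Loads autotune results from --xla_gpu_load_autotune_results_from, at most
// once per process.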
absl::Status GpuCompiler::LoadAutotuneResultsFromFile(
const DebugOptions& debug_options) {
if (absl::string_view file_path =
debug_options.xla_gpu_load_autotune_results_from();
!file_path.empty()) {
static absl::once_flag once;
absl::Status status = absl::OkStatus();
absl::call_once(once, [&file_path, &status] {
status = AutotunerUtil::LoadAutotuneResultsFromFile(file_path);
});
TF_RETURN_IF_ERROR(status);
}
return absl::OkStatus();
}
absl::Status GpuCompiler::SerializeAutotuneResultsToFile(
const DebugOptions& debug_options) {
if (absl::string_view file_path =
debug_options.xla_gpu_dump_autotune_results_to();
!file_path.empty()) {
TF_RETURN_IF_ERROR(
AutotunerUtil::SerializeAutotuneResultsToFile(file_path));
}
return absl::OkStatus();
}
absl::StatusOr<std::unique_ptr<AotCompilationResult>>
GpuCompiler::LoadAotCompilationResult(
const std::string& serialized_aot_result) {
return LoadAotCompilationResultStatic(serialized_aot_result);
}
absl::StatusOr<std::unique_ptr<AotCompilationResult>>
GpuCompiler::LoadAotCompilationResultStatic(
const std::string& serialized_aot_result) {
return GpuThunkAotCompilationResult::FromString(serialized_aot_result);
}
}  // namespace gpu
}  // namespace xla
#include "xla/service/gpu/gpu_compiler.h"
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <limits>
#include <memory>
#include <string>
#include <utility>
#include <variant>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/container/flat_hash_map.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/statusor.h"
#include "absl/strings/match.h"
#include "absl/strings/string_view.h"
#include "absl/strings/substitute.h"
#include "xla/autotune_results.pb.h"
#include "xla/error_spec.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_module_group.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/literal.h"
#include "xla/literal_util.h"
#include "xla/primitive_util.h"
#include "xla/service/compiler.h"
#include "xla/service/executable.h"
#include "xla/service/gpu/autotuning/autotuner_util.h"
#include "xla/service/gpu/gpu_hlo_schedule.h"
#include "xla/service/gpu/metrics.h"
#include "xla/service/hlo_module_config.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/service/xla_debug_info_manager.h"
#include "xla/stream_executor/device_description.h"
#include "xla/stream_executor/platform.h"
#include "xla/stream_executor/platform_manager.h"
#include "xla/tests/filecheck.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tests/literal_test_util.h"
#include "xla/tests/verified_hlo_module.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/casts.h"
#include "tsl/platform/env.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/path.h"
#include "tsl/platform/protobuf.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace xla {
namespace gpu {
namespace {
namespace m = ::xla::match;
using ::testing::IsEmpty;
using ::testing::Not;
using ::testing::TempDir;
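// Test fixture that exposes GPU scheduling plus the post-scheduling pipelines
// and the compute capability of the default stream executor.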
class GpuCompilerTest : public HloTestBase {
public:
absl::Status Schedule(HloModule* module) {
auto compiler = backend().compiler();
const se::DeviceDescription& gpu_device_info =
backend().default_stream_executor()->GetDeviceDescription();
    TF_RETURN_IF_ERROR(
        ScheduleGpuModule(module, /*pointer_size=*/4, gpu_device_info)
            .status());
    return tensorflow::down_cast<GpuCompiler*>(compiler)
        ->RunPostSchedulingPipelines(
            module, /*scheduler_mem_limit=*/4 * 1024 * 1024, gpu_device_info);
}
const stream_executor::GpuComputeCapability& GpuComputeComp() {
return backend()
.default_stream_executor()
->GetDeviceDescription()
.gpu_compute_capability();
}
};
TEST_F(GpuCompilerTest, CompiledProgramsCount) {
const char* hlo_text = R"(
HloModule test
ENTRY main {
p = f32[10]{0} parameter(0)
ROOT neg = f32[10]{0} negate(p)
}
)";
auto module = ParseAndReturnVerifiedModule(hlo_text).value();
ResetCompiledProgramsCountForTesting();
std::unique_ptr<Executable> executable =
backend()
.compiler()
->RunBackend(std::move(module), backend().default_stream_executor(),
                       {/*device_allocator=*/nullptr,
                        /*thread_pool=*/nullptr,
                        /*layout_canonicalization_callback=*/{},
                        /*is_autotuning_compilation=*/false})
.value();
EXPECT_EQ(GetCompiledProgramsCount(), 1);
}
TEST_F(GpuCompilerTest, GenerateDebugInfoForNonAutotuningCompilations) {
const char* hlo_text = R"(
HloModule test
ENTRY main {
p = f32[10]{0} parameter(0)
ROOT neg = f32[10]{0} negate(p)
}
)";
auto module = ParseAndReturnVerifiedModule(hlo_text).value();
std::unique_ptr<Executable> executable =
backend()
.compiler()
->RunBackend(std::move(module), backend().default_stream_executor(),
                       {/*device_allocator=*/nullptr,
                        /*thread_pool=*/nullptr,
                        /*layout_canonicalization_callback=*/{},
                        /*is_autotuning_compilation=*/false})
.value();
EXPECT_TRUE(XlaDebugInfoManager::Get()->TracksModule(
executable->module().unique_id()));
}
TEST_F(GpuCompilerTest, DoesNotGenerateDebugInfoForAutotuningCompilations) {
const char* hlo_text = R"(
HloModule test
ENTRY main {
p = f32[10]{0} parameter(0)
ROOT neg = f32[10]{0} negate(p)
}
)";
auto module = ParseAndReturnVerifiedModule(hlo_text).value();
int module_id = module->unique_id();
std::unique_ptr<Executable> executable =
backend()
.compiler()
->RunBackend(std::move(module), backend().default_stream_executor(),
                       {/*device_allocator=*/nullptr,
                        /*thread_pool=*/nullptr,
                        /*layout_canonicalization_callback=*/{},
                        /*is_autotuning_compilation=*/true})
.value();
EXPECT_FALSE(XlaDebugInfoManager::Get()->TracksModule(module_id));
}
TEST_F(GpuCompilerTest, CopyInsertionFusion) {
const char* hlo_text = R"(
HloModule cluster
ENTRY main {
cst = f32[1]{0} constant({0})
ROOT tuple_out = (f32[1]{0}, f32[1]{0}, f32[1]{0}, f32[1]{0}) tuple(cst, cst, cst, cst)
}
)";
EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{0, 0}));
auto module = ParseAndReturnVerifiedModule(hlo_text).value();
std::unique_ptr<HloModule> compiled_module =
backend()
.compiler()
->RunHloPasses(module->Clone(), backend().default_stream_executor(),
nullptr)
.value();
VLOG(2) << compiled_module->ToString();
size_t total_fusion_instrs = 0;
for (const HloInstruction* instr :
compiled_module->entry_computation()->instructions()) {
if (instr->opcode() == HloOpcode::kFusion) {
++total_fusion_instrs;
}
}
EXPECT_EQ(total_fusion_instrs, 1);
const HloInstruction* entry_root =
compiled_module->entry_computation()->root_instruction();
EXPECT_THAT(
entry_root,
GmockMatch(m::Tuple(
m::GetTupleElement(m::Fusion()), m::GetTupleElement(m::Fusion()),
m::GetTupleElement(m::Fusion()), m::GetTupleElement(m::Fusion()))));
}
TEST_F(GpuCompilerTest, CanRunScheduledModules) {
HloModuleConfig config;
DebugOptions debug_options = GetDebugOptionsForTest();
debug_options.set_xla_disable_all_hlo_passes(true);
config.set_debug_options(debug_options);
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(R"(
HloModule m, is_scheduled=true
w {
p = s8[] parameter(0)
ROOT n = s8[] negate(p)
}
ENTRY e {
p = s8[] parameter(0)
ROOT _ = s8[] fusion(p), kind=kLoop, calls=w
})",
config));
EXPECT_TRUE(Run(std::move(module), true));
}
TEST_F(GpuCompilerTest, NonFusedInstructionsAreWrapped) {
HloModuleConfig config;
DebugOptions debug_options = GetDebugOptionsForTest();
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(R"(
HloModule m
ENTRY e {
p = f32[2,4,4] parameter(0)
ROOT _ = f32[2,4,4]{2,1,0} transpose(p), dimensions={0,2,1}
})",
config));
config.set_debug_options(debug_options);
std::unique_ptr<Executable> executable =
backend()
.compiler()
->RunBackend(std::move(module), backend().default_stream_executor(),
                       {/*device_allocator=*/nullptr,
                        /*thread_pool=*/nullptr,
                        /*layout_canonicalization_callback=*/{},
                        /*is_autotuning_compilation=*/false})
.value();
HloModule& compiled_module = executable->module();
const HloInstruction* entry_root =
compiled_module.entry_computation()->root_instruction();
EXPECT_THAT(entry_root, GmockMatch(m::Fusion()));
}
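// Tests persisting autotune results via the
// --xla_gpu_dump_autotune_results_to and --xla_gpu_load_autotune_results_from
// flags.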
class PersistedAutotuningTest : public HloTestBase {
protected:
static constexpr absl::string_view kHloText = R"(
HloModule t
ENTRY e {
p0 = f16[1,16,17,3] parameter(0)
p1 = s8[16,17,3] parameter(1)
cp1 = f16[16,17,3] convert(p1)
ROOT _ = f16[1,16,16] dot(p0, cp1),
lhs_contracting_dims={2,3}, rhs_contracting_dims={1,2}
})";
std::string GetUniqueTempFilePath(absl::string_view suffix) {
std::string filename = TempDir();
CHECK(tsl::Env::Default()->CreateUniqueFileName(&filename,
std::string(suffix)));
return filename;
}
std::string ExpectToReadNonEmptyFile(absl::string_view file_path) {
std::string str;
tsl::Env* env = tsl::Env::Default();
TF_EXPECT_OK(tsl::ReadFileToString(env, std::string(file_path), &str));
EXPECT_THAT(str, Not(IsEmpty()));
return str;
}
DebugOptions GetDebugOptionsForTest() override {
DebugOptions options = HloTestBase::GetDebugOptionsForTest();
options.set_xla_gpu_dump_autotune_results_to(
xla_gpu_dump_autotune_results_to_);
options.set_xla_gpu_load_autotune_results_from(
xla_gpu_load_autotune_results_from_);
return options;
}
std::string xla_gpu_dump_autotune_results_to_;
std::string xla_gpu_load_autotune_results_from_;
};
TEST_F(PersistedAutotuningTest, WriteResultsOnEachCompilation) {
constexpr absl::string_view kInvalidTextProto = "Invalid!";
xla_gpu_dump_autotune_results_to_ = GetUniqueTempFilePath(".txt");
TF_EXPECT_OK(GetOptimizedModule(kHloText).status());
{
std::string autotune_results_str =
ExpectToReadNonEmptyFile(xla_gpu_dump_autotune_results_to_);
AutotuneResults results;
EXPECT_TRUE(tsl::protobuf::TextFormat::ParseFromString(autotune_results_str,
&results));
}
tsl::Env* env = tsl::Env::Default();
TF_EXPECT_OK(tsl::WriteStringToFile(env, xla_gpu_dump_autotune_results_to_,
kInvalidTextProto));
TF_EXPECT_OK(GetOptimizedModule(kHloText).status());
{
std::string autotune_results_str =
ExpectToReadNonEmptyFile(xla_gpu_dump_autotune_results_to_);
AutotuneResults results;
EXPECT_TRUE(tsl::protobuf::TextFormat::ParseFromString(autotune_results_str,
&results));
}
}
int64_t CountCopies(const HloComputation& computation) {
int64_t count = 0;
for (const auto& instruction : computation.instructions()) {
if (instruction->opcode() == HloOpcode::kCopy) {
count++;
}
}
return count;
}
int64_t CountCopies(const HloModule& module) {
int64_t count = 0;
for (const auto& computation : module.computations()) {
count += CountCopies(*computation);
}
return count;
}
TEST_F(GpuCompilerTest, RemovesUnnecessaryCopyAfterScheduling) {
const absl::string_view hlo_string = R"(
HloModule all_gather_overlapping
condition {
input_tuple = (f32[1,128], f32[2,128], pred[]) parameter(0)
ROOT cond = pred[] get-tuple-element(input_tuple), index=2
}
body {
input_tuple = (f32[1,128], f32[2,128], pred[]) parameter(0)
param_0 = f32[1,128] get-tuple-element(input_tuple), index=0
param_1 = f32[2,128] get-tuple-element(input_tuple), index=1
cond = pred[] get-tuple-element(input_tuple), index=2
c0 = f32[] constant(0)
splat_c0 = f32[1,128] broadcast(c0), dimensions={}
add = f32[1,128] add(splat_c0, param_0)
all-gather-start = (f32[1,128], f32[2,128]) all-gather-start(add), channel_id=1337, replica_groups={{0,1}}, dimensions={0}, use_global_device_ids=true
c1_s32 = s32[] constant(1)
c0_s32 = s32[] constant(0)
dynamic-slice = f32[1,128] dynamic-slice(param_1, c1_s32, c0_s32), dynamic_slice_sizes={1,128}
all-gather-done = f32[2,128] all-gather-done(all-gather-start)
ROOT output_tuple = (f32[1,128], f32[2,128], pred[]) tuple(dynamic-slice, all-gather-done, cond)
}
ENTRY main {
param_0 = f32[1,128] parameter(0)
param_1 = f32[2,128] parameter(1)
param_2 = pred[] parameter(2)
tuple = (f32[1,128], f32[2,128], pred[]) tuple(param_0, param_1, param_2)
ROOT while = (f32[1,128], f32[2,128], pred[]) while(tuple), condition=condition, body=body
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
GetOptimizedModule(hlo_string));
EXPECT_EQ(CountCopies(*module), 7);
const HloInstruction* root = module->entry_computation()->root_instruction();
const HloInstruction* while_op = root->operand(0)->operand(0);
EXPECT_EQ(while_op->while_body()->root_instruction()->operand(1)->opcode(),
HloOpcode::kCopy);
TF_ASSERT_OK(Schedule(module.get()));
EXPECT_EQ(CountCopies(*module), 4);
  root = module->entry_computation()->root_instruction();
while_op = root->operand(0)->operand(0);
EXPECT_EQ(while_op->while_body()->root_instruction()->operand(1)->opcode(),
HloOpcode::kAllGatherDone);
}
TEST_F(GpuCompilerTest,
GemmFusionIsNoOpWhenGemmFusionAutotunerFallsBackToCublas) {
auto cc = backend()
.default_stream_executor()
->GetDeviceDescription()
.cuda_compute_capability();
if (!cc.IsAtLeastAmpere()) {
GTEST_SKIP() << "Autotuning results have only been generated for Ampere "
<< "and Hopper GPUs";
}
const absl::string_view hlo_string = R"(
HloModule test
ENTRY main {
param_0 = bf16[3,32,1024,4,1024]{4,3,2,1,0} parameter(0)
param_1 = bf16[4,3,32,1024]{3,2,1,0} parameter(1)
param_2 = s32[] parameter(2)
constant_0 = s32[] constant(0)
dynamic-slice_0 = bf16[1,3,32,1024]{3,2,1,0} dynamic-slice(param_1, param_2, constant_0, constant_0, constant_0), dynamic_slice_sizes={1,3,32,1024}
reshape_0 = bf16[3,32,1024]{2,1,0} reshape(dynamic-slice_0)
broadcast_0 = bf16[3,32,1024,4,1024]{2,1,4,3,0} broadcast(reshape_0), dimensions={0,1,2}
add_0 = bf16[3,32,1024,4,1024]{4,3,2,1,0} add(param_0, broadcast_0)
transpose_0 = bf16[3,4,1024,32,1024]{2,1,4,3,0} transpose(add_0), dimensions={0,3,4,1,2}
slice_0 = bf16[1,4,1024,32,1024]{4,3,2,1,0} slice(transpose_0), slice={[0:1], [0:4], [0:1024], [0:32], [0:1024]}
reshape_1 = bf16[4,1024,32,1024]{3,2,1,0} reshape(slice_0)
copy_0 = bf16[4,1024,32,1024]{3,2,1,0} copy(reshape_1)
constant_1 = bf16[] constant(0.08838)
broadcast_1 = bf16[4,1024,32,1024]{3,2,1,0} broadcast(constant_1), dimensions={}
multiply_0 = bf16[4,1024,32,1024]{3,2,1,0} multiply(copy_0, broadcast_1)
slice_1 = bf16[1,4,1024,32,1024]{4,3,2,1,0} slice(transpose_0), slice={[1:2], [0:4], [0:1024], [0:32], [0:1024]}
reshape_2 = bf16[4,1024,32,1024]{3,2,1,0} reshape(slice_1)
copy_1 = bf16[4,1024,32,1024]{3,2,1,0} copy(reshape_2)
ROOT dot_0 = bf16[4,32,1024,1024]{3,2,1,0} dot(multiply_0, copy_1), lhs_batch_dims={0,2}, lhs_contracting_dims={3}, rhs_batch_dims={0,2}, rhs_contracting_dims={3}
}
)";
HloModuleConfig config;
DebugOptions triton_enabled_debug_options = GetDebugOptionsForTest();
triton_enabled_debug_options.set_xla_gpu_enable_dynamic_slice_fusion(false);
triton_enabled_debug_options
.set_xla_gpu_require_complete_aot_autotune_results(true);
config.set_debug_options(triton_enabled_debug_options);
config.set_replica_count(1);
config.set_num_partitions(1);
std::string path =
tsl::io::JoinPath(tsl::testing::XlaSrcRoot(), "service", "gpu",
"gpu_compiler_test_autotune_db.textproto");
TF_EXPECT_OK(AutotunerUtil::LoadAutotuneResultsFromFile(path));
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string, config));
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> triton_enabled_module,
GetOptimizedModule(std::move(module)));
AutotunerUtil::ClearAutotuneResults();
DebugOptions triton_disabled_debug_options = GetDebugOptionsForTest();
triton_disabled_debug_options.set_xla_gpu_enable_dynamic_slice_fusion(false);
triton_disabled_debug_options.set_xla_gpu_enable_triton_gemm(false);
config.set_debug_options(triton_disabled_debug_options);
TF_ASSERT_OK_AND_ASSIGN(module,
ParseAndReturnVerifiedModule(hlo_string, config));
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> triton_disabled_module,
GetOptimizedModule(std::move(module)));
const HloInstruction* root =
triton_enabled_module->entry_computation()->root_instruction();
const HloInstruction* custom_op = root->operand(0)->operand(0);
EXPECT_TRUE(custom_op->IsCustomCall("__cublas$gemm"));
EXPECT_EQ(triton_enabled_module->computation_count(),
triton_disabled_module->computation_count());
}
class FloatNormalizationTest : public GpuCompilerTest,
public ::testing::WithParamInterface<
std::pair<PrimitiveType, PrimitiveType>> {};
INSTANTIATE_TEST_SUITE_P(
Fp8s, FloatNormalizationTest,
::testing::Values(
std::make_pair(PrimitiveType::F8E4M3FN, PrimitiveType::F8E4M3FN),
std::make_pair(PrimitiveType::F8E5M2, PrimitiveType::F8E4M3FN),
std::make_pair(PrimitiveType::F8E4M3FN, PrimitiveType::F8E5M2),
std::make_pair(PrimitiveType::F8E5M2, PrimitiveType::F8E5M2)));
TEST_P(FloatNormalizationTest, Fp8Normalization) {
const PrimitiveType lhs_type = GetParam().first;
const PrimitiveType rhs_type = GetParam().second;
const std::string lhs_name =
primitive_util::LowercasePrimitiveTypeName(lhs_type);
const std::string rhs_name =
primitive_util::LowercasePrimitiveTypeName(rhs_type);
const std::string module_str = absl::Substitute(R"(
HloModule sch
ENTRY main {
parameter = $0[1600,1600]{1,0} parameter(0)
parameter.1 = $1[1600,1600]{1,0} parameter(1)
neg = $1[1600,1600]{1,0} negate(parameter.1)
dot = f16[1600,1600]{1,0} dot(parameter,neg), lhs_contracting_dims={1}, rhs_contracting_dims={0}
constant = f16[] constant(0)
broadcast = f16[1600,1600]{1,0} broadcast(constant), dimensions={}
ROOT maximum = f16[1600,1600]{1,0} maximum(dot,broadcast)
})",
lhs_name, rhs_name);
auto optimize_module = [&](bool enable_triton, bool enable_blas,
bool enable_blas_fallback)
-> absl::StatusOr<std::unique_ptr<HloModule>> {
HloModuleConfig config;
DebugOptions debug_options = GetDebugOptionsForTest();
debug_options.set_xla_gpu_cublas_fallback(enable_blas_fallback);
debug_options.set_xla_gpu_enable_triton_gemm(enable_triton);
if (!enable_blas) {
debug_options.add_xla_disable_hlo_passes("cublas-gemm-rewriter");
}
config.set_debug_options(debug_options);
config.set_num_partitions(1);
TF_ASSIGN_OR_RETURN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_str, config));
return GetOptimizedModule(std::move(module));
};
auto cc = backend()
.default_stream_executor()
->GetDeviceDescription()
.cuda_compute_capability();
const std::string triton_keep_types = absl::Substitute(
R"(CHECK: fusion($0{{[^)]*}}, $1{{[^)]*}}){{.*}}"kind":"__triton_gemm")",
lhs_name, rhs_name);
const std::string cublaslt_keep_types = absl::Substitute(
R"(CHECK: custom-call($0{{[^)]*}}, $1{{[^)]*}}){{.*}}custom_call_target="__cublas$$lt$$matmul$$f8")",
lhs_name, rhs_name);
const std::string cublas_convert_to_f16 =
R"(CHECK: custom-call(f16{{[^)]*}}, f16{{[^)]*}}){{.*}}custom_call_target="__cublas$gemm")";
const std::string fallback_convert_to_f16 =
R"(CHECK: dot(f16{{[^)]*}}, f16{{[^)]*}}))";
{
TF_ASSERT_OK_AND_ASSIGN(auto optimized_module_no_fallback,
                            optimize_module(/*enable_triton=*/true,
                                            /*enable_blas=*/true,
                                            /*enable_blas_fallback=*/false));
const std::string triton_expected_check =
(cc.IsAtLeastHopper() ||
(cc.IsAtLeastAmpere() && lhs_type == F8E5M2 && rhs_type == F8E5M2))
? triton_keep_types
: cublas_convert_to_f16;
TF_ASSERT_OK_AND_ASSIGN(
bool filecheck_matched,
RunFileCheck(optimized_module_no_fallback->ToString(),
triton_expected_check));
EXPECT_TRUE(filecheck_matched);
}
{
TF_ASSERT_OK_AND_ASSIGN(auto optimized_module_no_triton,
                            optimize_module(/*enable_triton=*/false,
                                            /*enable_blas=*/true,
                                            /*enable_blas_fallback=*/true));
const std::string blas_expected_check =
(cc.IsAtLeastHopper() && !(lhs_type == F8E5M2 && rhs_type == F8E5M2))
? cublaslt_keep_types
: cublas_convert_to_f16;
TF_ASSERT_OK_AND_ASSIGN(bool filecheck_matched,
RunFileCheck(optimized_module_no_triton->ToString(),
blas_expected_check));
EXPECT_TRUE(filecheck_matched);
}
{
TF_ASSERT_OK_AND_ASSIGN(auto optimized_module_nothing,
                            optimize_module(/*enable_triton=*/false,
                                            /*enable_blas=*/false,
                                            /*enable_blas_fallback=*/false));
TF_ASSERT_OK_AND_ASSIGN(bool filecheck_matched,
RunFileCheck(optimized_module_nothing->ToString(),
fallback_convert_to_f16));
EXPECT_TRUE(filecheck_matched);
}
}
TEST_F(GpuCompilerTest, CollectivePermuteDecompositionAndPipelining) {
const char* kModuleStr = R"(
HloModule cp
cond {
param = (u32[], f32[1, 1024, 1024]) parameter(0)
count = get-tuple-element(%param), index=0
ub = u32[] constant(11)
ROOT result = pred[] compare(count, ub), direction=LT
}
body {
param = (u32[], f32[1, 1024, 1024]) parameter(0)
count = get-tuple-element(%param), index=0
send-data = get-tuple-element(%param), index=1
recv-data = f32[1, 1024, 1024] collective-permute(send-data),
source_target_pairs={{0,1}, {1,2}, {2,3}, {3,4}}, channel_id=1
c1 = u32[] constant(1)
new_count = u32[] add(count, c1)
replica = u32[] replica-id()
c10 = u32[] constant(10)
sum = u32[] add(replica, c10)
sum2 = u32[] add(sum, count)
conv = f32[] convert(sum2)
p = f32[1, 1024, 1024] broadcast(conv), dimensions={}
b = f32[1, 1024, 1024] add(p, recv-data)
c = f32[1, 1024, 1024] multiply(b, b)
d = f32[1, 1024, 1024] tan(c)
s = f32[1, 1024, 1024] dot(c, d), lhs_batch_dims={0},
lhs_contracting_dims={1}, rhs_batch_dims={0}, rhs_contracting_dims={1}
ROOT result = (u32[], f32[1, 1024, 1024]) tuple(new_count, s)
}
ENTRY test_computation {
c0 = u32[] constant(0)
f0 = f32[] constant(0.0)
init = f32[1, 1024, 1024] broadcast(f0), dimensions={}
while_init = (u32[], f32[1, 1024, 1024]) tuple(c0, init)
while_result = (u32[], f32[1, 1024, 1024]) while(while_init), body=body, condition=cond
ROOT result = f32[1, 1024, 1024] get-tuple-element(while_result), index=1
}
)";
const char* kExpected = R"(
CHECK: recv-done
CHECK-SAME: channel_id=[[CHANNEL_ID:[0-9]+]]
CHECK-SAME: frontend_attributes={_xla_send_recv_pipeline="0"}
CHECK: send-done
CHECK-SAME: channel_id=[[CHANNEL_ID]]
CHECK-SAME: frontend_attributes={_xla_send_recv_pipeline="0"}
CHECK: %[[CUSTOM_CALL:.*]] = custom-call
CHECK: %[[AFTER_ALL:.*]] = after-all
CHECK: %[[RESULT_RECV:.*]] = recv(%[[AFTER_ALL]])
CHECK-SAME: channel_id=[[CHANNEL_ID]]
CHECK-SAME: frontend_attributes={_xla_send_recv_pipeline="0",
CHECK-SAME{LITERAL}: _xla_send_recv_source_target_pairs={{0,1},{1,2},{2,3},{3,4}}},
CHECK-SAME: control-predecessors={%[[CUSTOM_CALL]]}
CHECK: %[[RESULT_SEND:.*]] = send(%[[SOME_SEND_ARG:.*]], %[[AFTER_ALL]])
CHECK-SAME: channel_id=1
CHECK-SAME: frontend_attributes={_xla_send_recv_pipeline="0",
CHECK-SAME{LITERAL}: _xla_send_recv_source_target_pairs={{0,1},{1,2},{2,3},{3,4}}},
CHECK-SAME: control-predecessors={%[[RESULT_RECV]]}
CHECK: ROOT
CHECK-SAME: %[[RESULT_RECV]]
CHECK: ENTRY
CHECK: %[[ENTRY_AFTER_ALL:.*]] = after-all
CHECK: %[[ENTRY_RECV:.*]] = recv(%[[ENTRY_AFTER_ALL]])
CHECK-SAME: channel_id=[[CHANNEL_ID]]
CHECK-SAME: frontend_attributes={_xla_send_recv_pipeline="0",
CHECK-SAME{LITERAL}: _xla_send_recv_source_target_pairs={{0,1},{1,2},{2,3},{3,4}}}
CHECK: %[[ENTRY_SEND:.*]] = send(%[[SOME_SEND_ARG:.*]], %[[ENTRY_AFTER_ALL]])
CHECK-SAME: channel_id=1
CHECK-SAME: frontend_attributes={_xla_send_recv_pipeline="0",
CHECK-SAME{LITERAL}: _xla_send_recv_source_target_pairs={{0,1},{1,2},{2,3},{3,4}}},
CHECK-SAME: control-predecessors={%[[ENTRY_RECV]]}
CHECK: %[[WHILE_INIT:.*]] = tuple
CHECK-SAME: %[[ENTRY_SEND]]
CHECK: while(%[[WHILE_INIT]])
CHECK: recv-done
CHECK-SAME: channel_id=[[CHANNEL_ID]]
CHECK-SAME: frontend_attributes={_xla_send_recv_pipeline="0"}
CHECK: send-done
CHECK-SAME: channel_id=[[CHANNEL_ID]]
CHECK-SAME: frontend_attributes={_xla_send_recv_pipeline="0"}
)";
HloModuleConfig config;
DebugOptions debug_options = GetDebugOptionsForTest();
debug_options.set_xla_gpu_enable_latency_hiding_scheduler(true);
debug_options.set_xla_gpu_collective_permute_decomposer_threshold(1);
debug_options.set_xla_gpu_enable_pipelined_p2p(true);
debug_options.set_xla_gpu_enable_triton_gemm(false);
config.set_debug_options(debug_options);
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(kModuleStr, config));
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> optimized_module,
GetOptimizedModule(std::move(module)));
TF_ASSERT_OK(Schedule(optimized_module.get()));
HloPrintOptions options;
options.set_print_operand_shape(false);
options.set_print_result_shape(false);
TF_ASSERT_OK_AND_ASSIGN(
bool filecheck_matched,
RunFileCheck(optimized_module->ToString(options), kExpected));
EXPECT_TRUE(filecheck_matched);
}
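// Fixture for the on-disk kernel cache: points --xla_gpu_kernel_cache_file at
// a temporary file and enables LLVM module compilation parallelism, which the
// cache requires.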
class KernelCacheTest : public HloTestBase {
public:
void SetUp() override {
CHECK(tsl::Env::Default()->LocalTempFilename(&cache_file_name_));
HloModuleConfig config;
config.set_debug_options(GetDebugOptionsForTest());
TF_ASSERT_OK_AND_ASSIGN(bool can_use_link_modules,
dynamic_cast<GpuCompiler*>(backend().compiler())
->CanUseLinkModules(config));
if (!can_use_link_modules) {
GTEST_SKIP() << "Caching compiled kernels requires support of linking.";
}
}
DebugOptions GetDebugOptionsForTest() override {
DebugOptions debug_options = HloTestBase::GetDebugOptionsForTest();
debug_options.set_xla_gpu_kernel_cache_file(cache_file_name_);
debug_options.set_xla_gpu_enable_llvm_module_compilation_parallelism(true);
return debug_options;
}
bool CacheFileExists() {
if (!tsl::Env::Default()->FileExists(cache_file_name_).ok()) {
return false;
}
return true;
}
int CacheEntryCount() {
if (!CacheFileExists()) {
return 0;
}
std::string serialized;
TF_EXPECT_OK(tsl::ReadFileToString(tsl::Env::Default(), cache_file_name_,
&serialized));
CompilationCacheProto proto;
EXPECT_TRUE(proto.ParseFromString(std::string(serialized)));
return proto.entries_size();
}
std::string cache_file_name_;
static constexpr absl::string_view kHloText = R"(
ENTRY e {
p = s8[] parameter(0)
c = s8[] constant(8)
ROOT _ = s8[] add(p, c)
})";
};
TEST_F(KernelCacheTest, CacheIsGenerated) {
EXPECT_FALSE(CacheFileExists());
EXPECT_TRUE(Run(kHloText, false));
EXPECT_EQ(CacheEntryCount(), 1);
EXPECT_TRUE(Run(kHloText, false));
EXPECT_EQ(CacheEntryCount(), 1);
}
TEST_F(KernelCacheTest, NoCacheIsGeneratedWithoutCompiledKernels) {
EXPECT_FALSE(CacheFileExists());
EXPECT_TRUE(Run(R"(
ENTRY e {
a = f32[5,5] parameter(0)
ROOT _ = f32[5,5] custom-call(a, a), custom_call_target="__cublas$gemm",
backend_config="{ \"gemm_backend_config\": {\"alpha_real\":1,\"beta\":0,\"dot_dimension_numbers\":{\"lhs_contracting_dimensions\":[\"1\"],\"rhs_contracting_dimensions\":[\"0\"],\"lhs_batch_dimensions\":[],\"rhs_batch_dimensions\":[]},\"alpha_imag\":0,\"precision_config\":{\"operand_precision\":[\"DEFAULT\",\"DEFAULT\"]},\"epilogue\":\"DEFAULT\"}}"
})",
false));
EXPECT_FALSE(CacheFileExists());
}
TEST_F(KernelCacheTest, CacheGrowsWithNewKernels) {
EXPECT_FALSE(CacheFileExists());
EXPECT_TRUE(Run(kHloText, false));
EXPECT_EQ(CacheEntryCount(), 1);
EXPECT_TRUE(Run(R"(
ENTRY e {
p = s8[] parameter(0)
ROOT _ = s8[] multiply(p, p)
})",
false));
EXPECT_EQ(CacheEntryCount(), 2);
}
TEST_F(KernelCacheTest, AllKernelsAreCachedBecauseSplitModuleUsesRoundRobin) {
EXPECT_FALSE(CacheFileExists());
EXPECT_TRUE(Run(R"(
ENTRY e {
p = s8[] parameter(0)
n = s8[] negate(p)
a = s8[] add(n, n)
s = s8[] subtract(p, a)
ROOT _ = s8[] multiply(s, p)
})",
false));
EXPECT_EQ(CacheEntryCount(), 4);
}
TEST_F(KernelCacheTest, CachingWorksWithLoadedExecutables) {
const std::string kHloAdd1 = R"(
add1 {
p = s32[] parameter(0)
c = s32[] constant(1)
ROOT a = s32[] add(p, c)
}
ENTRY e {
p = s32[] parameter(0)
ROOT r = s32[] fusion(p), kind=kLoop, calls=add1
})";
const std::string kHloAdd2 = R"(
add2 {
p = s32[] parameter(0)
c = s32[] constant(2)
ROOT a = s32[] add(p, c)
}
ENTRY e {
p = s32[] parameter(0)
ROOT r = s32[] fusion(p), kind=kLoop, calls=add2
})";
TF_ASSERT_OK_AND_ASSIGN(se::Platform * platform,
se::PlatformManager::PlatformWithName("cuda"));
TF_ASSERT_OK_AND_ASSIGN(se::StreamExecutor * stream_exec,
platform->ExecutorForDevice(0));
Compiler* compiler = backend().compiler();
AotCompilationOptions aot_options(compiler->PlatformId());
aot_options.set_executor(stream_exec);
auto test = [this, &compiler, &aot_options](absl::string_view hlo, int input,
int expected_result) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo));
auto module_group = std::make_unique<HloModuleGroup>(std::move(module));
TF_ASSERT_OK_AND_ASSIGN(
std::vector<std::unique_ptr<AotCompilationResult>> aot_results,
compiler->CompileAheadOfTime(std::move(module_group), aot_options));
TF_ASSERT_OK_AND_ASSIGN(std::string serialized_aot_result,
aot_results[0]->SerializeAsString());
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<AotCompilationResult> aot_result,
compiler->LoadAotCompilationResult(serialized_aot_result));
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<Executable> executable,
aot_result->LoadExecutable(compiler, aot_options.executor()));
const xla::Literal literal_input =
xla::LiteralUtil::CreateR0<int32_t>(input);
const xla::Literal literal_expected_result =
xla::LiteralUtil::CreateR0<int32_t>(expected_result);
TF_ASSERT_OK_AND_ASSIGN(Literal result,
GetHloRunner().value()->ExecuteWithExecutable(
executable.get(), {&literal_input}));
EXPECT_TRUE(LiteralTestUtil::Equal(result, literal_expected_result));
};
test(kHloAdd1, 1, 2);
test(kHloAdd2, 1, 3);
test(kHloAdd2, 1, 3);
}
class KernelCacheTestSingleThreaded : public KernelCacheTest {
public:
DebugOptions GetDebugOptionsForTest() override {
DebugOptions debug_options = KernelCacheTest::GetDebugOptionsForTest();
debug_options.set_xla_gpu_force_compilation_parallelism(1);
return debug_options;
}
};
TEST_F(KernelCacheTestSingleThreaded, CacheIsGenerated) {
EXPECT_FALSE(CacheFileExists());
EXPECT_TRUE(Run(kHloText, false));
EXPECT_EQ(CacheEntryCount(), 1);
EXPECT_TRUE(Run(kHloText, false));
EXPECT_EQ(CacheEntryCount(), 1);
}
class NoKernelCacheTest : public KernelCacheTest {
public:
DebugOptions GetDebugOptionsForTest() override {
DebugOptions debug_options = KernelCacheTest::GetDebugOptionsForTest();
debug_options.set_xla_gpu_enable_llvm_module_compilation_parallelism(false);
return debug_options;
}
};
TEST_F(NoKernelCacheTest, NoCacheWithoutCompilationParallelism) {
EXPECT_TRUE(Run(kHloText, false));
EXPECT_FALSE(CacheFileExists());
}
TEST_F(GpuCompilerTest, TestFlag_xla_gpu_unsafe_pipelined_loop_annotator) {
const char* hlo = R"(
HloModule test, entry_computation_layout={()->(s32[], s32[])}
%Body (param: (s32[], s32[])) -> (s32[], s32[]) {
%param = (s32[], s32[]) parameter(0)
%i = s32[] get-tuple-element((s32[], s32[]) %param), index=1
%one = s32[] constant(1)
%i_plus_one = s32[] add(s32[] %i, s32[] %one)
%permute = s32[] collective-permute(%i_plus_one), channel_id=1, source_target_pairs={{0,1},{1,2},{2,3},{3,0}}
ROOT %tuple = (s32[], s32[]) tuple(s32[] %permute, s32[] %i_plus_one)
}
%Cond (param.1: (s32[], s32[])) -> pred[] {
%param.1 = (s32[], s32[]) parameter(0)
%i.1 = s32[] get-tuple-element((s32[], s32[]) %param.1), index=1
%trip_count = s32[] constant(10)
ROOT %done = pred[] compare(s32[] %i.1, s32[] %trip_count), direction=LT
}
ENTRY %test () -> (s32[], s32[]) {
%i_start = s32[] constant(0)
%p_start = s32[] constant(0)
%initial_tuple = (s32[], s32[]) tuple(s32[] %i_start, s32[] %p_start)
ROOT %while = (s32[], s32[]) while((s32[], s32[]) %initial_tuple), condition=%Cond, body=%Body, frontend_attributes={is_pipelined_while_loop="true"}
})";
const char* kExpected = R"(
)";
DebugOptions debug_options;
HloModuleConfig config;
debug_options.set_xla_gpu_unsafe_pipelined_loop_annotator(true);
config.set_debug_options(debug_options);
config.set_num_partitions(4);
config.set_use_spmd_partitioning(true);
TF_ASSERT_OK_AND_ASSIGN(auto unoptimized_module,
ParseAndReturnVerifiedModule(hlo, config));
TF_ASSERT_OK_AND_ASSIGN(auto optimized_module,
GetOptimizedModule(std::move(unoptimized_module)));
HloPrintOptions options;
options.set_print_operand_shape(false);
options.set_print_result_shape(false);
TF_ASSERT_OK_AND_ASSIGN(
bool filecheck_matched,
RunFileCheck(optimized_module->ToString(options), kExpected));
EXPECT_TRUE(filecheck_matched);
}
using GpuCompilerPassTest = GpuCompilerTest;
TEST_F(GpuCompilerPassTest,
GpuCompilerRunsTritonGemmRewriterByDefaultFromAmpere) {
if (std::holds_alternative<se::RocmComputeCapability>(GpuComputeComp())) {
GTEST_SKIP() << "TritonGemmRewriter disabled for ROCm until autotuner "
<< "is included.";
}
auto cc = backend()
.default_stream_executor()
->GetDeviceDescription()
.cuda_compute_capability();
bool is_rocm = std::holds_alternative<stream_executor::RocmComputeCapability>(
backend()
.default_stream_executor()
->GetDeviceDescription()
.gpu_compute_capability());
bool expect_triton_gemm_rewriter_has_run = cc.IsAtLeastAmpere() || is_rocm;
constexpr absl::string_view constant_module = R"(
HloModule noop
ENTRY main {
ROOT constant = f32[] constant(0)
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(constant_module));
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> optimized_module,
GetOptimizedModule(std::move(module)));
const HloModuleMetadataProto& module_metadata =
optimized_module->metadata()->proto();
bool triton_gemm_rewriter_has_run = false;
for (const HloPassMetadata& pass_metadata : module_metadata.pass_metadata()) {
triton_gemm_rewriter_has_run |=
pass_metadata.pass_name() == "triton-gemm-rewriter";
}
EXPECT_EQ(triton_gemm_rewriter_has_run, expect_triton_gemm_rewriter_has_run);
}
TEST_F(GpuCompilerPassTest,
GpuCompilerRunsCustomKernelFusionByDefaultFromVolta) {
auto cc = backend()
.default_stream_executor()
->GetDeviceDescription()
.cuda_compute_capability();
bool expect_custom_kernel_fusion_rewriter_has_run =
cc.major == se::CudaComputeCapability::VOLTA;
constexpr absl::string_view constant_module = R"(
HloModule noop
ENTRY main {
ROOT constant = f32[] constant(0)
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(constant_module));
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> optimized_module,
GetOptimizedModule(std::move(module)));
const HloModuleMetadataProto& module_metadata =
optimized_module->metadata()->proto();
bool custom_kernel_fusion_rewriter_has_run = false;
for (const HloPassMetadata& pass_metadata : module_metadata.pass_metadata()) {
custom_kernel_fusion_rewriter_has_run |=
pass_metadata.pass_name() == "custom-kernel-fusion-rewriter";
}
EXPECT_EQ(custom_kernel_fusion_rewriter_has_run,
expect_custom_kernel_fusion_rewriter_has_run);
}
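// First and last pipeline positions (run indices) at which a pass with a given
// name was observed.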
struct PassRunIndex {
int first_run = std::numeric_limits<int>::max();
int last_run = std::numeric_limits<int>::min();
};
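// Asserts that both passes are present in `passes` and that the last run of
// `before` finished strictly before the first run of `after`.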
void VerifyPassOrder(
const absl::flat_hash_map<std::string, PassRunIndex>& passes,
absl::string_view before, absl::string_view after) {
ASSERT_TRUE(passes.contains(before))
<< "Expected pass did not run: " << before;
ASSERT_TRUE(passes.contains(after)) << "Expected pass did not run: " << after;
EXPECT_LT(passes.at(before).last_run, passes.at(after).first_run)
<< "Pass " << before << " ran after " << after;
}
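// Builds a map from pass name to the first/last index at which it ran, based on
// the module's pass metadata.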
absl::flat_hash_map<std::string, PassRunIndex> GatherPassOrderInformation(
const HloModule& module) {
absl::flat_hash_map<std::string, PassRunIndex> passes;
int run_index = 0;
for (const HloPassMetadata& pass_metadata :
module.metadata().proto().pass_metadata()) {
auto& pass = passes[pass_metadata.pass_name()];
pass.first_run = std::min(pass.first_run, run_index);
pass.last_run = std::max(pass.last_run, run_index);
++run_index;
}
return passes;
}
TEST_F(GpuCompilerPassTest, PassesAreRunInCorrectOrder) {
constexpr absl::string_view constant_module = R"(
ENTRY main {
ROOT constant = f32[] constant(0)
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(constant_module));
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> optimized_module,
GetOptimizedModule(std::move(module)));
absl::flat_hash_map<std::string, PassRunIndex> passes =
GatherPassOrderInformation(*optimized_module);
VerifyPassOrder(passes, "layout-assignment",
"priority-fusion");
VerifyPassOrder(passes, "layout-assignment",
"layout_normalization");
VerifyPassOrder(passes, "host-offload-legalize",
"layout_normalization");
}
TEST_F(GpuCompilerPassTest, FusionBlockLevelRewriterRunsAfterAllFusionPasses) {
auto cc = backend()
.default_stream_executor()
->GetDeviceDescription()
.cuda_compute_capability();
if (!cc.IsAtLeastAmpere()) {
GTEST_SKIP() << "FusionBlockLevelRewriter requires Ampere+ to run.";
}
constexpr absl::string_view constant_module = R"(
ENTRY main {
ROOT constant = f32[] constant(0)
})";
HloModuleConfig config;
DebugOptions debug_options = GetDebugOptionsForTest();
debug_options.set_xla_gpu_experimental_enable_fusion_block_level_rewriter(
true);
config.set_debug_options(debug_options);
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(constant_module, config));
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> optimized_module,
GetOptimizedModule(std::move(module)));
absl::flat_hash_map<std::string, PassRunIndex> passes =
GatherPassOrderInformation(*optimized_module);
absl::string_view kFusionBlockLevelRewriterName =
"fusion-block-level-rewriter";
for (const auto& [pass_name, _] : passes) {
if (pass_name != kFusionBlockLevelRewriterName &&
absl::StrContains(pass_name, "fusion")) {
VerifyPassOrder(passes, pass_name,
kFusionBlockLevelRewriterName);
VLOG(2) << "Verified pass order: " << pass_name << " -> "
<< kFusionBlockLevelRewriterName;
}
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/gpu_compiler.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/gpu_compiler_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
7569af85-4644-4ab1-890f-3d81139b5f07 | cpp | tensorflow/tensorflow | pjrt_gpu_client_registration | tensorflow/core/tfrt/common/pjrt_gpu_client_registration.cc | tensorflow/core/tfrt/common/pjrt_gpu_client_registration_test.cc | #include <memory>
#include <utility>
#include "absl/status/statusor.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
#include "xla/pjrt/gpu/se_gpu_pjrt_client.h"
#include "xla/pjrt/pjrt_client.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/tfrt/common/pjrt_client_factory_options.h"
#include "tensorflow/core/tfrt/common/pjrt_client_factory_registry.h"
#include "tsl/platform/statusor.h"
namespace xla {
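// Creates a single-node (num_nodes = 1) StreamExecutor GPU PjRt client using the
// node id, allowed devices, and platform name from the factory options.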
absl::StatusOr<std::unique_ptr<xla::PjRtClient>> GetGpuClient(
const PjrtClientFactoryOptions& option) {
xla::GpuClientOptions gpu_client_options;
gpu_client_options.node_id = option.gpu_options.node_id;
gpu_client_options.num_nodes = 1;
gpu_client_options.allowed_devices = option.gpu_options.allowed_devices;
gpu_client_options.platform_name = option.gpu_options.platform_name;
TF_ASSIGN_OR_RETURN(std::unique_ptr<PjRtClient> client,
xla::GetStreamExecutorGpuClient(gpu_client_options));
return std::move(client);
}
REGISTER_PJRT_CLIENT_FACTORY(gpu_client, tensorflow::DEVICE_GPU, GetGpuClient);
REGISTER_PJRT_CLIENT_FACTORY(xla_gpu_client, tensorflow::DEVICE_XLA_GPU,
GetGpuClient);
} | #include <gtest/gtest.h>
#include "xla/tsl/framework/device_type.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/tfrt/common/pjrt_client_factory_options.h"
#include "tensorflow/core/tfrt/common/pjrt_client_factory_registry.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
TEST(PjrtGpuClientCreateTest, TestGpuCreateOption) {
PjrtClientFactoryOptions options = PjrtClientFactoryOptions();
TF_ASSERT_OK_AND_ASSIGN(
auto client, xla::PjrtClientFactoryRegistry::Get().GetPjrtClient(
tsl::DeviceType(tensorflow::DEVICE_GPU), options));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tfrt/common/pjrt_gpu_client_registration.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tfrt/common/pjrt_gpu_client_registration_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
d37fb101-53be-457f-b13d-5254be7b4fa6 | cpp | tensorflow/tensorflow | xla_sharding_serdes | third_party/xla/xla/python/pjrt_ifrt/xla_sharding_serdes.cc | third_party/xla/xla/python/pjrt_ifrt/xla_sharding_serdes_test.cc | #include <memory>
#include <string>
#include <utility>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ExtensibleRTTI.h"
#include "xla/hlo/ir/hlo_sharding.h"
#include "xla/python/ifrt/device_list.h"
#include "xla/python/ifrt/memory.h"
#include "xla/python/ifrt/serdes.h"
#include "xla/python/ifrt/sharding.h"
#include "xla/python/pjrt_ifrt/xla_sharding.h"
#include "xla/python/pjrt_ifrt/xla_sharding.pb.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace ifrt {
namespace {
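// SerDes for xla::ifrt::HloSharding: round-trips the device list, optional memory
// kind, and wrapped xla::HloSharding through HloShardingProto.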
class HloShardingSerDes : public llvm::RTTIExtends<HloSharding, SerDes> {
public:
absl::string_view type_name() const override {
return "xla::ifrt::HloSharding";
}
absl::StatusOr<std::string> Serialize(Serializable& serializable) override {
const HloSharding& sharding = llvm::cast<HloSharding>(serializable);
HloShardingProto proto;
*proto.mutable_devices() = sharding.devices()->ToProto();
if (sharding.memory_kind().memory_kind().has_value()) {
proto.set_memory_kind(std::string(*sharding.memory_kind().memory_kind()));
}
*proto.mutable_xla_op_sharding() = sharding.xla_hlo_sharding().ToProto();
return proto.SerializeAsString();
}
absl::StatusOr<std::unique_ptr<Serializable>> Deserialize(
const std::string& serialized,
std::unique_ptr<DeserializeOptions> options) override {
const auto* deserialize_sharding_options =
llvm::cast<DeserializeShardingOptions>(options.get());
HloShardingProto proto;
if (!proto.ParseFromString(serialized)) {
return absl::InvalidArgumentError(
"Failed to parse serialized HloSharding");
}
TF_ASSIGN_OR_RETURN(
auto devices,
DeviceList::FromProto(deserialize_sharding_options->lookup_device,
proto.devices()));
MemoryKind memory_kind;
if (proto.has_memory_kind()) {
memory_kind = MemoryKind(proto.memory_kind());
}
TF_ASSIGN_OR_RETURN(auto xla_hlo_sharding,
xla::HloSharding::FromProto(proto.xla_op_sharding()));
return HloSharding::Create(std::move(devices), memory_kind,
std::move(xla_hlo_sharding));
}
static char ID;
};
[[maybe_unused]] char HloShardingSerDes::ID = 0;
bool register_hlo_sharding_serdes = ([] {
RegisterSerDes<HloSharding>(
std::make_unique<HloShardingSerDes>());
}(), true);
}
}
} | #include <cstdint>
#include <memory>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/functional/bind_front.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_sharding.h"
#include "xla/python/ifrt/client.h"
#include "xla/python/ifrt/device_test_util.h"
#include "xla/python/ifrt/memory.h"
#include "xla/python/ifrt/serdes.h"
#include "xla/python/ifrt/sharding.h"
#include "xla/python/pjrt_ifrt/xla_sharding.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace ifrt {
namespace {
using ::testing::ElementsAreArray;
class XlaShardingSerDesTest : public test_util::DeviceTest {};
TEST_P(XlaShardingSerDesTest, HloShardingRoundTrip) {
auto device_list = GetDevices({0, 1});
auto xla_hlo_sharding = xla::HloSharding::Tile(xla::TileAssignment({2, 1}));
auto sharding = HloSharding::Create(device_list, MemoryKind("abc"),
xla_hlo_sharding);
TF_ASSERT_OK_AND_ASSIGN(auto serialized, Serialize(*sharding));
TF_ASSERT_OK_AND_ASSIGN(
auto out_sharding,
Deserialize<HloSharding>(
serialized, std::make_unique<DeserializeShardingOptions>(
absl::bind_front(&Client::LookupDevice, client()))));
EXPECT_THAT(out_sharding->devices()->devices(),
ElementsAreArray(sharding->devices()->devices()));
EXPECT_EQ(out_sharding->xla_hlo_sharding(), sharding->xla_hlo_sharding());
}
INSTANTIATE_TEST_SUITE_P(NumDevices, XlaShardingSerDesTest,
testing::Values(test_util::DeviceTestParam{
.num_devices = 2, .num_addressable_devices = 2}));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/python/pjrt_ifrt/xla_sharding_serdes.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/python/pjrt_ifrt/xla_sharding_serdes_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
ceea81cc-89f9-4b41-ad70-93eed3f74f6f | cpp | google/quiche | window_manager | quiche/http2/adapter/window_manager.cc | quiche/http2/adapter/window_manager_test.cc | #include "quiche/http2/adapter/window_manager.h"
#include <utility>
#include "quiche/common/platform/api/quiche_bug_tracker.h"
#include "quiche/common/platform/api/quiche_logging.h"
namespace http2 {
namespace adapter {
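// Default update policy: recommend a WINDOW_UPDATE once the delta reaches a third
// of the limit, or whenever the current window falls below half the limit.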
bool DefaultShouldWindowUpdateFn(int64_t limit, int64_t window, int64_t delta) {
const int64_t kDesiredMinWindow = limit / 2;
const int64_t kDesiredMinDelta = limit / 3;
if (delta >= kDesiredMinDelta) {
return true;
} else if (window < kDesiredMinWindow) {
return true;
}
return false;
}
WindowManager::WindowManager(int64_t window_size_limit,
WindowUpdateListener listener,
ShouldWindowUpdateFn should_window_update_fn,
bool update_window_on_notify)
: limit_(window_size_limit),
window_(window_size_limit),
buffered_(0),
listener_(std::move(listener)),
should_window_update_fn_(std::move(should_window_update_fn)),
update_window_on_notify_(update_window_on_notify) {
if (!should_window_update_fn_) {
should_window_update_fn_ = DefaultShouldWindowUpdateFn;
}
}
void WindowManager::OnWindowSizeLimitChange(const int64_t new_limit) {
QUICHE_VLOG(2) << "WindowManager@" << this
<< " OnWindowSizeLimitChange from old limit of " << limit_
<< " to new limit of " << new_limit;
window_ += (new_limit - limit_);
limit_ = new_limit;
}
void WindowManager::SetWindowSizeLimit(int64_t new_limit) {
QUICHE_VLOG(2) << "WindowManager@" << this
<< " SetWindowSizeLimit from old limit of " << limit_
<< " to new limit of " << new_limit;
limit_ = new_limit;
MaybeNotifyListener();
}
bool WindowManager::MarkDataBuffered(int64_t bytes) {
QUICHE_VLOG(2) << "WindowManager@" << this << " window: " << window_
<< " bytes: " << bytes;
if (window_ < bytes) {
QUICHE_VLOG(2) << "WindowManager@" << this << " window underflow "
<< "window: " << window_ << " bytes: " << bytes;
window_ = 0;
} else {
window_ -= bytes;
}
buffered_ += bytes;
if (window_ == 0) {
MaybeNotifyListener();
}
return window_ > 0;
}
void WindowManager::MarkDataFlushed(int64_t bytes) {
QUICHE_VLOG(2) << "WindowManager@" << this << " buffered: " << buffered_
<< " bytes: " << bytes;
if (buffered_ < bytes) {
QUICHE_BUG(bug_2816_1) << "WindowManager@" << this << " buffered underflow "
<< "buffered_: " << buffered_ << " bytes: " << bytes;
buffered_ = 0;
} else {
buffered_ -= bytes;
}
MaybeNotifyListener();
}
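// `delta` is the number of bytes that can be reclaimed (limit minus window and
// buffered data). If the policy approves a positive delta, it is reported to the
// listener and, when update_window_on_notify_ is set, added back to the window.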
void WindowManager::MaybeNotifyListener() {
const int64_t delta = limit_ - (buffered_ + window_);
if (should_window_update_fn_(limit_, window_, delta) && delta > 0) {
QUICHE_VLOG(2) << "WindowManager@" << this
<< " Informing listener of delta: " << delta;
listener_(delta);
if (update_window_on_notify_) {
window_ += delta;
}
}
}
}
} | #include "quiche/http2/adapter/window_manager.h"
#include <algorithm>
#include <list>
#include "absl/functional/bind_front.h"
#include "quiche/http2/test_tools/http2_random.h"
#include "quiche/common/platform/api/quiche_expect_bug.h"
#include "quiche/common/platform/api/quiche_test.h"
namespace http2 {
namespace adapter {
namespace test {
class WindowManagerPeer {
public:
explicit WindowManagerPeer(const WindowManager& wm) : wm_(wm) {}
int64_t buffered() { return wm_.buffered_; }
private:
const WindowManager& wm_;
};
namespace {
class WindowManagerTest : public quiche::test::QuicheTest {
protected:
WindowManagerTest()
: wm_(kDefaultLimit, absl::bind_front(&WindowManagerTest::OnCall, this)),
peer_(wm_) {}
void OnCall(int64_t s) { call_sequence_.push_back(s); }
const int64_t kDefaultLimit = 32 * 1024 * 3;
std::list<int64_t> call_sequence_;
WindowManager wm_;
WindowManagerPeer peer_;
::http2::test::Http2Random random_;
};
TEST_F(WindowManagerTest, NoOps) {
wm_.SetWindowSizeLimit(kDefaultLimit);
wm_.SetWindowSizeLimit(0);
wm_.SetWindowSizeLimit(kDefaultLimit);
wm_.MarkDataBuffered(0);
wm_.MarkDataFlushed(0);
EXPECT_TRUE(call_sequence_.empty());
}
TEST_F(WindowManagerTest, DataOnlyBuffered) {
int64_t total = 0;
while (total < kDefaultLimit) {
int64_t s = std::min<int64_t>(kDefaultLimit - total, random_.Uniform(1024));
total += s;
wm_.MarkDataBuffered(s);
}
EXPECT_THAT(call_sequence_, ::testing::IsEmpty());
}
TEST_F(WindowManagerTest, DataBufferedAndFlushed) {
int64_t total_buffered = 0;
int64_t total_flushed = 0;
while (call_sequence_.empty()) {
int64_t buffered = std::min<int64_t>(kDefaultLimit - total_buffered,
random_.Uniform(1024));
wm_.MarkDataBuffered(buffered);
total_buffered += buffered;
EXPECT_TRUE(call_sequence_.empty());
int64_t flushed = (total_buffered - total_flushed) > 0
? random_.Uniform(total_buffered - total_flushed)
: 0;
wm_.MarkDataFlushed(flushed);
total_flushed += flushed;
}
EXPECT_GE(total_buffered, kDefaultLimit / 3);
}
TEST_F(WindowManagerTest, AvoidWindowUnderflow) {
EXPECT_EQ(wm_.CurrentWindowSize(), wm_.WindowSizeLimit());
wm_.MarkDataBuffered(wm_.WindowSizeLimit() + 1);
EXPECT_EQ(wm_.CurrentWindowSize(), 0u);
}
TEST_F(WindowManagerTest, AvoidBufferedUnderflow) {
EXPECT_EQ(peer_.buffered(), 0u);
EXPECT_QUICHE_BUG(wm_.MarkDataFlushed(1), "buffered underflow");
EXPECT_EQ(peer_.buffered(), 0u);
wm_.MarkDataBuffered(42);
EXPECT_EQ(peer_.buffered(), 42u);
EXPECT_QUICHE_BUG(
{
wm_.MarkDataFlushed(43);
EXPECT_EQ(peer_.buffered(), 0u);
},
"buffered underflow");
}
TEST_F(WindowManagerTest, WindowConsumed) {
int64_t consumed = kDefaultLimit / 3 - 1;
wm_.MarkWindowConsumed(consumed);
EXPECT_TRUE(call_sequence_.empty());
const int64_t extra = 1;
wm_.MarkWindowConsumed(extra);
EXPECT_THAT(call_sequence_, testing::ElementsAre(consumed + extra));
}
TEST_F(WindowManagerTest, ListenerCalledOnSizeUpdate) {
wm_.SetWindowSizeLimit(kDefaultLimit - 1024);
EXPECT_TRUE(call_sequence_.empty());
wm_.SetWindowSizeLimit(kDefaultLimit * 5);
EXPECT_THAT(call_sequence_, testing::ElementsAre(kDefaultLimit * 4));
}
TEST_F(WindowManagerTest, WindowUpdateAfterLimitDecreased) {
wm_.MarkDataBuffered(kDefaultLimit - 1024);
wm_.SetWindowSizeLimit(kDefaultLimit - 2048);
wm_.MarkDataFlushed(512);
EXPECT_TRUE(call_sequence_.empty());
wm_.MarkDataFlushed(512);
EXPECT_TRUE(call_sequence_.empty());
wm_.MarkDataFlushed(512);
EXPECT_TRUE(call_sequence_.empty());
wm_.MarkDataFlushed(1024);
EXPECT_THAT(call_sequence_, testing::ElementsAre(512));
}
TEST_F(WindowManagerTest, ZeroWindowNotification) {
wm_.MarkWindowConsumed(1);
wm_.MarkDataBuffered(kDefaultLimit - 1);
EXPECT_THAT(call_sequence_, testing::ElementsAre(1));
}
TEST_F(WindowManagerTest, OnWindowSizeLimitChange) {
wm_.MarkDataBuffered(10000);
EXPECT_EQ(wm_.CurrentWindowSize(), kDefaultLimit - 10000);
EXPECT_EQ(wm_.WindowSizeLimit(), kDefaultLimit);
wm_.OnWindowSizeLimitChange(kDefaultLimit + 1000);
EXPECT_EQ(wm_.CurrentWindowSize(), kDefaultLimit - 9000);
EXPECT_EQ(wm_.WindowSizeLimit(), kDefaultLimit + 1000);
wm_.OnWindowSizeLimitChange(kDefaultLimit - 1000);
EXPECT_EQ(wm_.CurrentWindowSize(), kDefaultLimit - 11000);
EXPECT_EQ(wm_.WindowSizeLimit(), kDefaultLimit - 1000);
}
TEST_F(WindowManagerTest, NegativeWindowSize) {
wm_.MarkDataBuffered(80000);
EXPECT_EQ(wm_.CurrentWindowSize(), 18304);
wm_.OnWindowSizeLimitChange(65535);
EXPECT_EQ(wm_.CurrentWindowSize(), -14465);
wm_.MarkDataFlushed(70000);
EXPECT_EQ(wm_.CurrentWindowSize(), 55535);
EXPECT_THAT(call_sequence_, testing::ElementsAre(70000));
}
TEST_F(WindowManagerTest, IncreaseWindow) {
wm_.MarkDataBuffered(1000);
EXPECT_EQ(wm_.CurrentWindowSize(), kDefaultLimit - 1000);
EXPECT_EQ(wm_.WindowSizeLimit(), kDefaultLimit);
wm_.IncreaseWindow(5000);
EXPECT_EQ(wm_.CurrentWindowSize(), kDefaultLimit + 4000);
EXPECT_EQ(wm_.WindowSizeLimit(), kDefaultLimit);
wm_.MarkWindowConsumed(80000);
EXPECT_THAT(call_sequence_, testing::ElementsAre(75000));
EXPECT_EQ(wm_.CurrentWindowSize(), kDefaultLimit - 1000);
}
TEST(WindowManagerNoUpdateTest, NoWindowUpdateOnListener) {
const int64_t kDefaultLimit = 65535;
std::list<int64_t> call_sequence1;
WindowManager wm1(
kDefaultLimit,
[&call_sequence1](int64_t delta) { call_sequence1.push_back(delta); },
{},
true);
std::list<int64_t> call_sequence2;
WindowManager wm2(
kDefaultLimit,
[&call_sequence2](int64_t delta) { call_sequence2.push_back(delta); },
{},
false);
const int64_t consumed = kDefaultLimit / 3 - 1;
wm1.MarkWindowConsumed(consumed);
EXPECT_TRUE(call_sequence1.empty());
wm2.MarkWindowConsumed(consumed);
EXPECT_TRUE(call_sequence2.empty());
EXPECT_EQ(wm1.CurrentWindowSize(), kDefaultLimit - consumed);
EXPECT_EQ(wm2.CurrentWindowSize(), kDefaultLimit - consumed);
const int64_t extra = 1;
wm1.MarkWindowConsumed(extra);
EXPECT_THAT(call_sequence1, testing::ElementsAre(consumed + extra));
EXPECT_EQ(wm1.CurrentWindowSize(), kDefaultLimit);
call_sequence1.clear();
wm2.MarkWindowConsumed(extra);
EXPECT_THAT(call_sequence2, testing::ElementsAre(consumed + extra));
EXPECT_EQ(wm2.CurrentWindowSize(), kDefaultLimit - (consumed + extra));
call_sequence2.clear();
wm2.IncreaseWindow(consumed + extra);
EXPECT_EQ(wm2.CurrentWindowSize(), kDefaultLimit);
wm1.SetWindowSizeLimit(kDefaultLimit * 5);
EXPECT_THAT(call_sequence1, testing::ElementsAre(kDefaultLimit * 4));
EXPECT_EQ(wm1.CurrentWindowSize(), kDefaultLimit * 5);
wm2.SetWindowSizeLimit(kDefaultLimit * 5);
EXPECT_THAT(call_sequence2, testing::ElementsAre(kDefaultLimit * 4));
EXPECT_EQ(wm2.CurrentWindowSize(), kDefaultLimit);
}
TEST(WindowManagerShouldUpdateTest, CustomShouldWindowUpdateFn) {
const int64_t kDefaultLimit = 65535;
std::list<int64_t> call_sequence1;
WindowManager wm1(
kDefaultLimit,
[&call_sequence1](int64_t delta) { call_sequence1.push_back(delta); },
[](int64_t , int64_t , int64_t ) {
return true;
});
std::list<int64_t> call_sequence2;
WindowManager wm2(
kDefaultLimit,
[&call_sequence2](int64_t delta) { call_sequence2.push_back(delta); },
[](int64_t , int64_t , int64_t ) {
return false;
});
std::list<int64_t> call_sequence3;
WindowManager wm3(
kDefaultLimit,
[&call_sequence3](int64_t delta) { call_sequence3.push_back(delta); },
[](int64_t limit, int64_t window, int64_t delta) {
return delta == limit - window;
});
const int64_t consumed = kDefaultLimit / 4;
wm1.MarkWindowConsumed(consumed);
EXPECT_THAT(call_sequence1, testing::ElementsAre(consumed));
wm2.MarkWindowConsumed(consumed);
EXPECT_TRUE(call_sequence2.empty());
wm3.MarkWindowConsumed(consumed);
EXPECT_THAT(call_sequence3, testing::ElementsAre(consumed));
const int64_t buffered = 42;
wm1.MarkDataBuffered(buffered);
EXPECT_THAT(call_sequence1, testing::ElementsAre(consumed));
wm2.MarkDataBuffered(buffered);
EXPECT_TRUE(call_sequence2.empty());
wm3.MarkDataBuffered(buffered);
EXPECT_THAT(call_sequence3, testing::ElementsAre(consumed));
wm1.MarkDataFlushed(buffered / 3);
EXPECT_THAT(call_sequence1, testing::ElementsAre(consumed, buffered / 3));
wm2.MarkDataFlushed(buffered / 3);
EXPECT_TRUE(call_sequence2.empty());
wm3.MarkDataFlushed(buffered / 3);
EXPECT_THAT(call_sequence3, testing::ElementsAre(consumed));
wm1.MarkDataFlushed(2 * buffered / 3);
EXPECT_THAT(call_sequence1,
testing::ElementsAre(consumed, buffered / 3, 2 * buffered / 3));
wm2.MarkDataFlushed(2 * buffered / 3);
EXPECT_TRUE(call_sequence2.empty());
wm3.MarkDataFlushed(2 * buffered / 3);
EXPECT_THAT(call_sequence3, testing::ElementsAre(consumed, buffered));
}
}
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/http2/adapter/window_manager.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/http2/adapter/window_manager_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
15822a77-db68-4c49-8b8f-b70bcaebfe2c | cpp | google/arolla | typed_slot | arolla/qtype/typed_slot.cc | arolla/qtype/typed_slot_test.cc | #include "arolla/qtype/typed_slot.h"
#include <algorithm>
#include <optional>
#include <string>
#include <typeinfo>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_join.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "arolla/memory/frame.h"
#include "arolla/qtype/qtype.h"
#include "arolla/util/demangle.h"
#include "arolla/util/status_macros_backport.h"
namespace arolla {
namespace {
std::string TypeMismatchError(absl::string_view name, QTypePtr expected_type,
QTypePtr actual_type) {
return absl::StrFormat("%s{expected:%s, actual:%s}", name,
expected_type->name(), actual_type->name());
}
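// Builds a FailedPrecondition error listing missing slots, slot type mismatches,
// and unwanted slots; returns OkStatus when all three lists are empty.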
absl::Status SlotTypesError(std::vector<std::string> missed_slots,
std::vector<std::string> type_mismatch,
std::vector<std::string> unwanted_slots) {
if (missed_slots.empty() && type_mismatch.empty() && unwanted_slots.empty()) {
return absl::OkStatus();
}
std::string msg = "slots/types match errors:";
if (!missed_slots.empty()) {
std::sort(missed_slots.begin(), missed_slots.end());
msg +=
absl::StrFormat("missed slots: %s;", absl::StrJoin(missed_slots, ","));
}
if (!type_mismatch.empty()) {
std::sort(type_mismatch.begin(), type_mismatch.end());
msg += absl::StrFormat("slot types mismatch: %s;",
absl::StrJoin(type_mismatch, ","));
}
if (!unwanted_slots.empty()) {
std::sort(unwanted_slots.begin(), unwanted_slots.end());
msg += absl::StrFormat("unwanted slots: %s;",
absl::StrJoin(unwanted_slots, ","));
}
return absl::FailedPreconditionError(msg);
}
}
std::vector<QTypePtr> SlotsToTypes(absl::Span<const TypedSlot> slots) {
std::vector<QTypePtr> types;
types.reserve(slots.size());
for (const auto& slot : slots) {
types.push_back(slot.GetType());
}
return types;
}
absl::Status TypedSlot::VerifyType(const std::type_info& tpe) const {
if (GetType()->type_info() != tpe) {
return absl::InvalidArgumentError(absl::StrFormat(
"slot type does not match C++ type: expected %s, got %s", TypeName(tpe),
TypeName(GetType()->type_info())));
}
return absl::OkStatus();
}
absl::flat_hash_map<std::string, QTypePtr> SlotsToTypes(
const absl::flat_hash_map<std::string, TypedSlot>& slots) {
absl::flat_hash_map<std::string, QTypePtr> types;
types.reserve(slots.size());
for (const auto& kv : slots) {
types[kv.first] = kv.second.GetType();
}
return types;
}
std::vector<TypedSlot> AddSlots(absl::Span<const QTypePtr> types,
FrameLayout::Builder* layout_builder) {
std::vector<TypedSlot> slots;
slots.reserve(types.size());
for (const auto* type : types) {
slots.push_back(AddSlot(type, layout_builder));
}
return slots;
}
std::vector<std::pair<std::string, TypedSlot>> AddNamedSlots(
absl::Span<const std::pair<std::string, QTypePtr>> types,
FrameLayout::Builder* layout_builder) {
std::vector<std::pair<std::string, TypedSlot>> slots;
slots.reserve(types.size());
for (const auto& [name, type] : types) {
slots.emplace_back(name, AddSlot(type, layout_builder));
}
return slots;
}
absl::flat_hash_map<std::string, TypedSlot> AddSlotsMap(
const absl::flat_hash_map<std::string, const QType*>& types,
FrameLayout::Builder* layout_builder) {
absl::flat_hash_map<std::string, TypedSlot> slots;
slots.reserve(types.size());
for (const auto& name_type : types) {
slots.insert({name_type.first, AddSlot(name_type.second, layout_builder)});
}
return slots;
}
absl::Status RegisterUnsafeSlots(absl::Span<const TypedSlot> slots,
FrameLayout::Builder* layout_builder) {
for (const auto& slot : slots) {
RETURN_IF_ERROR(layout_builder->RegisterUnsafeSlot(
slot.byte_offset(), slot.GetType()->type_layout().AllocSize(),
slot.GetType()->type_info()));
}
return absl::OkStatus();
}
absl::Status RegisterUnsafeSlotsMap(
const absl::flat_hash_map<std::string, TypedSlot>& slots,
FrameLayout::Builder* layout_builder) {
for (const auto& name_slot : slots) {
const auto& slot = name_slot.second;
RETURN_IF_ERROR(layout_builder->RegisterUnsafeSlot(
slot.byte_offset(), slot.GetType()->type_layout().AllocSize(),
slot.GetType()->type_info()));
}
return absl::OkStatus();
}
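// For each requested (name, type): returns std::nullopt if the name is absent from
// `slots`, otherwise returns the slot and records a type mismatch if its QType
// differs from the requested one.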
absl::StatusOr<std::vector<std::optional<TypedSlot>>>
MaybeFindSlotsAndVerifyTypes(
absl::Span<const std::pair<std::string, QTypePtr>> types_in_order,
const absl::flat_hash_map<std::string, TypedSlot>& slots) {
std::vector<std::string> type_mismatch;
std::vector<std::optional<TypedSlot>> res_slots;
res_slots.reserve(types_in_order.size());
for (const auto& [name, type] : types_in_order) {
auto it = slots.find(name);
if (it == slots.end()) {
res_slots.push_back(std::nullopt);
continue;
}
res_slots.push_back({it->second});
if (it->second.GetType() != type) {
type_mismatch.push_back(
TypeMismatchError(name, type, it->second.GetType()));
}
}
RETURN_IF_ERROR(SlotTypesError({}, std::move(type_mismatch),
{}));
return {std::move(res_slots)};
}
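// Same as above, but a name missing from `slots` is reported as an error rather
// than returned as std::nullopt.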
absl::StatusOr<std::vector<TypedSlot>> FindSlotsAndVerifyTypes(
absl::Span<const std::pair<std::string, QTypePtr>> types_in_order,
const absl::flat_hash_map<std::string, TypedSlot>& slots) {
std::vector<std::string> missed_slots;
std::vector<std::string> type_mismatch;
std::vector<TypedSlot> res_slots;
res_slots.reserve(types_in_order.size());
for (const auto& [name, type] : types_in_order) {
auto it = slots.find(name);
if (it == slots.end()) {
missed_slots.push_back(name);
continue;
}
res_slots.push_back({it->second});
if (it->second.GetType() != type) {
type_mismatch.push_back(
TypeMismatchError(name, type, it->second.GetType()));
}
}
RETURN_IF_ERROR(SlotTypesError(std::move(missed_slots),
std::move(type_mismatch),
{}));
return {std::move(res_slots)};
}
absl::Status VerifySlotTypes(
const absl::flat_hash_map<std::string, QTypePtr>& types,
const absl::flat_hash_map<std::string, TypedSlot>& slots,
bool verify_unwanted_slots, bool verify_missed_slots) {
std::vector<std::string> missed_slots;
std::vector<std::string> type_mismatch;
std::vector<std::string> unwanted_slots;
for (const auto& [name, type] : types) {
auto it = slots.find(name);
if (it == slots.end()) {
if (verify_missed_slots) {
missed_slots.push_back(name);
}
continue;
}
if (it->second.GetType() != type) {
type_mismatch.push_back(
TypeMismatchError(name, type, it->second.GetType()));
}
}
if (verify_unwanted_slots) {
for (const auto& [name, _] : slots) {
if (!types.contains(name)) {
unwanted_slots.push_back(name);
}
}
}
return SlotTypesError(std::move(missed_slots), std::move(type_mismatch),
std::move(unwanted_slots));
}
} | #include "arolla/qtype/typed_slot.h"
#include <cstdint>
#include <optional>
#include <sstream>
#include <string>
#include <utility>
#include <vector>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/container/flat_hash_map.h"
#include "absl/status/status.h"
#include "absl/status/status_matchers.h"
#include "arolla/memory/frame.h"
#include "arolla/memory/memory_allocation.h"
#include "arolla/memory/optional_value.h"
#include "arolla/qtype/base_types.h"
#include "arolla/qtype/qtype.h"
#include "arolla/qtype/qtype_traits.h"
#include "arolla/util/bytes.h"
namespace arolla {
namespace {
using ::absl_testing::IsOkAndHolds;
using ::absl_testing::StatusIs;
using ::testing::ElementsAre;
using ::testing::Eq;
using ::testing::HasSubstr;
using ::testing::MatchesRegex;
using ::testing::Pair;
using ::testing::StrEq;
using ::testing::UnorderedElementsAre;
TEST(TypedSlotTest, Copy) {
FrameLayout::Builder layout_builder;
auto slot = layout_builder.AddSlot<int>();
auto slot2 = layout_builder.AddSlot<float>();
auto typed_slot = TypedSlot::FromSlot(slot);
auto typed_slot2 = TypedSlot::FromSlot(slot2);
auto typed_slot_copy = typed_slot;
EXPECT_EQ(typed_slot.GetType(), typed_slot_copy.GetType());
EXPECT_EQ(typed_slot, typed_slot_copy);
typed_slot_copy = typed_slot2;
EXPECT_EQ(typed_slot2.GetType(), typed_slot_copy.GetType());
EXPECT_EQ(typed_slot2, typed_slot_copy);
}
TEST(TypedSlotTest, PrimitiveTypes) {
FrameLayout::Builder layout_builder;
auto slot = layout_builder.AddSlot<int32_t>();
auto typed_slot = TypedSlot::FromSlot(slot);
EXPECT_EQ(typed_slot.GetType(), GetQType<int32_t>());
FrameLayout::Slot<int32_t> new_slot = typed_slot.ToSlot<int32_t>().value();
EXPECT_EQ(slot.byte_offset(), new_slot.byte_offset());
EXPECT_THAT(typed_slot.ToSlot<int64_t>().status(),
StatusIs(absl::StatusCode::kInvalidArgument));
}
TEST(TypedSlotTest, SlotsToTypes) {
FrameLayout::Builder layout_builder;
auto slot1 = layout_builder.AddSlot<int32_t>();
auto slot2 = layout_builder.AddSlot<float>();
auto typed_slot1 = TypedSlot::FromSlot(slot1);
auto typed_slot2 = TypedSlot::FromSlot(slot2);
EXPECT_THAT(SlotsToTypes(std::vector<TypedSlot>{typed_slot1, typed_slot2}),
ElementsAre(GetQType<int32_t>(), GetQType<float>()));
EXPECT_THAT(SlotsToTypes(absl::flat_hash_map<std::string, TypedSlot>{
{"X", typed_slot1}, {"Y", typed_slot2}}),
UnorderedElementsAre(Pair("X", GetQType<int32_t>()),
Pair("Y", GetQType<float>())));
}
TEST(TypedSlotTest, UnsafeFromOffset) {
const QType* i32 = GetQType<int32_t>();
auto typed_slot = TypedSlot::UnsafeFromOffset(i32, 10);
EXPECT_EQ(typed_slot.byte_offset(), 10);
EXPECT_EQ(typed_slot.GetType(), i32);
}
TEST(TypedSlotTest, AddSlots) {
FrameLayout::Builder layout_builder;
const QType* i32 = GetQType<int32_t>();
const QType* i64 = GetQType<int64_t>();
std::vector<TypedSlot> slots = AddSlots({i32, i64}, &layout_builder);
ASSERT_EQ(slots.size(), 2);
EXPECT_EQ(i32, slots[0].GetType());
EXPECT_EQ(i64, slots[1].GetType());
}
TEST(TypedSlotTest, AddNamedSlots) {
FrameLayout::Builder layout_builder;
const QType* i32 = GetQType<int32_t>();
const QType* i64 = GetQType<int64_t>();
std::vector<std::pair<std::string, TypedSlot>> slots =
AddNamedSlots({{"c", i32}, {"b", i64}}, &layout_builder);
ASSERT_EQ(slots.size(), 2);
EXPECT_EQ("c", slots[0].first);
EXPECT_EQ(i32, slots[0].second.GetType());
EXPECT_EQ("b", slots[1].first);
EXPECT_EQ(i64, slots[1].second.GetType());
}
TEST(TypedSlotTest, AddSlotsMap) {
FrameLayout::Builder layout_builder;
const QType* i32 = GetQType<int32_t>();
const QType* i64 = GetQType<int64_t>();
absl::flat_hash_map<std::string, TypedSlot> slots =
AddSlotsMap({{"a", i32}, {"b", i64}}, &layout_builder);
ASSERT_EQ(slots.size(), 2);
EXPECT_EQ(i32, slots.at("a").GetType());
EXPECT_EQ(i64, slots.at("b").GetType());
}
TEST(TypedSlotTest, RegisterUnsafeSlots) {
FrameLayout::Builder layout_builder;
layout_builder.AddSlot<int64_t>();
const QType* i32 = GetQType<int32_t>();
const QType* f32 = GetQType<float>();
auto slot_i32 = TypedSlot::UnsafeFromOffset(i32, 0);
auto slot_f32 = TypedSlot::UnsafeFromOffset(f32, 4);
ASSERT_OK(RegisterUnsafeSlots({slot_i32, slot_f32}, &layout_builder));
#ifndef NDEBUG
ASSERT_FALSE(RegisterUnsafeSlots({slot_i32}, &layout_builder).ok());
#endif
auto layout = std::move(layout_builder).Build();
layout.HasField(0, typeid(int32_t));
layout.HasField(4, typeid(float));
}
TEST(TypedSlotTest, RegisterUnsafeSlotsMap) {
FrameLayout::Builder layout_builder;
layout_builder.AddSlot<int64_t>();
const QType* i32 = GetQType<int32_t>();
const QType* f32 = GetQType<float>();
auto slot_i32 = TypedSlot::UnsafeFromOffset(i32, 0);
auto slot_f32 = TypedSlot::UnsafeFromOffset(f32, 4);
ASSERT_OK(RegisterUnsafeSlotsMap({{"a", slot_i32}, {"b", slot_f32}},
&layout_builder));
#ifndef NDEBUG
ASSERT_FALSE(RegisterUnsafeSlotsMap({{"a", slot_i32}}, &layout_builder).ok());
#endif
auto layout = std::move(layout_builder).Build();
layout.HasField(0, typeid(int32_t));
layout.HasField(4, typeid(float));
}
TEST(TypedSlotTest, GetSubslots) {
FrameLayout::Builder layout_builder;
auto opt_float_slot = layout_builder.AddSlot<OptionalValue<float>>();
auto opt_int32_slot = layout_builder.AddSlot<OptionalValue<int32_t>>();
auto float64_slot = layout_builder.AddSlot<double>();
FrameLayout layout = std::move(layout_builder).Build();
TypedSlot opt_float_tslot = TypedSlot::FromSlot(opt_float_slot);
TypedSlot opt_int32_tslot = TypedSlot::FromSlot(opt_int32_slot);
TypedSlot float64_tslot = TypedSlot::FromSlot(float64_slot);
EXPECT_EQ(opt_float_tslot.SubSlotCount(), 2);
EXPECT_EQ(opt_int32_tslot.SubSlotCount(), 2);
EXPECT_EQ(float64_tslot.SubSlotCount(), 0);
EXPECT_EQ(opt_float_tslot.SubSlot(0),
TypedSlot::FromSlot(opt_float_slot.GetSubslot<0>()));
EXPECT_EQ(opt_float_tslot.SubSlot(1),
TypedSlot::FromSlot(opt_float_slot.GetSubslot<1>()));
EXPECT_EQ(opt_int32_tslot.SubSlot(0),
TypedSlot::FromSlot(opt_int32_slot.GetSubslot<0>()));
EXPECT_EQ(opt_int32_tslot.SubSlot(1),
TypedSlot::FromSlot(opt_int32_slot.GetSubslot<1>()));
MemoryAllocation alloc_holder(&layout);
FramePtr frame = alloc_holder.frame();
frame.Set(opt_float_slot, OptionalValue<float>(1.0));
frame.Set(opt_int32_slot, OptionalValue<int32_t>());
auto float_present_slot = opt_float_tslot.SubSlot(0).ToSlot<bool>().value();
auto int32_present_slot = opt_int32_tslot.SubSlot(0).ToSlot<bool>().value();
EXPECT_EQ(frame.Get(float_present_slot), true);
EXPECT_EQ(frame.Get(int32_present_slot), false);
auto int32_value_slot = opt_int32_tslot.SubSlot(1).ToSlot<int32_t>().value();
frame.Set(int32_present_slot, true);
frame.Set(int32_value_slot, 2);
EXPECT_EQ(frame.Get(opt_int32_slot), OptionalValue<int32_t>(2));
}
TEST(TypedSlotTest, DebugPrintTypedSlot) {
FrameLayout::Builder layout_builder;
auto slot1 = layout_builder.AddSlot<int32_t>();
auto slot2 = layout_builder.AddSlot<float>();
auto slot3 = layout_builder.AddSlot<Bytes>();
auto typed_slot1 = TypedSlot::FromSlot(slot1);
auto typed_slot2 = TypedSlot::FromSlot(slot2);
auto typed_slot3 = TypedSlot::FromSlot(slot3);
std::stringstream buffer;
buffer << "typed_slot1 is: " << typed_slot1 << ", ";
buffer << "typed_slot2 is: " << typed_slot2 << ", ";
buffer << "typed_slot3 is: " << typed_slot3 << ".";
EXPECT_THAT(buffer.str(), StrEq("typed_slot1 is: TypedSlot<INT32>@0, "
"typed_slot2 is: TypedSlot<FLOAT32>@4, "
"typed_slot3 is: TypedSlot<BYTES>@8."));
}
TEST(TypedSlotTest, ToSlots) {
FrameLayout::Builder layout_builder;
auto slot1 = layout_builder.AddSlot<int32_t>();
auto slot2 = layout_builder.AddSlot<float>();
ASSERT_OK_AND_ASSIGN(
auto slots_tuple,
(TypedSlot::ToSlots<int32_t, float>(
{TypedSlot::FromSlot(slot1), TypedSlot::FromSlot(slot2)})));
EXPECT_THAT(std::get<0>(slots_tuple).byte_offset(), Eq(slot1.byte_offset()));
EXPECT_THAT(std::get<1>(slots_tuple).byte_offset(), Eq(slot2.byte_offset()));
EXPECT_THAT(TypedSlot::ToSlots<float>({TypedSlot::FromSlot(slot1)}),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("slot type does not match C++ type")));
EXPECT_THAT(
(TypedSlot::ToSlots<int32_t, float>({TypedSlot::FromSlot(slot1)})),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("wrong number of slots: expected 2, got 1")));
}
TEST(TypedSlotTest, MaybeFindSlotsAndVerifyTypesErrors) {
FrameLayout::Builder layout_builder;
auto float_slot = layout_builder.AddSlot<float>();
EXPECT_THAT(
MaybeFindSlotsAndVerifyTypes({{"a", GetQType<int>()}},
{{"a", TypedSlot::FromSlot(float_slot)}}),
StatusIs(absl::StatusCode::kFailedPrecondition,
MatchesRegex(".*slot types "
"mismatch.*a.*expected:INT32.*actual:FLOAT32.*")));
}
TEST(TypedSlotTest, MaybeFindSlotsAndVerifyTypes) {
FrameLayout::Builder layout_builder;
auto int_slot = layout_builder.AddSlot<int>();
auto float_slot = layout_builder.AddSlot<float>();
EXPECT_THAT(
MaybeFindSlotsAndVerifyTypes(
{{"a", GetQType<int>()}, {"c", GetQType<float>()}},
{{"b", TypedSlot::FromSlot(float_slot)},
{"a", TypedSlot::FromSlot(int_slot)}}),
IsOkAndHolds(ElementsAre(TypedSlot::FromSlot(int_slot), std::nullopt)));
}
TEST(TypedSlotTest, FindSlotsAndVerifyTypesErrors) {
FrameLayout::Builder layout_builder;
auto float_slot = layout_builder.AddSlot<float>();
EXPECT_THAT(
FindSlotsAndVerifyTypes({{"NAME", GetQType<int>()}},
{{"NAME", TypedSlot::FromSlot(float_slot)}}),
StatusIs(
absl::StatusCode::kFailedPrecondition,
MatchesRegex(".*slot types "
"mismatch.*NAME.*expected:INT32.*actual:FLOAT32.*")));
EXPECT_THAT(FindSlotsAndVerifyTypes({{"FAKE", GetQType<int>()}},
{{"b", TypedSlot::FromSlot(float_slot)}}),
StatusIs(absl::StatusCode::kFailedPrecondition,
MatchesRegex(".*missed slots:.*FAKE.*")));
EXPECT_THAT(
FindSlotsAndVerifyTypes(
{{"NAME", GetQType<int>()}, {"FAKE", GetQType<int>()}},
{{"NAME", TypedSlot::FromSlot(float_slot)}}),
StatusIs(
absl::StatusCode::kFailedPrecondition,
MatchesRegex(".*missed slots:.*FAKE.*slot types mismatch:.*NAME.*")));
}
TEST(TypedSlotTest, FindSlotsAndVerifyTypes) {
FrameLayout::Builder layout_builder;
auto int_slot = layout_builder.AddSlot<int>();
auto float_slot = layout_builder.AddSlot<float>();
auto int8_slot = layout_builder.AddSlot<int32_t>();
EXPECT_THAT(FindSlotsAndVerifyTypes(
{{"c", GetQType<float>()}, {"a", GetQType<int>()}},
{{"c", TypedSlot::FromSlot(float_slot)},
{"b", TypedSlot::FromSlot(int8_slot)},
{"a", TypedSlot::FromSlot(int_slot)}}),
IsOkAndHolds(ElementsAre(TypedSlot::FromSlot(float_slot),
TypedSlot::FromSlot(int_slot))));
}
TEST(TypedSlotTest, VerifySlotTypes) {
FrameLayout::Builder layout_builder;
auto int_slot = layout_builder.AddSlot<int>();
auto float_slot = layout_builder.AddSlot<float>();
EXPECT_OK(VerifySlotTypes({{"a", GetQType<int>()}, {"c", GetQType<float>()}},
{{"c", TypedSlot::FromSlot(float_slot)},
{"a", TypedSlot::FromSlot(int_slot)}}));
EXPECT_OK(VerifySlotTypes({{"a", GetQType<int>()}, {"c", GetQType<float>()}},
{{"c", TypedSlot::FromSlot(float_slot)}},
true,
false));
EXPECT_OK(VerifySlotTypes({{"a", GetQType<int>()}},
{{"c", TypedSlot::FromSlot(float_slot)},
{"a", TypedSlot::FromSlot(int_slot)}},
false));
EXPECT_THAT(
VerifySlotTypes({{"a", GetQType<int>()}},
{{"a", TypedSlot::FromSlot(float_slot)}}),
StatusIs(absl::StatusCode::kFailedPrecondition,
MatchesRegex(".*slot types "
"mismatch.*a.*expected:INT32.*actual:FLOAT32.*")));
EXPECT_THAT(
VerifySlotTypes({{"a", GetQType<int>()}, {"c", GetQType<float>()}},
{{"a", TypedSlot::FromSlot(float_slot)}}),
StatusIs(absl::StatusCode::kFailedPrecondition,
MatchesRegex(".*missed slots:.*c.*slot types mismatch:.*a.*")));
EXPECT_THAT(
VerifySlotTypes({{"d", GetQType<int>()}},
{{"a", TypedSlot::FromSlot(float_slot)}}),
StatusIs(absl::StatusCode::kFailedPrecondition,
MatchesRegex(".*missed slots:.*d.*unwanted slots:.*a.*")));
}
}
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/qtype/typed_slot.cc | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/qtype/typed_slot_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
0cae6dda-bd73-4939-aa3d-96eb9675ec45 | cpp | google/arolla | decision_forest | arolla/decision_forest/decision_forest.cc | arolla/decision_forest/decision_forest_test.cc | #include "arolla/decision_forest/decision_forest.h"
#include <algorithm>
#include <cstddef>
#include <map>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/log/check.h"
#include "absl/memory/memory.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/types/span.h"
#include "arolla/memory/frame.h"
#include "arolla/qtype/qtype.h"
#include "arolla/qtype/simple_qtype.h"
#include "arolla/util/fingerprint.h"
#include "arolla/util/status_macros_backport.h"
namespace arolla {
using NodeId = DecisionTreeNodeId;
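// Naive (reference) evaluation: for every tree accepted by `filter`, walks from the
// root to a leaf following the split conditions and accumulates the weighted leaf
// adjustment.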
float DecisionForestNaiveEvaluation(const DecisionForest& forest,
const ConstFramePtr ctx,
absl::Span<const TypedSlot> inputs,
const TreeFilter& filter) {
DCHECK_OK(forest.ValidateInputSlots(inputs));
double res = 0;
for (const auto& tree : forest.GetTrees()) {
if (!filter(tree.tag)) continue;
NodeId node_id = GetTreeRootId(tree);
while (!node_id.is_leaf()) {
DCHECK(node_id.split_node_index() >= 0 &&
node_id.split_node_index() < tree.split_nodes.size());
const auto& node = tree.split_nodes[node_id.split_node_index()];
if (node.condition->EvaluateCondition(ctx, inputs)) {
node_id = node.child_if_true;
} else {
node_id = node.child_if_false;
}
}
DCHECK(node_id.adjustment_index() >= 0 &&
node_id.adjustment_index() < tree.adjustments.size());
res += tree.adjustments[node_id.adjustment_index()] * tree.weight;
}
return res;
}
namespace {
std::string NodeIdToString(DecisionTreeNodeId id) {
if (id.is_leaf()) {
return absl::StrFormat("adjustments[%d]", id.adjustment_index());
} else {
return absl::StrFormat("goto %d", id.split_node_index());
}
}
}
std::string ToDebugString(const DecisionTree& tree) {
std::string res = " DecisionTree {\n";
absl::StrAppendFormat(&res, " tag { step: %d submodel_id: %d }\n",
tree.tag.step, tree.tag.submodel_id);
absl::StrAppendFormat(&res, " weight: %f\n", tree.weight);
absl::StrAppend(&res, " split_nodes {\n");
for (size_t i = 0; i < tree.split_nodes.size(); ++i) {
const SplitNode& node = tree.split_nodes[i];
absl::StrAppendFormat(&res, " %d: IF %s THEN %s ELSE %s\n", i,
node.condition->ToString(),
NodeIdToString(node.child_if_true),
NodeIdToString(node.child_if_false));
}
absl::StrAppend(&res, " }\n");
absl::StrAppend(&res, " adjustments:");
for (float adj : tree.adjustments) {
absl::StrAppendFormat(&res, " %f", adj);
}
absl::StrAppend(&res, "\n }");
return res;
}
std::string ToDebugString(const DecisionForest& forest) {
std::string res = "DecisionForest {\n";
auto required_qtypes = forest.GetRequiredQTypes();
for (const auto& [k, v] : std::map<int, QTypePtr>(required_qtypes.begin(),
required_qtypes.end())) {
absl::StrAppendFormat(&res, " input #%d: %s\n", k, v->name());
}
for (const auto& tree : forest.GetTrees()) {
absl::StrAppend(&res, ToDebugString(tree), "\n");
}
absl::StrAppend(&res, "}");
return res;
}
absl::StatusOr<std::unique_ptr<DecisionForest>> DecisionForest::FromTrees(
std::vector<DecisionTree>&& trees) {
auto forest = absl::WrapUnique(new DecisionForest(std::move(trees)));
RETURN_IF_ERROR(forest->Initialize());
return forest;
}
absl::Status DecisionForest::ValidateInputSlots(
absl::Span<const TypedSlot> input_slots) const {
for (const auto& kv : required_qtypes_) {
if (kv.first >= input_slots.size()) {
return absl::InvalidArgumentError("not enough arguments");
}
if (input_slots[kv.first].GetType() != kv.second) {
return absl::InvalidArgumentError("type mismatch");
}
}
return absl::OkStatus();
}
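// Validates all trees (non-negative tags, region count, child indices in range,
// consistent input types), collects the required input QTypes, and computes the
// forest fingerprint along with the step and submodel counts.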
absl::Status DecisionForest::Initialize() {
FingerprintHasher hasher("::arolla::DecisionForest");
hasher.Combine(trees_.size());
submodel_count_ = 0;
step_count_ = 0;
for (const auto& tree : trees_) {
hasher.CombineSpan(tree.split_nodes)
.CombineSpan(tree.adjustments)
.Combine(tree.weight, tree.tag.step, tree.tag.submodel_id);
if (tree.tag.submodel_id < 0) {
return absl::InvalidArgumentError("submodel_id can not be negative");
}
if (tree.tag.step < 0) {
return absl::InvalidArgumentError("step can not be negative");
}
submodel_count_ = std::max(submodel_count_, tree.tag.submodel_id + 1);
step_count_ = std::max(step_count_, tree.tag.step + 1);
if (tree.split_nodes.size() + 1 != tree.adjustments.size()) {
return absl::InvalidArgumentError("incorrect number of regions");
}
for (const auto& node : tree.split_nodes) {
bool is_correct = true;
DecisionTreeNodeId child = node.child_if_false;
if (child.is_leaf()) {
is_correct &= child.adjustment_index() < tree.adjustments.size();
} else {
is_correct &= child.split_node_index() < tree.split_nodes.size();
}
child = node.child_if_true;
if (child.is_leaf()) {
is_correct &= child.adjustment_index() < tree.adjustments.size();
} else {
is_correct &= child.split_node_index() < tree.split_nodes.size();
}
if (!is_correct)
return absl::InvalidArgumentError("incorrect split node");
for (auto id_type : node.condition->GetInputSignatures()) {
auto it = required_qtypes_.emplace(id_type.id, id_type.type);
if (it.first->second != id_type.type) {
return absl::InvalidArgumentError(
"types mismatch in decision forest");
}
}
}
}
fingerprint_ = std::move(hasher).Finish();
return absl::OkStatus();
}
void FingerprintHasherTraits<SplitNode>::operator()(
FingerprintHasher* hasher, const SplitNode& value) const {
hasher->Combine(value.child_if_false.raw_index(),
value.child_if_true.raw_index());
value.condition->CombineToFingerprintHasher(hasher);
}
void FingerprintHasherTraits<TreeFilter>::operator()(
FingerprintHasher* hasher, const TreeFilter& value) const {
std::vector<int> submodels(value.submodels.begin(), value.submodels.end());
absl::c_sort(submodels);
hasher->Combine(value.step_range_from, value.step_range_to)
.CombineSpan(submodels);
}
void FingerprintHasherTraits<DecisionForestPtr>::operator()(
FingerprintHasher* hasher, const DecisionForestPtr& value) const {
hasher->Combine(value->fingerprint());
}
AROLLA_DEFINE_SIMPLE_QTYPE(DECISION_FOREST, DecisionForestPtr);
AROLLA_DEFINE_SIMPLE_QTYPE(TREE_FILTER, TreeFilter);
} | #include "arolla/decision_forest/decision_forest.h"
#include <cmath>
#include <cstdint>
#include <limits>
#include <utility>
#include <vector>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/status/status.h"
#include "absl/status/status_matchers.h"
#include "arolla/decision_forest/split_conditions/interval_split_condition.h"
#include "arolla/decision_forest/split_conditions/set_of_values_split_condition.h"
#include "arolla/memory/frame.h"
#include "arolla/memory/memory_allocation.h"
#include "arolla/memory/optional_value.h"
#include "arolla/qtype/typed_slot.h"
namespace arolla::testing {
namespace {
using ::absl_testing::StatusIs;
using ::testing::HasSubstr;
using ::testing::Test;
constexpr float inf = std::numeric_limits<float>::infinity();
constexpr auto S = DecisionTreeNodeId::SplitNodeId;
constexpr auto A = DecisionTreeNodeId::AdjustmentId;
TEST(DecisionForestTest, ForestValidation) {
DecisionTree tree1;
tree1.adjustments = {0.5, 1.5, 2.5, 3.5};
tree1.split_nodes = {{S(1), S(2), IntervalSplit(0, 1.5, inf)},
{A(0), A(1), SetOfValuesSplit<int64_t>(1, {5}, false)},
{A(2), A(3), IntervalSplit(0, -inf, 10)}};
DecisionTree tree2;
tree2.adjustments = {1., 2.};
tree2.split_nodes = {{A(0), A(1), IntervalSplit(0, 1.5, inf)}};
DecisionTree tree3;
tree3.adjustments = {1., 2., 3.};
tree3.split_nodes = {{A(0), A(1), IntervalSplit(0, 1.5, inf)}};
DecisionTree tree4;
tree4.adjustments = {1., 2.};
tree4.split_nodes = {
{A(0), A(1), IntervalSplit(1, 1.5, inf)}};
EXPECT_OK(DecisionForest::FromTrees({tree1, tree2}));
EXPECT_THAT(DecisionForest::FromTrees({tree1, tree3}),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("incorrect number of regions")));
EXPECT_THAT(DecisionForest::FromTrees({tree1, tree4}),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("types mismatch in decision forest")));
}
TEST(DecisionForestTest, Fingerprint) {
DecisionTree tree;
tree.adjustments = {0.5, 1.5, 2.5, 3.5};
tree.split_nodes = {{S(1), S(2), IntervalSplit(0, 1.5, inf)},
{A(0), A(1), SetOfValuesSplit<int64_t>(1, {5}, false)},
{A(2), A(3), IntervalSplit(0, -inf, 10)}};
ASSERT_OK_AND_ASSIGN(auto forest1, DecisionForest::FromTrees({tree}));
ASSERT_OK_AND_ASSIGN(auto forest2, DecisionForest::FromTrees({tree}));
tree.adjustments[1] += 0.1;
ASSERT_OK_AND_ASSIGN(auto forest3, DecisionForest::FromTrees({tree}));
EXPECT_EQ(forest1->fingerprint(), forest2->fingerprint());
EXPECT_NE(forest1->fingerprint(), forest3->fingerprint());
}
TEST(DecisionForestTest, ToDebugString) {
std::vector<DecisionTree> trees(2);
trees[0].adjustments = {0.5, 1.5, 2.5, 3.5};
trees[0].split_nodes = {
{S(1), S(2), IntervalSplit(0, 1.5, inf)},
{A(0), A(1), SetOfValuesSplit<int64_t>(1, {5}, false)},
{A(2), A(3), IntervalSplit(0, -inf, 10)}};
trees[1].adjustments = {5};
trees[1].tag.step = 1;
ASSERT_OK_AND_ASSIGN(auto forest,
DecisionForest::FromTrees(std::move(trees)));
EXPECT_EQ(ToDebugString(*forest),
"DecisionForest {\n"
" input #0: OPTIONAL_FLOAT32\n"
" input #1: OPTIONAL_INT64\n"
" DecisionTree {\n"
" tag { step: 0 submodel_id: 0 }\n"
" weight: 1.000000\n"
" split_nodes {\n"
" 0: IF #0 in range [1.500000 inf] THEN goto 2 ELSE goto 1\n"
" 1: IF #1 in set [5] "
"THEN adjustments[1] ELSE adjustments[0]\n"
" 2: IF #0 in range [-inf 10.000000] "
"THEN adjustments[3] ELSE adjustments[2]\n"
" }\n"
" adjustments: 0.500000 1.500000 2.500000 3.500000\n"
" }\n"
" DecisionTree {\n"
" tag { step: 1 submodel_id: 0 }\n"
" weight: 1.000000\n"
" split_nodes {\n"
" }\n"
" adjustments: 5.000000\n"
" }\n"
"}");
}
TEST(DecisionForestTest, InputsValidation) {
std::vector<DecisionTree> trees(1);
DecisionTree& tree = trees[0];
tree.adjustments = {0.5, 1.5, 2.5, 3.5};
tree.split_nodes = {{S(1), S(2), IntervalSplit(0, 1.5, inf)},
{A(0), A(1), SetOfValuesSplit<int64_t>(1, {5}, false)},
{A(2), A(3), IntervalSplit(0, -inf, 10)}};
FrameLayout::Builder bldr;
auto slot_float = bldr.AddSlot<OptionalValue<float>>();
auto slot_int64 = bldr.AddSlot<OptionalValue<int64_t>>();
FrameLayout layout = std::move(bldr).Build();
ASSERT_OK_AND_ASSIGN(auto forest,
DecisionForest::FromTrees(std::move(trees)));
EXPECT_OK(forest->ValidateInputSlots(
{TypedSlot::FromSlot(slot_float), TypedSlot::FromSlot(slot_int64)}));
EXPECT_THAT(forest->ValidateInputSlots({}),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("not enough arguments")));
EXPECT_THAT(
forest->ValidateInputSlots(
{TypedSlot::FromSlot(slot_float), TypedSlot::FromSlot(slot_float)}),
StatusIs(absl::StatusCode::kInvalidArgument, HasSubstr("type mismatch")));
}
TEST(DecisionForestTest, TreeFilter) {
DecisionTree::Tag t0{.step = 0, .submodel_id = 0};
DecisionTree::Tag t1{.step = 1, .submodel_id = 1};
DecisionTree::Tag t2{.step = 2, .submodel_id = 0};
TreeFilter f0{};
TreeFilter f1{.submodels = {0}};
TreeFilter f2{.submodels = {1}};
TreeFilter f3{.submodels = {0, 1}};
TreeFilter f4{.step_range_from = 1};
TreeFilter f5{.step_range_to = 2};
TreeFilter f6{.step_range_from = 1, .step_range_to = 2, .submodels = {0}};
EXPECT_EQ((std::vector<bool>{f0(t0), f0(t1), f0(t2)}),
(std::vector<bool>{true, true, true}));
EXPECT_EQ((std::vector<bool>{f1(t0), f1(t1), f1(t2)}),
(std::vector<bool>{true, false, true}));
EXPECT_EQ((std::vector<bool>{f2(t0), f2(t1), f2(t2)}),
(std::vector<bool>{false, true, false}));
EXPECT_EQ((std::vector<bool>{f3(t0), f3(t1), f3(t2)}),
(std::vector<bool>{true, true, true}));
EXPECT_EQ((std::vector<bool>{f4(t0), f4(t1), f4(t2)}),
(std::vector<bool>{false, true, true}));
EXPECT_EQ((std::vector<bool>{f5(t0), f5(t1), f5(t2)}),
(std::vector<bool>{true, true, false}));
EXPECT_EQ((std::vector<bool>{f6(t0), f6(t1), f6(t2)}),
(std::vector<bool>{false, false, false}));
}
TEST(DecisionForestTest, GetTreeRootId) {
DecisionTree tree1;
tree1.adjustments = {1.0};
EXPECT_TRUE(GetTreeRootId(tree1).is_leaf());
DecisionTree tree2;
tree2.split_nodes = {{A(0), A(1), IntervalSplit(0, 1, 2)}};
tree2.adjustments = {1.0, 2.0};
EXPECT_FALSE(GetTreeRootId(tree2).is_leaf());
}
TEST(DecisionForestTest, NaiveEvaluation) {
std::vector<DecisionTree> trees(3);
trees[0].adjustments = {0.5, 1.5, 2.5, 3.5};
trees[0].split_nodes = {
{S(1), S(2), IntervalSplit(0, 1.5, inf)},
{A(0), A(1), SetOfValuesSplit<int64_t>(1, {5}, false)},
{A(2), A(3), IntervalSplit(0, -inf, 10)}};
trees[1].adjustments = {5.0};
trees[1].tag = {1, 1};
trees[2].adjustments = {2.0};
trees[2].tag = {2, 0};
ASSERT_OK_AND_ASSIGN(auto forest,
DecisionForest::FromTrees(std::move(trees)));
EXPECT_EQ(forest->step_count(), 3);
EXPECT_EQ(forest->submodel_count(), 2);
FrameLayout::Builder bldr;
auto input1_slot = bldr.AddSlot<OptionalValue<float>>();
auto input2_slot = bldr.AddSlot<OptionalValue<int64_t>>();
std::vector<TypedSlot> slots = {TypedSlot::FromSlot(input1_slot),
TypedSlot::FromSlot(input2_slot)};
FrameLayout layout = std::move(bldr).Build();
MemoryAllocation alloc(&layout);
FramePtr frame = alloc.frame();
frame.Set(input1_slot, 1.0f);
frame.Set(input2_slot, 5);
EXPECT_EQ(DecisionForestNaiveEvaluation(*forest, frame, slots), 8.5);
frame.Set(input1_slot, NAN);
frame.Set(input2_slot, {});
EXPECT_EQ(DecisionForestNaiveEvaluation(*forest, frame, slots), 7.5);
frame.Set(input1_slot, 2.0f);
frame.Set(input2_slot, 4);
EXPECT_EQ(DecisionForestNaiveEvaluation(*forest, frame, slots), 10.5);
EXPECT_EQ(DecisionForestNaiveEvaluation(*forest, frame, slots,
TreeFilter{.submodels = {0}}),
5.5);
EXPECT_EQ(DecisionForestNaiveEvaluation(*forest, frame, slots,
TreeFilter{.submodels = {1}}),
5.0);
EXPECT_EQ(DecisionForestNaiveEvaluation(*forest, frame, slots,
TreeFilter{.submodels = {0, 1}}),
10.5);
EXPECT_EQ(DecisionForestNaiveEvaluation(*forest, frame, slots,
TreeFilter{.step_range_from = 1}),
7.0);
EXPECT_EQ(DecisionForestNaiveEvaluation(*forest, frame, slots,
TreeFilter{.step_range_to = 2}),
8.5);
}
}
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/decision_forest/decision_forest.cc | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/decision_forest/decision_forest_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
95a70d08-876e-43a6-95e7-3b610dc108ae | cpp | tensorflow/tensorflow | data_flow_grad | tensorflow/cc/gradients/data_flow_grad.cc | tensorflow/cc/gradients/data_flow_grad_test.cc | #include "tensorflow/cc/ops/data_flow_ops.h"
#include "tensorflow/cc/ops/data_flow_ops_internal.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/cc/framework/grad_op_registry.h"
#include "tensorflow/cc/framework/gradients.h"
namespace tensorflow {
namespace ops {
namespace {
REGISTER_NO_GRADIENT_OP("Queue");
REGISTER_NO_GRADIENT_OP("QueueEnqueue");
REGISTER_NO_GRADIENT_OP("QueueEnqueueMany");
REGISTER_NO_GRADIENT_OP("QueueDequeue");
REGISTER_NO_GRADIENT_OP("QueueDequeueMany");
REGISTER_NO_GRADIENT_OP("QueueDequeueUpTo");
REGISTER_NO_GRADIENT_OP("QueueClose");
REGISTER_NO_GRADIENT_OP("QueueSize");
REGISTER_NO_GRADIENT_OP("Stack");
REGISTER_NO_GRADIENT_OP("StackPush");
REGISTER_NO_GRADIENT_OP("StackPop");
REGISTER_NO_GRADIENT_OP("StackClose");
REGISTER_NO_GRADIENT_OP("GetSessionHandle");
REGISTER_NO_GRADIENT_OP("GetSessionHandleV2");
REGISTER_NO_GRADIENT_OP("GetSessionTensor");
REGISTER_NO_GRADIENT_OP("DeleteSessionTensor");
Status DynamicPartitionGrad(const Scope& scope, const Operation& op,
const std::vector<Output>& grad_inputs,
std::vector<Output>* grad_outputs) {
auto data = op.input(0);
auto partitions = op.input(1);
int32_t num_partitions;
TF_RETURN_IF_ERROR(
GetNodeAttr(op.node()->attrs(), "num_partitions", &num_partitions));
auto partitions_shape = Shape(scope, partitions);
auto zero = Const(scope, 0);
auto one = Const(scope, 1);
auto original_indices = Reshape(
scope, Range(scope, zero, Prod(scope, partitions_shape, zero), one),
partitions_shape);
auto partitioned_indices =
DynamicPartition(scope, original_indices, partitions, num_partitions);
auto reconstructed =
DynamicStitch(scope, partitioned_indices.outputs, grad_inputs);
grad_outputs->push_back(Reshape(scope, reconstructed, Shape(scope, data)));
grad_outputs->push_back(NoGradient());
return scope.status();
}
REGISTER_GRADIENT_OP("DynamicPartition", DynamicPartitionGrad);
Status DynamicStitchGrad(const Scope& scope, const Operation& op,
const std::vector<Output>& grad_inputs,
std::vector<Output>* grad_outputs) {
int32_t num_values = op.num_inputs() / 2;
for (int32_t i = 0; i < num_values; i++) {
grad_outputs->push_back(NoGradient());
}
for (int32_t i = 0; i < num_values; i++) {
auto index = op.input(i);
if (index.type() != DT_INT32) {
index = Cast(scope, index, DT_INT32);
}
grad_outputs->push_back(Gather(scope, grad_inputs[0], index));
}
return scope.status();
}
REGISTER_GRADIENT_OP("DynamicStitch", DynamicStitchGrad);
REGISTER_GRADIENT_OP("ParallelDynamicStitch", DynamicStitchGrad);
}
}
} | #include "tensorflow/cc/framework/grad_op_registry.h"
#include "tensorflow/cc/framework/gradient_checker.h"
#include "tensorflow/cc/framework/testutil.h"
#include "tensorflow/cc/gradients/grad_testutil.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/random/random.h"
namespace tensorflow {
namespace {
using ops::Const;
using ops::DynamicPartition;
using ops::DynamicStitch;
using ops::Placeholder;
class DataFlowGradTest : public ::testing::Test {
protected:
DataFlowGradTest() : scope_(Scope::NewRootScope()) {}
void RunTest(const OutputList& xs, const std::vector<TensorShape>& x_shapes,
const OutputList& ys, const std::vector<TensorShape>& y_shapes) {
TF_ASSERT_OK(scope_.status());
float max_error;
TF_ASSERT_OK((ComputeGradientError<float, float, float>(
scope_, xs, x_shapes, ys, y_shapes, &max_error)));
EXPECT_LT(max_error, 1e-4);
}
Scope scope_;
};
TEST_F(DataFlowGradTest, DynamicPartitionGrad) {
TensorShape data_shape({2, 3, 2});
auto data = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(data_shape));
auto partitions = Const(scope_, {{2, 1, 0}, {1, 2, 0}});
auto y = DynamicPartition(scope_, data, partitions, 3);
TensorShape partition_shape({2, 2});
RunTest({data}, {data_shape}, y.outputs,
{partition_shape, partition_shape, partition_shape});
}
TEST_F(DataFlowGradTest, DynamicStitchGrad) {
TensorShape d1_shape({2});
TensorShape d2_shape({2, 2});
std::vector<Output> indices = {Const(scope_, 2), Const(scope_, {1, 0})};
std::vector<Output> data = {
Placeholder(scope_, DT_FLOAT, Placeholder::Shape(d1_shape)),
Placeholder(scope_, DT_FLOAT, Placeholder::Shape(d2_shape))};
auto y = DynamicStitch(scope_, indices, data);
TensorShape y_shape({3, 2});
RunTest(data, {d1_shape, d2_shape}, {y}, {y_shape});
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/cc/gradients/data_flow_grad.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/cc/gradients/data_flow_grad_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
8cea8c52-41ca-4a4d-ba13-9aee3808da0f | cpp | tensorflow/tensorflow | sparse_csr_matrix_ops | tensorflow/core/ops/sparse_csr_matrix_ops.cc | tensorflow/core/ops/sparse_csr_matrix_ops_test.cc | #include <tuple>
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "tensorflow/core/framework/common_shape_fns.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/shape_inference.h"
#include "tensorflow/core/platform/status.h"
#include "tsl/platform/errors.h"
namespace tensorflow {
using shape_inference::DimensionHandle;
using shape_inference::InferenceContext;
using shape_inference::ShapeAndType;
using shape_inference::ShapeHandle;
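// Reads the (shape, dtype) pair attached to the scalar variant input at
// `index`; fails if the handle data does not contain exactly one entry.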
Status GetVariantInput(InferenceContext* c, int index,
ShapeAndType* shape_and_type) {
ShapeHandle variant;
TF_RETURN_IF_ERROR(c->WithRank(c->input(index), 0, &variant));
auto* shapes_and_types = c->input_handle_shapes_and_types(index);
if (shapes_and_types == nullptr || shapes_and_types->size() != 1) {
return absl::InvalidArgumentError(absl::StrCat(
"Unable to access shape and type info from variant input ", index));
}
*shape_and_type = shapes_and_types->at(0);
return absl::OkStatus();
}
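// Checks that the sparse matrix shape has rank 2 or 3 and that its two
// innermost dimensions agree, merging them into `matrix_dimension`.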
Status ValidateSquareMatrixShape(InferenceContext* c,
const ShapeHandle& matrix_shape,
DimensionHandle* matrix_dimension) {
ShapeHandle out;
TF_RETURN_IF_ERROR(c->WithRankAtLeast(matrix_shape, 2, &out));
TF_RETURN_IF_ERROR(c->WithRankAtMost(matrix_shape, 3, &out));
if (!c->RankKnown(matrix_shape)) {
return absl::InvalidArgumentError("Sparse matrix has an unknown rank.");
}
TF_RETURN_IF_ERROR(c->Merge(c->Dim(matrix_shape, -2),
c->Dim(matrix_shape, -1), matrix_dimension));
return absl::OkStatus();
}
REGISTER_OP("SparseTensorToCSRSparseMatrix")
.Input("indices: int64")
.Input("values: T")
.Input("dense_shape: int64")
.Attr("T: {float, double, complex64, complex128}")
.Output("sparse_matrix: variant")
.SetShapeFn([](InferenceContext* c) {
TF_RETURN_IF_ERROR(shape_inference::ValidateSparseTensor(
c, c->input(0), c->input(1), c->input(2)));
auto rank = c->Value(c->Dim(c->input(0), 1));
ShapeHandle dense_shape;
TF_RETURN_IF_ERROR(c->MakeShapeFromShapeTensor(2, &dense_shape));
TF_RETURN_IF_ERROR(c->WithRank(dense_shape, rank, &dense_shape));
if (!c->RankKnown(dense_shape) || c->Rank(dense_shape) < 2 ||
c->Rank(dense_shape) > 3) {
return absl::InvalidArgumentError(
absl::StrCat("Invalid rank: ", c->Rank(dense_shape),
". Expected a known rank of either 2 or 3."));
}
DataType dtype;
TF_RETURN_IF_ERROR(c->GetAttr("T", &dtype));
c->set_output(0, c->Scalar());
c->set_output_handle_shapes_and_types(0,
{ShapeAndType{dense_shape, dtype}});
return absl::OkStatus();
});
REGISTER_OP("CSRSparseMatrixToSparseTensor")
.Input("sparse_matrix: variant")
.Output("indices: int64")
.Output("values: type")
.Output("dense_shape: int64")
.Attr("type: {float, double, complex64, complex128}")
.SetShapeFn([](InferenceContext* c) {
ShapeAndType sparse_matrix_shape_and_type;
TF_RETURN_IF_ERROR(GetVariantInput(c, 0, &sparse_matrix_shape_and_type));
ShapeHandle sparse_matrix = sparse_matrix_shape_and_type.shape;
TF_RETURN_IF_ERROR(c->WithRankAtMost(sparse_matrix, 3, &sparse_matrix));
if (!c->RankKnown(sparse_matrix)) {
return absl::InvalidArgumentError("sparse_matrix has an unknown rank.");
}
int rank = c->Rank(sparse_matrix);
ShapeHandle indices = c->Matrix(c->UnknownDim(), rank);
ShapeHandle values = c->Vector(c->UnknownDim());
ShapeHandle dense_shape = c->Vector(rank);
c->set_output(0, indices);
c->set_output(1, values);
c->set_output(2, dense_shape);
return absl::OkStatus();
});
REGISTER_OP("DenseToCSRSparseMatrix")
.Input("dense_input: T")
.Input("indices: int64")
.Attr("T: {float, double, complex64, complex128}")
.Output("sparse_output: variant")
.SetShapeFn([](InferenceContext* c) {
ShapeHandle dense_shape = c->input(0);
if (!c->RankKnown(dense_shape) || c->Rank(dense_shape) < 2 ||
c->Rank(dense_shape) > 3) {
return absl::InvalidArgumentError(
absl::StrCat("Invalid rank of dense: ", c->Rank(dense_shape),
". Expected a known rank of either 2 or 3."));
}
auto rank = c->Rank(dense_shape);
ShapeHandle indices = c->input(1);
if (!c->RankKnown(indices) || c->Rank(indices) != 2) {
return absl::InvalidArgumentError(
absl::StrCat("indices must be a matrix; but its rank is not 2: ",
c->Rank(indices)));
}
auto indices_col = c->Dim(indices, 1);
if (!c->ValueKnown(indices_col) || c->Value(indices_col) != rank) {
return absl::InvalidArgumentError(
absl::StrCat("indices.shape[1] must match rank of dense; saw: ",
c->Value(indices_col), " vs. ", rank));
}
ShapeHandle fake_values_vec = c->Vector(c->Dim(indices, 0));
ShapeHandle fake_shape_shape = c->Vector(rank);
TF_RETURN_IF_ERROR(shape_inference::ValidateSparseTensor(
          c, indices, fake_values_vec, fake_shape_shape));
DataType dtype;
TF_RETURN_IF_ERROR(c->GetAttr("T", &dtype));
c->set_output_handle_shapes_and_types(0,
{ShapeAndType{dense_shape, dtype}});
c->set_output(0, c->Scalar());
return absl::OkStatus();
});
REGISTER_OP("CSRSparseMatrixToDense")
.Input("sparse_input: variant")
.Output("dense_output: type")
.Attr("type: {float, double, complex64, complex128}")
.SetShapeFn([](InferenceContext* c) {
ShapeAndType sparse_matrix_shape_and_type;
TF_RETURN_IF_ERROR(GetVariantInput(c, 0, &sparse_matrix_shape_and_type));
ShapeHandle sparse_matrix = sparse_matrix_shape_and_type.shape;
TF_RETURN_IF_ERROR(c->WithRankAtMost(sparse_matrix, 3, &sparse_matrix));
if (!c->RankKnown(sparse_matrix)) {
return absl::InvalidArgumentError("sparse_matrix has an unknown rank.");
}
c->set_output(0, sparse_matrix);
return absl::OkStatus();
});
REGISTER_OP("CSRSparseMatrixComponents")
.Input("csr_sparse_matrix: variant")
.Input("index: int32")
.Output("row_ptrs: int32")
.Output("col_inds: int32")
.Output("values: type")
.Attr("type: {float, double, complex64, complex128}")
.SetShapeFn([](InferenceContext* c) {
ShapeAndType sparse_matrix_shape_and_type;
TF_RETURN_IF_ERROR(GetVariantInput(c, 0, &sparse_matrix_shape_and_type));
ShapeHandle csr_sparse_matrix = sparse_matrix_shape_and_type.shape;
TF_RETURN_IF_ERROR(
c->WithRankAtLeast(csr_sparse_matrix, 2, &csr_sparse_matrix));
TF_RETURN_IF_ERROR(
c->WithRankAtMost(csr_sparse_matrix, 3, &csr_sparse_matrix));
ShapeHandle index;
if (c->Rank(c->input(1)) != 0) {
return absl::InvalidArgumentError("index must be a scalar.");
}
if (!c->RankKnown(csr_sparse_matrix)) {
return absl::InvalidArgumentError(
"csr_sparse_matrix has an unknown rank.");
}
auto row_ptrs_dh = c->Dim(csr_sparse_matrix, -2);
TF_RETURN_IF_ERROR(c->Add(row_ptrs_dh, 1, &row_ptrs_dh));
ShapeHandle row_ptrs = c->Vector(row_ptrs_dh);
c->set_output(0, row_ptrs);
c->set_output(1, c->Vector(c->UnknownDim()));
c->set_output(2, c->Vector(c->UnknownDim()));
return absl::OkStatus();
});
REGISTER_OP("SparseMatrixNNZ")
.Input("sparse_matrix: variant")
.Output("nnz: int32")
.SetShapeFn([](InferenceContext* c) {
ShapeAndType sparse_matrix_shape_and_type;
TF_RETURN_IF_ERROR(GetVariantInput(c, 0, &sparse_matrix_shape_and_type));
ShapeHandle sparse_matrix = sparse_matrix_shape_and_type.shape;
TF_RETURN_IF_ERROR(c->WithRankAtLeast(sparse_matrix, 2, &sparse_matrix));
TF_RETURN_IF_ERROR(c->WithRankAtMost(sparse_matrix, 3, &sparse_matrix));
if (!c->RankKnown(sparse_matrix)) {
return absl::InvalidArgumentError("sparse_matrix has an unknown rank.");
}
ShapeHandle out;
if (c->Rank(sparse_matrix) == 3) {
out = c->Vector(c->Dim(sparse_matrix, 0));
} else {
out = c->Scalar();
}
c->set_output(0, out);
return absl::OkStatus();
});
REGISTER_OP("SparseMatrixMatMul")
.Input("a: variant")
.Input("b: T")
.Attr("T: type")
.Attr("transpose_a: bool = false")
.Attr("transpose_b: bool = false")
.Attr("adjoint_a: bool = false")
.Attr("adjoint_b: bool = false")
.Attr("transpose_output: bool = false")
.Attr("conjugate_output: bool = false")
.Output("output: T")
.SetShapeFn([](InferenceContext* c) {
ShapeAndType sparse_matrix_shape_and_type;
TF_RETURN_IF_ERROR(GetVariantInput(c, 0, &sparse_matrix_shape_and_type));
ShapeHandle a_shape = sparse_matrix_shape_and_type.shape;
TF_RETURN_IF_ERROR(c->WithRankAtLeast(a_shape, 2, &a_shape));
TF_RETURN_IF_ERROR(c->WithRankAtMost(a_shape, 3, &a_shape));
if (!c->RankKnown(a_shape)) {
return absl::InvalidArgumentError("a has an unknown rank.");
}
ShapeHandle b_shape;
TF_RETURN_IF_ERROR(c->WithRankAtLeast(c->input(1), 2, &b_shape));
TF_RETURN_IF_ERROR(c->WithRankAtMost(b_shape, 3, &b_shape));
bool transpose_a = false;
bool transpose_b = false;
bool transpose_output = false;
TF_RETURN_IF_ERROR(c->GetAttr("transpose_a", &transpose_a));
TF_RETURN_IF_ERROR(c->GetAttr("transpose_b", &transpose_b));
TF_RETURN_IF_ERROR(c->GetAttr("transpose_output", &transpose_output));
bool adjoint_a = false;
bool adjoint_b = false;
TF_RETURN_IF_ERROR(c->GetAttr("adjoint_a", &adjoint_a));
TF_RETURN_IF_ERROR(c->GetAttr("adjoint_b", &adjoint_b));
if (adjoint_a && transpose_a) {
return absl::InvalidArgumentError(
"Only one of adjoint_a and transpose_a may be true.");
}
if (adjoint_b && transpose_b) {
return absl::InvalidArgumentError(
"Only one of adjoint_b and transpose_b may be true.");
}
transpose_a = transpose_a || adjoint_a;
transpose_b = transpose_b || adjoint_b;
auto output_rows = c->Dim(a_shape, transpose_a ? -1 : -2);
auto output_cols = c->Dim(b_shape, transpose_b ? -2 : -1);
if (transpose_output) {
std::tie(output_rows, output_cols) =
std::make_tuple(output_cols, output_rows);
}
ShapeHandle a_batch_dims;
ShapeHandle b_batch_dims;
ShapeHandle batch_dims;
TF_RETURN_IF_ERROR(c->Subshape(a_shape, 0, -2, &a_batch_dims));
TF_RETURN_IF_ERROR(c->Subshape(b_shape, 0, -2, &b_batch_dims));
TF_RETURN_IF_ERROR(c->Merge(a_batch_dims, b_batch_dims, &batch_dims));
shape_inference::DimensionHandle unused;
TF_RETURN_IF_ERROR(c->Merge(c->Dim(a_shape, transpose_a ? -2 : -1),
c->Dim(b_shape, transpose_b ? -1 : -2),
&unused));
ShapeHandle out;
TF_RETURN_IF_ERROR(c->Concatenate(
batch_dims, c->Matrix(output_rows, output_cols), &out));
c->set_output(0, out);
return absl::OkStatus();
});
#if defined(INTEL_MKL) && defined(ENABLE_ONEDNN_V3)
REGISTER_OP("_MklNativeSparseMatrixMatMul")
.Input("a: variant")
.Input("b: T")
.Attr("T: type")
.Attr("transpose_a: bool = false")
.Attr("transpose_b: bool = false")
.Attr("adjoint_a: bool = false")
.Attr("adjoint_b: bool = false")
.Attr("transpose_output: bool = false")
.Attr("conjugate_output: bool = false")
.Output("output: T")
.SetShapeFn([](InferenceContext* c) {
VLOG(1) << "_MklNativeSparseMatrixMatMul shape function";
ShapeAndType sparse_matrix_shape_and_type;
TF_RETURN_IF_ERROR(GetVariantInput(c, 0, &sparse_matrix_shape_and_type));
ShapeHandle a_shape = sparse_matrix_shape_and_type.shape;
TF_RETURN_IF_ERROR(c->WithRank(a_shape, 2, &a_shape));
if (!c->RankKnown(a_shape)) {
return absl::InvalidArgumentError("a has an unknown rank.");
}
ShapeHandle b_shape;
TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 2, &b_shape));
VLOG(1) << "_MklNativeSparseMatrixMatMul shape function still";
bool transpose_a = false;
bool transpose_b = false;
bool transpose_output = false;
TF_RETURN_IF_ERROR(c->GetAttr("transpose_a", &transpose_a));
TF_RETURN_IF_ERROR(c->GetAttr("transpose_b", &transpose_b));
TF_RETURN_IF_ERROR(c->GetAttr("transpose_output", &transpose_output));
bool adjoint_a = false;
bool adjoint_b = false;
TF_RETURN_IF_ERROR(c->GetAttr("adjoint_a", &adjoint_a));
TF_RETURN_IF_ERROR(c->GetAttr("adjoint_b", &adjoint_b));
if (adjoint_a && transpose_a) {
return absl::InvalidArgumentError(
"Only one of adjoint_a and transpose_a may be true.");
}
if (adjoint_b && transpose_b) {
return absl::InvalidArgumentError(
"Only one of adjoint_b and transpose_b may be true.");
}
transpose_a = transpose_a || adjoint_a;
transpose_b = transpose_b || adjoint_b;
auto output_rows = c->Dim(a_shape, transpose_a ? -1 : -2);
auto output_cols = c->Dim(b_shape, transpose_b ? -2 : -1);
if (transpose_output) {
std::tie(output_rows, output_cols) =
std::make_tuple(output_cols, output_rows);
}
ShapeHandle a_batch_dims;
ShapeHandle b_batch_dims;
ShapeHandle batch_dims;
TF_RETURN_IF_ERROR(c->Subshape(a_shape, 0, -2, &a_batch_dims));
TF_RETURN_IF_ERROR(c->Subshape(b_shape, 0, -2, &b_batch_dims));
TF_RETURN_IF_ERROR(c->Merge(a_batch_dims, b_batch_dims, &batch_dims));
shape_inference::DimensionHandle unused;
TF_RETURN_IF_ERROR(c->Merge(c->Dim(a_shape, transpose_a ? -2 : -1),
c->Dim(b_shape, transpose_b ? -1 : -2),
&unused));
ShapeHandle out;
TF_RETURN_IF_ERROR(c->Concatenate(
batch_dims, c->Matrix(output_rows, output_cols), &out));
c->set_output(0, out);
return OkStatus();
});
#endif
REGISTER_OP("SparseMatrixMul")
.Input("a: variant")
.Input("b: T")
.Attr("T: type")
.Output("output: variant")
.SetShapeFn([](InferenceContext* c) {
ShapeAndType sparse_matrix_shape_and_type;
TF_RETURN_IF_ERROR(GetVariantInput(c, 0, &sparse_matrix_shape_and_type));
ShapeHandle a_shape = sparse_matrix_shape_and_type.shape;
TF_RETURN_IF_ERROR(c->WithRankAtMost(a_shape, 3, &a_shape));
if (!c->RankKnown(a_shape)) {
return absl::InvalidArgumentError("a has an unknown rank.");
}
ShapeHandle b_shape;
TF_RETURN_IF_ERROR(c->WithRankAtMost(c->input(1), 3, &b_shape));
if (!c->RankKnown(b_shape)) {
return absl::InvalidArgumentError("b has an unknown rank.");
}
ShapeHandle out;
if (c->Rank(b_shape) == 0) {
out = a_shape;
} else if (c->Rank(b_shape) == 3) {
if (c->Rank(a_shape) != 3) {
return absl::UnimplementedError(
"rank of b is 3 but rank of a is not.");
}
if (!(c->Value(c->Dim(b_shape, 1)) == 1 &&
c->Value(c->Dim(b_shape, 2)) == 1)) {
return absl::UnimplementedError(
"b must be a scalar or shaped [batch_size, 1, 1]");
}
DimensionHandle batch_size = c->Dim(a_shape, 0);
TF_RETURN_IF_ERROR(
c->Merge(batch_size, c->Dim(b_shape, 0), &batch_size));
TF_RETURN_IF_ERROR(c->ReplaceDim(b_shape, 0, batch_size, &b_shape));
TF_RETURN_IF_ERROR(c->ReplaceDim(a_shape, 0, batch_size, &a_shape));
out = a_shape;
} else {
return absl::UnimplementedError(
"b must be a scalar or shaped [batch_size, 1, 1]");
}
c->set_output_handle_shapes_and_types(
0, {ShapeAndType{out, sparse_matrix_shape_and_type.dtype}});
c->set_output(0, c->Scalar());
return absl::OkStatus();
});
REGISTER_OP("SparseMatrixAdd")
.Input("a: variant")
.Input("b: variant")
.Input("alpha: T")
.Input("beta: T")
.Attr("T: {float, double, complex64, complex128}")
.Output("c: variant")
.SetShapeFn([](InferenceContext* c) {
ShapeHandle unused_scalar_shape;
TF_RETURN_IF_ERROR(c->WithRank(c->input(2), 0, &unused_scalar_shape));
TF_RETURN_IF_ERROR(c->WithRank(c->input(3), 0, &unused_scalar_shape));
ShapeAndType sparse_matrix_shape_and_type;
TF_RETURN_IF_ERROR(GetVariantInput(c, 0, &sparse_matrix_shape_and_type));
ShapeHandle a_shape = sparse_matrix_shape_and_type.shape;
TF_RETURN_IF_ERROR(c->WithRankAtLeast(a_shape, 2, &a_shape));
TF_RETURN_IF_ERROR(c->WithRankAtMost(a_shape, 3, &a_shape));
if (!c->RankKnown(a_shape)) {
return absl::InvalidArgumentError("a has an unknown rank.");
}
TF_RETURN_IF_ERROR(GetVariantInput(c, 1, &sparse_matrix_shape_and_type));
ShapeHandle b_shape = sparse_matrix_shape_and_type.shape;
TF_RETURN_IF_ERROR(c->WithRankAtLeast(b_shape, 2, &b_shape));
TF_RETURN_IF_ERROR(c->WithRankAtMost(b_shape, 3, &b_shape));
if (!c->RankKnown(b_shape)) {
return absl::InvalidArgumentError("b has an unknown rank.");
}
ShapeHandle out;
TF_RETURN_IF_ERROR(c->Merge(a_shape, b_shape, &out));
c->set_output_handle_shapes_and_types(
0, {ShapeAndType{out, sparse_matrix_shape_and_type.dtype}});
c->set_output(0, c->Scalar());
return absl::OkStatus();
});
REGISTER_OP("SparseMatrixSparseMatMul")
.Input("a: variant")
.Input("b: variant")
.Attr("type: {float, double, complex64, complex128}")
.Attr("transpose_a: bool = false")
.Attr("transpose_b: bool = false")
.Attr("adjoint_a: bool = false")
.Attr("adjoint_b: bool = false")
.Output("c: variant")
.SetShapeFn([](InferenceContext* c) {
ShapeAndType sparse_matrix_shape_and_type;
TF_RETURN_IF_ERROR(GetVariantInput(c, 0, &sparse_matrix_shape_and_type));
ShapeHandle a_shape = sparse_matrix_shape_and_type.shape;
TF_RETURN_IF_ERROR(c->WithRankAtLeast(a_shape, 2, &a_shape));
TF_RETURN_IF_ERROR(c->WithRankAtMost(a_shape, 3, &a_shape));
if (!c->RankKnown(a_shape)) {
return absl::InvalidArgumentError("a has an unknown rank.");
}
TF_RETURN_IF_ERROR(GetVariantInput(c, 1, &sparse_matrix_shape_and_type));
ShapeHandle b_shape = sparse_matrix_shape_and_type.shape;
TF_RETURN_IF_ERROR(c->WithRankAtLeast(b_shape, 2, &b_shape));
TF_RETURN_IF_ERROR(c->WithRankAtMost(b_shape, 3, &b_shape));
if (!c->RankKnown(b_shape)) {
return absl::InvalidArgumentError("b has an unknown rank.");
}
bool transpose_a = false;
bool transpose_b = false;
TF_RETURN_IF_ERROR(c->GetAttr("transpose_a", &transpose_a));
TF_RETURN_IF_ERROR(c->GetAttr("transpose_b", &transpose_b));
bool adjoint_a = false;
bool adjoint_b = false;
TF_RETURN_IF_ERROR(c->GetAttr("adjoint_a", &adjoint_a));
TF_RETURN_IF_ERROR(c->GetAttr("adjoint_b", &adjoint_b));
if (adjoint_a && transpose_a) {
return absl::InvalidArgumentError(
"Only one of adjoint_a and transpose_a may be true.");
} else if (adjoint_b && transpose_b) {
return absl::InvalidArgumentError(
"Only one of adjoint_b and transpose_b may be true.");
}
transpose_a = transpose_a || adjoint_a;
transpose_b = transpose_b || adjoint_b;
auto output_rows = c->Dim(a_shape, transpose_a ? -1 : -2);
auto output_cols = c->Dim(b_shape, transpose_b ? -2 : -1);
ShapeHandle a_batch_dims;
ShapeHandle b_batch_dims;
ShapeHandle batch_dims;
TF_RETURN_IF_ERROR(c->Subshape(a_shape, 0, -2, &a_batch_dims));
TF_RETURN_IF_ERROR(c->Subshape(b_shape, 0, -2, &b_batch_dims));
TF_RETURN_IF_ERROR(BroadcastBinaryOpOutputShapeFnHelper(
          c, a_batch_dims, b_batch_dims, true, &batch_dims));
shape_inference::DimensionHandle unused;
TF_RETURN_IF_ERROR(c->Merge(c->Dim(a_shape, transpose_a ? -2 : -1),
c->Dim(b_shape, transpose_b ? -1 : -2),
&unused));
ShapeHandle out;
TF_RETURN_IF_ERROR(c->Concatenate(
batch_dims, c->Matrix(output_rows, output_cols), &out));
c->set_output_handle_shapes_and_types(
0, {ShapeAndType{out, sparse_matrix_shape_and_type.dtype}});
c->set_output(0, c->Scalar());
return absl::OkStatus();
});
REGISTER_OP("SparseMatrixZeros")
.Input("dense_shape: int64")
.Attr("type: {float, double, complex64, complex128}")
.Output("sparse_matrix: variant")
.SetShapeFn([](InferenceContext* c) {
auto rank = c->NumElements(c->input(0));
ShapeHandle dense_shape;
TF_RETURN_IF_ERROR(c->MakeShapeFromShapeTensor(0, &dense_shape));
TF_RETURN_IF_ERROR(
c->WithRank(dense_shape, c->Value(rank), &dense_shape));
if (!c->RankKnown(dense_shape) || c->Rank(dense_shape) < 2 ||
c->Rank(dense_shape) > 3) {
return absl::InvalidArgumentError(
absl::StrCat("Invalid rank: ", c->Rank(dense_shape),
". Expected a known rank of either 2 or 3."));
}
DataType dtype;
TF_RETURN_IF_ERROR(c->GetAttr("type", &dtype));
c->set_output_handle_shapes_and_types(0,
{ShapeAndType{dense_shape, dtype}});
c->set_output(0, c->Scalar());
return absl::OkStatus();
});
REGISTER_OP("SparseMatrixTranspose")
.Input("input: variant")
.Attr("conjugate: bool = false")
.Attr("type: {float, double, complex64, complex128}")
.Output("output: variant")
.SetShapeFn([](InferenceContext* c) {
ShapeAndType sparse_matrix_shape_and_type;
TF_RETURN_IF_ERROR(GetVariantInput(c, 0, &sparse_matrix_shape_and_type));
ShapeHandle input = sparse_matrix_shape_and_type.shape;
TF_RETURN_IF_ERROR(c->WithRankAtLeast(input, 2, &input));
TF_RETURN_IF_ERROR(c->WithRankAtMost(input, 3, &input));
if (!c->RankKnown(input)) {
return absl::InvalidArgumentError("input has an unknown rank.");
}
ShapeHandle output;
if (c->Rank(input) == 2) {
output = c->Matrix(c->Dim(input, 1), c->Dim(input, 0));
} else {
output = c->MakeShape(
{c->Dim(input, 0), c->Dim(input, 2), c->Dim(input, 1)});
}
c->set_output_handle_shapes_and_types(
0, {ShapeAndType{output, sparse_matrix_shape_and_type.dtype}});
c->set_output(0, c->Scalar());
return absl::OkStatus();
});
REGISTER_OP("SparseMatrixSoftmax")
.Input("logits: variant")
.Attr("type: {float, double}")
.Output("softmax: variant")
.SetShapeFn([](InferenceContext* c) {
ShapeAndType sparse_matrix_shape_and_type;
TF_RETURN_IF_ERROR(GetVariantInput(c, 0, &sparse_matrix_shape_and_type));
ShapeHandle logits = sparse_matrix_shape_and_type.shape;
TF_RETURN_IF_ERROR(c->WithRankAtLeast(logits, 2, &logits));
TF_RETURN_IF_ERROR(c->WithRankAtMost(logits, 3, &logits));
if (!c->RankKnown(logits)) {
return absl::InvalidArgumentError("logits has an unknown rank.");
}
c->set_output_handle_shapes_and_types(
0, {ShapeAndType{logits, sparse_matrix_shape_and_type.dtype}});
c->set_output(0, c->Scalar());
return absl::OkStatus();
});
REGISTER_OP("SparseMatrixSoftmaxGrad")
.Input("softmax: variant")
.Input("grad_softmax: variant")
.Attr("type: {float, double}")
.Output("gradient: variant")
.SetShapeFn([](InferenceContext* c) {
ShapeAndType sparse_matrix_shape_and_type;
TF_RETURN_IF_ERROR(GetVariantInput(c, 0, &sparse_matrix_shape_and_type));
ShapeHandle softmax = sparse_matrix_shape_and_type.shape;
TF_RETURN_IF_ERROR(c->WithRankAtLeast(softmax, 2, &softmax));
TF_RETURN_IF_ERROR(c->WithRankAtMost(softmax, 3, &softmax));
if (!c->RankKnown(softmax)) {
return absl::InvalidArgumentError("softmax has an unknown rank.");
}
TF_RETURN_IF_ERROR(GetVariantInput(c, 1, &sparse_matrix_shape_and_type));
ShapeHandle grad_softmax = sparse_matrix_shape_and_type.shape;
TF_RETURN_IF_ERROR(c->WithRankAtLeast(grad_softmax, 2, &grad_softmax));
TF_RETURN_IF_ERROR(c->WithRankAtMost(grad_softmax, 3, &grad_softmax));
if (!c->RankKnown(grad_softmax)) {
return absl::InvalidArgumentError("grad_softmax has an unknown rank.");
}
TF_RETURN_IF_ERROR(c->Merge(softmax, grad_softmax, &softmax));
c->set_output_handle_shapes_and_types(
0, {ShapeAndType{softmax, sparse_matrix_shape_and_type.dtype}});
c->set_output(0, c->Scalar());
return absl::OkStatus();
});
REGISTER_OP("SparseMatrixOrderingAMD")
.Input("input: variant")
.Output("output: int32")
.SetShapeFn([](InferenceContext* c) {
ShapeAndType sparse_matrix_shape_and_type;
TF_RETURN_IF_ERROR(GetVariantInput(c, 0, &sparse_matrix_shape_and_type));
ShapeHandle matrix_shape = sparse_matrix_shape_and_type.shape;
DimensionHandle n;
TF_RETURN_IF_ERROR(ValidateSquareMatrixShape(c, matrix_shape, &n));
ShapeHandle output;
if (c->Rank(matrix_shape) == 2) {
output = c->Vector(c->Dim(matrix_shape, 0));
} else {
output = c->Matrix(c->Dim(matrix_shape, 0), c->Dim(matrix_shape, 1));
}
c->set_output(0, output);
return absl::OkStatus();
});
REGISTER_OP("SparseMatrixSparseCholesky")
.Input("input: variant")
.Input("permutation: int32")
.Attr("type: {float, double, complex64, complex128}")
.Output("output: variant")
.SetShapeFn([](InferenceContext* c) {
ShapeAndType sparse_matrix_shape_and_type;
TF_RETURN_IF_ERROR(GetVariantInput(c, 0, &sparse_matrix_shape_and_type));
ShapeHandle matrix_shape = sparse_matrix_shape_and_type.shape;
DimensionHandle n;
TF_RETURN_IF_ERROR(ValidateSquareMatrixShape(c, matrix_shape, &n));
ShapeHandle perm_shape;
TF_RETURN_IF_ERROR(c->WithRankAtLeast(c->input(1), 1, &perm_shape));
TF_RETURN_IF_ERROR(c->WithRankAtMost(c->input(1), 2, &perm_shape));
if (!c->RankKnown(perm_shape)) {
return absl::InvalidArgumentError("permutation has an unknown rank.");
}
TF_RETURN_IF_ERROR(c->Merge(n, c->Dim(perm_shape, -1), &n));
ShapeHandle matrix_batch_shape;
ShapeHandle perm_batch_shape;
TF_RETURN_IF_ERROR(c->Subshape(matrix_shape, 0, -2, &matrix_batch_shape));
TF_RETURN_IF_ERROR(c->Subshape(perm_shape, 0, -1, &perm_shape));
TF_RETURN_IF_ERROR(
c->Merge(matrix_batch_shape, perm_batch_shape, &matrix_batch_shape));
ShapeHandle out = matrix_shape;
c->set_output_handle_shapes_and_types(
0, {ShapeAndType{out, sparse_matrix_shape_and_type.dtype}});
c->set_output(0, c->Scalar());
return absl::OkStatus();
});
} | #include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/shape_inference.h"
#include "tensorflow/core/framework/shape_inference_testutil.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/public/version.h"
namespace tensorflow {
TEST(SparseMatrixOpsTest, SparseTensorToCSRSparseMatrix_ShapeFn) {
ShapeInferenceTestOp op("SparseTensorToCSRSparseMatrix");
(*op.node_def.mutable_attr())["T"].set_type(DT_FLOAT);
op.input_tensors.resize(3);
INFER_ERROR("Expected a known rank", op, "?;?;?");
INFER_ERROR("either 2 or 3", op, "[?,4];?;?");
INFER_OK(op, "[?,2];?;?", "[]");
INFER_OK(op, "[?,3];?;?", "[]");
Tensor dense_shape_t = test::AsTensor<int64_t>({5, 6});
op.input_tensors[2] = &dense_shape_t;
INFER_ERROR("Shape must be rank 3 but is rank 2 for", op, "[?,3];?;?");
INFER_OK(op, "[?,2];?;?", "[]");
}
TEST(SparseMatrixOpsTest, CSRSparseMatrixToSparseTensor_ShapeFn) {
ShapeInferenceTestOp op("CSRSparseMatrixToSparseTensor");
std::vector<ShapeInferenceTestOp::ShapeAndType> shapes_and_types(1);
shapes_and_types[0].second = DT_FLOAT;
op.input_resource_handle_shapes_and_types.push_back(&shapes_and_types);
shapes_and_types[0].first = "[4,5]";
INFER_OK(op, "[]", "[?,2];[?];[2]");
shapes_and_types[0].first = "[?,?]";
INFER_OK(op, "[]", "[?,2];[?];[2]");
shapes_and_types[0].first = "[4,5,6]";
INFER_OK(op, "[]", "[?,3];[?];[3]");
shapes_and_types[0].first = "[?,?,?]";
INFER_OK(op, "[]", "[?,3];[?];[3]");
}
TEST(SparseMatrixOpsTest, DenseToCSRSparseMatrix_ShapeFn) {
ShapeInferenceTestOp op("DenseToCSRSparseMatrix");
(*op.node_def.mutable_attr())["T"].set_type(DT_FLOAT);
INFER_ERROR("Expected a known rank", op, "?;?");
INFER_ERROR("either 2 or 3", op, "[?];?");
INFER_OK(op, "[?,?];[?,2]", "[]");
INFER_OK(op, "[?,?,?];[?,3]", "[]");
INFER_ERROR("indices.shape[1] must match rank of dense; saw: 2 vs. 3", op,
"[?,?,?];[?,2]");
}
TEST(SparseMatrixOpsTest, CSRSparseMatrixToDense_ShapeFn) {
ShapeInferenceTestOp op("CSRSparseMatrixToDense");
std::vector<ShapeInferenceTestOp::ShapeAndType> shapes_and_types(1);
shapes_and_types[0].second = DT_FLOAT;
op.input_resource_handle_shapes_and_types.push_back(&shapes_and_types);
shapes_and_types[0].first = "[?,?]";
INFER_OK(op, "[]", "[?,?]");
shapes_and_types[0].first = "[?,?,?]";
INFER_OK(op, "[]", "[?,?,?]");
}
TEST(SparseMatrixOpsTest, CSRSparseMatrixComponents_ShapeFn) {
ShapeInferenceTestOp op("CSRSparseMatrixComponents");
std::vector<ShapeInferenceTestOp::ShapeAndType> shapes_and_types(1);
shapes_and_types[0].second = DT_FLOAT;
op.input_resource_handle_shapes_and_types.push_back(&shapes_and_types);
op.input_resource_handle_shapes_and_types.push_back(nullptr);
shapes_and_types[0].first = "[4,5]";
INFER_OK(op, "[];[]", "[5];[?];[?]");
shapes_and_types[0].first = "[?,?]";
INFER_OK(op, "[];[]", "[?];[?];[?]");
shapes_and_types[0].first = "[19,34,55]";
INFER_OK(op, "[];[]", "[35];[?];[?]");
shapes_and_types[0].first = "[?,?,?]";
INFER_OK(op, "[];[]", "[?];[?];[?]");
shapes_and_types[0].first = "[?,?,?]";
INFER_ERROR("index must be a scalar", op, "[];?");
}
TEST(SparseMatrixOpsTest, SparseMatrixMatMul_ShapeFn) {
ShapeInferenceTestOp op("SparseMatrixMatMul");
std::vector<ShapeInferenceTestOp::ShapeAndType> a_shapes_and_types(1);
a_shapes_and_types[0].second = DT_FLOAT;
op.input_resource_handle_shapes_and_types.push_back(&a_shapes_and_types);
op.input_resource_handle_shapes_and_types.push_back(nullptr);
auto set_options = [&op](bool transpose_a, bool transpose_b, bool adjoint_a,
bool adjoint_b, bool transpose_output) {
TF_ASSERT_OK(NodeDefBuilder("test", "SparseMatrixMatMul")
.Input("a", 0, DT_VARIANT)
.Input("b", 1, DT_FLOAT)
.Attr("transpose_a", transpose_a)
.Attr("transpose_b", transpose_b)
.Attr("adjoint_a", adjoint_a)
.Attr("adjoint_b", adjoint_b)
.Attr("transpose_output", transpose_output)
.Finalize(&op.node_def));
};
  set_options(/*transpose_a=*/false, /*transpose_b=*/false,
              /*adjoint_a=*/false, /*adjoint_b=*/false,
              /*transpose_output=*/false);
a_shapes_and_types[0].first = "?";
INFER_ERROR("a has an unknown rank", op, "[];?");
a_shapes_and_types[0].first = "[?]";
INFER_ERROR("must be at least rank 2 but is rank 1", op, "[];?");
a_shapes_and_types[0].first = "[?,?]";
INFER_OK(op, "[];?", "[?,?]");
a_shapes_and_types[0].first = "[?,?,?]";
INFER_OK(op, "[];?", "[?,?,?]");
a_shapes_and_types[0].first = "[?,3,?]";
INFER_OK(op, "[];[?,?,?]", "[?,3,d1_2]");
a_shapes_and_types[0].first = "[?,3,?]";
INFER_OK(op, "[];[?,?,4]", "[?,3,d1_2]");
a_shapes_and_types[0].first = "[?,?,6]";
INFER_OK(op, "[];[?,6,?]", "[?,?,d1_2]");
a_shapes_and_types[0].first = "[?,?,5]";
INFER_ERROR("must be equal, but are 5 and 6 for", op, "[];[?,6,?]");
  set_options(/*transpose_a=*/false, /*transpose_b=*/false,
              /*adjoint_a=*/false, /*adjoint_b=*/false,
              /*transpose_output=*/true);
a_shapes_and_types[0].first = "[?,3,?]";
INFER_OK(op, "[];[?,?,4]", "[?,d1_2,3]");
a_shapes_and_types[0].first = "[3,?]";
INFER_OK(op, "[];[?,4]", "[d1_1,3]");
  set_options(/*transpose_a=*/true, /*transpose_b=*/true,
              /*adjoint_a=*/false, /*adjoint_b=*/false,
              /*transpose_output=*/false);
a_shapes_and_types[0].first = "[?,?,?]";
INFER_OK(op, "[];[?,?,?]", "[?,?,d1_1]");
  set_options(/*transpose_a=*/false, /*transpose_b=*/false,
              /*adjoint_a=*/true, /*adjoint_b=*/true,
              /*transpose_output=*/false);
a_shapes_and_types[0].first = "[?,?,?]";
INFER_OK(op, "[];[?,?,?]", "[?,?,d1_1]");
  set_options(/*transpose_a=*/true, /*transpose_b=*/true,
              /*adjoint_a=*/false, /*adjoint_b=*/false,
              /*transpose_output=*/true);
a_shapes_and_types[0].first = "[?,?,?]";
INFER_OK(op, "[];[?,?,?]", "[?,d1_1,?]");
  set_options(/*transpose_a=*/true, /*transpose_b=*/false,
              /*adjoint_a=*/true, /*adjoint_b=*/true,
              /*transpose_output=*/false);
a_shapes_and_types[0].first = "[?,?,?]";
INFER_ERROR("Only one of adjoint_a and transpose_a", op, "[];[?,?,?]");
  set_options(/*transpose_a=*/false, /*transpose_b=*/true,
              /*adjoint_a=*/true, /*adjoint_b=*/true,
              /*transpose_output=*/false);
a_shapes_and_types[0].first = "[?,?,?]";
INFER_ERROR("Only one of adjoint_b and transpose_b", op, "[];[?,?,?]");
}
TEST(SparseMatrixOpsTest, SparseMatrixAdd_ShapeFn) {
ShapeInferenceTestOp op("SparseMatrixAdd");
std::vector<ShapeInferenceTestOp::ShapeAndType> a_shapes_and_types(1);
std::vector<ShapeInferenceTestOp::ShapeAndType> b_shapes_and_types(1);
a_shapes_and_types[0].second = DT_FLOAT;
b_shapes_and_types[0].second = DT_FLOAT;
op.input_resource_handle_shapes_and_types.push_back(&a_shapes_and_types);
op.input_resource_handle_shapes_and_types.push_back(&b_shapes_and_types);
op.input_resource_handle_shapes_and_types.push_back(nullptr);
op.input_resource_handle_shapes_and_types.push_back(nullptr);
auto set_shapes = [&a_shapes_and_types, &b_shapes_and_types](
const string& a_shape, const string& b_shape) {
a_shapes_and_types[0].first = a_shape;
b_shapes_and_types[0].first = b_shape;
};
set_shapes("[?,?]", "[?,?]");
INFER_OK(op, "[];[];?;?", "[]");
set_shapes("[?,?,?]", "[?,?,?]");
INFER_OK(op, "[];[];?;?", "[]");
set_shapes("[3,4]", "[3,4]");
INFER_OK(op, "[];[];?;?", "[]");
set_shapes("[3,4,5]", "[3,4,5]");
INFER_OK(op, "[];[];?;?", "[]");
set_shapes("[?,?,?]", "[?,?,?]");
INFER_OK(op, "[];[];[];[]", "[]");
set_shapes("[?,?]", "[?,?]");
INFER_ERROR("must be rank 0 but is rank 1", op, "[];[];?;[?]");
set_shapes("[?,?,?]", "?");
INFER_ERROR("b has an unknown rank", op, "[];[];?;?");
set_shapes("[?,?,?]", "[?,?]");
INFER_ERROR("must be equal", op, "[];[];?;?");
}
TEST(SparseMatrixOpsTest, SparseMatrixSparseMatMul_ShapeFn) {
ShapeInferenceTestOp op("SparseMatrixSparseMatMul");
std::vector<ShapeInferenceTestOp::ShapeAndType> a_shapes_and_types(1);
std::vector<ShapeInferenceTestOp::ShapeAndType> b_shapes_and_types(1);
a_shapes_and_types[0].second = DT_FLOAT;
b_shapes_and_types[0].second = DT_FLOAT;
op.input_resource_handle_shapes_and_types.push_back(&a_shapes_and_types);
op.input_resource_handle_shapes_and_types.push_back(&b_shapes_and_types);
auto set_shapes = [&a_shapes_and_types, &b_shapes_and_types](
const string& a_shape, const string& b_shape) {
a_shapes_and_types[0].first = a_shape;
b_shapes_and_types[0].first = b_shape;
};
auto set_options = [&op](bool transpose_a, bool transpose_b, bool adjoint_a,
bool adjoint_b) {
TF_ASSERT_OK(NodeDefBuilder("test", "SparseMatrixMatMul")
.Input("a", 0, DT_VARIANT)
.Input("b", 1, DT_FLOAT)
.Attr("transpose_a", transpose_a)
.Attr("transpose_b", transpose_b)
.Attr("adjoint_a", adjoint_a)
.Attr("adjoint_b", adjoint_b)
.Finalize(&op.node_def));
};
set_options(false, false, false, false);
set_shapes("?", "?");
INFER_ERROR("has an unknown rank", op, "[];[]");
set_shapes("[?]", "[?,?]");
INFER_ERROR("must be at least rank 2 but is rank 1", op, "[];[]");
set_shapes("[?,?]", "[?,?]");
INFER_OK(op, "[];[]", "[]");
set_shapes("[?,?,?]", "[?,?,?]");
INFER_OK(op, "[];[]", "[]");
set_shapes("[?,3,?]", "[?,?,?]");
INFER_OK(op, "[];[]", "[]");
set_shapes("[?,3,?]", "[?,?,4]");
INFER_OK(op, "[];[]", "[]");
set_shapes("[?,?,6]", "[?,6,?]");
INFER_OK(op, "[];[]", "[]");
set_shapes("[?,?,5]", "[?,6,?]");
INFER_ERROR("must be equal, but are 5 and 6 for", op, "[];[]");
  set_options(/*transpose_a=*/true, /*transpose_b=*/true,
              /*adjoint_a=*/false, /*adjoint_b=*/false);
set_shapes("[?,?,?]", "[?,?,?]");
INFER_OK(op, "[];[]", "[]");
  set_options(/*transpose_a=*/false, /*transpose_b=*/false,
              /*adjoint_a=*/true, /*adjoint_b=*/true);
set_shapes("[?,?,?]", "[?,?,?]");
INFER_OK(op, "[];[]", "[]");
  set_options(/*transpose_a=*/true, /*transpose_b=*/false,
              /*adjoint_a=*/true, /*adjoint_b=*/true);
set_shapes("[?,?,?]", "[?,?,?]");
INFER_ERROR("Only one of adjoint_a and transpose_a", op, "[];[]");
  set_options(/*transpose_a=*/false, /*transpose_b=*/true,
              /*adjoint_a=*/true, /*adjoint_b=*/true);
set_shapes("[?,?,?]", "[?,?,?]");
INFER_ERROR("Only one of adjoint_b and transpose_b", op, "[];[]");
}
TEST(SparseMatrixOpsTest, SparseMatrixTranspose_ShapeFn) {
ShapeInferenceTestOp op("SparseMatrixTranspose");
std::vector<ShapeInferenceTestOp::ShapeAndType> shapes_and_types(1);
shapes_and_types[0].second = DT_FLOAT;
op.input_resource_handle_shapes_and_types.push_back(&shapes_and_types);
shapes_and_types[0].first = "[3,4,5]";
INFER_OK(op, "[]", "[]");
shapes_and_types[0].first = "[3,4]";
INFER_OK(op, "[]", "[]");
shapes_and_types[0].first = "?";
INFER_ERROR("input has an unknown rank", op, "[]");
}
TEST(SparseMatrixOpsTest, SparseMatrixSoftmax_ShapeFn) {
ShapeInferenceTestOp op("SparseMatrixSoftmax");
std::vector<ShapeInferenceTestOp::ShapeAndType> shapes_and_types(1);
shapes_and_types[0].second = DT_FLOAT;
op.input_resource_handle_shapes_and_types.push_back(&shapes_and_types);
shapes_and_types[0].first = "[?,?,?]";
INFER_OK(op, "[]", "[]");
shapes_and_types[0].first = "[?,?]";
INFER_OK(op, "[]", "[]");
shapes_and_types[0].first = "?";
INFER_ERROR("logits has an unknown rank", op, "[]");
}
TEST(SparseMatrixOpsTest, SparseMatrixSoftmaxGrad_ShapeFn) {
ShapeInferenceTestOp op("SparseMatrixSoftmaxGrad");
std::vector<ShapeInferenceTestOp::ShapeAndType> a_shapes_and_types(1);
std::vector<ShapeInferenceTestOp::ShapeAndType> b_shapes_and_types(1);
a_shapes_and_types[0].second = DT_FLOAT;
b_shapes_and_types[0].second = DT_FLOAT;
op.input_resource_handle_shapes_and_types.push_back(&a_shapes_and_types);
op.input_resource_handle_shapes_and_types.push_back(&b_shapes_and_types);
auto set_shapes = [&a_shapes_and_types, &b_shapes_and_types](
const string& a_shape, const string& b_shape) {
a_shapes_and_types[0].first = a_shape;
b_shapes_and_types[0].first = b_shape;
};
set_shapes("[?,?,?]", "[?,?,?]");
INFER_OK(op, "[];[]", "[]");
set_shapes("[?,?]", "[?,?]");
INFER_OK(op, "[];[]", "[]");
set_shapes("[3,4]", "[5,6]");
INFER_ERROR("Dimension 0 in both shapes must be equal, but are 3 and 5", op,
"[];[]");
set_shapes("?", "[?,?]");
INFER_ERROR("softmax has an unknown rank", op, "[];[]");
set_shapes("[?,?,?]", "?");
INFER_ERROR("grad_softmax has an unknown rank", op, "[];[]");
}
TEST(SparseMatrixOpsTest, SparseMatrixMul_ShapeFn) {
ShapeInferenceTestOp op("SparseMatrixMul");
std::vector<ShapeInferenceTestOp::ShapeAndType> shapes_and_types(1);
shapes_and_types[0].second = DT_FLOAT;
op.input_resource_handle_shapes_and_types.push_back(&shapes_and_types);
op.input_resource_handle_shapes_and_types.push_back(nullptr);
shapes_and_types[0].first = "[3,4]";
INFER_OK(op, "[];[]", "[]");
shapes_and_types[0].first = "[5,3,4]";
INFER_OK(op, "[];[?,1,1]", "[]");
shapes_and_types[0].first = "[?,?,?]";
INFER_ERROR("b must be a scalar or shaped [batch_size, 1, 1]", op,
"[];[3,4]");
shapes_and_types[0].first = "[3,4]";
INFER_ERROR("b must be a scalar or shaped", op, "[];[3,4]");
shapes_and_types[0].first = "[3,4,5]";
INFER_ERROR("b must be a scalar or shaped", op, "[];[3,4,5]");
shapes_and_types[0].first = "[3,4,5]";
INFER_ERROR("must be equal, but are 3 and 4", op, "[];[4,1,1]");
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/ops/sparse_csr_matrix_ops.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/ops/sparse_csr_matrix_ops_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
d87c4941-ceea-46ea-a6dd-ad59cb70dad0 | cpp | tensorflow/tensorflow | loop_schedule_linearizer | third_party/xla/xla/service/loop_schedule_linearizer.cc | third_party/xla/xla/service/loop_schedule_linearizer_test.cc | #include "xla/service/loop_schedule_linearizer.h"
#include <memory>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_query.h"
#include "xla/service/graphcycles/graphcycles.h"
#include "xla/service/hlo_alias_analysis.h"
#include "xla/service/hlo_dataflow_analysis.h"
#include "xla/service/hlo_value.h"
#include "xla/shape_tree.h"
#include "xla/shape_util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
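// Records the data and control dependencies of a computation in a cycle
// detector so that candidate control edges can be rejected if they would
// introduce a cycle.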
class ComputationInstructionOrdering {
public:
explicit ComputationInstructionOrdering(const HloComputation& computation) {
for (const HloInstruction* instr : computation.instructions()) {
for (const HloInstruction* control_pred : instr->control_predecessors()) {
CHECK(this->InsertEdge(*control_pred, *instr))
<< "Graph already contained a cycle";
}
for (int op_id = 0; op_id < instr->operand_count(); op_id++) {
const HloInstruction* op = instr->operand(op_id);
CHECK(this->InsertEdge(*op, *instr))
<< "Graph already contained a cycle";
}
}
}
int32_t NodeIdForInstruction(const HloInstruction& instr) {
int32_t instruction_id = instr.unique_id();
auto it = node_id_to_graph_id_.find(instruction_id);
if (it != node_id_to_graph_id_.end()) {
return it->second;
}
int32_t node_id = graph_cycles_.NewNode();
node_id_to_graph_id_[instruction_id] = node_id;
return node_id;
}
bool InsertEdge(const HloInstruction& source, const HloInstruction& dest) {
int32_t source_id = NodeIdForInstruction(source);
int32_t dest_id = NodeIdForInstruction(dest);
return graph_cycles_.InsertEdge(source_id, dest_id);
}
private:
absl::flat_hash_map<int32_t, int32_t> node_id_to_graph_id_;
GraphCycles graph_cycles_;
};
}
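// For each while-loop buffer with a unique value at both the body parameter
// and the body root, adds control edges forcing reads of the parameter value
// to be scheduled before the instruction that computes the new value,
// skipping any edge that would create a cycle.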
static absl::StatusOr<bool> AddControlEdgesForLoopWrites(
HloInstruction* xla_while, HloAliasAnalysis& alias_analysis) {
HloDataflowAnalysis& dataflow = alias_analysis.dataflow_analysis();
HloComputation* body = xla_while->while_body();
HloInstruction* root = body->root_instruction();
HloInstruction* input = body->parameter_instruction(0);
bool changed = false;
ComputationInstructionOrdering ordering(*body);
ShapeTree<bool> indices_to_copy(&xla_while->shape());
for (auto& p : indices_to_copy) {
const ShapeIndex& index = p.first;
if (index.empty()) {
continue;
}
if (dataflow.GetValueSet(root, index).values().size() > 1 ||
dataflow.GetValueSet(input, index).values().size() > 1) {
VLOG(2) << "Index " << index.ToString() << " is associated with multiple "
<< "values, not attempting to introduce stricter dependencies";
} else {
HloValue& value_at_root = dataflow.GetUniqueValueAt(root, index);
HloValue& value_at_input = dataflow.GetUniqueValueAt(input, index);
if (value_at_root.shape().IsTuple()) {
continue;
}
HloInstruction* write = value_at_root.defining_instruction();
for (const HloUse& use : value_at_input.GetUses()) {
HloInstruction* read = use.instruction;
        if (read != write && value_at_root != value_at_input &&
            read->parent() == write->parent()) {
VLOG(2) << "Inside " << body->name() << ", index "
<< index.ToString();
if (!ordering.InsertEdge(*read, *write)) {
VLOG(2) << "Not adding a control dependency from "
<< read->ToShortString() << " to " << write->ToShortString()
<< " as it would introduce a cycle";
continue;
}
if (!absl::c_linear_search(read->control_successors(), write)) {
TF_RETURN_IF_ERROR(read->AddControlDependencyTo(write));
VLOG(2) << "Adding dependency: " << read->ToShortString()
<< " before " << write->ToShortString();
changed = true;
}
}
}
}
}
return changed;
}
absl::StatusOr<bool> LoopScheduleLinearizer::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
std::unique_ptr<HloAliasAnalysis> alias_analysis;
bool changed = false;
for (HloComputation* computation :
module->MakeNonfusionComputations(execution_threads)) {
for (HloInstruction* instruction : computation->instructions()) {
if (instruction->opcode() != HloOpcode::kWhile) {
continue;
}
const HloComputation* body = instruction->while_body();
bool has_async_collectives =
absl::c_any_of(body->instructions(), [](const HloInstruction* instr) {
            return hlo_query::IsAsyncCollectiveStartOp(instr, true) ||
                   hlo_query::IsAsyncCollectiveDoneOp(instr, true);
});
if (has_async_collectives) {
VLOG(2) << "Skipping " << instruction->name()
<< " since body has async collectives";
continue;
}
if (alias_analysis == nullptr) {
TF_ASSIGN_OR_RETURN(alias_analysis,
HloAliasAnalysis::Run(module, can_share_buffer_));
}
TF_ASSIGN_OR_RETURN(bool updated_loop, AddControlEdgesForLoopWrites(
instruction, *alias_analysis));
changed |= updated_loop;
}
}
return changed;
}
} | #include "xla/service/loop_schedule_linearizer.h"
#include <gtest/gtest.h>
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/copy_insertion.h"
#include "xla/test_helpers.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
int64_t CountCopies(const HloComputation& computation) {
int64_t count = 0;
for (const auto& instruction : computation.instructions()) {
if (instruction->opcode() == HloOpcode::kCopy) {
count++;
}
}
return count;
}
int64_t CountCopies(const HloModule& module) {
int64_t count = 0;
for (const auto& computation : module.computations()) {
count += CountCopies(*computation);
}
return count;
}
int64_t CountControlEdges(const HloComputation& computation) {
int64_t count = 0;
for (const auto& instruction : computation.instructions()) {
count += instruction->control_successors().size();
}
return count;
}
int64_t CountControlEdges(const HloModule& module) {
int64_t count = 0;
for (const auto& computation : module.computations()) {
count += CountControlEdges(*computation);
}
return count;
}
class LoopScheduleLinearizerTest : public HloTestBase {
protected:
void InsertCopies(HloModule* module, bool expect_change) {
LoopScheduleLinearizer loop_schedule_linearizer;
TF_ASSERT_OK_AND_ASSIGN(bool changed, loop_schedule_linearizer.Run(module));
ASSERT_EQ(changed, expect_change);
CopyInsertion copy_insertion;
ASSERT_IS_OK(copy_insertion.Run(module).status());
}
};
TEST_F(LoopScheduleLinearizerTest, NoExtraCopiesRequired) {
absl::string_view hlo_string = R"(
HloModule module
while_body {
input = (s32[], s32[]) parameter(0)
counter = s32[] get-tuple-element(input), index=0
buffer = s32[] get-tuple-element(input), index=1
one = s32[] constant(1)
updated_counter = s32[] add(counter, one)
updated_buffer = s32[] add(buffer, counter)
ROOT out = (s32[], s32[]) tuple(updated_counter, updated_buffer)
}
while_cond {
input = (s32[], s32[]) parameter(0)
counter = s32[] get-tuple-element(input), index=0
bound = s32[] constant(100)
ROOT cmp = pred[] compare(counter, bound), direction=LT
}
ENTRY entry {
zero = s32[] constant(0)
buffer = s32[] parameter(0)
while_input = (s32[], s32[]) tuple(zero, buffer)
ROOT out = (s32[], s32[]) while(while_input), condition=while_cond, body=while_body
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
InsertCopies(module.get(), true);
EXPECT_EQ(CountCopies(
*module->entry_computation()->root_instruction()->while_body()),
0);
EXPECT_EQ(CountControlEdges(
*module->entry_computation()->root_instruction()->while_body()),
1);
}
TEST_F(LoopScheduleLinearizerTest, SkipAsyncCollectives) {
absl::string_view hlo_string = R"(
HloModule module
add {
x = s32[] parameter(0)
y = s32[] parameter(1)
ROOT add = s32[] add(x, y)
}
while_body {
input = (s32[], s32[]) parameter(0)
counter = s32[] get-tuple-element(input), index=0
buffer = s32[] get-tuple-element(input), index=1
one = s32[] constant(1)
updated_counter = s32[] add(counter, one)
updated_buffer = s32[] add(buffer, counter)
ar_start = s32[] all-reduce-start(updated_buffer), replica_groups={}, to_apply=add
ar_done = s32[] all-reduce-done(ar_start)
ROOT out = (s32[], s32[]) tuple(updated_counter, ar_done)
}
while_cond {
input = (s32[], s32[]) parameter(0)
counter = s32[] get-tuple-element(input), index=0
bound = s32[] constant(100)
ROOT cmp = pred[] compare(counter, bound), direction=LT
}
ENTRY entry {
zero = s32[] constant(0)
buffer = s32[] parameter(0)
while_input = (s32[], s32[]) tuple(zero, buffer)
ROOT out = (s32[], s32[]) while(while_input), condition=while_cond, body=while_body
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
InsertCopies(module.get(), false);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/loop_schedule_linearizer.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/loop_schedule_linearizer_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
07bd49a1-db7e-41b9-92e8-f63855c89c1b | cpp | google/libaddressinput | testdata_source | cpp/test/testdata_source.cc | cpp/test/testdata_source_test.cc | #include "testdata_source.h"
#include <cassert>
#include <cstddef>
#include <cstdlib>
#include <fstream>
#include <iostream>
#include <map>
#include <string>
namespace i18n {
namespace addressinput {
const char kDataFileName[] = TEST_DATA_DIR "/countryinfo.txt";
namespace {
const char kNormalPrefix = '-';
const char kAggregatePrefix = '+';
const char kDataKeyPrefix[] = "data/";
const size_t kDataKeyPrefixLength = sizeof kDataKeyPrefix - 1;
const size_t kCldrRegionCodeLength = 2;
const size_t kAggregateDataKeyLength =
kDataKeyPrefixLength + kCldrRegionCodeLength;
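// Reads "key=value" lines from |src_path| into a map keyed by '-' + key, and
// additionally accumulates per-region aggregate entries keyed by '+' + the
// "data/XX" prefix of the key.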
std::map<std::string, std::string> InitData(const std::string& src_path) {
std::map<std::string, std::string> data;
std::ifstream file(src_path);
if (!file.is_open()) {
std::cerr << "Error opening \"" << src_path << "\"." << '\n';
std::exit(EXIT_FAILURE);
}
const std::string normal_prefix(1, kNormalPrefix);
const std::string aggregate_prefix(1, kAggregatePrefix);
std::string key;
std::string value;
auto last_data_it = data.end();
auto aggregate_data_it = data.end();
while (file.good()) {
std::getline(file, key, '=');
if (!key.empty()) {
std::getline(file, value, '\n');
last_data_it =
data.emplace_hint(last_data_it, normal_prefix + key, value);
if (key.compare(0,
kDataKeyPrefixLength,
kDataKeyPrefix,
kDataKeyPrefixLength) == 0) {
if (aggregate_data_it != data.end() &&
key.compare(0,
kAggregateDataKeyLength,
aggregate_data_it->first,
sizeof kAggregatePrefix,
kAggregateDataKeyLength) == 0) {
aggregate_data_it->second.append(", \"" + key + "\": " + value);
} else {
assert(key.size() == kAggregateDataKeyLength);
if (aggregate_data_it != data.end()) {
aggregate_data_it->second.push_back('}');
}
const std::string& aggregate_key =
aggregate_prefix + key.substr(0, kAggregateDataKeyLength);
aggregate_data_it = data.emplace_hint(
aggregate_data_it, aggregate_key, "{\"" + key + "\": " + value);
}
}
}
}
file.close();
return data;
}
const std::map<std::string, std::string>& GetData(const std::string& src_path) {
static const std::map<std::string, std::string> kData(InitData(src_path));
return kData;
}
}
TestdataSource::TestdataSource(bool aggregate, const std::string& src_path)
: aggregate_(aggregate), src_path_(src_path) {}
TestdataSource::TestdataSource(bool aggregate)
: aggregate_(aggregate), src_path_(kDataFileName) {}
TestdataSource::~TestdataSource() = default;
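// Looks up |key| with the normal or aggregate prefix; unknown keys still
// succeed and return an empty "{}" dictionary.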
void TestdataSource::Get(const std::string& key,
const Callback& data_ready) const {
std::string prefixed_key(1, aggregate_ ? kAggregatePrefix : kNormalPrefix);
prefixed_key += key;
auto data_it = GetData(src_path_).find(prefixed_key);
bool success = data_it != GetData(src_path_).end();
std::string* data = nullptr;
if (success) {
data = new std::string(data_it->second);
} else {
success = true;
data = new std::string("{}");
}
data_ready(success, key, data);
}
}
} | #include "testdata_source.h"
#include <libaddressinput/callback.h>
#include <libaddressinput/source.h>
#include <cstddef>
#include <memory>
#include <string>
#include <gtest/gtest.h>
#include "region_data_constants.h"
namespace {
using i18n::addressinput::BuildCallback;
using i18n::addressinput::kDataFileName;
using i18n::addressinput::RegionDataConstants;
using i18n::addressinput::Source;
using i18n::addressinput::TestdataSource;
class TestdataSourceTest : public testing::TestWithParam<std::string> {
public:
TestdataSourceTest(const TestdataSourceTest&) = delete;
TestdataSourceTest& operator=(const TestdataSourceTest&) = delete;
protected:
TestdataSourceTest()
: source_(false),
source_with_path_(false, kDataFileName),
aggregate_source_(true),
aggregate_source_with_path_(true, kDataFileName),
success_(false),
key_(),
data_(),
data_ready_(BuildCallback(this, &TestdataSourceTest::OnDataReady)) {}
TestdataSource source_;
TestdataSource source_with_path_;
TestdataSource aggregate_source_;
TestdataSource aggregate_source_with_path_;
bool success_;
std::string key_;
std::string data_;
const std::unique_ptr<const Source::Callback> data_ready_;
private:
void OnDataReady(bool success, const std::string& key, std::string* data) {
ASSERT_FALSE(success && data == nullptr);
success_ = success;
key_ = key;
if (data != nullptr) {
data_ = *data;
delete data;
}
}
};
testing::AssertionResult DataIsValid(const std::string& data,
const std::string& key) {
if (data.empty()) {
return testing::AssertionFailure() << "empty data";
}
std::string expected_data_begin = R"({"id":")" + key + R"(")";
if (data.compare(0, expected_data_begin.length(), expected_data_begin) != 0) {
return testing::AssertionFailure()
<< data << " does not begin with " << expected_data_begin;
}
static const char kDataEnd[] = "\"}";
static const size_t kDataEndLength = sizeof kDataEnd - 1;
if (data.compare(data.length() - kDataEndLength,
kDataEndLength,
kDataEnd,
kDataEndLength) != 0) {
return testing::AssertionFailure()
<< data << " does not end with " << kDataEnd;
}
return testing::AssertionSuccess();
}
TEST_P(TestdataSourceTest, TestdataSourceHasValidDataForRegion) {
std::string key = "data/" + GetParam();
source_.Get(key, *data_ready_);
EXPECT_TRUE(success_);
EXPECT_EQ(key, key_);
EXPECT_TRUE(DataIsValid(data_, key));
};
TEST_P(TestdataSourceTest, TestdataSourceWithPathHasValidDataForRegion) {
std::string key = "data/" + GetParam();
source_with_path_.Get(key, *data_ready_);
EXPECT_TRUE(success_);
EXPECT_EQ(key, key_);
EXPECT_TRUE(DataIsValid(data_, key));
};
testing::AssertionResult AggregateDataIsValid(const std::string& data,
const std::string& key) {
if (data.empty()) {
return testing::AssertionFailure() << "empty data";
}
std::string expected_data_begin = "{\"" + key;
if (data.compare(0, expected_data_begin.length(), expected_data_begin) != 0) {
return testing::AssertionFailure()
<< data << " does not begin with " << expected_data_begin;
}
static const char kDataEnd[] = "\"}}";
static const size_t kDataEndLength = sizeof kDataEnd - 1;
if (data.compare(data.length() - kDataEndLength,
kDataEndLength,
kDataEnd,
kDataEndLength) != 0) {
return testing::AssertionFailure()
<< data << " does not end with " << kDataEnd;
}
return testing::AssertionSuccess();
}
TEST_P(TestdataSourceTest, TestdataSourceHasValidAggregatedDataForRegion) {
std::string key = "data/" + GetParam();
aggregate_source_.Get(key, *data_ready_);
EXPECT_TRUE(success_);
EXPECT_EQ(key, key_);
EXPECT_TRUE(AggregateDataIsValid(data_, key));
}
TEST_P(TestdataSourceTest,
TestdataSourceWithPathHasValidAggregatedDataForRegion) {
std::string key = "data/" + GetParam();
aggregate_source_with_path_.Get(key, *data_ready_);
EXPECT_TRUE(success_);
EXPECT_EQ(key, key_);
EXPECT_TRUE(AggregateDataIsValid(data_, key));
}
INSTANTIATE_TEST_SUITE_P(
AllRegions, TestdataSourceTest,
testing::ValuesIn(RegionDataConstants::GetRegionCodes()));
TEST_F(TestdataSourceTest, GetExistingData) {
static const std::string kKey = "data";
source_.Get(kKey, *data_ready_);
EXPECT_TRUE(success_);
EXPECT_EQ(kKey, key_);
EXPECT_TRUE(DataIsValid(data_, kKey));
}
TEST_F(TestdataSourceTest, GetMissingKeyReturnsEmptyDictionary) {
static const std::string kJunkKey = "junk";
source_.Get(kJunkKey, *data_ready_);
EXPECT_TRUE(success_);
EXPECT_EQ(kJunkKey, key_);
EXPECT_EQ("{}", data_);
}
TEST_F(TestdataSourceTest, AggregateGetMissingKeyReturnsEmptyDictionary) {
static const std::string kJunkKey = "junk";
aggregate_source_.Get(kJunkKey, *data_ready_);
EXPECT_TRUE(success_);
EXPECT_EQ(kJunkKey, key_);
EXPECT_EQ("{}", data_);
}
TEST_F(TestdataSourceTest, GetEmptyKeyReturnsEmptyDictionary) {
static const std::string kEmptyKey;
source_.Get(kEmptyKey, *data_ready_);
EXPECT_TRUE(success_);
EXPECT_EQ(kEmptyKey, key_);
EXPECT_EQ("{}", data_);
}
} | https://github.com/google/libaddressinput/blob/2610f7b1043d6784ada41392fc9392d1ea09ea07/cpp/test/testdata_source.cc | https://github.com/google/libaddressinput/blob/2610f7b1043d6784ada41392fc9392d1ea09ea07/cpp/test/testdata_source_test.cc | 2610f7b1043d6784ada41392fc9392d1ea09ea07 |
0fbf4176-79d2-4fec-be4d-07982b247e44 | cpp | google/arolla | optools | arolla/optools/optools.cc | arolla/qexpr/optools_test.cc | #include "arolla/optools/optools.h"
#include <cstddef>
#include <string>
#include <utility>
#include <vector>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "arolla/expr/basic_expr_operator.h"
#include "arolla/expr/expr_operator.h"
#include "arolla/expr/expr_operator_signature.h"
#include "arolla/expr/registered_expr_operator.h"
#include "arolla/qexpr/operators.h"
#include "arolla/qtype/qtype.h"
#include "arolla/util/fingerprint.h"
#include "arolla/util/string.h"
#include "arolla/util/status_macros_backport.h"
namespace arolla::optools::optools_impl {
namespace {
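// Expr operator backed by one or more QExpr overloads. The output type is
// resolved by matching the input QTypes against each overload's signature.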
class QExprWrappingOperator final : public expr::BackendExprOperatorTag,
public expr::BasicExprOperator {
public:
QExprWrappingOperator(absl::string_view name,
std::vector<OperatorPtr> qexpr_ops,
expr::ExprOperatorSignature signature,
absl::string_view description)
: expr::BasicExprOperator(
name, signature, description,
FingerprintHasher("arolla::optools_impl::QExprWrappingOperator")
.Combine(name, signature)
.Finish()),
qexpr_ops_(std::move(qexpr_ops)) {}
absl::StatusOr<QTypePtr> GetOutputQType(
absl::Span<const QTypePtr> input_qtypes) const override {
for (const OperatorPtr& op : qexpr_ops_) {
absl::Span<const QTypePtr> required_qtypes =
op->signature()->input_types();
bool match = true;
for (size_t i = 0; i < input_qtypes.size(); ++i) {
if (input_qtypes[i] != required_qtypes[i]) {
match = false;
break;
}
}
if (match) {
return op->signature()->output_type();
}
}
std::string msg = "no such overload; available signatures: ";
bool is_first = true;
for (const auto& op : qexpr_ops_) {
absl::StrAppend(&msg, NonFirstComma(is_first, ", "), op->signature());
}
return absl::InvalidArgumentError(msg);
}
private:
std::vector<OperatorPtr> qexpr_ops_;
};
}
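// Registers every QExpr overload under |name|, checks that all overloads take
// the same number of arguments, infers the expr signature when none is given,
// and finally registers a QExprWrappingOperator expr operator with that name.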
absl::Status RegisterFunctionAsOperatorImpl(
absl::string_view name, std::vector<OperatorPtr> qexpr_ops,
expr::ExprOperatorSignature signature, absl::string_view description) {
RETURN_IF_ERROR(expr::ValidateSignature(signature));
if (expr::HasVariadicParameter(signature)) {
return absl::InvalidArgumentError(
"incorrect operator signature: RegisterFunctionAsOperator doesn't "
"support variadic args");
}
if (qexpr_ops.empty()) {
return absl::InvalidArgumentError(
"at least one qexpr operator is required");
}
size_t arg_count = qexpr_ops[0]->signature()->input_types().size();
for (const OperatorPtr& op : qexpr_ops) {
if (op->signature()->input_types().size() != arg_count) {
return absl::InvalidArgumentError(
"arg count must be the same for all overloads");
}
RETURN_IF_ERROR(
::arolla::OperatorRegistry::GetInstance()->RegisterOperator(name, op));
}
if (signature.parameters.empty()) {
signature = expr::ExprOperatorSignature::MakeArgsN(arg_count);
} else if (signature.parameters.size() != arg_count) {
return absl::InvalidArgumentError(
"operator signature doesn't match the function");
}
return expr::RegisterOperator<QExprWrappingOperator>(
name, name, std::move(qexpr_ops), signature, description)
.status();
}
} | #include "arolla/qexpr/optools.h"
#include <cstdint>
#include <memory>
#include <string>
#include <utility>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/status/status.h"
#include "absl/status/status_matchers.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/types/span.h"
#include "arolla/qexpr/operator_factory.h"
#include "arolla/qexpr/operators.h"
#include "arolla/qtype/qtype.h"
#include "arolla/qtype/qtype_traits.h"
namespace test_namespace {
namespace {
using ::absl_testing::IsOkAndHolds;
using ::testing::Eq;
AROLLA_REGISTER_QEXPR_OPERATOR("optools_test.to_string_from_lambda",
[](int32_t x) {
std::pair<int32_t, int32_t> p(x, x);
return absl::StrCat(p.first);
});
std::string ToString(int32_t x) { return absl::StrCat(x); }
AROLLA_REGISTER_QEXPR_OPERATOR("optools_test.to_string_from_function",
ToString);
template <typename T>
struct ToStringOp {
std::string operator()(T x) const { return absl::StrCat(x); }
};
AROLLA_REGISTER_QEXPR_OPERATOR("optools_test.to_string_from_functor",
ToStringOp<int32_t>());
template <typename T, typename U>
class ToStringOperatorFamily : public arolla::OperatorFamily {
public:
absl::StatusOr<arolla::OperatorPtr> DoGetOperator(
absl::Span<const arolla::QTypePtr> input_types,
arolla::QTypePtr output_type) const final {
if (input_types.size() != 1 || input_types[0] != arolla::GetQType<T>()) {
return absl::InvalidArgumentError(
"the only supported input type is int32");
}
if (output_type != arolla::GetQType<U>()) {
return absl::InvalidArgumentError(
"the only supported output type is string");
}
return arolla::QExprOperatorFromFunction(ToString);
}
};
AROLLA_REGISTER_QEXPR_OPERATOR_FAMILY(
"optools_test.to_string_from_family",
std::make_unique<ToStringOperatorFamily<int32_t, std::string>>());
TEST(OptoolsTest, FromFunction) {
EXPECT_THAT(arolla::InvokeOperator<std::string>(
"optools_test.to_string_from_function", 57),
IsOkAndHolds(Eq("57")));
}
TEST(OptoolsTest, FromLambda) {
EXPECT_THAT(arolla::InvokeOperator<std::string>(
"optools_test.to_string_from_lambda", 57),
IsOkAndHolds(Eq("57")));
}
TEST(OptoolsTest, FromFunctor) {
EXPECT_THAT(arolla::InvokeOperator<std::string>(
"optools_test.to_string_from_functor", 57),
IsOkAndHolds(Eq("57")));
}
TEST(OptoolsTest, FromFamily) {
EXPECT_THAT(arolla::InvokeOperator<std::string>(
"optools_test.to_string_from_family", 57),
IsOkAndHolds(Eq("57")));
}
}
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/optools/optools.cc | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/qexpr/optools_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
18d40d61-c549-4e2b-ba54-b152b402180e | cpp | tensorflow/tensorflow | client | third_party/xla/xla/client/client.cc | third_party/xla/xla/tests/client_test.cc | #include "xla/client/client.h"
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/status/status.h"
#include "absl/types/span.h"
#include "xla/execution_options_util.h"
#include "xla/hlo/builder/xla_computation.h"
#include "xla/layout.h"
#include "xla/literal.h"
#include "xla/service/hlo.pb.h"
#include "xla/service/service.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/util.h"
#include "xla/xla.pb.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/protobuf.h"
#include "tsl/platform/statusor.h"
namespace xla {
Client::Client(Service* stub) : stub_(stub) {}
Client::~Client() = default;
absl::StatusOr<Literal> Client::Transfer(const GlobalData& data,
const Shape* shape_with_layout) {
return stub_->TransferToClient(data, shape_with_layout);
}
absl::StatusOr<std::unique_ptr<GlobalData>> Client::TransferToServer(
const LiteralSlice& literal, const DeviceHandle* device_handle) {
return stub_->TransferToServer(literal, device_handle);
}
absl::Status Client::TransferToInfeed(const LiteralSlice& literal,
int64_t replica_id,
const DeviceHandle* device_handle) {
return stub_->TransferToInfeed(literal, replica_id, device_handle);
}
absl::StatusOr<Literal> Client::TransferFromOutfeed(
const Shape* shape_with_layout, int64_t replica_id,
const DeviceHandle* device_handle) {
return stub_->TransferFromOutfeed(shape_with_layout, replica_id,
device_handle);
}
absl::Status Client::ResetDevice() { return stub_->ResetDevice(); }
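// Executes the computation and transfers the result literal back to the
// client, honoring the output layout from |execution_options| when set.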
absl::StatusOr<Literal> Client::ExecuteAndTransfer(
const XlaComputation& computation, absl::Span<GlobalData* const> arguments,
const ExecutionOptions* execution_options,
ExecutionProfile* execution_profile) {
TF_ASSIGN_OR_RETURN(
std::unique_ptr<GlobalData> data,
Execute(computation, arguments, execution_options, execution_profile));
std::optional<Shape> shape_with_output_layout;
if (execution_options && execution_options->has_shape_with_output_layout()) {
shape_with_output_layout =
Shape(execution_options->shape_with_output_layout());
}
return Transfer(*data, shape_with_output_layout.has_value()
? &(*shape_with_output_layout)
: nullptr);
}
absl::StatusOr<Literal> Client::ComputeConstant(
const XlaComputation& computation, const Layout* output_layout) const {
return stub_->ComputeConstantGraph(computation, output_layout);
}
absl::StatusOr<XlaComputation> Client::LoadSnapshot(const HloSnapshot& module) {
TF_RET_CHECK(module.has_hlo() && module.hlo().has_hlo_module());
return XlaComputation(module.hlo().hlo_module());
}
absl::StatusOr<ExecutionHandle> Client::Compile(
const XlaComputation& computation, absl::Span<const Shape> argument_shapes,
const ExecutionOptions* execution_options) {
std::optional<ExecutionOptions> opts;
if (!execution_options) {
opts = CreateDefaultExecutionOptions();
}
return stub_->Compile(computation, argument_shapes,
execution_options ? *execution_options : *opts);
}
absl::StatusOr<std::unique_ptr<GlobalData>> Client::Execute(
const ExecutionHandle& handle, absl::Span<GlobalData* const> arguments,
ExecutionProfile* execution_profile) {
return stub_->Execute(handle, arguments, execution_profile);
}
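// Runs the computation as a single-instance ExecuteParallel request. If the
// caller supplied no device handles, one is obtained from the service first;
// the first non-empty-tuple result (or result 0) is returned.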
absl::StatusOr<std::unique_ptr<GlobalData>> Client::Execute(
const XlaComputation& computation, absl::Span<GlobalData* const> arguments,
const ExecutionOptions* execution_options,
ExecutionProfile* execution_profile) {
std::optional<ExecutionOptions> options_storage;
if (!execution_options || execution_options->device_handles().empty()) {
if (execution_options) {
options_storage.emplace(*execution_options);
} else {
options_storage.emplace(CreateDefaultExecutionOptions());
}
execution_options = &*options_storage;
TF_ASSIGN_OR_RETURN(auto device_handles,
GetDeviceHandles(1));
TF_RET_CHECK(!device_handles.empty());
*options_storage->add_device_handles() = std::move(device_handles[0]);
}
std::vector<XlaComputationInstance> computation_instances = {
XlaComputationInstance{
computation,
std::vector<GlobalData*>(arguments.begin(), arguments.end()),
*execution_options, execution_profile}};
VLOG(1) << "Making ExecuteParallel request: "
<< execution_options->DebugString();
TF_ASSIGN_OR_RETURN(auto results, ExecuteParallel(computation_instances));
VLOG(1) << "ExecuteParallel request done.";
for (int64_t i = 0, end = results.size(); i < end; i++) {
TF_ASSIGN_OR_RETURN(const Shape& shape, GetShape(*results[i]));
if (!ShapeUtil::IsEmptyTuple(shape)) {
VLOG(3) << "Fetching result from device " << i << ": "
<< ShapeUtil::HumanString(shape);
return std::move(results[i]);
}
}
TF_RET_CHECK(!results.empty());
VLOG(1) << "Defaulting to device 0 result";
return std::move(results[0]);
}
absl::StatusOr<std::vector<std::unique_ptr<GlobalData>>>
Client::ExecuteParallel(absl::Span<const XlaComputationInstance> computations) {
return stub_->ExecuteGraphParallel(computations);
}
absl::StatusOr<std::vector<DeviceHandle>> Client::GetDeviceHandles(
int64_t device_count) {
if (device_count < 1) {
return InvalidArgument("device_count must be greater than 0");
}
return stub_->GetDeviceHandles(device_count);
}
absl::Status Client::Unregister(const GlobalData& data) {
return stub_->Unregister(data.handle());
}
absl::StatusOr<std::vector<std::unique_ptr<GlobalData>>>
Client::DeconstructTuple(const GlobalData& data) {
return stub_->DeconstructTuple(data);
}
absl::StatusOr<std::unique_ptr<ProgramShape>> Client::GetComputationShape(
const XlaComputation& computation) {
TF_ASSIGN_OR_RETURN(const auto& result, computation.GetProgramShape());
return std::make_unique<ProgramShape>(result);
}
absl::StatusOr<Shape> Client::GetShape(const GlobalData& data) {
return stub_->GetShape(data);
}
absl::StatusOr<ChannelHandle> Client::CreateChannelHandleByType(
ChannelHandle::ChannelType type) {
return stub_->CreateChannelHandle(type);
}
absl::StatusOr<ChannelHandle> Client::CreateChannelHandle() {
return CreateChannelHandleByType(ChannelHandle::DEVICE_TO_DEVICE);
}
absl::StatusOr<ChannelHandle> Client::CreateHostToDeviceChannelHandle() {
return CreateChannelHandleByType(ChannelHandle::HOST_TO_DEVICE);
}
absl::StatusOr<ChannelHandle> Client::CreateDeviceToHostChannelHandle() {
return CreateChannelHandleByType(ChannelHandle::DEVICE_TO_HOST);
}
} | #include <memory>
#include <vector>
#include "absl/status/statusor.h"
#include "xla/client/global_data.h"
#include "xla/client/local_client.h"
#include "xla/hlo/builder/xla_builder.h"
#include "xla/hlo/builder/xla_computation.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/test_helpers.h"
#include "xla/tests/client_library_test_base.h"
#include "xla/tests/literal_test_util.h"
#include "xla/tests/test_macros.h"
#include "xla/tests/test_utils.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/test.h"
namespace xla {
namespace {
class ClientTest : public ClientLibraryTestBase {};
XLA_TEST_F(ClientTest, ExecuteWithLayout) {
XlaBuilder b(TestName());
std::vector<std::vector<int64_t>> layouts = {{0, 1}, {1, 0}};
for (const std::vector<int64_t>& execute_layout : layouts) {
for (const std::vector<int64_t>& transfer_layout : layouts) {
Add(ConstantR2<int32_t>(&b, {{1, 2}, {3, 4}}),
ConstantR2<int32_t>(&b, {{10, 20}, {30, 40}}));
TF_ASSERT_OK_AND_ASSIGN(auto computation, b.Build());
ExecutionOptions execution_options = execution_options_;
*execution_options.mutable_shape_with_output_layout() =
ShapeUtil::MakeShapeWithDenseLayout(S32, {2, 2},
execute_layout)
.ToProto();
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<GlobalData> data,
client_->Execute(computation, {}, &execution_options));
Literal expected_literal = LiteralUtil::CreateR2WithLayout<int32_t>(
{{11, 22}, {33, 44}}, LayoutUtil::MakeLayout(transfer_layout));
TF_ASSERT_OK_AND_ASSIGN(
auto computed, client_->Transfer(*data, &expected_literal.shape()));
ASSERT_TRUE(LiteralTestUtil::EqualShapesAndLayouts(
expected_literal.shape(), computed.shape()));
EXPECT_TRUE(LiteralTestUtil::Equal(expected_literal, computed));
}
}
}
XLA_TEST_F(ClientTest, ExecuteWithTupleLayout) {
XlaBuilder b(TestName());
Tuple(&b, {ConstantR2<int32_t>(&b, {{1, 2}, {3, 4}}),
ConstantR2<int32_t>(&b, {{10, 20}, {30, 40}})});
TF_ASSERT_OK_AND_ASSIGN(auto computation, b.Build());
ExecutionOptions execution_options = execution_options_;
*execution_options.mutable_shape_with_output_layout() =
ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeShapeWithDenseLayout(S32, {2, 2},
{0, 1}),
ShapeUtil::MakeShapeWithDenseLayout(S32, {2, 2},
{1, 0})})
.ToProto();
TF_ASSERT_OK_AND_ASSIGN(
auto result,
client_->ExecuteAndTransfer(computation, {}, &execution_options));
LiteralTestUtil::ExpectR2Equal<int32_t>({{1, 2}, {3, 4}},
LiteralSlice(result, {0}));
LiteralTestUtil::ExpectR2Equal<int32_t>({{10, 20}, {30, 40}},
LiteralSlice(result, {1}));
EXPECT_TRUE(result.shape().IsTuple());
EXPECT_EQ(2, ShapeUtil::TupleElementCount(result.shape()));
EXPECT_TRUE(ShapeUtil::Equal(
ShapeUtil::GetTupleElementShape(result.shape(), 0),
ShapeUtil::MakeShapeWithDenseLayout(S32, {2, 2},
{0, 1})));
EXPECT_TRUE(ShapeUtil::Equal(
ShapeUtil::GetTupleElementShape(result.shape(), 1),
ShapeUtil::MakeShapeWithDenseLayout(S32, {2, 2},
{1, 0})));
}
XLA_TEST_F(ClientTest,
DISABLED_ON_INTERPRETER(DISABLED_ON_GPU(ExecuteParallel))) {
XlaComputation add_with_one_arg, mul_with_two_args, dot_with_one_arg;
Shape shape = ShapeUtil::MakeShape(S32, {2, 2});
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<GlobalData> const_arg,
client_->TransferToServer(
LiteralUtil::CreateR2<int32_t>({{5, 6}, {7, 8}})));
XlaBuilder b(TestName() + ".add");
Add(Parameter(&b, 0, shape, "param_0"),
ConstantR2<int32_t>(&b, {{1, 2}, {3, 4}}));
TF_ASSERT_OK_AND_ASSIGN(add_with_one_arg, b.Build());
std::vector<XlaComputationInstance> computation_instances;
TF_ASSERT_OK_AND_ASSIGN(std::vector<xla::DeviceHandle> devices,
client_->GetDeviceHandles(1));
ASSERT_EQ(devices.size(), 1);
ExecutionOptions options = execution_options_;
*options.add_device_handles() = devices[0];
computation_instances.push_back(XlaComputationInstance(
add_with_one_arg, {const_arg.get()}, options, nullptr));
TF_ASSERT_OK_AND_ASSIGN(auto results,
client_->ExecuteParallel(computation_instances));
auto expected_result = LiteralUtil::CreateR2<int32_t>({{6, 8}, {10, 12}});
TF_ASSERT_OK_AND_ASSIGN(
auto result_literal,
client_->Transfer(*results[0], &expected_result.shape()));
EXPECT_TRUE(LiteralTestUtil::Equal(expected_result, result_literal));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/client/client.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tests/client_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
93741d2a-fa68-4282-bf83-455ca44616e7 | cpp | tensorflow/tensorflow | host_stream | third_party/xla/xla/stream_executor/host/host_stream.cc | third_party/xla/xla/stream_executor/host/host_stream_test.cc | #include "xla/stream_executor/host/host_stream.h"
#include <string.h>
#include <cfenv>
#include <cstdint>
#include <memory>
#include <queue>
#include <utility>
#include "absl/functional/any_invocable.h"
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/synchronization/mutex.h"
#include "absl/synchronization/notification.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/stream_executor/event.h"
#include "xla/stream_executor/host/host_event.h"
#include "xla/stream_executor/host/host_kernel.h"
#include "xla/stream_executor/kernel.h"
#include "xla/stream_executor/launch_dim.h"
#include "xla/stream_executor/stream.h"
#include "xla/stream_executor/stream_common.h"
#include "tsl/platform/denormal.h"
#include "tsl/platform/env.h"
#include "tsl/platform/setround.h"
namespace stream_executor {
namespace host {
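// Work submitted to a HostStream executes in FIFO order on a dedicated worker
// thread that is started here and joined in the destructor.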
HostStream::HostStream(StreamExecutor* executor)
: StreamCommon(executor),
thread_(tsl::Env::Default()->StartThread({}, "host_executor",
[this]() { WorkLoop(); })) {}
HostStream::~HostStream() {
{
absl::MutexLock lock(&mu_);
work_queue_.push(nullptr);
}
thread_.reset();
parent()->DeallocateStream(this);
}
absl::Status HostStream::Memcpy(DeviceMemoryBase* gpu_dst,
const DeviceMemoryBase& gpu_src,
uint64_t size) {
void* dst_mem = gpu_dst->opaque();
void* src_mem = const_cast<void*>(gpu_src.opaque());
EnqueueTask([src_mem, dst_mem, size]() { memcpy(dst_mem, src_mem, size); });
return absl::OkStatus();
}
absl::Status HostStream::Memcpy(void* host_dst, const DeviceMemoryBase& gpu_src,
uint64_t size) {
void* src_mem = const_cast<void*>(gpu_src.opaque());
EnqueueTask([host_dst, src_mem, size]() { memcpy(host_dst, src_mem, size); });
return absl::OkStatus();
}
absl::Status HostStream::Memcpy(DeviceMemoryBase* gpu_dst, const void* host_src,
uint64_t size) {
void* dst_mem = gpu_dst->opaque();
EnqueueTask([dst_mem, host_src, size]() { memcpy(dst_mem, host_src, size); });
return absl::OkStatus();
}
absl::Status HostStream::Memset32(DeviceMemoryBase* location, uint32_t pattern,
uint64_t size) {
void* gpu_mem = location->opaque();
EnqueueTask([gpu_mem, size, pattern]() { memset(gpu_mem, pattern, size); });
return absl::OkStatus();
}
absl::Status HostStream::MemZero(DeviceMemoryBase* location, uint64_t size) {
void* gpu_mem = location->opaque();
EnqueueTask([gpu_mem, size]() { memset(gpu_mem, 0, size); });
return absl::OkStatus();
}
absl::Status HostStream::WaitFor(Stream* other) {
auto event = std::make_shared<absl::Notification>();
static_cast<HostStream*>(other)->EnqueueTask([event]() { event->Notify(); });
EnqueueTask([event]() { event->WaitForNotification(); });
return absl::OkStatus();
}
absl::Status HostStream::WaitFor(Event* event) {
std::shared_ptr<absl::Notification> notification =
static_cast<HostEvent*>(event)->notification();
EnqueueTask([notification]() { notification->WaitForNotification(); });
return absl::OkStatus();
}
bool HostStream::EnqueueTask(absl::AnyInvocable<void() &&> task) {
return EnqueueTaskWithStatus([task = std::move(task)]() mutable {
std::move(task)();
return absl::OkStatus();
});
}
absl::Status HostStream::RecordEvent(Event* event) {
std::shared_ptr<absl::Notification> notification =
static_cast<HostEvent*>(event)->notification();
EnqueueTask([notification]() {
CHECK(!notification->HasBeenNotified());
notification->Notify();
});
return absl::OkStatus();
}
absl::Status HostStream::DoHostCallbackWithStatus(
absl::AnyInvocable<absl::Status() &&> callback) {
if (EnqueueTaskWithStatus(std::move(callback))) {
return absl::OkStatus();
}
return absl::InternalError("Failed to host callback.");
}
bool HostStream::EnqueueTaskWithStatus(
absl::AnyInvocable<absl::Status() &&> task) {
CHECK(task != nullptr);
absl::MutexLock lock(&mu_);
work_queue_.push(std::move(task));
return true;
}
bool HostStream::WorkAvailable() { return !work_queue_.empty(); }
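// Worker-thread loop: repeatedly swaps out the pending queue and runs each
// task, accumulating errors in status_, until the null sentinel pushed by the
// destructor is dequeued.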
void HostStream::WorkLoop() {
tsl::port::ScopedFlushDenormal flush;
tsl::port::ScopedSetRound round(FE_TONEAREST);
while (true) {
std::queue<absl::AnyInvocable<absl::Status() &&>> queue;
{
absl::MutexLock lock(&mu_);
mu_.Await(absl::Condition(this, &HostStream::WorkAvailable));
std::swap(queue, work_queue_);
}
while (!queue.empty()) {
absl::AnyInvocable<absl::Status() &&>& fn = queue.front();
if (!fn) {
return;
}
status_.Update(std::move(fn)());
queue.pop();
}
}
}
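// Enqueues a marker task and waits for it; returns and then clears any error
// recorded by previously executed tasks.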
absl::Status HostStream::BlockUntilDone() {
absl::Notification done;
absl::Status status;
EnqueueTask([&done, &status, this]() {
status = status_;
status_ = absl::OkStatus();
done.Notify();
});
done.WaitForNotification();
return status;
}
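// Launches a host kernel synchronously; only KernelArgsDeviceMemoryArray
// arguments are supported.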
absl::Status HostStream::Launch(const ThreadDim& thread_dims,
const BlockDim& block_dims,
const Kernel& kernel, const KernelArgs& args) {
const HostKernel* host_kernel = AsHostKernel(&kernel);
const KernelArgsDeviceMemoryArray* device_mem =
DynCast<KernelArgsDeviceMemoryArray>(&args);
if (device_mem != nullptr) {
return host_kernel->Launch(thread_dims, device_mem->device_memory_args());
}
return absl::UnimplementedError(
"Host kernel implements Launch method only for DeviceMemoryArray "
"arguments.");
}
}
} | #include "absl/status/status.h"
#include "absl/synchronization/mutex.h"
#include "xla/stream_executor/platform.h"
#include "xla/stream_executor/platform_manager.h"
#include "xla/stream_executor/stream.h"
#include "xla/stream_executor/stream_executor.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace se = stream_executor;
TEST(HostStream, EnforcesFIFOOrder) {
se::Platform* platform =
se::PlatformManager::PlatformWithName("Host").value();
se::StreamExecutor* executor = platform->ExecutorForDevice(0).value();
TF_ASSERT_OK_AND_ASSIGN(auto stream, executor->CreateStream());
absl::Mutex mu;
int expected = 0;
bool ok = true;
for (int i = 0; i < 2000; ++i) {
TF_ASSERT_OK(stream->DoHostCallback([i, &mu, &expected, &ok]() {
absl::MutexLock lock(&mu);
if (expected != i) {
ok = false;
}
++expected;
}));
}
TF_ASSERT_OK(stream->BlockHostUntilDone());
absl::MutexLock lock(&mu);
EXPECT_TRUE(ok);
}
TEST(HostStream, ReportsHostCallbackError) {
se::Platform* platform =
se::PlatformManager::PlatformWithName("Host").value();
se::StreamExecutor* executor = platform->ExecutorForDevice(0).value();
TF_ASSERT_OK_AND_ASSIGN(auto stream, executor->CreateStream());
TF_ASSERT_OK(stream->DoHostCallbackWithStatus(
[]() { return absl::InternalError("error!"); }));
auto status = stream->BlockHostUntilDone();
ASSERT_EQ(status.code(), tsl::error::INTERNAL);
ASSERT_EQ(status.message(), "error!");
}
TEST(HostStream, ReportsFirstHostCallbackError) {
se::Platform* platform =
se::PlatformManager::PlatformWithName("Host").value();
se::StreamExecutor* executor = platform->ExecutorForDevice(0).value();
TF_ASSERT_OK_AND_ASSIGN(auto stream, executor->CreateStream());
TF_ASSERT_OK(stream->DoHostCallbackWithStatus(
[]() { return absl::InternalError("error 1"); }));
TF_ASSERT_OK(stream->DoHostCallbackWithStatus(
[]() { return absl::InternalError("error 2"); }));
ASSERT_EQ(stream->BlockHostUntilDone().message(), "error 1");
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/stream_executor/host/host_stream.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/stream_executor/host/host_stream_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
8c0d3074-1316-4946-9977-f3d3c6a6b78b | cpp | google/cel-cpp | parser | parser/parser.cc | parser/parser_test.cc | #include "parser/parser.h"
#include <algorithm>
#include <any>
#include <array>
#include <cstddef>
#include <cstdint>
#include <exception>
#include <functional>
#include <iterator>
#include <limits>
#include <map>
#include <memory>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
#include "google/api/expr/v1alpha1/syntax.pb.h"
#include "absl/base/macros.h"
#include "absl/base/optimization.h"
#include "absl/container/btree_map.h"
#include "absl/container/flat_hash_map.h"
#include "absl/functional/overload.h"
#include "absl/memory/memory.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/match.h"
#include "absl/strings/numbers.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_join.h"
#include "absl/strings/str_replace.h"
#include "absl/strings/string_view.h"
#include "absl/types/optional.h"
#include "absl/types/span.h"
#include "absl/types/variant.h"
#include "antlr4-runtime.h"
#include "common/ast.h"
#include "common/constant.h"
#include "common/expr_factory.h"
#include "common/operators.h"
#include "common/source.h"
#include "extensions/protobuf/internal/ast.h"
#include "internal/lexis.h"
#include "internal/status_macros.h"
#include "internal/strings.h"
#include "internal/utf8.h"
#include "parser/internal/CelBaseVisitor.h"
#include "parser/internal/CelLexer.h"
#include "parser/internal/CelParser.h"
#include "parser/macro.h"
#include "parser/macro_expr_factory.h"
#include "parser/macro_registry.h"
#include "parser/options.h"
#include "parser/source_factory.h"
namespace google::api::expr::parser {
namespace {
class ParserVisitor;
}
}
namespace cel {
namespace {
std::any ExprPtrToAny(std::unique_ptr<Expr>&& expr) {
return std::make_any<Expr*>(expr.release());
}
std::any ExprToAny(Expr&& expr) {
return ExprPtrToAny(std::make_unique<Expr>(std::move(expr)));
}
std::unique_ptr<Expr> ExprPtrFromAny(std::any&& any) {
return absl::WrapUnique(std::any_cast<Expr*>(std::move(any)));
}
Expr ExprFromAny(std::any&& any) {
auto expr = ExprPtrFromAny(std::move(any));
return std::move(*expr);
}
struct ParserError {
std::string message;
SourceRange range;
};
std::string DisplayParserError(const cel::Source& source,
const ParserError& error) {
auto location =
source.GetLocation(error.range.begin).value_or(SourceLocation{});
return absl::StrCat(absl::StrFormat("ERROR: %s:%zu:%zu: %s",
source.description(), location.line,
location.column + 1, error.message),
source.DisplayErrorLocation(location));
}
int32_t PositiveOrMax(int32_t value) {
return value >= 0 ? value : std::numeric_limits<int32_t>::max();
}
SourceRange SourceRangeFromToken(const antlr4::Token* token) {
SourceRange range;
if (token != nullptr) {
if (auto start = token->getStartIndex(); start != INVALID_INDEX) {
range.begin = static_cast<int32_t>(start);
}
if (auto end = token->getStopIndex(); end != INVALID_INDEX) {
range.end = static_cast<int32_t>(end + 1);
}
}
return range;
}
SourceRange SourceRangeFromParserRuleContext(
const antlr4::ParserRuleContext* context) {
SourceRange range;
if (context != nullptr) {
if (auto start = context->getStart() != nullptr
? context->getStart()->getStartIndex()
: INVALID_INDEX;
start != INVALID_INDEX) {
range.begin = static_cast<int32_t>(start);
}
if (auto end = context->getStop() != nullptr
? context->getStop()->getStopIndex()
: INVALID_INDEX;
end != INVALID_INDEX) {
range.end = static_cast<int32_t>(end + 1);
}
}
return range;
}
}
class ParserMacroExprFactory final : public MacroExprFactory {
public:
explicit ParserMacroExprFactory(const cel::Source& source)
: MacroExprFactory(), source_(source) {}
void BeginMacro(SourceRange macro_position) {
macro_position_ = macro_position;
}
void EndMacro() { macro_position_ = SourceRange{}; }
Expr ReportError(absl::string_view message) override {
return ReportError(macro_position_, message);
}
Expr ReportError(int64_t expr_id, absl::string_view message) {
return ReportError(GetSourceRange(expr_id), message);
}
Expr ReportError(SourceRange range, absl::string_view message) {
++error_count_;
if (errors_.size() <= 100) {
errors_.push_back(ParserError{std::string(message), range});
}
return NewUnspecified(NextId(range));
}
Expr ReportErrorAt(const Expr& expr, absl::string_view message) override {
return ReportError(GetSourceRange(expr.id()), message);
}
SourceRange GetSourceRange(int64_t id) const {
if (auto it = positions_.find(id); it != positions_.end()) {
return it->second;
}
return SourceRange{};
}
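// Allocates the next expression id, remembering its source range when one is
// known.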
int64_t NextId(const SourceRange& range) {
auto id = expr_id_++;
if (range.begin != -1 || range.end != -1) {
positions_.insert(std::pair{id, range});
}
return id;
}
bool HasErrors() const { return error_count_ != 0; }
std::string ErrorMessage() {
std::stable_sort(
errors_.begin(), errors_.end(),
[](const ParserError& lhs, const ParserError& rhs) -> bool {
auto lhs_begin = PositiveOrMax(lhs.range.begin);
auto lhs_end = PositiveOrMax(lhs.range.end);
auto rhs_begin = PositiveOrMax(rhs.range.begin);
auto rhs_end = PositiveOrMax(rhs.range.end);
return lhs_begin < rhs_begin ||
(lhs_begin == rhs_begin && lhs_end < rhs_end);
});
bool errors_truncated = error_count_ > 100;
std::vector<std::string> messages;
messages.reserve(
errors_.size() +
errors_truncated);
std::transform(errors_.begin(), errors_.end(), std::back_inserter(messages),
[this](const ParserError& error) {
return cel::DisplayParserError(source_, error);
});
if (errors_truncated) {
messages.emplace_back(
absl::StrCat(error_count_ - 100, " more errors were truncated."));
}
return absl::StrJoin(messages, "\n");
}
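// Records, keyed by |macro_id|, the original call expression that a macro
// expansion replaced, so it can be reported alongside the expanded AST.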
void AddMacroCall(int64_t macro_id, absl::string_view function,
absl::optional<Expr> target, std::vector<Expr> arguments) {
macro_calls_.insert(
{macro_id, target.has_value()
? NewMemberCall(0, function, std::move(*target),
std::move(arguments))
: NewCall(0, function, std::move(arguments))});
}
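// Deep-copies |expr| for storage in a macro call record, replacing any
// subexpression that is itself a recorded macro call with an unspecified
// placeholder that keeps its id.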
Expr BuildMacroCallArg(const Expr& expr) {
if (auto it = macro_calls_.find(expr.id()); it != macro_calls_.end()) {
return NewUnspecified(expr.id());
}
return absl::visit(
absl::Overload(
[this, &expr](const UnspecifiedExpr&) -> Expr {
return NewUnspecified(expr.id());
},
[this, &expr](const Constant& const_expr) -> Expr {
return NewConst(expr.id(), const_expr);
},
[this, &expr](const IdentExpr& ident_expr) -> Expr {
return NewIdent(expr.id(), ident_expr.name());
},
[this, &expr](const SelectExpr& select_expr) -> Expr {
return select_expr.test_only()
? NewPresenceTest(
expr.id(),
BuildMacroCallArg(select_expr.operand()),
select_expr.field())
: NewSelect(expr.id(),
BuildMacroCallArg(select_expr.operand()),
select_expr.field());
},
[this, &expr](const CallExpr& call_expr) -> Expr {
std::vector<Expr> macro_arguments;
macro_arguments.reserve(call_expr.args().size());
for (const auto& argument : call_expr.args()) {
macro_arguments.push_back(BuildMacroCallArg(argument));
}
absl::optional<Expr> macro_target;
if (call_expr.has_target()) {
macro_target = BuildMacroCallArg(call_expr.target());
}
return macro_target.has_value()
? NewMemberCall(expr.id(), call_expr.function(),
std::move(*macro_target),
std::move(macro_arguments))
: NewCall(expr.id(), call_expr.function(),
std::move(macro_arguments));
},
[this, &expr](const ListExpr& list_expr) -> Expr {
std::vector<ListExprElement> macro_elements;
macro_elements.reserve(list_expr.elements().size());
for (const auto& element : list_expr.elements()) {
auto& cloned_element = macro_elements.emplace_back();
if (element.has_expr()) {
cloned_element.set_expr(BuildMacroCallArg(element.expr()));
}
cloned_element.set_optional(element.optional());
}
return NewList(expr.id(), std::move(macro_elements));
},
[this, &expr](const StructExpr& struct_expr) -> Expr {
std::vector<StructExprField> macro_fields;
macro_fields.reserve(struct_expr.fields().size());
for (const auto& field : struct_expr.fields()) {
auto& macro_field = macro_fields.emplace_back();
macro_field.set_id(field.id());
macro_field.set_name(field.name());
macro_field.set_value(BuildMacroCallArg(field.value()));
macro_field.set_optional(field.optional());
}
return NewStruct(expr.id(), struct_expr.name(),
std::move(macro_fields));
},
[this, &expr](const MapExpr& map_expr) -> Expr {
std::vector<MapExprEntry> macro_entries;
macro_entries.reserve(map_expr.entries().size());
for (const auto& entry : map_expr.entries()) {
auto& macro_entry = macro_entries.emplace_back();
macro_entry.set_id(entry.id());
macro_entry.set_key(BuildMacroCallArg(entry.key()));
macro_entry.set_value(BuildMacroCallArg(entry.value()));
macro_entry.set_optional(entry.optional());
}
return NewMap(expr.id(), std::move(macro_entries));
},
[this, &expr](const ComprehensionExpr& comprehension_expr) -> Expr {
return NewComprehension(
expr.id(), comprehension_expr.iter_var(),
BuildMacroCallArg(comprehension_expr.iter_range()),
comprehension_expr.accu_var(),
BuildMacroCallArg(comprehension_expr.accu_init()),
BuildMacroCallArg(comprehension_expr.loop_condition()),
BuildMacroCallArg(comprehension_expr.loop_step()),
BuildMacroCallArg(comprehension_expr.result()));
}),
expr.kind());
}
using ExprFactory::NewBoolConst;
using ExprFactory::NewBytesConst;
using ExprFactory::NewCall;
using ExprFactory::NewComprehension;
using ExprFactory::NewConst;
using ExprFactory::NewDoubleConst;
using ExprFactory::NewIdent;
using ExprFactory::NewIntConst;
using ExprFactory::NewList;
using ExprFactory::NewListElement;
using ExprFactory::NewMap;
using ExprFactory::NewMapEntry;
using ExprFactory::NewMemberCall;
using ExprFactory::NewNullConst;
using ExprFactory::NewPresenceTest;
using ExprFactory::NewSelect;
using ExprFactory::NewStringConst;
using ExprFactory::NewStruct;
using ExprFactory::NewStructField;
using ExprFactory::NewUintConst;
using ExprFactory::NewUnspecified;
const absl::btree_map<int64_t, SourceRange>& positions() const {
return positions_;
}
const absl::flat_hash_map<int64_t, Expr>& macro_calls() const {
return macro_calls_;
}
void EraseId(ExprId id) {
positions_.erase(id);
if (expr_id_ == id + 1) {
--expr_id_;
}
}
protected:
int64_t NextId() override { return NextId(macro_position_); }
int64_t CopyId(int64_t id) override {
if (id == 0) {
return 0;
}
return NextId(GetSourceRange(id));
}
private:
int64_t expr_id_ = 1;
absl::btree_map<int64_t, SourceRange> positions_;
absl::flat_hash_map<int64_t, Expr> macro_calls_;
std::vector<ParserError> errors_;
size_t error_count_ = 0;
const Source& source_;
SourceRange macro_position_;
};
}
namespace google::api::expr::parser {
namespace {
using ::antlr4::CharStream;
using ::antlr4::CommonTokenStream;
using ::antlr4::DefaultErrorStrategy;
using ::antlr4::ParseCancellationException;
using ::antlr4::Parser;
using ::antlr4::ParserRuleContext;
using ::antlr4::Token;
using ::antlr4::misc::IntervalSet;
using ::antlr4::tree::ErrorNode;
using ::antlr4::tree::ParseTreeListener;
using ::antlr4::tree::TerminalNode;
using ::cel::Expr;
using ::cel::ExprFromAny;
using ::cel::ExprKind;
using ::cel::ExprToAny;
using ::cel::IdentExpr;
using ::cel::ListExprElement;
using ::cel::MapExprEntry;
using ::cel::SelectExpr;
using ::cel::SourceRangeFromParserRuleContext;
using ::cel::SourceRangeFromToken;
using ::cel::StructExprField;
using ::cel_parser_internal::CelBaseVisitor;
using ::cel_parser_internal::CelLexer;
using ::cel_parser_internal::CelParser;
using common::CelOperator;
using common::ReverseLookupOperator;
using ::google::api::expr::v1alpha1::ParsedExpr;
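// antlr4 CharStream backed by the CEL source buffer; LA() and consume()
// operate on Unicode code points rather than bytes.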
class CodePointStream final : public CharStream {
public:
CodePointStream(cel::SourceContentView buffer, absl::string_view source_name)
: buffer_(buffer),
source_name_(source_name),
size_(buffer_.size()),
index_(0) {}
void consume() override {
if (ABSL_PREDICT_FALSE(index_ >= size_)) {
ABSL_ASSERT(LA(1) == IntStream::EOF);
throw antlr4::IllegalStateException("cannot consume EOF");
}
index_++;
}
size_t LA(ssize_t i) override {
if (ABSL_PREDICT_FALSE(i == 0)) {
return 0;
}
auto p = static_cast<ssize_t>(index_);
if (i < 0) {
i++;
if (p + i - 1 < 0) {
return IntStream::EOF;
}
}
if (p + i - 1 >= static_cast<ssize_t>(size_)) {
return IntStream::EOF;
}
return buffer_.at(static_cast<size_t>(p + i - 1));
}
ssize_t mark() override { return -1; }
void release(ssize_t marker) override {}
size_t index() override { return index_; }
void seek(size_t index) override { index_ = std::min(index, size_); }
size_t size() override { return size_; }
std::string getSourceName() const override {
return source_name_.empty() ? IntStream::UNKNOWN_SOURCE_NAME
: std::string(source_name_);
}
std::string getText(const antlr4::misc::Interval& interval) override {
if (ABSL_PREDICT_FALSE(interval.a < 0 || interval.b < 0)) {
return std::string();
}
size_t start = static_cast<size_t>(interval.a);
if (ABSL_PREDICT_FALSE(start >= size_)) {
return std::string();
}
size_t stop = static_cast<size_t>(interval.b);
if (ABSL_PREDICT_FALSE(stop >= size_)) {
stop = size_ - 1;
}
return buffer_.ToString(static_cast<cel::SourcePosition>(start),
static_cast<cel::SourcePosition>(stop) + 1);
}
std::string toString() const override { return buffer_.ToString(); }
private:
cel::SourceContentView const buffer_;
const absl::string_view source_name_;
const size_t size_;
size_t index_;
};
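// RAII helper that bumps the visitor's recursion depth counter for the
// duration of a visit.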
class ScopedIncrement final {
public:
explicit ScopedIncrement(int& recursion_depth)
: recursion_depth_(recursion_depth) {
++recursion_depth_;
}
~ScopedIncrement() { --recursion_depth_; }
private:
int& recursion_depth_;
};
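// Accumulates the operands of a repeated binary operator (such as || or &&)
// and rebuilds them as a roughly balanced call tree, keeping recursion depth
// logarithmic in the number of terms.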
class ExpressionBalancer final {
public:
ExpressionBalancer(cel::ParserMacroExprFactory& factory, std::string function,
Expr expr);
void AddTerm(int64_t op, Expr term);
Expr Balance();
private:
Expr BalancedTree(int lo, int hi);
private:
cel::ParserMacroExprFactory& factory_;
std::string function_;
std::vector<Expr> terms_;
std::vector<int64_t> ops_;
};
ExpressionBalancer::ExpressionBalancer(cel::ParserMacroExprFactory& factory,
std::string function, Expr expr)
: factory_(factory), function_(std::move(function)) {
terms_.push_back(std::move(expr));
}
void ExpressionBalancer::AddTerm(int64_t op, Expr term) {
terms_.push_back(std::move(term));
ops_.push_back(op);
}
Expr ExpressionBalancer::Balance() {
if (terms_.size() == 1) {
return std::move(terms_[0]);
}
return BalancedTree(0, ops_.size() - 1);
}
Expr ExpressionBalancer::BalancedTree(int lo, int hi) {
int mid = (lo + hi + 1) / 2;
std::vector<Expr> arguments;
arguments.reserve(2);
if (mid == lo) {
arguments.push_back(std::move(terms_[mid]));
} else {
arguments.push_back(BalancedTree(lo, mid - 1));
}
if (mid == hi) {
arguments.push_back(std::move(terms_[mid + 1]));
} else {
arguments.push_back(BalancedTree(mid + 1, hi));
}
return factory_.NewCall(ops_[mid], function_, std::move(arguments));
}
class ParserVisitor final : public CelBaseVisitor,
public antlr4::BaseErrorListener {
public:
ParserVisitor(const cel::Source& source, int max_recursion_depth,
const cel::MacroRegistry& macro_registry,
bool add_macro_calls = false,
bool enable_optional_syntax = false);
~ParserVisitor() override;
std::any visit(antlr4::tree::ParseTree* tree) override;
std::any visitStart(CelParser::StartContext* ctx) override;
std::any visitExpr(CelParser::ExprContext* ctx) override;
std::any visitConditionalOr(CelParser::ConditionalOrContext* ctx) override;
std::any visitConditionalAnd(CelParser::ConditionalAndContext* ctx) override;
std::any visitRelation(CelParser::RelationContext* ctx) override;
std::any visitCalc(CelParser::CalcContext* ctx) override;
std::any visitUnary(CelParser::UnaryContext* ctx);
std::any visitLogicalNot(CelParser::LogicalNotContext* ctx) override;
std::any visitNegate(CelParser::NegateContext* ctx) override;
std::any visitSelect(CelParser::SelectContext* ctx) override;
std::any visitMemberCall(CelParser::MemberCallContext* ctx) override;
std::any visitIndex(CelParser::IndexContext* ctx) override;
std::any visitCreateMessage(CelParser::CreateMessageContext* ctx) override;
std::any visitFieldInitializerList(
CelParser::FieldInitializerListContext* ctx) override;
std::vector<StructExprField> visitFields(
CelParser::FieldInitializerListContext* ctx);
std::any visitIdentOrGlobalCall(
CelParser::IdentOrGlobalCallContext* ctx) override;
std::any visitNested(CelParser::NestedContext* ctx) override;
std::any visitCreateList(CelParser::CreateListContext* ctx) override;
std::vector<ListExprElement> visitList(CelParser::ListInitContext* ctx);
std::vector<Expr> visitList(CelParser::ExprListContext* ctx);
std::any visitCreateStruct(CelParser::CreateStructContext* ctx) override;
std::any visitConstantLiteral(
CelParser::ConstantLiteralContext* ctx) override;
std::any visitPrimaryExpr(CelParser::PrimaryExprContext* ctx) override;
std::any visitMemberExpr(CelParser::MemberExprContext* ctx) override;
std::any visitMapInitializerList(
CelParser::MapInitializerListContext* ctx) override;
std::vector<MapExprEntry> visitEntries(
CelParser::MapInitializerListContext* ctx);
std::any visitInt(CelParser::IntContext* ctx) override;
std::any visitUint(CelParser::UintContext* ctx) override;
std::any visitDouble(CelParser::DoubleContext* ctx) override;
std::any visitString(CelParser::StringContext* ctx) override;
std::any visitBytes(CelParser::BytesContext* ctx) override;
std::any visitBoolTrue(CelParser::BoolTrueContext* ctx) override;
std::any visitBoolFalse(CelParser::BoolFalseContext* ctx) override;
std::any visitNull(CelParser::NullContext* ctx) override;
absl::Status GetSourceInfo(google::api::expr::v1alpha1::SourceInfo* source_info) const;
EnrichedSourceInfo enriched_source_info() const;
void syntaxError(antlr4::Recognizer* recognizer,
antlr4::Token* offending_symbol, size_t line, size_t col,
const std::string& msg, std::exception_ptr e) override;
bool HasErrored() const;
std::string ErrorMessage();
private:
template <typename... Args>
Expr GlobalCallOrMacro(int64_t expr_id, absl::string_view function,
Args&&... args) {
std::vector<Expr> arguments;
arguments.reserve(sizeof...(Args));
(arguments.push_back(std::forward<Args>(args)), ...);
return GlobalCallOrMacroImpl(expr_id, function, std::move(arguments));
}
template <typename... Args>
Expr ReceiverCallOrMacro(int64_t expr_id, absl::string_view function,
Expr target, Args&&... args) {
std::vector<Expr> arguments;
arguments.reserve(sizeof...(Args));
(arguments.push_back(std::forward<Args>(args)), ...);
return ReceiverCallOrMacroImpl(expr_id, function, std::move(target),
std::move(arguments));
}
Expr GlobalCallOrMacroImpl(int64_t expr_id, absl::string_view function,
std::vector<Expr> args);
Expr ReceiverCallOrMacroImpl(int64_t expr_id, absl::string_view function,
Expr target, std::vector<Expr> args);
std::string ExtractQualifiedName(antlr4::ParserRuleContext* ctx,
const Expr& e);
antlr4::tree::ParseTree* UnnestContext(antlr4::tree::ParseTree* tree);
private:
const cel::Source& source_;
cel::ParserMacroExprFactory factory_;
const cel::MacroRegistry& macro_registry_;
int recursion_depth_;
const int max_recursion_depth_;
const bool add_macro_calls_;
const bool enable_optional_syntax_;
};
ParserVisitor::ParserVisitor(const cel::Source& source,
const int max_recursion_depth,
const cel::MacroRegistry& macro_registry,
const bool add_macro_calls,
bool enable_optional_syntax)
: source_(source),
factory_(source_),
macro_registry_(macro_registry),
recursion_depth_(0),
max_recursion_depth_(max_recursion_depth),
add_macro_calls_(add_macro_calls),
enable_optional_syntax_(enable_optional_syntax) {}
ParserVisitor::~ParserVisitor() {}
template <typename T, typename = std::enable_if_t<
std::is_base_of<antlr4::tree::ParseTree, T>::value>>
T* tree_as(antlr4::tree::ParseTree* tree) {
return dynamic_cast<T*>(tree);
}
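// Central dispatch: enforces the maximum recursion depth, unnests trivial
// wrapper nodes, then forwards to the rule-specific visitor.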
std::any ParserVisitor::visit(antlr4::tree::ParseTree* tree) {
ScopedIncrement inc(recursion_depth_);
if (recursion_depth_ > max_recursion_depth_) {
return ExprToAny(factory_.ReportError(
absl::StrFormat("Exceeded max recursion depth of %d when parsing.",
max_recursion_depth_)));
}
tree = UnnestContext(tree);
if (auto* ctx = tree_as<CelParser::StartContext>(tree)) {
return visitStart(ctx);
} else if (auto* ctx = tree_as<CelParser::ExprContext>(tree)) {
return visitExpr(ctx);
} else if (auto* ctx = tree_as<CelParser::ConditionalAndContext>(tree)) {
return visitConditionalAnd(ctx);
} else if (auto* ctx = tree_as<CelParser::ConditionalOrContext>(tree)) {
return visitConditionalOr(ctx);
} else if (auto* ctx = tree_as<CelParser::RelationContext>(tree)) {
return visitRelation(ctx);
} else if (auto* ctx = tree_as<CelParser::CalcContext>(tree)) {
return visitCalc(ctx);
} else if (auto* ctx = tree_as<CelParser::LogicalNotContext>(tree)) {
return visitLogicalNot(ctx);
} else if (auto* ctx = tree_as<CelParser::PrimaryExprContext>(tree)) {
return visitPrimaryExpr(ctx);
} else if (auto* ctx = tree_as<CelParser::MemberExprContext>(tree)) {
return visitMemberExpr(ctx);
} else if (auto* ctx = tree_as<CelParser::SelectContext>(tree)) {
return visitSelect(ctx);
} else if (auto* ctx = tree_as<CelParser::MemberCallContext>(tree)) {
return visitMemberCall(ctx);
} else if (auto* ctx = tree_as<CelParser::MapInitializerListContext>(tree)) {
return visitMapInitializerList(ctx);
} else if (auto* ctx = tree_as<CelParser::NegateContext>(tree)) {
return visitNegate(ctx);
} else if (auto* ctx = tree_as<CelParser::IndexContext>(tree)) {
return visitIndex(ctx);
} else if (auto* ctx = tree_as<CelParser::UnaryContext>(tree)) {
return visitUnary(ctx);
} else if (auto* ctx = tree_as<CelParser::CreateListContext>(tree)) {
return visitCreateList(ctx);
} else if (auto* ctx = tree_as<CelParser::CreateMessageContext>(tree)) {
return visitCreateMessage(ctx);
} else if (auto* ctx = tree_as<CelParser::CreateStructContext>(tree)) {
return visitCreateStruct(ctx);
}
if (tree) {
return ExprToAny(
factory_.ReportError(SourceRangeFromParserRuleContext(
tree_as<antlr4::ParserRuleContext>(tree)),
"unknown parsetree type"));
}
return ExprToAny(factory_.ReportError("<<nil>> parsetree"));
}
std::any ParserVisitor::visitPrimaryExpr(CelParser::PrimaryExprContext* pctx) {
CelParser::PrimaryContext* primary = pctx->primary();
if (auto* ctx = tree_as<CelParser::NestedContext>(primary)) {
return visitNested(ctx);
} else if (auto* ctx =
tree_as<CelParser::IdentOrGlobalCallContext>(primary)) {
return visitIdentOrGlobalCall(ctx);
} else if (auto* ctx = tree_as<CelParser::CreateListContext>(primary)) {
return visitCreateList(ctx);
} else if (auto* ctx = tree_as<CelParser::CreateStructContext>(primary)) {
return visitCreateStruct(ctx);
} else if (auto* ctx = tree_as<CelParser::CreateMessageContext>(primary)) {
return visitCreateMessage(ctx);
} else if (auto* ctx = tree_as<CelParser::ConstantLiteralContext>(primary)) {
return visitConstantLiteral(ctx);
}
if (factory_.HasErrors()) {
return ExprToAny(factory_.NewUnspecified(factory_.NextId({})));
}
return ExprToAny(factory_.ReportError(SourceRangeFromParserRuleContext(pctx),
"invalid primary expression"));
}
std::any ParserVisitor::visitMemberExpr(CelParser::MemberExprContext* mctx) {
CelParser::MemberContext* member = mctx->member();
if (auto* ctx = tree_as<CelParser::PrimaryExprContext>(member)) {
return visitPrimaryExpr(ctx);
} else if (auto* ctx = tree_as<CelParser::SelectContext>(member)) {
return visitSelect(ctx);
} else if (auto* ctx = tree_as<CelParser::MemberCallContext>(member)) {
return visitMemberCall(ctx);
} else if (auto* ctx = tree_as<CelParser::IndexContext>(member)) {
return visitIndex(ctx);
}
return ExprToAny(factory_.ReportError(SourceRangeFromParserRuleContext(mctx),
"unsupported simple expression"));
}
std::any ParserVisitor::visitStart(CelParser::StartContext* ctx) {
return visit(ctx->expr());
}
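// Strips parse-tree wrapper rules that carry no semantics (start, expr/or/and
// without operators, relation/calc pass-throughs, parenthesized primaries) so
// dispatch lands on the meaningful node.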
antlr4::tree::ParseTree* ParserVisitor::UnnestContext(
antlr4::tree::ParseTree* tree) {
antlr4::tree::ParseTree* last = nullptr;
while (tree != last) {
last = tree;
if (auto* ctx = tree_as<CelParser::StartContext>(tree)) {
tree = ctx->expr();
}
if (auto* ctx = tree_as<CelParser::ExprContext>(tree)) {
if (ctx->op != nullptr) {
return ctx;
}
tree = ctx->e;
}
if (auto* ctx = tree_as<CelParser::ConditionalOrContext>(tree)) {
if (!ctx->ops.empty()) {
return ctx;
}
tree = ctx->e;
}
if (auto* ctx = tree_as<CelParser::ConditionalAndContext>(tree)) {
if (!ctx->ops.empty()) {
return ctx;
}
tree = ctx->e;
}
if (auto* ctx = tree_as<CelParser::RelationContext>(tree)) {
if (ctx->calc() == nullptr) {
return ctx;
}
tree = ctx->calc();
}
if (auto* ctx = tree_as<CelParser::CalcContext>(tree)) {
if (ctx->unary() == nullptr) {
return ctx;
}
tree = ctx->unary();
}
if (auto* ctx = tree_as<CelParser::MemberExprContext>(tree)) {
tree = ctx->member();
}
if (auto* ctx = tree_as<CelParser::PrimaryExprContext>(tree)) {
if (auto* nested = tree_as<CelParser::NestedContext>(ctx->primary())) {
tree = nested->e;
} else {
return ctx;
}
}
}
return tree;
}
std::any ParserVisitor::visitExpr(CelParser::ExprContext* ctx) {
auto result = ExprFromAny(visit(ctx->e));
if (!ctx->op) {
return ExprToAny(std::move(result));
}
std::vector<Expr> arguments;
arguments.reserve(3);
arguments.push_back(std::move(result));
int64_t op_id = factory_.NextId(SourceRangeFromToken(ctx->op));
arguments.push_back(ExprFromAny(visit(ctx->e1)));
arguments.push_back(ExprFromAny(visit(ctx->e2)));
return ExprToAny(
factory_.NewCall(op_id, CelOperator::CONDITIONAL, std::move(arguments)));
}
std::any ParserVisitor::visitConditionalOr(
CelParser::ConditionalOrContext* ctx) {
auto result = ExprFromAny(visit(ctx->e));
if (ctx->ops.empty()) {
return ExprToAny(std::move(result));
}
ExpressionBalancer b(factory_, CelOperator::LOGICAL_OR, std::move(result));
for (size_t i = 0; i < ctx->ops.size(); ++i) {
auto op = ctx->ops[i];
if (i >= ctx->e1.size()) {
return ExprToAny(
factory_.ReportError(SourceRangeFromParserRuleContext(ctx),
"unexpected character, wanted '||'"));
}
auto next = ExprFromAny(visit(ctx->e1[i]));
int64_t op_id = factory_.NextId(SourceRangeFromToken(op));
b.AddTerm(op_id, std::move(next));
}
return ExprToAny(b.Balance());
}
std::any ParserVisitor::visitConditionalAnd(
CelParser::ConditionalAndContext* ctx) {
auto result = ExprFromAny(visit(ctx->e));
if (ctx->ops.empty()) {
return ExprToAny(std::move(result));
}
ExpressionBalancer b(factory_, CelOperator::LOGICAL_AND, std::move(result));
for (size_t i = 0; i < ctx->ops.size(); ++i) {
auto op = ctx->ops[i];
if (i >= ctx->e1.size()) {
return ExprToAny(
factory_.ReportError(SourceRangeFromParserRuleContext(ctx),
"unexpected character, wanted '&&'"));
}
auto next = ExprFromAny(visit(ctx->e1[i]));
int64_t op_id = factory_.NextId(SourceRangeFromToken(op));
b.AddTerm(op_id, std::move(next));
}
return ExprToAny(b.Balance());
}
std::any ParserVisitor::visitRelation(CelParser::RelationContext* ctx) {
if (ctx->calc()) {
return visit(ctx->calc());
}
std::string op_text;
if (ctx->op) {
op_text = ctx->op->getText();
}
auto op = ReverseLookupOperator(op_text);
if (op) {
auto lhs = ExprFromAny(visit(ctx->relation(0)));
int64_t op_id = factory_.NextId(SourceRangeFromToken(ctx->op));
auto rhs = ExprFromAny(visit(ctx->relation(1)));
return ExprToAny(
GlobalCallOrMacro(op_id, *op, std::move(lhs), std::move(rhs)));
}
return ExprToAny(factory_.ReportError(SourceRangeFromParserRuleContext(ctx),
"operator not found"));
}
std::any ParserVisitor::visitCalc(CelParser::CalcContext* ctx) {
if (ctx->unary()) {
return visit(ctx->unary());
}
std::string op_text;
if (ctx->op) {
op_text = ctx->op->getText();
}
auto op = ReverseLookupOperator(op_text);
if (op) {
auto lhs = ExprFromAny(visit(ctx->calc(0)));
int64_t op_id = factory_.NextId(SourceRangeFromToken(ctx->op));
auto rhs = ExprFromAny(visit(ctx->calc(1)));
return ExprToAny(
GlobalCallOrMacro(op_id, *op, std::move(lhs), std::move(rhs)));
}
return ExprToAny(factory_.ReportError(SourceRangeFromParserRuleContext(ctx),
"operator not found"));
}
std::any ParserVisitor::visitUnary(CelParser::UnaryContext* ctx) {
return ExprToAny(factory_.NewStringConst(
factory_.NextId(SourceRangeFromParserRuleContext(ctx)), "<<error>>"));
}
std::any ParserVisitor::visitLogicalNot(CelParser::LogicalNotContext* ctx) {
if (ctx->ops.size() % 2 == 0) {
return visit(ctx->member());
}
int64_t op_id = factory_.NextId(SourceRangeFromToken(ctx->ops[0]));
auto target = ExprFromAny(visit(ctx->member()));
return ExprToAny(
GlobalCallOrMacro(op_id, CelOperator::LOGICAL_NOT, std::move(target)));
}
std::any ParserVisitor::visitNegate(CelParser::NegateContext* ctx) {
if (ctx->ops.size() % 2 == 0) {
return visit(ctx->member());
}
int64_t op_id = factory_.NextId(SourceRangeFromToken(ctx->ops[0]));
auto target = ExprFromAny(visit(ctx->member()));
return ExprToAny(
GlobalCallOrMacro(op_id, CelOperator::NEGATE, std::move(target)));
}
std::any ParserVisitor::visitSelect(CelParser::SelectContext* ctx) {
auto operand = ExprFromAny(visit(ctx->member()));
if (!ctx->id || !ctx->op) {
return ExprToAny(factory_.NewUnspecified(
factory_.NextId(SourceRangeFromParserRuleContext(ctx))));
}
auto id = ctx->id->getText();
if (ctx->opt != nullptr) {
if (!enable_optional_syntax_) {
return ExprToAny(factory_.ReportError(
SourceRangeFromParserRuleContext(ctx), "unsupported syntax '.?'"));
}
auto op_id = factory_.NextId(SourceRangeFromToken(ctx->op));
std::vector<Expr> arguments;
arguments.reserve(2);
arguments.push_back(std::move(operand));
arguments.push_back(factory_.NewStringConst(
factory_.NextId(SourceRangeFromParserRuleContext(ctx)), std::move(id)));
return ExprToAny(factory_.NewCall(op_id, "_?._", std::move(arguments)));
}
return ExprToAny(
factory_.NewSelect(factory_.NextId(SourceRangeFromToken(ctx->op)),
std::move(operand), std::move(id)));
}
std::any ParserVisitor::visitMemberCall(CelParser::MemberCallContext* ctx) {
auto operand = ExprFromAny(visit(ctx->member()));
if (!ctx->id) {
return ExprToAny(factory_.NewUnspecified(
factory_.NextId(SourceRangeFromParserRuleContext(ctx))));
}
auto id = ctx->id->getText();
int64_t op_id = factory_.NextId(SourceRangeFromToken(ctx->open));
auto args = visitList(ctx->args);
return ExprToAny(
ReceiverCallOrMacroImpl(op_id, id, std::move(operand), std::move(args)));
}
std::any ParserVisitor::visitIndex(CelParser::IndexContext* ctx) {
auto target = ExprFromAny(visit(ctx->member()));
int64_t op_id = factory_.NextId(SourceRangeFromToken(ctx->op));
auto index = ExprFromAny(visit(ctx->index));
if (!enable_optional_syntax_ && ctx->opt != nullptr) {
return ExprToAny(factory_.ReportError(SourceRangeFromParserRuleContext(ctx),
"unsupported syntax '[?'"));
}
return ExprToAny(GlobalCallOrMacro(
op_id, ctx->opt != nullptr ? "_[?_]" : CelOperator::INDEX,
std::move(target), std::move(index)));
}
std::any ParserVisitor::visitCreateMessage(
CelParser::CreateMessageContext* ctx) {
std::vector<std::string> parts;
parts.reserve(ctx->ids.size());
for (const auto* id : ctx->ids) {
parts.push_back(id->getText());
}
std::string name;
if (ctx->leadingDot) {
name.push_back('.');
name.append(absl::StrJoin(parts, "."));
} else {
name = absl::StrJoin(parts, ".");
}
int64_t obj_id = factory_.NextId(SourceRangeFromToken(ctx->op));
std::vector<StructExprField> fields;
if (ctx->entries) {
fields = visitFields(ctx->entries);
}
return ExprToAny(
factory_.NewStruct(obj_id, std::move(name), std::move(fields)));
}
std::any ParserVisitor::visitFieldInitializerList(
CelParser::FieldInitializerListContext* ctx) {
return ExprToAny(factory_.ReportError(SourceRangeFromParserRuleContext(ctx),
"<<unreachable>>"));
}
std::vector<StructExprField> ParserVisitor::visitFields(
CelParser::FieldInitializerListContext* ctx) {
std::vector<StructExprField> res;
if (!ctx || ctx->fields.empty()) {
return res;
}
res.reserve(ctx->fields.size());
for (size_t i = 0; i < ctx->fields.size(); ++i) {
if (i >= ctx->cols.size() || i >= ctx->values.size()) {
return res;
}
const auto* f = ctx->fields[i];
if (f->id == nullptr) {
ABSL_DCHECK(HasErrored());
return res;
}
int64_t init_id = factory_.NextId(SourceRangeFromToken(ctx->cols[i]));
if (!enable_optional_syntax_ && f->opt) {
factory_.ReportError(SourceRangeFromParserRuleContext(ctx),
"unsupported syntax '?'");
continue;
}
auto value = ExprFromAny(visit(ctx->values[i]));
res.push_back(factory_.NewStructField(init_id, f->id->getText(),
std::move(value), f->opt != nullptr));
}
return res;
}
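// Bare identifier or global call. Reserved words are rejected, a leading '.'
// is kept as part of the name for absolute references, and the presence of a
// '(' token turns the identifier into a global call or macro expansion.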
std::any ParserVisitor::visitIdentOrGlobalCall(
CelParser::IdentOrGlobalCallContext* ctx) {
std::string ident_name;
if (ctx->leadingDot) {
ident_name = ".";
}
if (!ctx->id) {
return ExprToAny(factory_.NewUnspecified(
factory_.NextId(SourceRangeFromParserRuleContext(ctx))));
}
if (cel::internal::LexisIsReserved(ctx->id->getText())) {
return ExprToAny(factory_.ReportError(
SourceRangeFromParserRuleContext(ctx),
absl::StrFormat("reserved identifier: %s", ctx->id->getText())));
}
ident_name += ctx->id->getText();
if (ctx->op) {
int64_t op_id = factory_.NextId(SourceRangeFromToken(ctx->op));
auto args = visitList(ctx->args);
return ExprToAny(
GlobalCallOrMacroImpl(op_id, std::move(ident_name), std::move(args)));
}
return ExprToAny(factory_.NewIdent(
factory_.NextId(SourceRangeFromToken(ctx->id)), std::move(ident_name)));
}
std::any ParserVisitor::visitNested(CelParser::NestedContext* ctx) {
return visit(ctx->e);
}
std::any ParserVisitor::visitCreateList(CelParser::CreateListContext* ctx) {
int64_t list_id = factory_.NextId(SourceRangeFromToken(ctx->op));
auto elems = visitList(ctx->elems);
return ExprToAny(factory_.NewList(list_id, std::move(elems)));
}
std::vector<ListExprElement> ParserVisitor::visitList(
CelParser::ListInitContext* ctx) {
std::vector<ListExprElement> rv;
if (!ctx) return rv;
rv.reserve(ctx->elems.size());
for (size_t i = 0; i < ctx->elems.size(); ++i) {
auto* expr_ctx = ctx->elems[i];
if (expr_ctx == nullptr) {
return rv;
}
if (!enable_optional_syntax_ && expr_ctx->opt != nullptr) {
factory_.ReportError(SourceRangeFromParserRuleContext(ctx),
"unsupported syntax '?'");
rv.push_back(factory_.NewListElement(factory_.NewUnspecified(0), false));
continue;
}
rv.push_back(factory_.NewListElement(ExprFromAny(visitExpr(expr_ctx->e)),
expr_ctx->opt != nullptr));
}
return rv;
}
std::vector<Expr> ParserVisitor::visitList(CelParser::ExprListContext* ctx) {
std::vector<Expr> rv;
if (!ctx) return rv;
std::transform(ctx->e.begin(), ctx->e.end(), std::back_inserter(rv),
[this](CelParser::ExprContext* expr_ctx) {
return ExprFromAny(visitExpr(expr_ctx));
});
return rv;
}
std::any ParserVisitor::visitCreateStruct(CelParser::CreateStructContext* ctx) {
int64_t struct_id = factory_.NextId(SourceRangeFromToken(ctx->op));
std::vector<MapExprEntry> entries;
if (ctx->entries) {
entries = visitEntries(ctx->entries);
}
return ExprToAny(factory_.NewMap(struct_id, std::move(entries)));
}
std::any ParserVisitor::visitConstantLiteral(
CelParser::ConstantLiteralContext* clctx) {
CelParser::LiteralContext* literal = clctx->literal();
if (auto* ctx = tree_as<CelParser::IntContext>(literal)) {
return visitInt(ctx);
} else if (auto* ctx = tree_as<CelParser::UintContext>(literal)) {
return visitUint(ctx);
} else if (auto* ctx = tree_as<CelParser::DoubleContext>(literal)) {
return visitDouble(ctx);
} else if (auto* ctx = tree_as<CelParser::StringContext>(literal)) {
return visitString(ctx);
} else if (auto* ctx = tree_as<CelParser::BytesContext>(literal)) {
return visitBytes(ctx);
} else if (auto* ctx = tree_as<CelParser::BoolFalseContext>(literal)) {
return visitBoolFalse(ctx);
} else if (auto* ctx = tree_as<CelParser::BoolTrueContext>(literal)) {
return visitBoolTrue(ctx);
} else if (auto* ctx = tree_as<CelParser::NullContext>(literal)) {
return visitNull(ctx);
}
return ExprToAny(factory_.ReportError(SourceRangeFromParserRuleContext(clctx),
"invalid constant literal expression"));
}
std::any ParserVisitor::visitMapInitializerList(
CelParser::MapInitializerListContext* ctx) {
return ExprToAny(factory_.ReportError(SourceRangeFromParserRuleContext(ctx),
"<<unreachable>>"));
}
std::vector<MapExprEntry> ParserVisitor::visitEntries(
CelParser::MapInitializerListContext* ctx) {
std::vector<MapExprEntry> res;
if (!ctx || ctx->keys.empty()) {
return res;
}
res.reserve(ctx->cols.size());
for (size_t i = 0; i < ctx->cols.size(); ++i) {
auto id = factory_.NextId(SourceRangeFromToken(ctx->cols[i]));
if (!enable_optional_syntax_ && ctx->keys[i]->opt) {
factory_.ReportError(SourceRangeFromParserRuleContext(ctx),
"unsupported syntax '?'");
res.push_back(factory_.NewMapEntry(0, factory_.NewUnspecified(0),
factory_.NewUnspecified(0), false));
continue;
}
auto key = ExprFromAny(visit(ctx->keys[i]->e));
auto value = ExprFromAny(visit(ctx->values[i]));
res.push_back(factory_.NewMapEntry(id, std::move(key), std::move(value),
ctx->keys[i]->opt != nullptr));
}
return res;
}
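// Signed integer literal: an optional '-' sign is prepended to the token
// text, then the value is parsed as hex ("0x" prefix) or decimal; failures
// are reported against the literal's source range.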
std::any ParserVisitor::visitInt(CelParser::IntContext* ctx) {
std::string value;
if (ctx->sign) {
value = ctx->sign->getText();
}
value += ctx->tok->getText();
int64_t int_value;
if (absl::StartsWith(ctx->tok->getText(), "0x")) {
if (absl::SimpleHexAtoi(value, &int_value)) {
return ExprToAny(factory_.NewIntConst(
factory_.NextId(SourceRangeFromParserRuleContext(ctx)), int_value));
} else {
return ExprToAny(factory_.ReportError(
SourceRangeFromParserRuleContext(ctx), "invalid hex int literal"));
}
}
if (absl::SimpleAtoi(value, &int_value)) {
return ExprToAny(factory_.NewIntConst(
factory_.NextId(SourceRangeFromParserRuleContext(ctx)), int_value));
} else {
return ExprToAny(factory_.ReportError(SourceRangeFromParserRuleContext(ctx),
"invalid int literal"));
}
}
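// Unsigned integer literal: the trailing 'u' suffix is dropped before the
// remaining digits are parsed as hex or decimal.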
std::any ParserVisitor::visitUint(CelParser::UintContext* ctx) {
std::string value = ctx->tok->getText();
if (!value.empty()) {
value.resize(value.size() - 1);
}
uint64_t uint_value;
if (absl::StartsWith(ctx->tok->getText(), "0x")) {
if (absl::SimpleHexAtoi(value, &uint_value)) {
return ExprToAny(factory_.NewUintConst(
factory_.NextId(SourceRangeFromParserRuleContext(ctx)), uint_value));
} else {
return ExprToAny(factory_.ReportError(
SourceRangeFromParserRuleContext(ctx), "invalid hex uint literal"));
}
}
if (absl::SimpleAtoi(value, &uint_value)) {
return ExprToAny(factory_.NewUintConst(
factory_.NextId(SourceRangeFromParserRuleContext(ctx)), uint_value));
} else {
return ExprToAny(factory_.ReportError(SourceRangeFromParserRuleContext(ctx),
"invalid uint literal"));
}
}
std::any ParserVisitor::visitDouble(CelParser::DoubleContext* ctx) {
std::string value;
if (ctx->sign) {
value = ctx->sign->getText();
}
value += ctx->tok->getText();
double double_value;
if (absl::SimpleAtod(value, &double_value)) {
return ExprToAny(factory_.NewDoubleConst(
factory_.NextId(SourceRangeFromParserRuleContext(ctx)), double_value));
} else {
return ExprToAny(factory_.ReportError(SourceRangeFromParserRuleContext(ctx),
"invalid double literal"));
}
}
std::any ParserVisitor::visitString(CelParser::StringContext* ctx) {
auto status_or_value = cel::internal::ParseStringLiteral(ctx->tok->getText());
if (!status_or_value.ok()) {
return ExprToAny(factory_.ReportError(SourceRangeFromParserRuleContext(ctx),
status_or_value.status().message()));
}
return ExprToAny(factory_.NewStringConst(
factory_.NextId(SourceRangeFromParserRuleContext(ctx)),
std::move(status_or_value).value()));
}
std::any ParserVisitor::visitBytes(CelParser::BytesContext* ctx) {
auto status_or_value = cel::internal::ParseBytesLiteral(ctx->tok->getText());
if (!status_or_value.ok()) {
return ExprToAny(factory_.ReportError(SourceRangeFromParserRuleContext(ctx),
status_or_value.status().message()));
}
return ExprToAny(factory_.NewBytesConst(
factory_.NextId(SourceRangeFromParserRuleContext(ctx)),
std::move(status_or_value).value()));
}
std::any ParserVisitor::visitBoolTrue(CelParser::BoolTrueContext* ctx) {
return ExprToAny(factory_.NewBoolConst(
factory_.NextId(SourceRangeFromParserRuleContext(ctx)), true));
}
std::any ParserVisitor::visitBoolFalse(CelParser::BoolFalseContext* ctx) {
return ExprToAny(factory_.NewBoolConst(
factory_.NextId(SourceRangeFromParserRuleContext(ctx)), false));
}
std::any ParserVisitor::visitNull(CelParser::NullContext* ctx) {
return ExprToAny(factory_.NewNullConst(
factory_.NextId(SourceRangeFromParserRuleContext(ctx))));
}
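// Copies the accumulated parse metadata into the proto SourceInfo: the source
// description, the begin offset of every expression id, the per-line offsets,
// and any recorded macro call expressions.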
absl::Status ParserVisitor::GetSourceInfo(
google::api::expr::v1alpha1::SourceInfo* source_info) const {
source_info->set_location(source_.description());
for (const auto& positions : factory_.positions()) {
source_info->mutable_positions()->insert(
std::pair{positions.first, positions.second.begin});
}
source_info->mutable_line_offsets()->Reserve(source_.line_offsets().size());
for (const auto& line_offset : source_.line_offsets()) {
source_info->mutable_line_offsets()->Add(line_offset);
}
for (const auto& macro_call : factory_.macro_calls()) {
google::api::expr::v1alpha1::Expr macro_call_proto;
CEL_RETURN_IF_ERROR(cel::extensions::protobuf_internal::ExprToProto(
macro_call.second, &macro_call_proto));
source_info->mutable_macro_calls()->insert(
std::pair{macro_call.first, std::move(macro_call_proto)});
}
return absl::OkStatus();
}
EnrichedSourceInfo ParserVisitor::enriched_source_info() const {
std::map<int64_t, std::pair<int32_t, int32_t>> offsets;
for (const auto& positions : factory_.positions()) {
offsets.insert(
std::pair{positions.first,
std::pair{positions.second.begin, positions.second.end - 1}});
}
return EnrichedSourceInfo(std::move(offsets));
}
void ParserVisitor::syntaxError(antlr4::Recognizer* recognizer,
antlr4::Token* offending_symbol, size_t line,
size_t col, const std::string& msg,
std::exception_ptr e) {
cel::SourceRange range;
if (auto position = source_.GetPosition(cel::SourceLocation{
static_cast<int32_t>(line), static_cast<int32_t>(col)});
position) {
range.begin = *position;
}
factory_.ReportError(range, absl::StrCat("Syntax error: ", msg));
}
bool ParserVisitor::HasErrored() const { return factory_.HasErrors(); }
std::string ParserVisitor::ErrorMessage() { return factory_.ErrorMessage(); }
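// Expands a registered global (non receiver-style) macro of matching name and
// arity in place of an ordinary call. When add_macro_calls_ is set, the
// original call shape is recorded for source info, and the placeholder expr
// id is erased once expansion succeeds; otherwise a plain call is emitted.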
Expr ParserVisitor::GlobalCallOrMacroImpl(int64_t expr_id,
absl::string_view function,
std::vector<Expr> args) {
if (auto macro = macro_registry_.FindMacro(function, args.size(), false);
macro) {
std::vector<Expr> macro_args;
if (add_macro_calls_) {
macro_args.reserve(args.size());
for (const auto& arg : args) {
macro_args.push_back(factory_.BuildMacroCallArg(arg));
}
}
factory_.BeginMacro(factory_.GetSourceRange(expr_id));
auto expr = macro->Expand(factory_, absl::nullopt, absl::MakeSpan(args));
factory_.EndMacro();
if (expr) {
if (add_macro_calls_) {
factory_.AddMacroCall(expr->id(), function, absl::nullopt,
std::move(macro_args));
}
factory_.EraseId(expr_id);
return std::move(*expr);
}
}
return factory_.NewCall(expr_id, function, std::move(args));
}
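// Receiver-style counterpart of GlobalCallOrMacroImpl: the target expression
// is handed to the macro expander, and a member call is produced when no
// macro matches or the expansion declines.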
Expr ParserVisitor::ReceiverCallOrMacroImpl(int64_t expr_id,
absl::string_view function,
Expr target,
std::vector<Expr> args) {
if (auto macro = macro_registry_.FindMacro(function, args.size(), true);
macro) {
Expr macro_target;
std::vector<Expr> macro_args;
if (add_macro_calls_) {
macro_args.reserve(args.size());
macro_target = factory_.BuildMacroCallArg(target);
for (const auto& arg : args) {
macro_args.push_back(factory_.BuildMacroCallArg(arg));
}
}
factory_.BeginMacro(factory_.GetSourceRange(expr_id));
auto expr = macro->Expand(factory_, std::ref(target), absl::MakeSpan(args));
factory_.EndMacro();
if (expr) {
if (add_macro_calls_) {
factory_.AddMacroCall(expr->id(), function, std::move(macro_target),
std::move(macro_args));
}
factory_.EraseId(expr_id);
return std::move(*expr);
}
}
return factory_.NewMemberCall(expr_id, function, std::move(target),
std::move(args));
}
std::string ParserVisitor::ExtractQualifiedName(antlr4::ParserRuleContext* ctx,
const Expr& e) {
if (e == Expr{}) {
return "";
}
if (const auto* ident_expr = absl::get_if<IdentExpr>(&e.kind()); ident_expr) {
return ident_expr->name();
}
if (const auto* select_expr = absl::get_if<SelectExpr>(&e.kind());
select_expr) {
std::string prefix = ExtractQualifiedName(ctx, select_expr->operand());
if (!prefix.empty()) {
return absl::StrCat(prefix, ".", select_expr->field());
}
}
factory_.ReportError(factory_.GetSourceRange(e.id()),
"expected a qualified name");
return "";
}
static constexpr auto kStandardReplacements =
std::array<std::pair<absl::string_view, absl::string_view>, 3>{
std::make_pair("\n", "\\n"),
std::make_pair("\r", "\\r"),
std::make_pair("\t", "\\t"),
};
static constexpr absl::string_view kSingleQuote = "'";
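// Parse-tree listener that counts how deeply `expr` rules are nested and
// aborts with a ParseCancellationException once the configured recursion
// limit is exceeded.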
class ExprRecursionListener final : public ParseTreeListener {
public:
explicit ExprRecursionListener(
const int max_recursion_depth = kDefaultMaxRecursionDepth)
: max_recursion_depth_(max_recursion_depth), recursion_depth_(0) {}
~ExprRecursionListener() override {}
void visitTerminal(TerminalNode* node) override {};
void visitErrorNode(ErrorNode* error) override {};
void enterEveryRule(ParserRuleContext* ctx) override;
void exitEveryRule(ParserRuleContext* ctx) override;
private:
const int max_recursion_depth_;
int recursion_depth_;
};
void ExprRecursionListener::enterEveryRule(ParserRuleContext* ctx) {
if (ctx->getRuleIndex() == CelParser::RuleExpr) {
if (recursion_depth_ > max_recursion_depth_) {
throw ParseCancellationException(
absl::StrFormat("Expression recursion limit exceeded. limit: %d",
max_recursion_depth_));
}
recursion_depth_++;
}
}
void ExprRecursionListener::exitEveryRule(ParserRuleContext* ctx) {
if (ctx->getRuleIndex() == CelParser::RuleExpr) {
recursion_depth_--;
}
}
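// Error strategy that bounds the cost of ANTLR error recovery: it caps both
// the total number of recovery attempts and the number of tokens consumed
// while searching for a synchronization point, throwing
// ParseCancellationException when either limit is hit.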
class RecoveryLimitErrorStrategy final : public DefaultErrorStrategy {
public:
explicit RecoveryLimitErrorStrategy(
int recovery_limit = kDefaultErrorRecoveryLimit,
int recovery_token_lookahead_limit =
kDefaultErrorRecoveryTokenLookaheadLimit)
: recovery_limit_(recovery_limit),
recovery_attempts_(0),
recovery_token_lookahead_limit_(recovery_token_lookahead_limit) {}
void recover(Parser* recognizer, std::exception_ptr e) override {
checkRecoveryLimit(recognizer);
DefaultErrorStrategy::recover(recognizer, e);
}
Token* recoverInline(Parser* recognizer) override {
checkRecoveryLimit(recognizer);
return DefaultErrorStrategy::recoverInline(recognizer);
}
void consumeUntil(Parser* recognizer, const IntervalSet& set) override {
size_t ttype = recognizer->getInputStream()->LA(1);
int recovery_search_depth = 0;
while (ttype != Token::EOF && !set.contains(ttype) &&
recovery_search_depth++ < recovery_token_lookahead_limit_) {
recognizer->consume();
ttype = recognizer->getInputStream()->LA(1);
}
if (recovery_search_depth == recovery_token_lookahead_limit_) {
throw ParseCancellationException("Unable to find a recovery token");
}
}
protected:
std::string escapeWSAndQuote(const std::string& s) const override {
std::string result;
result.reserve(s.size() + 2);
absl::StrAppend(&result, kSingleQuote, s, kSingleQuote);
absl::StrReplaceAll(kStandardReplacements, &result);
return result;
}
private:
void checkRecoveryLimit(Parser* recognizer) {
if (recovery_attempts_++ >= recovery_limit_) {
std::string too_many_errors =
absl::StrFormat("More than %d parse errors.", recovery_limit_);
recognizer->notifyErrorListeners(too_many_errors);
throw ParseCancellationException(too_many_errors);
}
}
int recovery_limit_;
int recovery_attempts_;
int recovery_token_lookahead_limit_;
};
}
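// Public entry points. Parse() uses the standard macro set, plus the optMap /
// optFlatMap macros when optional syntax is enabled, and returns only the
// ParsedExpr proto. A minimal usage sketch (assuming the default arguments
// declared in parser.h):
//
//   auto parsed = google::api::expr::parser::Parse("a < 10 && b.size() > 0");
//   if (parsed.ok()) { /* inspect parsed->expr() and parsed->source_info() */ }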
absl::StatusOr<ParsedExpr> Parse(absl::string_view expression,
absl::string_view description,
const ParserOptions& options) {
std::vector<Macro> macros = Macro::AllMacros();
if (options.enable_optional_syntax) {
macros.push_back(cel::OptMapMacro());
macros.push_back(cel::OptFlatMapMacro());
}
return ParseWithMacros(expression, macros, description, options);
}
absl::StatusOr<ParsedExpr> ParseWithMacros(absl::string_view expression,
const std::vector<Macro>& macros,
absl::string_view description,
const ParserOptions& options) {
CEL_ASSIGN_OR_RETURN(auto verbose_parsed_expr,
EnrichedParse(expression, macros, description, options));
return verbose_parsed_expr.parsed_expr();
}
absl::StatusOr<VerboseParsedExpr> EnrichedParse(
absl::string_view expression, const std::vector<Macro>& macros,
absl::string_view description, const ParserOptions& options) {
CEL_ASSIGN_OR_RETURN(auto source,
cel::NewSource(expression, std::string(description)));
cel::MacroRegistry macro_registry;
CEL_RETURN_IF_ERROR(macro_registry.RegisterMacros(macros));
return EnrichedParse(*source, macro_registry, options);
}
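// Core parse pipeline: enforces the codepoint size limit, wires the ANTLR
// lexer and parser to this file's error listeners, recursion listener and
// recovery-limited error strategy, runs the visitor, and converts the result
// into a ParsedExpr proto plus enriched source info. ANTLR exceptions are
// translated into absl::Status errors rather than propagated.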
absl::StatusOr<VerboseParsedExpr> EnrichedParse(
const cel::Source& source, const cel::MacroRegistry& registry,
const ParserOptions& options) {
try {
CodePointStream input(source.content(), source.description());
if (input.size() > options.expression_size_codepoint_limit) {
return absl::InvalidArgumentError(absl::StrCat(
"expression size exceeds codepoint limit.", " input size: ",
input.size(), ", limit: ", options.expression_size_codepoint_limit));
}
CelLexer lexer(&input);
CommonTokenStream tokens(&lexer);
CelParser parser(&tokens);
ExprRecursionListener listener(options.max_recursion_depth);
ParserVisitor visitor(source, options.max_recursion_depth, registry,
options.add_macro_calls,
options.enable_optional_syntax);
lexer.removeErrorListeners();
parser.removeErrorListeners();
lexer.addErrorListener(&visitor);
parser.addErrorListener(&visitor);
parser.addParseListener(&listener);
parser.setErrorHandler(std::make_shared<RecoveryLimitErrorStrategy>(
options.error_recovery_limit,
options.error_recovery_token_lookahead_limit));
Expr expr;
try {
expr = ExprFromAny(visitor.visit(parser.start()));
} catch (const ParseCancellationException& e) {
if (visitor.HasErrored()) {
return absl::InvalidArgumentError(visitor.ErrorMessage());
}
return absl::CancelledError(e.what());
}
if (visitor.HasErrored()) {
return absl::InvalidArgumentError(visitor.ErrorMessage());
}
ParsedExpr parsed_expr;
CEL_RETURN_IF_ERROR(cel::extensions::protobuf_internal::ExprToProto(
expr, parsed_expr.mutable_expr()));
CEL_RETURN_IF_ERROR(
visitor.GetSourceInfo(parsed_expr.mutable_source_info()));
auto enriched_source_info = visitor.enriched_source_info();
return VerboseParsedExpr(std::move(parsed_expr),
std::move(enriched_source_info));
} catch (const std::exception& e) {
return absl::AbortedError(e.what());
} catch (const char* what) {
return absl::AbortedError(what);
} catch (...) {
return absl::UnknownError("An unknown exception occurred");
}
}
absl::StatusOr<google::api::expr::v1alpha1::ParsedExpr> Parse(
const cel::Source& source, const cel::MacroRegistry& registry,
const ParserOptions& options) {
CEL_ASSIGN_OR_RETURN(auto verbose_expr,
EnrichedParse(source, registry, options));
return verbose_expr.parsed_expr();
}
} | #include "parser/parser.h"
#include <list>
#include <string>
#include <thread>
#include <utility>
#include <vector>
#include "google/api/expr/v1alpha1/syntax.pb.h"
#include "absl/algorithm/container.h"
#include "absl/strings/ascii.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_join.h"
#include "absl/types/optional.h"
#include "internal/benchmark.h"
#include "internal/testing.h"
#include "parser/macro.h"
#include "parser/options.h"
#include "parser/source_factory.h"
#include "testutil/expr_printer.h"
namespace google::api::expr::parser {
namespace {
using ::absl_testing::IsOk;
using ::google::api::expr::v1alpha1::Expr;
using ::testing::HasSubstr;
using ::testing::Not;
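// One table-driven parser test case. Field meanings, inferred from how
// ExpressionTest uses them: I = input expression, P = expected adorned parse
// tree, E = expected error text, L = expected location-adorned output,
// R = expected enriched source info offsets, M = expected macro call records,
// and `benchmark` marks cases that BM_Parse includes.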
struct TestInfo {
TestInfo(const std::string& I, const std::string& P,
const std::string& E = "", const std::string& L = "",
const std::string& R = "", const std::string& M = "",
bool benchmark = true)
: I(I), P(P), E(E), L(L), R(R), M(M), benchmark(benchmark) {}
std::string I;
std::string P;
std::string E;
std::string L;
std::string R;
std::string M;
bool benchmark;
};
std::vector<TestInfo> test_cases = {
{"x * 2",
"_*_(\n"
" x^#1:Expr.Ident#,\n"
" 2^#3:int64#\n"
")^#2:Expr.Call#"},
{"x * 2u",
"_*_(\n"
" x^#1:Expr.Ident#,\n"
" 2u^#3:uint64#\n"
")^#2:Expr.Call#"},
{"x * 2.0",
"_*_(\n"
" x^#1:Expr.Ident#,\n"
" 2.^#3:double#\n"
")^#2:Expr.Call#"},
{"\"\\u2764\"", "\"\u2764\"^#1:string#"},
{"\"\u2764\"", "\"\u2764\"^#1:string#"},
{"! false",
"!_(\n"
" false^#2:bool#\n"
")^#1:Expr.Call#"},
{"-a",
"-_(\n"
" a^#2:Expr.Ident#\n"
")^#1:Expr.Call#"},
{"a.b(5)",
"a^#1:Expr.Ident#.b(\n"
" 5^#3:int64#\n"
")^#2:Expr.Call#"},
{"a[3]",
"_[_](\n"
" a^#1:Expr.Ident#,\n"
" 3^#3:int64#\n"
")^#2:Expr.Call#"},
{"SomeMessage{foo: 5, bar: \"xyz\"}",
"SomeMessage{\n"
" foo:5^#3:int64#^#2:Expr.CreateStruct.Entry#,\n"
" bar:\"xyz\"^#5:string#^#4:Expr.CreateStruct.Entry#\n"
"}^#1:Expr.CreateStruct#"},
{"[3, 4, 5]",
"[\n"
" 3^#2:int64#,\n"
" 4^#3:int64#,\n"
" 5^#4:int64#\n"
"]^#1:Expr.CreateList#"},
{"{foo: 5, bar: \"xyz\"}",
"{\n"
" foo^#3:Expr.Ident#:5^#4:int64#^#2:Expr.CreateStruct.Entry#,\n"
" bar^#6:Expr.Ident#:\"xyz\"^#7:string#^#5:Expr.CreateStruct.Entry#\n"
"}^#1:Expr.CreateStruct#"},
{"a > 5 && a < 10",
"_&&_(\n"
" _>_(\n"
" a^#1:Expr.Ident#,\n"
" 5^#3:int64#\n"
" )^#2:Expr.Call#,\n"
" _<_(\n"
" a^#4:Expr.Ident#,\n"
" 10^#6:int64#\n"
" )^#5:Expr.Call#\n"
")^#7:Expr.Call#"},
{"a < 5 || a > 10",
"_||_(\n"
" _<_(\n"
" a^#1:Expr.Ident#,\n"
" 5^#3:int64#\n"
" )^#2:Expr.Call#,\n"
" _>_(\n"
" a^#4:Expr.Ident#,\n"
" 10^#6:int64#\n"
" )^#5:Expr.Call#\n"
")^#7:Expr.Call#"},
{"{", "",
"ERROR: <input>:1:2: Syntax error: mismatched input '<EOF>' expecting "
"{'[', "
"'{', '}', '(', '.', ',', '-', '!', '\\u003F', 'true', 'false', 'null', "
"NUM_FLOAT, "
"NUM_INT, "
"NUM_UINT, STRING, BYTES, IDENTIFIER}\n | {\n"
" | .^"},
{"\"A\"", "\"A\"^#1:string#"},
{"true", "true^#1:bool#"},
{"false", "false^#1:bool#"},
{"0", "0^#1:int64#"},
{"42", "42^#1:int64#"},
{"0u", "0u^#1:uint64#"},
{"23u", "23u^#1:uint64#"},
{"24u", "24u^#1:uint64#"},
{"0xAu", "10u^#1:uint64#"},
{"-0xA", "-10^#1:int64#"},
{"0xA", "10^#1:int64#"},
{"-1", "-1^#1:int64#"},
{"4--4",
"_-_(\n"
" 4^#1:int64#,\n"
" -4^#3:int64#\n"
")^#2:Expr.Call#"},
{"4--4.1",
"_-_(\n"
" 4^#1:int64#,\n"
" -4.1^#3:double#\n"
")^#2:Expr.Call#"},
{"b\"abc\"", "b\"abc\"^#1:bytes#"},
{"23.39", "23.39^#1:double#"},
{"!a",
"!_(\n"
" a^#2:Expr.Ident#\n"
")^#1:Expr.Call#"},
{"null", "null^#1:NullValue#"},
{"a", "a^#1:Expr.Ident#"},
{"a?b:c",
"_?_:_(\n"
" a^#1:Expr.Ident#,\n"
" b^#3:Expr.Ident#,\n"
" c^#4:Expr.Ident#\n"
")^#2:Expr.Call#"},
{"a || b",
"_||_(\n"
" a^#1:Expr.Ident#,\n"
" b^#2:Expr.Ident#\n"
")^#3:Expr.Call#"},
{"a || b || c || d || e || f ",
"_||_(\n"
" _||_(\n"
" _||_(\n"
" a^#1:Expr.Ident#,\n"
" b^#2:Expr.Ident#\n"
" )^#3:Expr.Call#,\n"
" c^#4:Expr.Ident#\n"
" )^#5:Expr.Call#,\n"
" _||_(\n"
" _||_(\n"
" d^#6:Expr.Ident#,\n"
" e^#8:Expr.Ident#\n"
" )^#9:Expr.Call#,\n"
" f^#10:Expr.Ident#\n"
" )^#11:Expr.Call#\n"
")^#7:Expr.Call#"},
{"a && b",
"_&&_(\n"
" a^#1:Expr.Ident#,\n"
" b^#2:Expr.Ident#\n"
")^#3:Expr.Call#"},
{"a && b && c && d && e && f && g",
"_&&_(\n"
" _&&_(\n"
" _&&_(\n"
" a^#1:Expr.Ident#,\n"
" b^#2:Expr.Ident#\n"
" )^#3:Expr.Call#,\n"
" _&&_(\n"
" c^#4:Expr.Ident#,\n"
" d^#6:Expr.Ident#\n"
" )^#7:Expr.Call#\n"
" )^#5:Expr.Call#,\n"
" _&&_(\n"
" _&&_(\n"
" e^#8:Expr.Ident#,\n"
" f^#10:Expr.Ident#\n"
" )^#11:Expr.Call#,\n"
" g^#12:Expr.Ident#\n"
" )^#13:Expr.Call#\n"
")^#9:Expr.Call#"},
{"a && b && c && d || e && f && g && h",
"_||_(\n"
" _&&_(\n"
" _&&_(\n"
" a^#1:Expr.Ident#,\n"
" b^#2:Expr.Ident#\n"
" )^#3:Expr.Call#,\n"
" _&&_(\n"
" c^#4:Expr.Ident#,\n"
" d^#6:Expr.Ident#\n"
" )^#7:Expr.Call#\n"
" )^#5:Expr.Call#,\n"
" _&&_(\n"
" _&&_(\n"
" e^#8:Expr.Ident#,\n"
" f^#9:Expr.Ident#\n"
" )^#10:Expr.Call#,\n"
" _&&_(\n"
" g^#11:Expr.Ident#,\n"
" h^#13:Expr.Ident#\n"
" )^#14:Expr.Call#\n"
" )^#12:Expr.Call#\n"
")^#15:Expr.Call#"},
{"a + b",
"_+_(\n"
" a^#1:Expr.Ident#,\n"
" b^#3:Expr.Ident#\n"
")^#2:Expr.Call#"},
{"a - b",
"_-_(\n"
" a^#1:Expr.Ident#,\n"
" b^#3:Expr.Ident#\n"
")^#2:Expr.Call#"},
{"a * b",
"_*_(\n"
" a^#1:Expr.Ident#,\n"
" b^#3:Expr.Ident#\n"
")^#2:Expr.Call#"},
{"a / b",
"_/_(\n"
" a^#1:Expr.Ident#,\n"
" b^#3:Expr.Ident#\n"
")^#2:Expr.Call#"},
{
"a % b",
"_%_(\n"
" a^#1:Expr.Ident#,\n"
" b^#3:Expr.Ident#\n"
")^#2:Expr.Call#",
},
{"a in b",
"@in(\n"
" a^#1:Expr.Ident#,\n"
" b^#3:Expr.Ident#\n"
")^#2:Expr.Call#"},
{"a == b",
"_==_(\n"
" a^#1:Expr.Ident#,\n"
" b^#3:Expr.Ident#\n"
")^#2:Expr.Call#"},
{"a != b",
"_!=_(\n"
" a^#1:Expr.Ident#,\n"
" b^#3:Expr.Ident#\n"
")^#2:Expr.Call#"},
{"a > b",
"_>_(\n"
" a^#1:Expr.Ident#,\n"
" b^#3:Expr.Ident#\n"
")^#2:Expr.Call#"},
{"a >= b",
"_>=_(\n"
" a^#1:Expr.Ident#,\n"
" b^#3:Expr.Ident#\n"
")^#2:Expr.Call#"},
{"a < b",
"_<_(\n"
" a^#1:Expr.Ident#,\n"
" b^#3:Expr.Ident#\n"
")^#2:Expr.Call#"},
{"a <= b",
"_<=_(\n"
" a^#1:Expr.Ident#,\n"
" b^#3:Expr.Ident#\n"
")^#2:Expr.Call#"},
{"a.b", "a^#1:Expr.Ident#.b^#2:Expr.Select#"},
{"a.b.c", "a^#1:Expr.Ident#.b^#2:Expr.Select#.c^#3:Expr.Select#"},
{"a[b]",
"_[_](\n"
" a^#1:Expr.Ident#,\n"
" b^#3:Expr.Ident#\n"
")^#2:Expr.Call#"},
{"foo{ }", "foo{}^#1:Expr.CreateStruct#"},
{"foo{ a:b }",
"foo{\n"
" a:b^#3:Expr.Ident#^#2:Expr.CreateStruct.Entry#\n"
"}^#1:Expr.CreateStruct#"},
{"foo{ a:b, c:d }",
"foo{\n"
" a:b^#3:Expr.Ident#^#2:Expr.CreateStruct.Entry#,\n"
" c:d^#5:Expr.Ident#^#4:Expr.CreateStruct.Entry#\n"
"}^#1:Expr.CreateStruct#"},
{"{}", "{}^#1:Expr.CreateStruct#"},
{"{a:b, c:d}",
"{\n"
" a^#3:Expr.Ident#:b^#4:Expr.Ident#^#2:Expr.CreateStruct.Entry#,\n"
" c^#6:Expr.Ident#:d^#7:Expr.Ident#^#5:Expr.CreateStruct.Entry#\n"
"}^#1:Expr.CreateStruct#"},
{"[]", "[]^#1:Expr.CreateList#"},
{"[a]",
"[\n"
" a^#2:Expr.Ident#\n"
"]^#1:Expr.CreateList#"},
{"[a, b, c]",
"[\n"
" a^#2:Expr.Ident#,\n"
" b^#3:Expr.Ident#,\n"
" c^#4:Expr.Ident#\n"
"]^#1:Expr.CreateList#"},
{"(a)", "a^#1:Expr.Ident#"},
{"((a))", "a^#1:Expr.Ident#"},
{"a()", "a()^#1:Expr.Call#"},
{"a(b)",
"a(\n"
" b^#2:Expr.Ident#\n"
")^#1:Expr.Call#"},
{"a(b, c)",
"a(\n"
" b^#2:Expr.Ident#,\n"
" c^#3:Expr.Ident#\n"
")^#1:Expr.Call#"},
{"a.b()", "a^#1:Expr.Ident#.b()^#2:Expr.Call#"},
{
"a.b(c)",
"a^#1:Expr.Ident#.b(\n"
" c^#3:Expr.Ident#\n"
")^#2:Expr.Call#",
"",
"a^#1[1,0]#.b(\n"
" c^#3[1,4]#\n"
")^#2[1,3]#",
"[1,0,0]^#[2,3,3]^#[3,4,4]",
},
{
"aaa.bbb(ccc)",
"aaa^#1:Expr.Ident#.bbb(\n"
" ccc^#3:Expr.Ident#\n"
")^#2:Expr.Call#",
"",
"aaa^#1[1,0]#.bbb(\n"
" ccc^#3[1,8]#\n"
")^#2[1,7]#",
"[1,0,2]^#[2,7,7]^#[3,8,10]",
},
{"*@a | b", "",
"ERROR: <input>:1:1: Syntax error: extraneous input '*' expecting {'[', "
"'{', "
"'(', '.', '-', '!', 'true', 'false', 'null', NUM_FLOAT, NUM_INT, "
"NUM_UINT, STRING, BYTES, IDENTIFIER}\n"
" | *@a | b\n"
" | ^\n"
"ERROR: <input>:1:2: Syntax error: token recognition error at: '@'\n"
" | *@a | b\n"
" | .^\n"
"ERROR: <input>:1:5: Syntax error: token recognition error at: '| '\n"
" | *@a | b\n"
" | ....^\n"
"ERROR: <input>:1:7: Syntax error: extraneous input 'b' expecting <EOF>\n"
" | *@a | b\n"
" | ......^"},
{"a | b", "",
"ERROR: <input>:1:3: Syntax error: token recognition error at: '| '\n"
" | a | b\n"
" | ..^\n"
"ERROR: <input>:1:5: Syntax error: extraneous input 'b' expecting <EOF>\n"
" | a | b\n"
" | ....^"},
{"?", "",
"ERROR: <input>:1:1: Syntax error: mismatched input '?' expecting "
"{'[', '{', '(', '.', '-', '!', 'true', 'false', 'null', NUM_FLOAT, "
"NUM_INT, NUM_UINT, STRING, BYTES, IDENTIFIER}\n | ?\n | ^\n"
"ERROR: <input>:1:2: Syntax error: mismatched input '<EOF>' expecting "
"{'[', '{', '(', '.', '-', '!', 'true', 'false', 'null', NUM_FLOAT, "
"NUM_INT, NUM_UINT, STRING, BYTES, IDENTIFIER}\n | ?\n | .^\n"
"ERROR: <input>:4294967295:0: <<nil>> parsetree"},
{"t{>C}", "",
"ERROR: <input>:1:3: Syntax error: extraneous input '>' expecting {'}', "
"',', '\\u003F', IDENTIFIER}\n | t{>C}\n | ..^\nERROR: <input>:1:5: "
"Syntax error: "
"mismatched input '}' expecting ':'\n | t{>C}\n | ....^"},
{"has(m.f)", "m^#2:Expr.Ident#.f~test-only~^#4:Expr.Select#", "",
"m^#2[1,4]#.f~test-only~^#4[1,3]#", "[2,4,4]^#[3,5,5]^#[4,3,3]",
"has(\n"
" m^#2:Expr.Ident#.f^#3:Expr.Select#\n"
")^#4:has"},
{"m.exists_one(v, f)",
"__comprehension__(\n"
"
" v,\n"
"
" m^#1:Expr.Ident#,\n"
"
" __result__,\n"
"
" 0^#5:int64#,\n"
"
" true^#6:bool#,\n"
"
" _?_:_(\n"
" f^#4:Expr.Ident#,\n"
" _+_(\n"
" __result__^#7:Expr.Ident#,\n"
" 1^#8:int64#\n"
" )^#9:Expr.Call#,\n"
" __result__^#10:Expr.Ident#\n"
" )^#11:Expr.Call#,\n"
"
" _==_(\n"
" __result__^#12:Expr.Ident#,\n"
" 1^#13:int64#\n"
" )^#14:Expr.Call#)^#15:Expr.Comprehension#",
"", "", "",
"m^#1:Expr.Ident#.exists_one(\n"
" v^#3:Expr.Ident#,\n"
" f^#4:Expr.Ident#\n"
")^#15:exists_one"},
{"m.map(v, f)",
"__comprehension__(\n"
"
" v,\n"
"
" m^#1:Expr.Ident#,\n"
"
" __result__,\n"
"
" []^#5:Expr.CreateList#,\n"
"
" true^#6:bool#,\n"
"
" _+_(\n"
" __result__^#7:Expr.Ident#,\n"
" [\n"
" f^#4:Expr.Ident#\n"
" ]^#8:Expr.CreateList#\n"
" )^#9:Expr.Call#,\n"
"
" __result__^#10:Expr.Ident#)^#11:Expr.Comprehension#",
"", "", "",
"m^#1:Expr.Ident#.map(\n"
" v^#3:Expr.Ident#,\n"
" f^#4:Expr.Ident#\n"
")^#11:map"},
{"m.map(v, p, f)",
"__comprehension__(\n"
"
" v,\n"
"
" m^#1:Expr.Ident#,\n"
"
" __result__,\n"
"
" []^#6:Expr.CreateList#,\n"
"
" true^#7:bool#,\n"
"
" _?_:_(\n"
" p^#4:Expr.Ident#,\n"
" _+_(\n"
" __result__^#8:Expr.Ident#,\n"
" [\n"
" f^#5:Expr.Ident#\n"
" ]^#9:Expr.CreateList#\n"
" )^#10:Expr.Call#,\n"
" __result__^#11:Expr.Ident#\n"
" )^#12:Expr.Call#,\n"
"
" __result__^#13:Expr.Ident#)^#14:Expr.Comprehension#",
"", "", "",
"m^#1:Expr.Ident#.map(\n"
" v^#3:Expr.Ident#,\n"
" p^#4:Expr.Ident#,\n"
" f^#5:Expr.Ident#\n"
")^#14:map"},
{"m.filter(v, p)",
"__comprehension__(\n"
"
" v,\n"
"
" m^#1:Expr.Ident#,\n"
"
" __result__,\n"
"
" []^#5:Expr.CreateList#,\n"
"
" true^#6:bool#,\n"
"
" _?_:_(\n"
" p^#4:Expr.Ident#,\n"
" _+_(\n"
" __result__^#7:Expr.Ident#,\n"
" [\n"
" v^#3:Expr.Ident#\n"
" ]^#8:Expr.CreateList#\n"
" )^#9:Expr.Call#,\n"
" __result__^#10:Expr.Ident#\n"
" )^#11:Expr.Call#,\n"
"
" __result__^#12:Expr.Ident#)^#13:Expr.Comprehension#",
"", "", "",
"m^#1:Expr.Ident#.filter(\n"
" v^#3:Expr.Ident#,\n"
" p^#4:Expr.Ident#\n"
")^#13:filter"},
{"[] + [1,2,3,] + [4]",
"_+_(\n"
" _+_(\n"
" []^#1:Expr.CreateList#,\n"
" [\n"
" 1^#4:int64#,\n"
" 2^#5:int64#,\n"
" 3^#6:int64#\n"
" ]^#3:Expr.CreateList#\n"
" )^#2:Expr.Call#,\n"
" [\n"
" 4^#9:int64#\n"
" ]^#8:Expr.CreateList#\n"
")^#7:Expr.Call#"},
{"{1:2u, 2:3u}",
"{\n"
" 1^#3:int64#:2u^#4:uint64#^#2:Expr.CreateStruct.Entry#,\n"
" 2^#6:int64#:3u^#7:uint64#^#5:Expr.CreateStruct.Entry#\n"
"}^#1:Expr.CreateStruct#"},
{"TestAllTypes{single_int32: 1, single_int64: 2}",
"TestAllTypes{\n"
" single_int32:1^#3:int64#^#2:Expr.CreateStruct.Entry#,\n"
" single_int64:2^#5:int64#^#4:Expr.CreateStruct.Entry#\n"
"}^#1:Expr.CreateStruct#"},
{"TestAllTypes(){single_int32: 1, single_int64: 2}", "",
"ERROR: <input>:1:15: Syntax error: mismatched input '{' expecting <EOF>\n"
" | TestAllTypes(){single_int32: 1, single_int64: 2}\n"
" | ..............^"},
{"size(x) == x.size()",
"_==_(\n"
" size(\n"
" x^#2:Expr.Ident#\n"
" )^#1:Expr.Call#,\n"
" x^#4:Expr.Ident#.size()^#5:Expr.Call#\n"
")^#3:Expr.Call#"},
{"1 + $", "",
"ERROR: <input>:1:5: Syntax error: token recognition error at: '$'\n"
" | 1 + $\n"
" | ....^\n"
"ERROR: <input>:1:6: Syntax error: mismatched input '<EOF>' expecting "
"{'[', "
"'{', '(', '.', '-', '!', 'true', 'false', 'null', NUM_FLOAT, NUM_INT, "
"NUM_UINT, STRING, BYTES, IDENTIFIER}\n"
" | 1 + $\n"
" | .....^"},
{"1 + 2\n"
"3 +",
"",
"ERROR: <input>:2:1: Syntax error: mismatched input '3' expecting <EOF>\n"
" | 3 +\n"
" | ^"},
{"\"\\\"\"", "\"\\\"\"^#1:string#"},
{"[1,3,4][0]",
"_[_](\n"
" [\n"
" 1^#2:int64#,\n"
" 3^#3:int64#,\n"
" 4^#4:int64#\n"
" ]^#1:Expr.CreateList#,\n"
" 0^#6:int64#\n"
")^#5:Expr.Call#"},
{"1.all(2, 3)", "",
"ERROR: <input>:1:7: all() variable name must be a simple identifier\n"
" | 1.all(2, 3)\n"
" | ......^"},
{"x[\"a\"].single_int32 == 23",
"_==_(\n"
" _[_](\n"
" x^#1:Expr.Ident#,\n"
" \"a\"^#3:string#\n"
" )^#2:Expr.Call#.single_int32^#4:Expr.Select#,\n"
" 23^#6:int64#\n"
")^#5:Expr.Call#"},
{"x.single_nested_message != null",
"_!=_(\n"
" x^#1:Expr.Ident#.single_nested_message^#2:Expr.Select#,\n"
" null^#4:NullValue#\n"
")^#3:Expr.Call#"},
{"false && !true || false ? 2 : 3",
"_?_:_(\n"
" _||_(\n"
" _&&_(\n"
" false^#1:bool#,\n"
" !_(\n"
" true^#3:bool#\n"
" )^#2:Expr.Call#\n"
" )^#4:Expr.Call#,\n"
" false^#5:bool#\n"
" )^#6:Expr.Call#,\n"
" 2^#8:int64#,\n"
" 3^#9:int64#\n"
")^#7:Expr.Call#"},
{"b\"abc\" + B\"def\"",
"_+_(\n"
" b\"abc\"^#1:bytes#,\n"
" b\"def\"^#3:bytes#\n"
")^#2:Expr.Call#"},
{"1 + 2 * 3 - 1 / 2 == 6 % 1",
"_==_(\n"
" _-_(\n"
" _+_(\n"
" 1^#1:int64#,\n"
" _*_(\n"
" 2^#3:int64#,\n"
" 3^#5:int64#\n"
" )^#4:Expr.Call#\n"
" )^#2:Expr.Call#,\n"
" _/_(\n"
" 1^#7:int64#,\n"
" 2^#9:int64#\n"
" )^#8:Expr.Call#\n"
" )^#6:Expr.Call#,\n"
" _%_(\n"
" 6^#11:int64#,\n"
" 1^#13:int64#\n"
" )^#12:Expr.Call#\n"
")^#10:Expr.Call#"},
{"---a",
"-_(\n"
" a^#2:Expr.Ident#\n"
")^#1:Expr.Call#"},
{"1 + +", "",
"ERROR: <input>:1:5: Syntax error: mismatched input '+' expecting {'[', "
"'{',"
" '(', '.', '-', '!', 'true', 'false', 'null', NUM_FLOAT, NUM_INT, "
"NUM_UINT,"
" STRING, BYTES, IDENTIFIER}\n"
" | 1 + +\n"
" | ....^\n"
"ERROR: <input>:1:6: Syntax error: mismatched input '<EOF>' expecting "
"{'[', "
"'{', '(', '.', '-', '!', 'true', 'false', 'null', NUM_FLOAT, NUM_INT, "
"NUM_UINT, STRING, BYTES, IDENTIFIER}\n"
" | 1 + +\n"
" | .....^"},
{"\"abc\" + \"def\"",
"_+_(\n"
" \"abc\"^#1:string#,\n"
" \"def\"^#3:string#\n"
")^#2:Expr.Call#"},
{"{\"a\": 1}.\"a\"", "",
"ERROR: <input>:1:10: Syntax error: no viable alternative at input "
"'.\"a\"'\n"
" | {\"a\": 1}.\"a\"\n"
" | .........^"},
{"\"\\xC3\\XBF\"", "\"ÿ\"^#1:string#"},
{"\"\\303\\277\"", "\"ÿ\"^#1:string#"},
{"\"hi\\u263A \\u263Athere\"", "\"hi☺ ☺there\"^#1:string#"},
{"\"\\U000003A8\\?\"", "\"Ψ?\"^#1:string#"},
{"\"\\a\\b\\f\\n\\r\\t\\v'\\\"\\\\\\? Legal escapes\"",
"\"\\x07\\x08\\x0c\\n\\r\\t\\x0b'\\\"\\\\? Legal escapes\"^#1:string#"},
{"\"\\xFh\"", "",
"ERROR: <input>:1:1: Syntax error: token recognition error at: '\"\\xFh'\n"
" | \"\\xFh\"\n"
" | ^\n"
"ERROR: <input>:1:6: Syntax error: token recognition error at: '\"'\n"
" | \"\\xFh\"\n"
" | .....^\n"
"ERROR: <input>:1:7: Syntax error: mismatched input '<EOF>' expecting "
"{'[', "
"'{', '(', '.', '-', '!', 'true', 'false', 'null', NUM_FLOAT, NUM_INT, "
"NUM_UINT, STRING, BYTES, IDENTIFIER}\n"
" | \"\\xFh\"\n"
" | ......^"},
{"\"\\a\\b\\f\\n\\r\\t\\v\\'\\\"\\\\\\? Illegal escape \\>\"", "",
"ERROR: <input>:1:1: Syntax error: token recognition error at: "
"'\"\\a\\b\\f\\n\\r\\t\\v\\'\\\"\\\\\\? Illegal escape \\>'\n"
" | \"\\a\\b\\f\\n\\r\\t\\v\\'\\\"\\\\\\? Illegal escape \\>\"\n"
" | ^\n"
"ERROR: <input>:1:42: Syntax error: token recognition error at: '\"'\n"
" | \"\\a\\b\\f\\n\\r\\t\\v\\'\\\"\\\\\\? Illegal escape \\>\"\n"
" | .........................................^\n"
"ERROR: <input>:1:43: Syntax error: mismatched input '<EOF>' expecting "
"{'[',"
" '{', '(', '.', '-', '!', 'true', 'false', 'null', NUM_FLOAT, NUM_INT, "
"NUM_UINT, STRING, BYTES, IDENTIFIER}\n"
" | \"\\a\\b\\f\\n\\r\\t\\v\\'\\\"\\\\\\? Illegal escape \\>\"\n"
" | ..........................................^"},
{"'😁' in ['😁', '😑', '😦']",
"@in(\n"
" \"😁\"^#1:string#,\n"
" [\n"
" \"😁\"^#4:string#,\n"
" \"😑\"^#5:string#,\n"
" \"😦\"^#6:string#\n"
" ]^#3:Expr.CreateList#\n"
")^#2:Expr.Call#"},
{"'\u00ff' in ['\u00ff', '\u00ff', '\u00ff']",
"@in(\n"
" \"\u00ff\"^#1:string#,\n"
" [\n"
" \"\u00ff\"^#4:string#,\n"
" \"\u00ff\"^#5:string#,\n"
" \"\u00ff\"^#6:string#\n"
" ]^#3:Expr.CreateList#\n"
")^#2:Expr.Call#"},
{"'\u00ff' in ['\uffff', '\U00100000', '\U0010ffff']",
"@in(\n"
" \"\u00ff\"^#1:string#,\n"
" [\n"
" \"\uffff\"^#4:string#,\n"
" \"\U00100000\"^#5:string#,\n"
" \"\U0010ffff\"^#6:string#\n"
" ]^#3:Expr.CreateList#\n"
")^#2:Expr.Call#"},
{"'\u00ff' in ['\U00100000', '\uffff', '\U0010ffff']",
"@in(\n"
" \"\u00ff\"^#1:string#,\n"
" [\n"
" \"\U00100000\"^#4:string#,\n"
" \"\uffff\"^#5:string#,\n"
" \"\U0010ffff\"^#6:string#\n"
" ]^#3:Expr.CreateList#\n"
")^#2:Expr.Call#"},
{"'😁' in ['😁', '😑', '😦']\n"
" && in.😁",
"",
"ERROR: <input>:2:7: Syntax error: extraneous input 'in' expecting {'[', "
"'{', '(', '.', '-', '!', 'true', 'false', 'null', NUM_FLOAT, NUM_INT, "
"NUM_UINT, STRING, BYTES, IDENTIFIER}\n"
" | && in.😁\n"
" | ......^\n"
"ERROR: <input>:2:10: Syntax error: token recognition error at: '😁'\n"
" | && in.😁\n"
" | .........^\n"
"ERROR: <input>:2:11: Syntax error: no viable alternative at input '.'\n"
" | && in.😁\n"
" | ..........^"},
{"as", "",
"ERROR: <input>:1:1: reserved identifier: as\n"
" | as\n"
" | ^"},
{"break", "",
"ERROR: <input>:1:1: reserved identifier: break\n"
" | break\n"
" | ^"},
{"const", "",
"ERROR: <input>:1:1: reserved identifier: const\n"
" | const\n"
" | ^"},
{"continue", "",
"ERROR: <input>:1:1: reserved identifier: continue\n"
" | continue\n"
" | ^"},
{"else", "",
"ERROR: <input>:1:1: reserved identifier: else\n"
" | else\n"
" | ^"},
{"for", "",
"ERROR: <input>:1:1: reserved identifier: for\n"
" | for\n"
" | ^"},
{"function", "",
"ERROR: <input>:1:1: reserved identifier: function\n"
" | function\n"
" | ^"},
{"if", "",
"ERROR: <input>:1:1: reserved identifier: if\n"
" | if\n"
" | ^"},
{"import", "",
"ERROR: <input>:1:1: reserved identifier: import\n"
" | import\n"
" | ^"},
{"in", "",
"ERROR: <input>:1:1: Syntax error: mismatched input 'in' expecting {'[', "
"'{', '(', '.', '-', '!', 'true', 'false', 'null', NUM_FLOAT, NUM_INT, "
"NUM_UINT, STRING, BYTES, IDENTIFIER}\n"
" | in\n"
" | ^\n"
"ERROR: <input>:1:3: Syntax error: mismatched input '<EOF>' expecting "
"{'[', "
"'{', '(', '.', '-', '!', 'true', 'false', 'null', NUM_FLOAT, NUM_INT, "
"NUM_UINT, STRING, BYTES, IDENTIFIER}\n"
" | in\n"
" | ..^"},
{"let", "",
"ERROR: <input>:1:1: reserved identifier: let\n"
" | let\n"
" | ^"},
{"loop", "",
"ERROR: <input>:1:1: reserved identifier: loop\n"
" | loop\n"
" | ^"},
{"package", "",
"ERROR: <input>:1:1: reserved identifier: package\n"
" | package\n"
" | ^"},
{"namespace", "",
"ERROR: <input>:1:1: reserved identifier: namespace\n"
" | namespace\n"
" | ^"},
{"return", "",
"ERROR: <input>:1:1: reserved identifier: return\n"
" | return\n"
" | ^"},
{"var", "",
"ERROR: <input>:1:1: reserved identifier: var\n"
" | var\n"
" | ^"},
{"void", "",
"ERROR: <input>:1:1: reserved identifier: void\n"
" | void\n"
" | ^"},
{"while", "",
"ERROR: <input>:1:1: reserved identifier: while\n"
" | while\n"
" | ^"},
{"[1, 2, 3].map(var, var * var)", "",
"ERROR: <input>:1:15: reserved identifier: var\n"
" | [1, 2, 3].map(var, var * var)\n"
" | ..............^\n"
"ERROR: <input>:1:15: map() variable name must be a simple identifier\n"
" | [1, 2, 3].map(var, var * var)\n"
" | ..............^\n"
"ERROR: <input>:1:20: reserved identifier: var\n"
" | [1, 2, 3].map(var, var * var)\n"
" | ...................^\n"
"ERROR: <input>:1:26: reserved identifier: var\n"
" | [1, 2, 3].map(var, var * var)\n"
" | .........................^"},
{"[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[["
"[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[["
"[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[["
"[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[['too many']]]]]]]]]]]]]]]]]]]]]]]]]]]]"
"]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]"
"]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]"
"]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]"
"]]]]]]",
"", "Expression recursion limit exceeded. limit: 32", "", "", "", false},
{
"[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[['just fine'],[1],[2],[3],[4],[5]]]]]]]"
"]]]]]]]]]]]]]]]]]]]]]]]]",
"",
"",
"",
"",
"",
false,
},
{
"[\n\t\r[\n\t\r[\n\t\r]\n\t\r]\n\t\r",
"",
"ERROR: <input>:6:3: Syntax error: mismatched input '<EOF>' expecting "
"{']', ','}\n"
" | \r\n"
" | ..^",
},
{"x.filter(y, y.filter(z, z > 0))",
"__comprehension__(\n"
"
" y,\n"
"
" x^#1:Expr.Ident#,\n"
"
" __result__,\n"
"
" []^#19:Expr.CreateList#,\n"
"
" true^#20:bool#,\n"
"
" _?_:_(\n"
" __comprehension__(\n"
"
" z,\n"
"
" y^#4:Expr.Ident#,\n"
"
" __result__,\n"
"
" []^#10:Expr.CreateList#,\n"
"
" true^#11:bool#,\n"
"
" _?_:_(\n"
" _>_(\n"
" z^#7:Expr.Ident#,\n"
" 0^#9:int64#\n"
" )^#8:Expr.Call#,\n"
" _+_(\n"
" __result__^#12:Expr.Ident#,\n"
" [\n"
" z^#6:Expr.Ident#\n"
" ]^#13:Expr.CreateList#\n"
" )^#14:Expr.Call#,\n"
" __result__^#15:Expr.Ident#\n"
" )^#16:Expr.Call#,\n"
"
" __result__^#17:Expr.Ident#)^#18:Expr.Comprehension#,\n"
" _+_(\n"
" __result__^#21:Expr.Ident#,\n"
" [\n"
" y^#3:Expr.Ident#\n"
" ]^#22:Expr.CreateList#\n"
" )^#23:Expr.Call#,\n"
" __result__^#24:Expr.Ident#\n"
" )^#25:Expr.Call#,\n"
"
" __result__^#26:Expr.Ident#)^#27:Expr.Comprehension#"
"",
"", "", "",
"x^#1:Expr.Ident#.filter(\n"
" y^#3:Expr.Ident#,\n"
" ^#18:filter#\n"
")^#27:filter#,\n"
"y^#4:Expr.Ident#.filter(\n"
" z^#6:Expr.Ident#,\n"
" _>_(\n"
" z^#7:Expr.Ident#,\n"
" 0^#9:int64#\n"
" )^#8:Expr.Call#\n"
")^#18:filter"},
{"has(a.b).filter(c, c)",
"__comprehension__(\n"
"
" c,\n"
"
" a^#2:Expr.Ident#.b~test-only~^#4:Expr.Select#,\n"
"
" __result__,\n"
"
" []^#8:Expr.CreateList#,\n"
"
" true^#9:bool#,\n"
"
" _?_:_(\n"
" c^#7:Expr.Ident#,\n"
" _+_(\n"
" __result__^#10:Expr.Ident#,\n"
" [\n"
" c^#6:Expr.Ident#\n"
" ]^#11:Expr.CreateList#\n"
" )^#12:Expr.Call#,\n"
" __result__^#13:Expr.Ident#\n"
" )^#14:Expr.Call#,\n"
"
" __result__^#15:Expr.Ident#)^#16:Expr.Comprehension#",
"", "", "",
"^#4:has#.filter(\n"
" c^#6:Expr.Ident#,\n"
" c^#7:Expr.Ident#\n"
")^#16:filter#,\n"
"has(\n"
" a^#2:Expr.Ident#.b^#3:Expr.Select#\n"
")^#4:has"},
{"x.filter(y, y.exists(z, has(z.a)) && y.exists(z, has(z.b)))",
"__comprehension__(\n"
"
" y,\n"
"
" x^#1:Expr.Ident#,\n"
"
" __result__,\n"
"
" []^#35:Expr.CreateList#,\n"
"
" true^#36:bool#,\n"
"
" _?_:_(\n"
" _&&_(\n"
" __comprehension__(\n"
"
" z,\n"
"
" y^#4:Expr.Ident#,\n"
"
" __result__,\n"
"
" false^#11:bool#,\n"
"
" @not_strictly_false(\n"
" !_(\n"
" __result__^#12:Expr.Ident#\n"
" )^#13:Expr.Call#\n"
" )^#14:Expr.Call#,\n"
"
" _||_(\n"
" __result__^#15:Expr.Ident#,\n"
" z^#8:Expr.Ident#.a~test-only~^#10:Expr.Select#\n"
" )^#16:Expr.Call#,\n"
"
" __result__^#17:Expr.Ident#)^#18:Expr.Comprehension#,\n"
" __comprehension__(\n"
"
" z,\n"
"
" y^#19:Expr.Ident#,\n"
"
" __result__,\n"
"
" false^#26:bool#,\n"
"
" @not_strictly_false(\n"
" !_(\n"
" __result__^#27:Expr.Ident#\n"
" )^#28:Expr.Call#\n"
" )^#29:Expr.Call#,\n"
"
" _||_(\n"
" __result__^#30:Expr.Ident#,\n"
" z^#23:Expr.Ident#.b~test-only~^#25:Expr.Select#\n"
" )^#31:Expr.Call#,\n"
"
" __result__^#32:Expr.Ident#)^#33:Expr.Comprehension#\n"
" )^#34:Expr.Call#,\n"
" _+_(\n"
" __result__^#37:Expr.Ident#,\n"
" [\n"
" y^#3:Expr.Ident#\n"
" ]^#38:Expr.CreateList#\n"
" )^#39:Expr.Call#,\n"
" __result__^#40:Expr.Ident#\n"
" )^#41:Expr.Call#,\n"
"
" __result__^#42:Expr.Ident#)^#43:Expr.Comprehension#",
"", "", "",
"x^#1:Expr.Ident#.filter(\n"
" y^#3:Expr.Ident#,\n"
" _&&_(\n"
" ^#18:exists#,\n"
" ^#33:exists#\n"
" )^#34:Expr.Call#\n"
")^#43:filter#,\n"
"y^#19:Expr.Ident#.exists(\n"
" z^#21:Expr.Ident#,\n"
" ^#25:has#\n"
")^#33:exists#,\n"
"has(\n"
" z^#23:Expr.Ident#.b^#24:Expr.Select#\n"
")^#25:has#,\n"
"y^#4:Expr.Ident#."
"exists(\n"
" z^#6:Expr.Ident#,\n"
" ^#10:has#\n"
")^#18:exists#,\n"
"has(\n"
" z^#8:Expr.Ident#.a^#9:Expr.Select#\n"
")^#10:has"},
{"has(a.b).asList().exists(c, c)",
"__comprehension__(\n"
"
" c,\n"
"
" a^#2:Expr.Ident#.b~test-only~^#4:Expr.Select#.asList()^#5:Expr.Call#,\n"
"
" __result__,\n"
"
" false^#9:bool#,\n"
"
" @not_strictly_false(\n"
" !_(\n"
" __result__^#10:Expr.Ident#\n"
" )^#11:Expr.Call#\n"
" )^#12:Expr.Call#,\n"
"
" _||_(\n"
" __result__^#13:Expr.Ident#,\n"
" c^#8:Expr.Ident#\n"
" )^#14:Expr.Call#,\n"
"
" __result__^#15:Expr.Ident#)^#16:Expr.Comprehension#",
"", "", "",
"^#4:has#.asList()^#5:Expr.Call#.exists(\n"
" c^#7:Expr.Ident#,\n"
" c^#8:Expr.Ident#\n"
")^#16:exists#,\n"
"has(\n"
" a^#2:Expr.Ident#.b^#3:Expr.Select#\n"
")^#4:has"},
{"[has(a.b), has(c.d)].exists(e, e)",
"__comprehension__(\n"
"
" e,\n"
"
" [\n"
" a^#3:Expr.Ident#.b~test-only~^#5:Expr.Select#,\n"
" c^#7:Expr.Ident#.d~test-only~^#9:Expr.Select#\n"
" ]^#1:Expr.CreateList#,\n"
"
" __result__,\n"
"
" false^#13:bool#,\n"
"
" @not_strictly_false(\n"
" !_(\n"
" __result__^#14:Expr.Ident#\n"
" )^#15:Expr.Call#\n"
" )^#16:Expr.Call#,\n"
"
" _||_(\n"
" __result__^#17:Expr.Ident#,\n"
" e^#12:Expr.Ident#\n"
" )^#18:Expr.Call#,\n"
"
" __result__^#19:Expr.Ident#)^#20:Expr.Comprehension#",
"", "", "",
"[\n"
" ^#5:has#,\n"
" ^#9:has#\n"
"]^#1:Expr.CreateList#.exists(\n"
" e^#11:Expr.Ident#,\n"
" e^#12:Expr.Ident#\n"
")^#20:exists#,\n"
"has(\n"
" c^#7:Expr.Ident#.d^#8:Expr.Select#\n"
")^#9:has#,\n"
"has(\n"
" a^#3:Expr.Ident#.b^#4:Expr.Select#\n"
")^#5:has"},
{"b'\\UFFFFFFFF'", "",
"ERROR: <input>:1:1: Invalid bytes literal: Illegal escape sequence: "
"Unicode escape sequence \\U cannot be used in bytes literals\n | "
"b'\\UFFFFFFFF'\n | ^"},
{"a.?b[?0] && a[?c]",
"_&&_(\n _[?_](\n _?._(\n a^#1:Expr.Ident#,\n "
"\"b\"^#3:string#\n )^#2:Expr.Call#,\n 0^#5:int64#\n "
")^#4:Expr.Call#,\n _[?_](\n a^#6:Expr.Ident#,\n "
"c^#8:Expr.Ident#\n )^#7:Expr.Call#\n)^#9:Expr.Call#"},
{"{?'key': value}",
"{\n "
"?\"key\"^#3:string#:value^#4:Expr.Ident#^#2:Expr.CreateStruct.Entry#\n}^#"
"1:Expr.CreateStruct#"},
{"[?a, ?b]",
"[\n ?a^#2:Expr.Ident#,\n ?b^#3:Expr.Ident#\n]^#1:Expr.CreateList#"},
{"[?a[?b]]",
"[\n ?_[?_](\n a^#2:Expr.Ident#,\n b^#4:Expr.Ident#\n "
")^#3:Expr.Call#\n]^#1:Expr.CreateList#"},
{"Msg{?field: value}",
"Msg{\n "
"?field:value^#3:Expr.Ident#^#2:Expr.CreateStruct.Entry#\n}^#1:Expr."
"CreateStruct#"},
{"m.optMap(v, f)",
"_?_:_(\n m^#1:Expr.Ident#.hasValue()^#6:Expr.Call#,\n optional.of(\n "
" __comprehension__(\n
"Target\n []^#7:Expr.CreateList#,\n
"
"LoopCondition\n false^#9:bool#,\n
"v^#3:Expr.Ident#,\n
"f^#4:Expr.Ident#)^#10:Expr.Comprehension#\n )^#11:Expr.Call#,\n "
"optional.none()^#12:Expr.Call#\n)^#13:Expr.Call#"},
{"m.optFlatMap(v, f)",
"_?_:_(\n m^#1:Expr.Ident#.hasValue()^#6:Expr.Call#,\n "
"__comprehension__(\n
"[]^#7:Expr.CreateList#,\n
"m^#5:Expr.Ident#.value()^#8:Expr.Call#,\n
"false^#9:bool#,\n
" f^#4:Expr.Ident#)^#10:Expr.Comprehension#,\n "
"optional.none()^#11:Expr.Call#\n)^#12:Expr.Call#"}};
class KindAndIdAdorner : public testutil::ExpressionAdorner {
public:
explicit KindAndIdAdorner(
const google::api::expr::v1alpha1::SourceInfo& source_info =
google::api::expr::v1alpha1::SourceInfo::default_instance())
: source_info_(source_info) {}
std::string adorn(const Expr& e) const override {
if (source_info_.macro_calls_size() != 0 &&
source_info_.macro_calls().contains(e.id())) {
return absl::StrFormat(
"^#%d:%s#", e.id(),
source_info_.macro_calls().at(e.id()).call_expr().function());
}
if (e.has_const_expr()) {
auto& const_expr = e.const_expr();
auto reflection = const_expr.GetReflection();
auto oneof = const_expr.GetDescriptor()->FindOneofByName("constant_kind");
auto field_desc = reflection->GetOneofFieldDescriptor(const_expr, oneof);
auto enum_desc = field_desc->enum_type();
if (enum_desc) {
return absl::StrFormat("^#%d:%s#", e.id(), nameChain(enum_desc));
} else {
return absl::StrFormat("^#%d:%s#", e.id(), field_desc->type_name());
}
} else {
auto reflection = e.GetReflection();
auto oneof = e.GetDescriptor()->FindOneofByName("expr_kind");
auto desc = reflection->GetOneofFieldDescriptor(e, oneof)->message_type();
return absl::StrFormat("^#%d:%s#", e.id(), nameChain(desc));
}
}
std::string adorn(const Expr::CreateStruct::Entry& e) const override {
return absl::StrFormat("^#%d:Expr.CreateStruct.Entry#", e.id());
}
private:
template <class T>
std::string nameChain(const T* descriptor) const {
std::list<std::string> name_chain{descriptor->name()};
const google::protobuf::Descriptor* desc = descriptor->containing_type();
while (desc) {
name_chain.push_front(desc->name());
desc = desc->containing_type();
}
return absl::StrJoin(name_chain, ".");
}
const google::api::expr::v1alpha1::SourceInfo& source_info_;
};
class LocationAdorner : public testutil::ExpressionAdorner {
public:
explicit LocationAdorner(const google::api::expr::v1alpha1::SourceInfo& source_info)
: source_info_(source_info) {}
absl::optional<std::pair<int32_t, int32_t>> getLocation(int64_t id) const {
absl::optional<std::pair<int32_t, int32_t>> location;
const auto& positions = source_info_.positions();
if (positions.find(id) == positions.end()) {
return location;
}
int32_t pos = positions.at(id);
int32_t line = 1;
for (int i = 0; i < source_info_.line_offsets_size(); ++i) {
if (source_info_.line_offsets(i) > pos) {
break;
} else {
line += 1;
}
}
int32_t col = pos;
if (line > 1) {
col = pos - source_info_.line_offsets(line - 2);
}
return std::make_pair(line, col);
}
std::string adorn(const Expr& e) const override {
auto loc = getLocation(e.id());
if (loc) {
return absl::StrFormat("^#%d[%d,%d]#", e.id(), loc->first, loc->second);
} else {
return absl::StrFormat("^#%d[NO_POS]#", e.id());
}
}
std::string adorn(const Expr::CreateStruct::Entry& e) const override {
auto loc = getLocation(e.id());
if (loc) {
return absl::StrFormat("^#%d[%d,%d]#", e.id(), loc->first, loc->second);
} else {
return absl::StrFormat("^#%d[NO_POS]#", e.id());
}
}
private:
template <class T>
std::string nameChain(const T* descriptor) const {
std::list<std::string> name_chain{descriptor->name()};
const google::protobuf::Descriptor* desc = descriptor->containing_type();
while (desc) {
name_chain.push_front(desc->name());
desc = desc->containing_type();
}
return absl::StrJoin(name_chain, ".");
}
private:
const google::api::expr::v1alpha1::SourceInfo& source_info_;
};
std::string ConvertEnrichedSourceInfoToString(
const EnrichedSourceInfo& enriched_source_info) {
std::vector<std::string> offsets;
for (const auto& offset : enriched_source_info.offsets()) {
offsets.push_back(absl::StrFormat(
"[%d,%d,%d]", offset.first, offset.second.first, offset.second.second));
}
return absl::StrJoin(offsets, "^#");
}
std::string ConvertMacroCallsToString(
const google::api::expr::v1alpha1::SourceInfo& source_info) {
KindAndIdAdorner macro_calls_adorner(source_info);
testutil::ExprPrinter w(macro_calls_adorner);
std::vector<std::pair<int64_t, google::api::expr::v1alpha1::Expr>> macro_calls;
for (auto pair : source_info.macro_calls()) {
pair.second.set_id(pair.first);
macro_calls.push_back(pair);
}
absl::c_sort(macro_calls,
[](const std::pair<int64_t, google::api::expr::v1alpha1::Expr>& p1,
const std::pair<int64_t, google::api::expr::v1alpha1::Expr>& p2) {
return p1.first > p2.first;
});
std::string result = "";
for (const auto& pair : macro_calls) {
result += w.print(pair.second) += ",\n";
}
return result.substr(0, result.size() - 3);
}
class ExpressionTest : public testing::TestWithParam<TestInfo> {};
TEST_P(ExpressionTest, Parse) {
const TestInfo& test_info = GetParam();
ParserOptions options;
if (!test_info.M.empty()) {
options.add_macro_calls = true;
}
options.enable_optional_syntax = true;
std::vector<Macro> macros = Macro::AllMacros();
macros.push_back(cel::OptMapMacro());
macros.push_back(cel::OptFlatMapMacro());
auto result = EnrichedParse(test_info.I, macros, "<input>", options);
if (test_info.E.empty()) {
EXPECT_THAT(result, IsOk());
} else {
EXPECT_THAT(result, Not(IsOk()));
EXPECT_EQ(test_info.E, result.status().message());
}
if (!test_info.P.empty()) {
KindAndIdAdorner kind_and_id_adorner;
testutil::ExprPrinter w(kind_and_id_adorner);
std::string adorned_string = w.print(result->parsed_expr().expr());
EXPECT_EQ(test_info.P, adorned_string) << result->parsed_expr();
}
if (!test_info.L.empty()) {
LocationAdorner location_adorner(result->parsed_expr().source_info());
testutil::ExprPrinter w(location_adorner);
std::string adorned_string = w.print(result->parsed_expr().expr());
EXPECT_EQ(test_info.L, adorned_string) << result->parsed_expr();
}
if (!test_info.R.empty()) {
EXPECT_EQ(test_info.R, ConvertEnrichedSourceInfoToString(
result->enriched_source_info()));
}
if (!test_info.M.empty()) {
EXPECT_EQ(test_info.M, ConvertMacroCallsToString(
result.value().parsed_expr().source_info()))
<< result->parsed_expr();
}
}
TEST(ExpressionTest, TsanOom) {
Parse(
"[[a([[???[a[[??[a([[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[["
"[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[["
"[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[["
"[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[["
"[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[["
"[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[["
"[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[["
"[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[["
"[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[["
"[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[["
"[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[["
"[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[["
"[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[["
"[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[???["
"a([[????")
.IgnoreError();
}
TEST(ExpressionTest, ErrorRecoveryLimits) {
ParserOptions options;
options.error_recovery_limit = 1;
auto result = Parse("......", "", options);
EXPECT_THAT(result, Not(IsOk()));
EXPECT_EQ(result.status().message(),
"ERROR: :1:1: Syntax error: More than 1 parse errors.\n | ......\n "
"| ^\nERROR: :1:2: Syntax error: no viable alternative at input "
"'..'\n | ......\n | .^");
}
TEST(ExpressionTest, ExpressionSizeLimit) {
ParserOptions options;
options.expression_size_codepoint_limit = 10;
auto result = Parse("...............", "", options);
EXPECT_THAT(result, Not(IsOk()));
EXPECT_EQ(
result.status().message(),
"expression size exceeds codepoint limit. input size: 15, limit: 10");
}
TEST(ExpressionTest, RecursionDepthLongArgList) {
ParserOptions options;
options.max_recursion_depth = 16;
EXPECT_THAT(Parse("[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]", "", options), IsOk());
}
TEST(ExpressionTest, RecursionDepthExceeded) {
ParserOptions options;
options.max_recursion_depth = 6;
auto result = Parse("1 + 2 + 3 + 4 + 5 + 6 + 7", "", options);
EXPECT_THAT(result, Not(IsOk()));
EXPECT_THAT(result.status().message(),
HasSubstr("Exceeded max recursion depth of 6 when parsing."));
}
TEST(ExpressionTest, RecursionDepthIgnoresParentheses) {
ParserOptions options;
options.max_recursion_depth = 6;
auto result = Parse("(((1 + 2 + 3 + 4 + (5 + 6))))", "", options);
EXPECT_THAT(result, IsOk());
}
std::string TestName(const testing::TestParamInfo<TestInfo>& test_info) {
std::string name = absl::StrCat(test_info.index, "-", test_info.param.I);
absl::c_replace_if(name, [](char c) { return !absl::ascii_isalnum(c); }, '_');
return name;
}
INSTANTIATE_TEST_SUITE_P(CelParserTest, ExpressionTest,
testing::ValuesIn(test_cases), TestName);
void BM_Parse(benchmark::State& state) {
std::vector<Macro> macros = Macro::AllMacros();
for (auto s : state) {
for (const auto& test_case : test_cases) {
if (test_case.benchmark) {
benchmark::DoNotOptimize(ParseWithMacros(test_case.I, macros));
}
}
}
}
BENCHMARK(BM_Parse)->ThreadRange(1, std::thread::hardware_concurrency());
}
} | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/parser/parser.cc | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/parser/parser_test.cc | 4552db5798fb0853b131b783d8875794334fae7f |
7091306e-56ad-4352-8992-08f37b677804 | cpp | google/quiche | ip_range | quiche/quic/qbone/platform/ip_range.cc | quiche/quic/qbone/platform/ip_range_test.cc | #include "quiche/quic/qbone/platform/ip_range.h"
#include <cstring>
#include <string>
#include "quiche/common/quiche_endian.h"
namespace quic {
namespace {
constexpr size_t kIPv4Size = 32;
constexpr size_t kIPv6Size = 128;
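// Zeroes every bit of `input` past the first *prefix_length bits, clamping
// *prefix_length to 32 for IPv4 and 128 for IPv6; any other address family
// yields an uninitialized address.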
QuicIpAddress TruncateToLength(const QuicIpAddress& input,
size_t* prefix_length) {
QuicIpAddress output;
if (input.IsIPv4()) {
if (*prefix_length > kIPv4Size) {
*prefix_length = kIPv4Size;
return input;
}
uint32_t raw_address =
*reinterpret_cast<const uint32_t*>(input.ToPackedString().data());
raw_address = quiche::QuicheEndian::NetToHost32(raw_address);
raw_address &= ~0U << (kIPv4Size - *prefix_length);
raw_address = quiche::QuicheEndian::HostToNet32(raw_address);
output.FromPackedString(reinterpret_cast<const char*>(&raw_address),
sizeof(raw_address));
return output;
}
if (input.IsIPv6()) {
if (*prefix_length > kIPv6Size) {
*prefix_length = kIPv6Size;
return input;
}
uint64_t raw_address[2];
memcpy(raw_address, input.ToPackedString().data(), sizeof(raw_address));
raw_address[0] = quiche::QuicheEndian::NetToHost64(raw_address[0]);
raw_address[1] = quiche::QuicheEndian::NetToHost64(raw_address[1]);
if (*prefix_length <= kIPv6Size / 2) {
raw_address[0] &= ~uint64_t{0} << (kIPv6Size / 2 - *prefix_length);
raw_address[1] = 0;
} else {
raw_address[1] &= ~uint64_t{0} << (kIPv6Size - *prefix_length);
}
raw_address[0] = quiche::QuicheEndian::HostToNet64(raw_address[0]);
raw_address[1] = quiche::QuicheEndian::HostToNet64(raw_address[1]);
output.FromPackedString(reinterpret_cast<const char*>(raw_address),
sizeof(raw_address));
return output;
}
return output;
}
}
IpRange::IpRange(const QuicIpAddress& prefix, size_t prefix_length)
: prefix_(prefix), prefix_length_(prefix_length) {
prefix_ = TruncateToLength(prefix_, &prefix_length_);
}
bool IpRange::operator==(IpRange other) const {
return prefix_ == other.prefix_ && prefix_length_ == other.prefix_length_;
}
bool IpRange::operator!=(IpRange other) const { return !(*this == other); }
bool IpRange::FromString(const std::string& range) {
size_t slash_pos = range.find('/');
if (slash_pos == std::string::npos) {
return false;
}
QuicIpAddress prefix;
bool success = prefix.FromString(range.substr(0, slash_pos));
if (!success) {
return false;
}
uint64_t num_processed = 0;
size_t prefix_length = std::stoi(range.substr(slash_pos + 1), &num_processed);
if (num_processed + 1 + slash_pos != range.length()) {
return false;
}
prefix_ = TruncateToLength(prefix, &prefix_length);
prefix_length_ = prefix_length;
return true;
}
QuicIpAddress IpRange::FirstAddressInRange() const { return prefix(); }
} | #include "quiche/quic/qbone/platform/ip_range.h"
#include "quiche/quic/platform/api/quic_ip_address.h"
#include "quiche/quic/platform/api/quic_test.h"
namespace quic {
namespace {
TEST(IpRangeTest, TruncateWorksIPv4) {
QuicIpAddress before_truncate;
before_truncate.FromString("255.255.255.255");
EXPECT_EQ("128.0.0.0/1", IpRange(before_truncate, 1).ToString());
EXPECT_EQ("192.0.0.0/2", IpRange(before_truncate, 2).ToString());
EXPECT_EQ("255.224.0.0/11", IpRange(before_truncate, 11).ToString());
EXPECT_EQ("255.255.255.224/27", IpRange(before_truncate, 27).ToString());
EXPECT_EQ("255.255.255.254/31", IpRange(before_truncate, 31).ToString());
EXPECT_EQ("255.255.255.255/32", IpRange(before_truncate, 32).ToString());
EXPECT_EQ("255.255.255.255/32", IpRange(before_truncate, 33).ToString());
}
TEST(IpRangeTest, TruncateWorksIPv6) {
QuicIpAddress before_truncate;
before_truncate.FromString("ffff:ffff:ffff:ffff:f903::5");
EXPECT_EQ("fe00::/7", IpRange(before_truncate, 7).ToString());
EXPECT_EQ("ffff:ffff:ffff::/48", IpRange(before_truncate, 48).ToString());
EXPECT_EQ("ffff:ffff:ffff:ffff::/64",
IpRange(before_truncate, 64).ToString());
EXPECT_EQ("ffff:ffff:ffff:ffff:8000::/65",
IpRange(before_truncate, 65).ToString());
EXPECT_EQ("ffff:ffff:ffff:ffff:f903::4/127",
IpRange(before_truncate, 127).ToString());
}
TEST(IpRangeTest, FromStringWorksIPv4) {
IpRange range;
ASSERT_TRUE(range.FromString("127.0.3.249/26"));
EXPECT_EQ("127.0.3.192/26", range.ToString());
}
TEST(IpRangeTest, FromStringWorksIPv6) {
IpRange range;
ASSERT_TRUE(range.FromString("ff01:8f21:77f9::/33"));
EXPECT_EQ("ff01:8f21::/33", range.ToString());
}
TEST(IpRangeTest, FirstAddressWorksIPv6) {
IpRange range;
ASSERT_TRUE(range.FromString("ffff:ffff::/64"));
QuicIpAddress first_address = range.FirstAddressInRange();
EXPECT_EQ("ffff:ffff::", first_address.ToString());
}
TEST(IpRangeTest, FirstAddressWorksIPv4) {
IpRange range;
ASSERT_TRUE(range.FromString("10.0.0.0/24"));
QuicIpAddress first_address = range.FirstAddressInRange();
EXPECT_EQ("10.0.0.0", first_address.ToString());
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/qbone/platform/ip_range.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/qbone/platform/ip_range_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
edecf673-cc94-4a5f-b4eb-dbb5af005541 | cpp | tensorflow/tensorflow | async_buffers | tensorflow/lite/delegates/gpu/async_buffers.cc | tensorflow/lite/delegates/gpu/async_buffers_test.cc | #include "tensorflow/lite/delegates/gpu/async_buffers.h"
#include <EGL/egl.h>
#include <EGL/eglext.h>
#include <GLES2/gl2ext.h>
#include <GLES3/gl31.h>
#include "absl/status/status.h"
#include "tensorflow/lite/delegates/gpu/android_hardware_buffer.h"
#include "tensorflow/lite/delegates/gpu/gl/gl_errors.h"
namespace {
PFNGLBUFFERSTORAGEEXTERNALEXTPROC glBufferStorageExternalEXT;
PFNEGLGETNATIVECLIENTBUFFERANDROIDPROC eglGetNativeClientBufferANDROID;
bool IsGlSupported() {
static const bool extensions_allowed = [] {
eglGetNativeClientBufferANDROID =
reinterpret_cast<PFNEGLGETNATIVECLIENTBUFFERANDROIDPROC>(
eglGetProcAddress("eglGetNativeClientBufferANDROID"));
glBufferStorageExternalEXT =
reinterpret_cast<PFNGLBUFFERSTORAGEEXTERNALEXTPROC>(
eglGetProcAddress("glBufferStorageExternalEXT"));
return eglGetNativeClientBufferANDROID && glBufferStorageExternalEXT;
}();
return extensions_allowed;
}
}
namespace tflite {
namespace gpu {
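// Binds the AHardwareBuffer backing this AsyncBuffer to the buffer currently
// bound to GL_SHADER_STORAGE_BUFFER via eglGetNativeClientBufferANDROID and
// glBufferStorageExternalEXT, requesting a coherent, persistent read/write
// mapping.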
absl::Status AsyncBuffer::MapAHardwareBufferToGlBuffer() {
if (!IsGlSupported()) {
return absl::UnknownError(
"No GL extension functions found to bind AHardwareBuffer and "
"OpenGL buffer");
}
EGLClientBuffer native_buffer = eglGetNativeClientBufferANDROID(ahwb_);
if (!native_buffer) {
return absl::UnknownError("Can't get native buffer");
}
glBufferStorageExternalEXT(GL_SHADER_STORAGE_BUFFER, 0, bytes_, native_buffer,
GL_MAP_READ_BIT | GL_MAP_WRITE_BIT |
GL_MAP_COHERENT_BIT_EXT |
GL_MAP_PERSISTENT_BIT_EXT);
return gl::GetOpenGlErrors();
}
absl::Status AsyncBuffer::AllocateOpenGlBuffer() {
if (opengl_buffer_ == GL_INVALID_INDEX) {
glGenBuffers(1, &opengl_buffer_);
glBindBuffer(GL_SHADER_STORAGE_BUFFER, opengl_buffer_);
absl::Status status = MapAHardwareBufferToGlBuffer();
if (!status.ok()) {
if (ahwb_ != nullptr) {
if (OptionalAndroidHardwareBuffer::Instance().Supported()) {
OptionalAndroidHardwareBuffer::Instance().Release(ahwb_);
}
ahwb_ = nullptr;
}
glBufferData(GL_SHADER_STORAGE_BUFFER, bytes_, nullptr, GL_STREAM_COPY);
}
glBindBuffer(GL_SHADER_STORAGE_BUFFER, 0);
}
return absl::OkStatus();
}
absl::Status AsyncBuffer::GetOpenGlBuffer(GLuint& buffer_ref) {
if (!valid_) {
absl::Status status = AllocateOpenGlBuffer();
if (!status.ok()) {
return status;
}
}
valid_ = true;
buffer_ref = opengl_buffer_;
return absl::OkStatus();
}
}
} | #include "tensorflow/lite/delegates/gpu/async_buffers.h"
#include <memory>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/delegates/gpu/android_hardware_buffer.h"
#include "tensorflow/lite/delegates/gpu/api.h"
#include "tensorflow/lite/delegates/gpu/common/data_type.h"
#include "tensorflow/lite/delegates/gpu/gl/egl_environment.h"
namespace tflite {
namespace gpu {
namespace {
TEST(AsyncBufferTest, DuplicateTest) {
if (__builtin_available(android 26, *)) {
auto Instance = OptionalAndroidHardwareBuffer::Instance;
TensorObjectDef* tie = new TensorObjectDef();
tie->object_def.data_type = DataType::FLOAT32;
tie->object_def.data_layout = DataLayout::BHWC;
tie->dimensions = Dimensions(2, 2, 2, 2);
AHardwareBuffer_Desc buffDesc = {};
buffDesc.width = 1000;
buffDesc.height = 1;
buffDesc.layers = 1;
buffDesc.format = AHARDWAREBUFFER_FORMAT_BLOB;
buffDesc.usage = AHARDWAREBUFFER_USAGE_CPU_WRITE_OFTEN |
AHARDWAREBUFFER_USAGE_CPU_READ_OFTEN |
AHARDWAREBUFFER_USAGE_GPU_DATA_BUFFER;
AHardwareBuffer* ahwb;
EXPECT_TRUE(Instance().IsSupported(&buffDesc));
EXPECT_EQ(Instance().Allocate(&buffDesc, &ahwb), 0);
std::unique_ptr<gl::EglEnvironment> env;
EXPECT_OK(gl::EglEnvironment::NewEglEnvironment(&env));
AsyncBuffer async_buffer1 = AsyncBuffer(*tie, ahwb);
GLuint buffer1, buffer2;
EXPECT_OK(async_buffer1.GetOpenGlBuffer(buffer1));
EXPECT_GE(buffer1, 0);
EXPECT_OK(async_buffer1.GetOpenGlBuffer(buffer2));
EXPECT_EQ(buffer1, buffer2);
AsyncBuffer async_buffer2 = AsyncBuffer(*tie, ahwb);
EXPECT_OK(async_buffer2.GetOpenGlBuffer(buffer2));
EXPECT_NE(buffer1, buffer2);
} else {
GTEST_SKIP();
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/async_buffers.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/async_buffers_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
b24371fc-a9b3-45b6-b90e-9778a16abea8 | cpp | tensorflow/tensorflow | hlo_constant_splitter | third_party/xla/xla/hlo/transforms/hlo_constant_splitter.cc | third_party/xla/xla/hlo/transforms/hlo_constant_splitter_test.cc | #include "xla/hlo/transforms/hlo_constant_splitter.h"
#include <iterator>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/container/inlined_vector.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
bool IsSupportedConstant(const HloInstruction* instruction,
bool split_expressions) {
return instruction->opcode() == HloOpcode::kConstant ||
(split_expressions && instruction->opcode() == HloOpcode::kIota);
}
bool IsSupportedConstantExpression(const HloInstruction* instruction) {
if (instruction->HasSideEffect()) {
return false;
}
if (instruction->IsElementwise()) {
return true;
}
switch (instruction->opcode()) {
case HloOpcode::kBroadcast:
case HloOpcode::kSlice:
return true;
default:
return false;
}
}
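// Clones the constant expression rooted at `to_clone` (walking its operand
// subgraph in post order with an explicit worklist) and rewires `user` to
// consume the freshly cloned root instead of the shared original.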
absl::StatusOr<bool> DuplicateConstantExpressionPerUser(
HloComputation* computation, HloInstruction* to_clone,
HloInstruction* user) {
absl::InlinedVector<std::pair<const HloInstruction*, int>, 8> worklist(
1, std::make_pair(to_clone, 0));
absl::InlinedVector<const HloInstruction*, 8> to_clone_vec;
absl::flat_hash_set<const HloInstruction*> visited;
bool changed = false;
VLOG(10) << "Duplicating: " << to_clone->ToString() << " for user "
<< user->ToString();
while (!worklist.empty()) {
auto& [to_clone_i, index] = worklist.back();
if (index >= to_clone_i->operand_count()) {
to_clone_vec.push_back(to_clone_i);
worklist.pop_back();
continue;
}
int64_t prev_idx = index++;
if (visited.insert(to_clone_i->operands()[prev_idx]).second) {
VLOG(10) << "Adding operand to worklist: "
<< to_clone_i->operands()[prev_idx]->ToString();
worklist.push_back(std::make_pair(to_clone_i->operands()[prev_idx], 0));
}
}
absl::flat_hash_map<const HloInstruction*, HloInstruction*>
cloned_instructions_map;
for (auto* i : to_clone_vec) {
absl::InlinedVector<HloInstruction*, 4> new_operand_vector;
for (auto* op : i->operands()) {
auto it = cloned_instructions_map.find(op);
CHECK(it != cloned_instructions_map.end())
<< "Expected already cloned instruction for operand: "
<< op->ToString() << " Instruction to clone: " << i->ToString();
new_operand_vector.push_back(it->second);
}
HloInstruction* cloned_instr = computation->AddInstruction(
i->CloneWithNewOperands(i->shape(), new_operand_vector));
cloned_instructions_map[i] = cloned_instr;
if (i == to_clone) {
TF_RETURN_IF_ERROR(to_clone->ReplaceUseWith(user, cloned_instr));
changed = true;
}
}
return changed;
}
}
absl::StatusOr<bool> HloConstantSplitter::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
bool changed = false;
for (HloComputation* computation : module->computations(execution_threads)) {
absl::flat_hash_set<HloInstruction*> constants_set;
std::vector<HloInstruction*> constants_list;
std::vector<HloInstruction*> worklist;
for (HloInstruction* instruction :
computation->MakeInstructionPostOrder()) {
VLOG(10) << "Considering: " << instruction->ToString();
if (IsSupportedConstant(instruction, split_expressions_) &&
extra_constraints_(instruction)) {
VLOG(10) << "Adding to constant list: " << instruction->ToString();
constants_set.insert(instruction);
constants_list.push_back(instruction);
}
}
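    // Grow the constant set to a fixed point: an instruction joins the set
    // once all of its operands are already known constants; when expression
    // splitting is enabled, its supported users are queued for consideration.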
int64_t previous_total_constants = 0;
while (constants_list.size() != previous_total_constants) {
VLOG(10) << "Previous total: " << previous_total_constants
<< " current constants: " << constants_list.size();
previous_total_constants = constants_list.size();
worklist.clear();
worklist.insert(worklist.end(), constants_list.begin(),
constants_list.end());
while (!worklist.empty()) {
auto* i = worklist.back();
worklist.pop_back();
bool is_constant = true;
for (auto* ops : i->operands()) {
if (!constants_set.contains(ops)) {
is_constant = false;
break;
}
}
if (is_constant) {
if (constants_set.insert(i).second) {
constants_list.push_back(i);
}
if (split_expressions_) {
for (auto* u : i->users()) {
if (IsSupportedConstantExpression(u) &&
!constants_set.contains(u)) {
worklist.push_back(u);
}
}
}
}
}
}
if (VLOG_IS_ON(5)) {
VLOG(5) << "For computation: " << computation->ToString();
for (HloInstruction* instruction : constants_list) {
VLOG(5) << "Is a constant: " << instruction->ToString();
}
}
for (HloInstruction* instruction : constants_list) {
if (IsSupportedConstant(instruction, split_expressions_) &&
instruction->user_count() <= 1) {
continue;
}
absl::InlinedVector<HloInstruction*, 8> users;
users.reserve(instruction->user_count());
for (HloInstruction* user : instruction->users()) {
if (instruction->opcode() == HloOpcode::kConstant ||
!constants_set.contains(user)) {
users.push_back(user);
}
}
for (auto* u : users) {
TF_ASSIGN_OR_RETURN(bool duplicated, DuplicateConstantExpressionPerUser(
computation, instruction, u));
changed |= duplicated;
}
}
}
return changed;
}
} | #include "xla/hlo/transforms/hlo_constant_splitter.h"
#include <cstdint>
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/hlo_dce.h"
#include "xla/service/hlo_parser.h"
#include "xla/test.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/util.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
using HloConstantSplitterTest = HloTestBase;
TEST_F(HloConstantSplitterTest, SplitConstants) {
const char* module_str = R"(
HloModule test_module
ENTRY entry_computation {
param = (f32[], f32[]) parameter(0),
sharding={{maximal device=0}, {maximal device=0}}
gte0 = f32[] get-tuple-element(param), index=0
gte1 = f32[] get-tuple-element(param), index=1
constant = f32[] constant(94.1934)
add1 = f32[] add(constant, gte0)
add2 = f32[] add(constant, gte1)
ROOT root = (f32[], f32[], f32[]) tuple(constant, add1, add2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(module_str));
TF_ASSERT_OK(HloConstantSplitter().Run(module.get()).status());
for (HloComputation* computation : module->computations()) {
for (HloInstruction* instruction : computation->instructions()) {
if (instruction->opcode() == HloOpcode::kConstant) {
EXPECT_LE(instruction->user_count(), 1);
}
}
}
}
TEST_F(HloConstantSplitterTest, OnlySplitConstantsAllowedBySeedConstraints) {
const char* module_str = R"(
HloModule test_module
ENTRY entry_computation {
param = (f32[], f32[]) parameter(0),
sharding={{maximal device=0}, {maximal device=0}}
gte0 = f32[] get-tuple-element(param), index=0
gte1 = f32[] get-tuple-element(param), index=1
constant1 = f32[] constant(1)
add0 = f32[] add(constant1, gte0)
add1 = f32[] add(constant1, add0)
constant2 = f32[] constant(2)
add2 = f32[] multiply(constant2, gte1)
ROOT root = (f32[], f32[], f32[]) tuple(constant2, add1, add2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(module_str));
TF_ASSERT_OK(HloConstantSplitter( false,
[](const HloInstruction* instruction) {
return instruction->name() != "constant1";
})
.Run(module.get())
.status());
for (HloComputation* computation : module->computations()) {
for (HloInstruction* instruction : computation->instructions()) {
if (instruction->opcode() == HloOpcode::kConstant &&
instruction->name() != "constant1") {
EXPECT_LE(instruction->user_count(), 1);
}
}
}
const HloInstruction* constant1 = FindInstruction(module.get(), "constant1");
ASSERT_NE(constant1, nullptr);
EXPECT_EQ(constant1->user_count(), 2);
}
TEST_F(HloConstantSplitterTest, PreservingConstantsWithZeroUsers) {
const char* module_str = R"(
HloModule test_module
ENTRY entry_computation {
param = (f32[], f32[]) parameter(0),
sharding={{maximal device=0}, {maximal device=0}}
gte0 = f32[] get-tuple-element(param), index=0
gte1 = f32[] get-tuple-element(param), index=1
constant1 = f32[] constant(94.1934)
constant2 = f32[] constant(9.1934)
ROOT root = (f32[], f32[]) tuple(gte0, gte1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(module_str));
HloConstantSplitter pass = HloConstantSplitter();
const auto status_or = HloTestBase::RunHloPass(&pass, module.get());
TF_ASSERT_OK(status_or.status());
EXPECT_FALSE(status_or.value());
}
TEST_F(HloConstantSplitterTest, SplittingExpressionsWithBroadcast) {
const char* module_str = R"(
HloModule test_module
ENTRY entry_computation {
gte0 = f32[1024] parameter(0)
gte1 = f32[1024] parameter(1)
constant1 = f32[1024] iota(), iota_dimension=0
constant2 = f32[] constant(9.1934)
constant3 = f32[] constant(0.0)
constant4 = f32[] constant(1.0)
b = f32[1024] broadcast(constant2), dimensions={}
b2 = f32[1024] broadcast(constant3), dimensions={}
b3 = f32[1024] broadcast(constant4), dimensions={}
cmp = pred[1024] compare(constant1, b), direction=LT
s = f32[1024] select(cmp, b2, b3)
a1 = f32[1024] add(s, gte0)
a2 = f32[1024] add(s, gte1)
ROOT root = (f32[1024], f32[1024]) tuple(a1, a2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(module_str));
HloConstantSplitter pass = HloConstantSplitter(true);
const auto status_or = HloTestBase::RunHloPass(&pass, module.get());
TF_ASSERT_OK(status_or.status());
EXPECT_TRUE(status_or.value());
HloDCE dce;
TF_ASSERT_OK(dce.Run(module.get()).status());
XLA_VLOG_LINES(1, module->entry_computation()->ToString());
EXPECT_EQ(module->entry_computation()->instruction_count(), 23);
}
TEST_F(HloConstantSplitterTest, SplittingExpressionsWithSlice) {
const char* module_str = R"(
HloModule test_module
ENTRY entry_computation {
iota.0 = u32[64] iota(), iota_dimension=0
slice.0 = u32[32] slice(iota.0), slice={[0:32]}
broadcast.0 = u32[16,32] broadcast(slice.0), dimensions={1}
broadcast.1 = u32[32,32] broadcast(slice.0), dimensions={1}
p.0 = u32[16,32] parameter(0)
p.1 = u32[32,32] parameter(1)
add.0 = u32[16,32] add(p.0, broadcast.0)
add.1 = u32[32,32] add(p.1, broadcast.1)
ROOT root = (u32[16,32], u32[32,32]) tuple(add.0, add.1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(module_str));
HloConstantSplitter pass = HloConstantSplitter(true);
const auto status_or = HloTestBase::RunHloPass(&pass, module.get());
TF_ASSERT_OK(status_or.status());
EXPECT_TRUE(status_or.value());
HloDCE dce;
TF_ASSERT_OK(dce.Run(module.get()).status());
XLA_VLOG_LINES(1, module->entry_computation()->ToString());
EXPECT_EQ(module->entry_computation()->instruction_count(), 11);
}
TEST_F(HloConstantSplitterTest, NoSplittingSideEffectExpressions) {
const char* module_str = R"(
HloModule test_module
ENTRY entry_computation {
gte0 = f32[1024] parameter(0)
gte1 = f32[1024] parameter(1)
constant1 = f32[1024] iota(), iota_dimension=0
constant2 = f32[] constant(9.1934)
constant3 = f32[] constant(0.0)
constant4 = f32[] constant(0.0)
constant5 = f32[] constant(1.0)
b = f32[1024] broadcast(constant2), dimensions={}
b2 = f32[1024] broadcast(constant3), dimensions={}
rng = f32[] rng(constant4, constant5), distribution=rng_uniform
b3 = f32[1024] broadcast(rng), dimensions={}
cmp = pred[1024] compare(constant1, b), direction=LT
s = f32[1024] select(cmp, b2, b3)
a1 = f32[1024] add(s, gte0)
a2 = f32[1024] add(s, gte1)
ROOT root = (f32[1024], f32[1024]) tuple(a1, a2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(module_str));
HloConstantSplitter pass = HloConstantSplitter(true);
const int64_t count_before = module->entry_computation()->instruction_count();
TF_ASSERT_OK_AND_ASSIGN(bool changed,
HloTestBase::RunHloPass(&pass, module.get()));
HloDCE dce;
TF_ASSERT_OK(dce.Run(module.get()).status());
const int64_t count_after_dce =
module->entry_computation()->instruction_count();
EXPECT_TRUE(changed);
EXPECT_EQ(count_before, count_after_dce);
int64_t rng_count = 0;
for (HloInstruction* instruction :
module->entry_computation()->instructions()) {
if (instruction->opcode() == HloOpcode::kRng) {
rng_count++;
}
}
EXPECT_EQ(rng_count, 1);
}
TEST_F(HloConstantSplitterTest, InstructionsWithOneUser) {
const char* module_str = R"(
HloModule test_module, entry_computation_layout={(f32[1024]{0:T(512)})->f32[1024]{0:T(512)}}
reduce.add {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add = f32[] add(a, b)
}
ENTRY entry_computation {
constant1 = f32[] constant(1.1)
b1 = f32[1024]{0} broadcast(constant1), dimensions={}
iota.1 = f32[1024]{0} iota(), iota_dimension=0
add.1 = f32[1024]{0} add(b1, iota.1)
p0 = f32[1024]{0} parameter(0), sharding={devices=[4]0,1,2,3}
custom-call.0 = f32[256]{0} custom-call(p0), custom_call_target="SPMDFullToShardShape", sharding={manual}
constant0 = f32[] constant(0)
reduce.1 = f32[] reduce(custom-call.0, constant0), dimensions={0}, to_apply=reduce.add
b3 = f32[1024]{0} broadcast(reduce.1), dimensions={}
add.2 = f32[1024]{0} add(add.1, b3)
custom-call.1 = f32[4096]{0} custom-call(add.2), custom_call_target="SPMDShardToFullShape", sharding={devices=[4]0,1,2,3}
reshape = f32[4,1024]{1,0} reshape(custom-call.1)
reduce.2 = f32[1024]{0} reduce(reshape, constant0), dimensions={0}, to_apply=reduce.add
iota.2 = f32[1024]{0} iota(), iota_dimension=0
mul = f32[1024]{0} multiply(b1, iota.2)
ROOT sub = f32[1024]{0} subtract(reduce.2, mul), sharding={devices=[4]0,1,2,3}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(module_str));
HloConstantSplitter pass = HloConstantSplitter(true);
TF_ASSERT_OK_AND_ASSIGN(bool changed,
HloTestBase::RunHloPass(&pass, module.get()));
EXPECT_TRUE(changed);
int64_t broadcast_count_before_dce = 0, broadcast_count_after_dce = 0;
for (HloInstruction* instruction :
module->entry_computation()->instructions()) {
if (instruction->opcode() == HloOpcode::kBroadcast) {
broadcast_count_before_dce++;
}
}
EXPECT_EQ(broadcast_count_before_dce, 4);
HloDCE dce;
TF_ASSERT_OK(dce.Run(module.get()).status());
for (HloInstruction* instruction :
module->entry_computation()->instructions()) {
if (instruction->opcode() == HloOpcode::kBroadcast) {
broadcast_count_after_dce++;
}
}
EXPECT_EQ(broadcast_count_after_dce, 3);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/hlo/transforms/hlo_constant_splitter.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/hlo/transforms/hlo_constant_splitter_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
db2ed32f-50df-41fd-a494-8b379eaecd49 | cpp | tensorflow/tensorflow | kernel_def_util | tensorflow/core/framework/kernel_def_util.cc | tensorflow/core/framework/kernel_def_util_test.cc | #include "tensorflow/core/framework/kernel_def_util.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/attr_value_util.h"
#include "tensorflow/core/framework/kernel_def.pb.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/types.h"
namespace tensorflow {
namespace {
bool InTypeList(DataType dt, const AttrValue& type_list) {
for (int in_list : type_list.list().type()) {
if (dt == in_list) return true;
}
return false;
}
}
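// Sets *match to true iff every constraint in `kernel_def` is satisfied by the
// corresponding attribute in `attrs`. Returns an error for malformed
// constraints (unsupported or mixed value types) or missing attributes.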
Status KernelAttrsMatch(const KernelDef& kernel_def, AttrSlice attrs,
bool* match) {
*match = false;
for (const auto& constraint : kernel_def.constraint()) {
auto constraint_value_case = AttrValue::VALUE_NOT_SET;
int value_type_num = 0;
if (constraint.allowed_values().list().type_size() > 0) {
constraint_value_case = AttrValue::kType;
value_type_num++;
}
if (constraint.allowed_values().list().s_size() > 0) {
constraint_value_case = AttrValue::kS;
value_type_num++;
}
if (constraint.allowed_values().list().i_size() > 0) {
constraint_value_case = AttrValue::kI;
value_type_num++;
}
if (constraint.allowed_values().list().b_size() > 0) {
constraint_value_case = AttrValue::kB;
value_type_num++;
}
if (value_type_num == 0) {
return errors::Unimplemented(
"KernelDef '", kernel_def.ShortDebugString(),
" has constraint on attr '", constraint.name(),
"' with unsupported type: ",
SummarizeAttrValue(constraint.allowed_values()));
}
if (value_type_num > 1) {
return errors::InvalidArgument(
"KernelDef '", kernel_def.ShortDebugString(),
" has constraint on attr '", constraint.name(),
"' with more than one value type: ",
SummarizeAttrValue(constraint.allowed_values()));
}
const AttrValue* attr_value = attrs.Find(constraint.name());
if (attr_value == nullptr) {
return errors::InvalidArgument(
"OpKernel '", kernel_def.op(), "' has constraint on attr '",
constraint.name(), "' not in NodeDef '", attrs.SummarizeNode(),
"', KernelDef: '", kernel_def.ShortDebugString(), "'");
}
#define RETURN_IF_ATTR_NOT_FOUND(n, oneof_case, type_str) \
do { \
if (constraint_value_case == AttrValue::oneof_case) { \
Status s = AttrValueHasType(*attr_value, type_str); \
if (!s.ok()) { \
return errors::InvalidArgument( \
"KernelDef '", kernel_def.ShortDebugString(), \
"' has constraint on attr '", constraint.name(), \
"' that has value '", SummarizeAttrValue(*attr_value), \
"' that does not have the same type in NodeDef " \
"'", \
attrs.SummarizeNode(), "'"); \
} \
bool found = false; \
for (auto& value : constraint.allowed_values().list().n()) { \
if (value == attr_value->n()) { \
found = true; \
break; \
} \
} \
if (!found) { \
return OkStatus(); \
} \
} \
} while (false)
RETURN_IF_ATTR_NOT_FOUND(s, kS, "string");
RETURN_IF_ATTR_NOT_FOUND(i, kI, "int");
RETURN_IF_ATTR_NOT_FOUND(b, kB, "bool");
#undef RETURN_IF_ATTR_NOT_FOUND
if (constraint_value_case != AttrValue::kType) {
continue;
}
if (attr_value->type() != DT_INVALID) {
if (!InTypeList(attr_value->type(), constraint.allowed_values())) {
return absl::OkStatus();
}
} else {
if (!AttrValueHasType(*attr_value, "list(type)").ok()) {
return errors::InvalidArgument(
"KernelDef '", kernel_def.ShortDebugString(),
"' has constraint on attr '", constraint.name(),
"' that has value '", SummarizeAttrValue(*attr_value),
"' that does not have type 'type' or 'list(type)' in NodeDef "
"'",
attrs.SummarizeNode(), "'");
}
for (int t : attr_value->list().type()) {
if (!InTypeList(static_cast<DataType>(t),
constraint.allowed_values())) {
return absl::OkStatus();
}
}
}
}
*match = true;
return absl::OkStatus();
}
} | #include "tensorflow/core/framework/kernel_def_util.h"
#include "tensorflow/core/framework/kernel_def.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
NodeDef NodeDefFromText(const string& text) {
NodeDef node_def;
EXPECT_TRUE(protobuf::TextFormat::MergeFromString(text, &node_def));
return node_def;
}
KernelDef KernelDefFromText(const string& text) {
KernelDef kernel_def;
EXPECT_TRUE(protobuf::TextFormat::MergeFromString(text, &kernel_def));
return kernel_def;
}
class AttrsMatchTest : public ::testing::Test {
protected:
void ExpectStatus(const string& node_def_str, const string& kernel_def_str,
error::Code code) {
bool match;
auto status = KernelAttrsMatch(KernelDefFromText(kernel_def_str),
NodeDefFromText(node_def_str), &match);
LOG(INFO) << "status: " << status;
EXPECT_EQ(code, status.code());
if (!status.ok()) {
EXPECT_FALSE(match)
<< "Expect no match between the given NodeDef and KernelDef";
}
}
};
TEST_F(AttrsMatchTest, ValidConstraint) {
string node_def_str = R"(
name: "ValidConstraint-op"
op: "ValidConstraint"
attr {
key: "T"
value {
type: DT_FLOAT
}
}
)";
string kernel_def_str = R"(
op: "ValidConstraint"
device_type: "CPU"
constraint {
name: "T"
allowed_values {
list {
type: DT_FLOAT
}
}
}
)";
ExpectStatus(node_def_str, kernel_def_str, error::OK);
}
TEST_F(AttrsMatchTest, BadConstraint) {
string node_def_str = R"(
name: "BadConstraint-op"
op: "BadConstraint"
attr {
key: "dtype"
value {
type: DT_FLOAT
}
}
)";
string kernel_def_str = R"(
op: "BadConstraint"
device_type: "CPU"
constraint {
name: "T"
allowed_values {
list {
type: DT_FLOAT
}
}
}
)";
ExpectStatus(node_def_str, kernel_def_str, error::INVALID_ARGUMENT);
}
TEST_F(AttrsMatchTest, Unimplemented) {
string node_def_str = R"(
name: "BadConstraint-op"
op: "BadConstraint"
attr {
key: "dtype"
value {
type: DT_FLOAT
}
}
)";
string kernel_def_str = R"(
op: "BadConstraint"
device_type: "CPU"
constraint {
name: "T"
allowed_values {
list {
}
}
}
)";
ExpectStatus(node_def_str, kernel_def_str, error::UNIMPLEMENTED);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/framework/kernel_def_util.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/framework/kernel_def_util_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
86a41926-4ca1-4b47-996d-dc64e389821d | cpp | tensorflow/tensorflow | lsh_projection | tensorflow/lite/kernels/lsh_projection.cc | tensorflow/lite/kernels/lsh_projection_test.cc | #include <stddef.h>
#include <stdint.h>
#include <cstring>
#include <memory>
#include "tensorflow/lite/core/c/builtin_op_data.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/kernel_util.h"
#include <farmhash.h>
namespace tflite {
namespace ops {
namespace builtin {
namespace lsh_projection {
TfLiteStatus Resize(TfLiteContext* context, TfLiteNode* node) {
auto* params =
reinterpret_cast<TfLiteLSHProjectionParams*>(node->builtin_data);
TF_LITE_ENSURE(context, NumInputs(node) == 2 || NumInputs(node) == 3);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
const TfLiteTensor* hash;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &hash));
TF_LITE_ENSURE_EQ(context, NumDimensions(hash), 2);
TF_LITE_ENSURE(context, SizeOfDimension(hash, 1) <= 32);
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 1, &input));
TF_LITE_ENSURE(context, NumDimensions(input) >= 1);
TF_LITE_ENSURE(context, SizeOfDimension(input, 0) >= 1);
if (NumInputs(node) == 3) {
const TfLiteTensor* weight;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 2, &weight));
TF_LITE_ENSURE_EQ(context, NumDimensions(weight), 1);
TF_LITE_ENSURE_EQ(context, SizeOfDimension(weight, 0),
SizeOfDimension(input, 0));
}
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));
TfLiteIntArray* outputSize = TfLiteIntArrayCreate(1);
switch (params->type) {
case kTfLiteLshProjectionSparse:
outputSize->data[0] = SizeOfDimension(hash, 0);
break;
case kTfLiteLshProjectionDense:
outputSize->data[0] = SizeOfDimension(hash, 0) * SizeOfDimension(hash, 1);
break;
default:
return kTfLiteError;
}
return context->ResizeTensor(context, output, outputSize);
}
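// Computes one hash bit: fingerprints each input item together with `seed`,
// accumulates the (optionally weighted) signed hash values, and returns 1 if
// the running sum is positive, 0 otherwise.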
int RunningSignBit(const TfLiteTensor* input, const TfLiteTensor* weight,
float seed) {
double score = 0.0;
int input_item_bytes = input->bytes / SizeOfDimension(input, 0);
char* input_ptr = input->data.raw;
const size_t seed_size = sizeof(float);
const size_t key_bytes = sizeof(float) + input_item_bytes;
std::unique_ptr<char[]> key(new char[key_bytes]);
const float* weight_ptr = GetTensorData<float>(weight);
for (int i = 0; i < SizeOfDimension(input, 0); ++i) {
memcpy(key.get(), &seed, seed_size);
memcpy(key.get() + seed_size, input_ptr, input_item_bytes);
int64_t hash_signature = ::util::Fingerprint64(key.get(), key_bytes);
double running_value = static_cast<double>(hash_signature);
input_ptr += input_item_bytes;
if (weight_ptr == nullptr) {
score += running_value;
} else {
score += weight_ptr[i] * running_value;
}
}
return (score > 0) ? 1 : 0;
}
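// Sparse projection: packs the num_bits sign bits of each hash function into a
// single signature and offsets it by the hash index so that signatures from
// different hash functions occupy disjoint ranges.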
void SparseLshProjection(const TfLiteTensor* hash, const TfLiteTensor* input,
const TfLiteTensor* weight, int32_t* out_buf) {
int num_hash = SizeOfDimension(hash, 0);
int num_bits = SizeOfDimension(hash, 1);
for (int i = 0; i < num_hash; i++) {
int32_t hash_signature = 0;
for (int j = 0; j < num_bits; j++) {
float seed = GetTensorData<float>(hash)[i * num_bits + j];
int bit = RunningSignBit(input, weight, seed);
hash_signature = (hash_signature << 1) | bit;
}
*out_buf++ = hash_signature + i * (1 << num_bits);
}
}
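// Dense projection: emits each of the num_hash * num_bits sign bits as a
// separate output element.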
void DenseLshProjection(const TfLiteTensor* hash, const TfLiteTensor* input,
const TfLiteTensor* weight, int32_t* out_buf) {
int num_hash = SizeOfDimension(hash, 0);
int num_bits = SizeOfDimension(hash, 1);
for (int i = 0; i < num_hash; i++) {
for (int j = 0; j < num_bits; j++) {
float seed = GetTensorData<float>(hash)[i * num_bits + j];
int bit = RunningSignBit(input, weight, seed);
*out_buf++ = bit;
}
}
}
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
auto* params =
reinterpret_cast<TfLiteLSHProjectionParams*>(node->builtin_data);
TfLiteTensor* out_tensor;
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &out_tensor));
int32_t* out_buf = out_tensor->data.i32;
const TfLiteTensor* hash;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &hash));
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 1, &input));
const TfLiteTensor* weight =
NumInputs(node) == 2 ? nullptr : GetInput(context, node, 2);
switch (params->type) {
case kTfLiteLshProjectionDense:
DenseLshProjection(hash, input, weight, out_buf);
break;
case kTfLiteLshProjectionSparse:
SparseLshProjection(hash, input, weight, out_buf);
break;
default:
return kTfLiteError;
}
return kTfLiteOk;
}
}
TfLiteRegistration* Register_LSH_PROJECTION() {
static TfLiteRegistration r = {nullptr, nullptr, lsh_projection::Resize,
lsh_projection::Eval};
return &r;
}
}
}
} | #include <initializer_list>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "flatbuffers/flatbuffers.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace {
using ::testing::ElementsAre;
class LSHProjectionOpModel : public SingleOpModel {
public:
LSHProjectionOpModel(LSHProjectionType type,
std::initializer_list<int> hash_shape,
std::initializer_list<int> input_shape,
std::initializer_list<int> weight_shape) {
hash_ = AddInput(TensorType_FLOAT32);
input_ = AddInput(TensorType_INT32);
if (weight_shape.size() > 0) {
weight_ = AddInput(TensorType_FLOAT32);
}
output_ = AddOutput(TensorType_INT32);
SetBuiltinOp(BuiltinOperator_LSH_PROJECTION,
BuiltinOptions_LSHProjectionOptions,
CreateLSHProjectionOptions(builder_, type).Union());
if (weight_shape.size() > 0) {
BuildInterpreter({hash_shape, input_shape, weight_shape});
} else {
BuildInterpreter({hash_shape, input_shape});
}
output_size_ = 1;
for (int i : hash_shape) {
output_size_ *= i;
if (type == LSHProjectionType_SPARSE) {
break;
}
}
}
void SetInput(std::initializer_list<int> data) {
PopulateTensor(input_, data);
}
void SetHash(std::initializer_list<float> data) {
PopulateTensor(hash_, data);
}
void SetWeight(std::initializer_list<float> f) { PopulateTensor(weight_, f); }
std::vector<int> GetOutput() { return ExtractVector<int>(output_); }
private:
int input_;
int hash_;
int weight_;
int output_;
int output_size_;
};
TEST(LSHProjectionOpTest2, Dense1DInputs) {
LSHProjectionOpModel m(LSHProjectionType_DENSE, {3, 2}, {5}, {5});
m.SetInput({12345, 54321, 67890, 9876, -12345678});
m.SetHash({0.123, 0.456, -0.321, 1.234, 5.678, -4.321});
m.SetWeight({1.0, 1.0, 1.0, 1.0, 1.0});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
#if defined(__BYTE_ORDER__) && defined(__ORDER_BIG_ENDIAN__) && \
__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
EXPECT_THAT(m.GetOutput(), ElementsAre(0, 0, 1, 1, 1, 0));
#else
EXPECT_THAT(m.GetOutput(), ElementsAre(0, 0, 0, 1, 0, 0));
#endif
}
TEST(LSHProjectionOpTest2, Sparse1DInputs) {
LSHProjectionOpModel m(LSHProjectionType_SPARSE, {3, 2}, {5}, {});
m.SetInput({12345, 54321, 67890, 9876, -12345678});
m.SetHash({0.123, 0.456, -0.321, 1.234, 5.678, -4.321});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
#if defined(__BYTE_ORDER__) && defined(__ORDER_BIG_ENDIAN__) && \
__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
EXPECT_THAT(m.GetOutput(), ElementsAre(0 + 0, 4 + 3, 8 + 2));
#else
EXPECT_THAT(m.GetOutput(), ElementsAre(0 + 0, 4 + 1, 8 + 0));
#endif
}
TEST(LSHProjectionOpTest2, Sparse3DInputs) {
LSHProjectionOpModel m(LSHProjectionType_SPARSE, {3, 2}, {5, 2, 2}, {5});
m.SetInput({1234, 2345, 3456, 1234, 4567, 5678, 6789, 4567, 7891, 8912,
9123, 7890, -987, -876, -765, -987, -543, -432, -321, -543});
m.SetHash({0.123, 0.456, -0.321, 1.234, 5.678, -4.321});
m.SetWeight({0.12, 0.34, 0.56, 0.67, 0.78});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
#if defined(__BYTE_ORDER__) && defined(__ORDER_BIG_ENDIAN__) && \
__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
EXPECT_THAT(m.GetOutput(), ElementsAre(0 + 0, 4 + 3, 8 + 2));
#else
EXPECT_THAT(m.GetOutput(), ElementsAre(0 + 2, 4 + 1, 8 + 1));
#endif
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/lsh_projection.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/lsh_projection_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
b6c66bda-f15d-4819-892f-c0d9a4e0df25 | cpp | tensorflow/tensorflow | dynamic_update_slice | tensorflow/lite/kernels/dynamic_update_slice.cc | tensorflow/lite/kernels/dynamic_update_slice_test.cc | #include <algorithm>
#include <cmath>
#include <cstdint>
#include <vector>
#include "tensorflow/lite/core/c/c_api_types.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/optimized/optimized_ops.h"
#include "tensorflow/lite/kernels/internal/tensor.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/internal/types.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace dynamic_update_slice {
constexpr int kOperandTensor = 0;
constexpr int kUpdateTensor = 1;
constexpr int kStartIndicesTensor = 2;
constexpr int kOutputTensor = 0;
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* operand;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kOperandTensor, &operand));
const TfLiteTensor* update;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kUpdateTensor, &update));
const TfLiteTensor* start_indices;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kStartIndicesTensor,
&start_indices));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
TF_LITE_ENSURE(context, NumDimensions(start_indices) == 1);
TF_LITE_ENSURE(context,
SizeOfDimension(start_indices, 0) == NumDimensions(operand));
TF_LITE_ENSURE(context, NumDimensions(update) == NumDimensions(operand));
for (int i = 0; i < NumDimensions(operand); i++) {
TF_LITE_ENSURE(context,
SizeOfDimension(update, i) <= SizeOfDimension(operand, i));
}
TF_LITE_ENSURE_EQ(context, NumInputs(node), 3);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
TF_LITE_ENSURE_TYPES_EQ(context, operand->type, update->type);
TF_LITE_ENSURE(context, start_indices->type == kTfLiteInt32 ||
start_indices->type == kTfLiteInt64);
output->type = operand->type;
TfLiteIntArray* output_size = TfLiteIntArrayCopy(operand->dims);
return context->ResizeTensor(context, output, output_size);
}
int TensorIndexToFlat(const int* index, const int dims,
const RuntimeShape& shape,
const int* start_indices = nullptr) {
int flat_index = index[0] + (start_indices ? start_indices[0] : 0);
for (int i = 1; i < dims; i++) {
flat_index = flat_index * shape.Dims(i) + index[i] +
(start_indices ? start_indices[i] : 0);
}
return flat_index;
}
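// Clamps each start index to [0, input_dim - update_dim] so that the update
// region always lies fully inside the operand.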
std::vector<int> ClampStartIndices(int input_dims, const int64_t* indices_data,
const RuntimeShape& input_shape,
const RuntimeShape& update_shape) {
std::vector<int> clamped_start_indices(input_dims, 0);
for (int i = 0; i < input_dims; i++) {
clamped_start_indices[i] = static_cast<int32_t>(
std::min<int64_t>(std::max<int64_t>(0, indices_data[i]),
input_shape.Dims(i) - update_shape.Dims(i)));
}
return clamped_start_indices;
}
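// Recursively copies `update` into `output`, one contiguous innermost row
// (memcpy) at a time, offsetting by the clamped start indices along each
// dimension.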
template <typename T>
void update_slice(int current_dim, int max_dim, const int32_t* output_stride,
const int32_t* update_stride, const int32_t* update_shape,
const T* update, const int32_t* indices_data, T* output) {
if (current_dim == max_dim) return;
if (current_dim == max_dim - 1) {
output += indices_data[current_dim] * output_stride[current_dim];
memcpy(output, update, update_shape[max_dim - 1] * sizeof(T));
} else {
output += indices_data[current_dim] * output_stride[current_dim];
for (int i = 0; i < update_shape[current_dim]; ++i) {
update_slice(current_dim + 1, max_dim, output_stride, update_stride,
update_shape, update, indices_data, output);
output += output_stride[current_dim];
update += update_stride[current_dim];
}
}
}
template <typename T>
void DynamicUpdateSlice(const TfLiteTensor* input, const TfLiteTensor* update,
const int64_t* indices_data, TfLiteTensor* output) {
const auto& input_shape = GetTensorShape(input);
const auto& update_shape = GetTensorShape(update);
const T* update_data = GetTensorData<T>(update);
T* output_data = GetTensorData<T>(output);
const int input_dims = input_shape.DimensionsCount();
if (input_shape.FlatSize() == update_shape.FlatSize()) {
memcpy(output_data, update_data, input_shape.FlatSize() * sizeof(T));
return;
}
std::vector<int> clamped_start_indices =
ClampStartIndices(input_dims, indices_data, input_shape, update_shape);
if (input->data.data != output->data.data) {
memcpy(output->data.data, input->data.data, input->bytes);
}
if (update_shape.FlatSize() == 0) {
return;
}
std::vector<int> output_stride(input_dims);
std::vector<int> update_stride(input_dims);
output_stride[input_dims - 1] = 1;
update_stride[input_dims - 1] = 1;
const int32_t* input_shape_data = input_shape.DimsData();
const int32_t* update_shape_data = update_shape.DimsData();
for (int i = input_dims - 2; i >= 0; --i) {
output_stride[i] = output_stride[i + 1] * input_shape_data[i + 1];
update_stride[i] = update_stride[i + 1] * update_shape_data[i + 1];
}
update_slice(0, input_dims, output_stride.data(), update_stride.data(),
update_shape.DimsData(), update_data,
clamped_start_indices.data(), output_data);
}
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* operand;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kOperandTensor, &operand));
const TfLiteTensor* update;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kUpdateTensor, &update));
const TfLiteTensor* indice;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kStartIndicesTensor, &indice));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
const auto& input_shape = GetTensorShape(operand);
const int input_dims = input_shape.DimensionsCount();
std::vector<int64_t> indices_data_i64;
if (indice->type == kTfLiteInt32) {
for (int i = 0; i < input_dims; i++)
indices_data_i64.push_back(static_cast<int64_t>(indice->data.i32[i]));
} else if (indice->type == kTfLiteInt64) {
for (int i = 0; i < input_dims; i++)
indices_data_i64.push_back(indice->data.i64[i]);
} else {
TF_LITE_KERNEL_LOG(context,
"DynamicUpdateSlice only currently supports "
"int32 or int64 indices type, got %d.",
indice->type);
return kTfLiteError;
}
switch (operand->type) {
case kTfLiteFloat32:
DynamicUpdateSlice<float>(operand, update, indices_data_i64.data(),
output);
break;
case kTfLiteBool:
DynamicUpdateSlice<bool>(operand, update, indices_data_i64.data(),
output);
break;
case kTfLiteInt8:
DynamicUpdateSlice<int8_t>(operand, update, indices_data_i64.data(),
output);
break;
case kTfLiteInt32:
DynamicUpdateSlice<int32_t>(operand, update, indices_data_i64.data(),
output);
break;
case kTfLiteInt64:
DynamicUpdateSlice<int64_t>(operand, update, indices_data_i64.data(),
output);
break;
default:
TF_LITE_KERNEL_LOG(context,
"DynamicUpdateSlice only currently supports "
"1-bit/8-bit/32-bit/64-bit integer or "
"float type, got %d.",
operand->type);
return kTfLiteError;
}
return kTfLiteOk;
}
}
TfLiteRegistration* Register_DYNAMIC_UPDATE_SLICE() {
static TfLiteRegistration r = {nullptr,
nullptr,
dynamic_update_slice::Prepare,
dynamic_update_slice::Eval,
nullptr,
0,
nullptr,
0,
nullptr,
nullptr,
kTfLiteInplaceOpInput0Shared};
return &r;
}
}
}
} | #include <stdint.h>
#include <algorithm>
#include <initializer_list>
#include <string>
#include <unordered_map>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "flatbuffers/flatbuffers.h"
#include "tensorflow/lite/core/interpreter.h"
#include "tensorflow/lite/kernels/subgraph_test_util.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace {
using ::testing::ElementsAre;
using ::testing::ElementsAreArray;
class DynamicUpdateSliceOpModel : public SingleOpModel {
public:
DynamicUpdateSliceOpModel(const TensorData& operand, const TensorData& update,
const TensorData& start_indices) {
input_ = AddInput(operand);
update_ = AddInput(update);
start_indices_ = AddInput(start_indices);
output_ = AddOutput(operand.type);
SetBuiltinOp(BuiltinOperator_DYNAMIC_UPDATE_SLICE,
BuiltinOptions_DynamicUpdateSliceOptions,
CreateDynamicUpdateSliceOptions(builder_).Union());
BuildInterpreter(
{GetShape(input_), GetShape(update_), GetShape(start_indices_)});
}
template <typename T>
void SetInput(std::initializer_list<T> data) {
PopulateTensor<T>(input_, data);
}
template <typename T>
void SetUpdate(std::initializer_list<T> data) {
PopulateTensor<T>(update_, data);
}
void SetStringInput(std::initializer_list<string> data) {
PopulateStringTensor(input_, data);
}
template <typename T>
void SetStartIndices(std::initializer_list<T> data) {
PopulateTensor<T>(start_indices_, data);
}
template <typename T>
std::vector<T> GetOutput() {
return ExtractVector<T>(output_);
}
std::vector<string> GetStringOutput() {
return ExtractVector<string>(output_);
}
std::vector<int> GetOutputShape() { return GetTensorShape(output_); }
protected:
int input_;
int update_;
int start_indices_;
int output_;
};
TEST(DynamicUpdateSliceOpTest, SimpleTestF32InPlaceInput) {
DynamicUpdateSliceOpModel m({TensorType_FLOAT32, {3, 3}},
{TensorType_FLOAT32, {2, 1}},
{TensorType_INT32, {2}});
m.SetInput<float>({1, 2, 3,
4, 5, 6,
7, 8, 9});
m.SetUpdate<float>({-1, -2});
m.SetStartIndices<int32_t>({1, 1});
const int kInplaceInputTensorIdx = 0;
const int kInplaceOutputTensorIdx = 0;
const TfLiteTensor* input_tensor = m.GetInputTensor(kInplaceInputTensorIdx);
TfLiteTensor* output_tensor = m.GetOutputTensor(kInplaceOutputTensorIdx);
output_tensor->data.data = input_tensor->data.data;
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput<float>(),
ElementsAreArray(ArrayFloatNear({1, 2, 3,
4, -1, 6,
7, -2, 9})));
EXPECT_EQ(output_tensor->data.data, input_tensor->data.data);
}
TEST(DynamicUpdateSliceOpTest, SimpleTestF32) {
DynamicUpdateSliceOpModel m({TensorType_FLOAT32, {3, 3}},
{TensorType_FLOAT32, {2, 1}},
{TensorType_INT32, {2}});
m.SetInput<float>({1, 2, 3,
4, 5, 6,
7, 8, 9});
m.SetUpdate<float>({-1, -2});
m.SetStartIndices<int32_t>({1, 1});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput<float>(),
ElementsAreArray(ArrayFloatNear({1, 2, 3,
4, -1, 6,
7, -2, 9})));
}
TEST(DynamicUpdateSliceOpTest, SimpleTestI1) {
DynamicUpdateSliceOpModel m({TensorType_BOOL, {3, 3}},
{TensorType_BOOL, {2, 1}},
{TensorType_INT32, {2}});
m.SetInput<bool>({true, true, true,
true, true, true,
true, true, true});
m.SetUpdate<bool>({false, false});
m.SetStartIndices<int32_t>({1, 1});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput<bool>(), ElementsAreArray({true, true, true,
true, false, true,
true, false, true}));
}
TEST(DynamicUpdateSliceOpTest, SimpleTestI8) {
DynamicUpdateSliceOpModel m({TensorType_INT8, {3, 3}},
{TensorType_INT8, {2, 1}},
{TensorType_INT32, {2}});
m.SetInput<int8_t>({1, 2, 3,
4, 5, 6,
7, 8, 9});
m.SetUpdate<int8_t>({-1, -2});
m.SetStartIndices<int32_t>({1, 1});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput<int8_t>(), ElementsAreArray({1, 2, 3,
4, -1, 6,
7, -2, 9}));
}
TEST(DynamicUpdateSliceOpTest, SimpleTestI32) {
DynamicUpdateSliceOpModel m({TensorType_INT32, {3, 3}},
{TensorType_INT32, {2, 1}},
{TensorType_INT32, {2}});
m.SetInput<int32_t>({1, 2, 3,
4, 5, 6,
7, 8, 9});
m.SetUpdate<int32_t>({-1, -2});
m.SetStartIndices<int32_t>({1, 1});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput<int32_t>(), ElementsAreArray({1, 2, 3,
4, -1, 6,
7, -2, 9}));
}
TEST(DynamicUpdateSliceOpTest, ZeroSizeTestI32) {
DynamicUpdateSliceOpModel m({TensorType_INT32, {3, 3}},
{TensorType_INT32, {2, 0}},
{TensorType_INT32, {2}});
m.SetInput<int32_t>({1, 2, 3,
4, 5, 6,
7, 8, 9});
m.SetStartIndices<int32_t>({1, 1});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput<int32_t>(), ElementsAreArray({1, 2, 3,
4, 5, 6,
7, 8, 9}));
}
TEST(DynamicUpdateSliceOpTest, SimpleTestI64) {
DynamicUpdateSliceOpModel m({TensorType_INT64, {3, 3}},
{TensorType_INT64, {2, 1}},
{TensorType_INT32, {2}});
m.SetInput<int64_t>({1, 2, 3,
4, 5, 6,
7, 8, 9});
m.SetUpdate<int64_t>({-1, -2});
m.SetStartIndices<int32_t>({1, 1});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput<int64_t>(), ElementsAreArray({1, 2, 3,
4, -1, 6,
7, -2, 9}));
}
TEST(DynamicUpdateSliceOpTest, SimpleTestI64Indices) {
DynamicUpdateSliceOpModel m({TensorType_INT64, {3, 3}},
{TensorType_INT64, {2, 1}},
{TensorType_INT64, {2}});
m.SetInput<int64_t>({1, 2, 3,
4, 5, 6,
7, 8, 9});
m.SetUpdate<int64_t>({-1, -2});
m.SetStartIndices<int64_t>({1, 1});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput<int64_t>(), ElementsAreArray({1, 2, 3,
4, -1, 6,
7, -2, 9}));
}
TEST(DynamicUpdateSliceOpTest, BoundaryTest) {
DynamicUpdateSliceOpModel m({TensorType_FLOAT32, {3, 3}},
{TensorType_FLOAT32, {2, 2}},
{TensorType_INT32, {2}});
m.SetInput<float>({1, 2, 3,
4, 5, 6,
7, 8, 9});
m.SetUpdate<float>({-1, -2,
-3, -4});
m.SetStartIndices<int32_t>({2, 2});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput<float>(),
ElementsAreArray(ArrayFloatNear({1, 2, 3,
4, -1, -2,
7, -3, -4})));
}
TEST(DynamicUpdateSliceOpTest, UpdateShapeTooLargeTest) {
EXPECT_DEATH_IF_SUPPORTED(
DynamicUpdateSliceOpModel({TensorType_FLOAT32, {3, 3}},
{TensorType_FLOAT32, {4, 2}},
{TensorType_INT32, {2}}),
"SizeOfDimension\\(update, i\\) <= SizeOfDimension\\(operand, "
"i\\) was not true.");
}
class DynamicUpdateSliceGraphModel {
public:
static constexpr struct InPlaceGraph {
} kInPlaceGraph{};
static constexpr struct NotInPlaceGraph {
} kNotInPlaceGraph{};
DynamicUpdateSliceGraphModel(InPlaceGraph, bool multiple_consumers) {
builder_.BuildInplaceDynamicUpdateSliceSubgraph(
interpreter_.primary_subgraph(), multiple_consumers);
SetUpInterpreter();
}
explicit DynamicUpdateSliceGraphModel(NotInPlaceGraph) {
builder_.BuildInputDynamicUpdateSliceSubgraph(
interpreter_.primary_subgraph());
SetUpInterpreter();
}
void SetUpInterpreter() {
interpreter_.ResizeInputTensor(interpreter_.inputs()[0], {2, 3});
interpreter_.ResizeInputTensor(interpreter_.inputs()[1], {1, 3});
interpreter_.ResizeInputTensor(interpreter_.inputs()[2], {2});
CHECK_EQ(interpreter_.AllocateTensors(), kTfLiteOk);
subgraph_test_util::FillIntTensor(&GetInputTensor(0), {0, 0, 0, 0, 0, 0});
subgraph_test_util::FillIntTensor(&GetInputTensor(1), {3, 3, 3});
subgraph_test_util::FillIntTensor(&GetInputTensor(2), {1, 0});
}
Interpreter& GetInterpreter() { return interpreter_; }
TfLiteTensor& GetTensor(int index) { return *interpreter_.tensor(index); }
TfLiteTensor& GetInputTensor(int index) {
return GetTensor(interpreter_.inputs()[index]);
}
TfLiteTensor& GetOutputTensor(int index) {
return GetTensor(interpreter_.outputs()[index]);
}
protected:
Interpreter interpreter_;
subgraph_test_util::SubgraphBuilder builder_;
};
absl::Span<int> ShapeOf(const TfLiteTensor& tensor) {
if (!tensor.dims) {
return {};
}
return absl::Span<int>(tensor.dims->data, tensor.dims->size);
}
template <class T>
absl::Span<int32_t> DataOf(const TfLiteTensor& tensor) {
return absl::Span<int>(tensor.data.i32, tensor.bytes / sizeof(T));
}
TEST(DynamicUpdateSliceOpTest, DoNotReuseGraphInputBuffer) {
auto model = DynamicUpdateSliceGraphModel(
DynamicUpdateSliceGraphModel::kNotInPlaceGraph);
ASSERT_EQ(model.GetInterpreter().Invoke(), kTfLiteOk);
const TfLiteTensor& output = model.GetOutputTensor(0);
EXPECT_THAT(ShapeOf(output), ElementsAre(2, 3));
EXPECT_THAT(DataOf<int32_t>(output), ElementsAre(1, 1, 1, 4, 4, 4));
const TfLiteTensor& input0 = model.GetInputTensor(0);
const TfLiteTensor& intermediate = model.GetTensor(5);
EXPECT_NE(input0.data.data, intermediate.data.data);
}
TEST(DynamicUpdateSliceOpTest, OnlyShareBufferForASingleConsumer) {
for (bool multiple_consumers : {true, false}) {
auto model = DynamicUpdateSliceGraphModel(
DynamicUpdateSliceGraphModel::kInPlaceGraph, multiple_consumers);
ASSERT_EQ(model.GetInterpreter().Invoke(), kTfLiteOk);
const TfLiteTensor& output = model.GetOutputTensor(0);
EXPECT_THAT(ShapeOf(output), ElementsAre(2, 3));
EXPECT_THAT(DataOf<int32_t>(output), ElementsAre(2, 2, 2, 4, 4, 4));
const TfLiteTensor& intermediate0 = model.GetTensor(5);
const TfLiteTensor& intermediate1 = model.GetTensor(6);
if (multiple_consumers) {
EXPECT_NE(intermediate0.data.data, intermediate1.data.data);
} else {
EXPECT_EQ(intermediate0.data.data, intermediate1.data.data);
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/dynamic_update_slice.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/dynamic_update_slice_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
0ecfd9b5-6bc6-4d8f-80b0-b0f99116647f | cpp | google/tensorstore | sharding_indexed | tensorstore/driver/zarr3/codec/sharding_indexed.cc | tensorstore/driver/zarr3/codec/sharding_indexed_test.cc | #include "tensorstore/driver/zarr3/codec/sharding_indexed.h"
#include <stdint.h>
#include <algorithm>
#include <string>
#include <string_view>
#include <utility>
#include <vector>
#include "absl/status/status.h"
#include "riegeli/bytes/reader.h"
#include "riegeli/bytes/writer.h"
#include "tensorstore/array.h"
#include "tensorstore/box.h"
#include "tensorstore/data_type.h"
#include "tensorstore/driver/zarr3/codec/bytes.h"
#include "tensorstore/driver/zarr3/codec/codec.h"
#include "tensorstore/driver/zarr3/codec/codec_chain_spec.h"
#include "tensorstore/driver/zarr3/codec/codec_spec.h"
#include "tensorstore/driver/zarr3/codec/crc32c.h"
#include "tensorstore/driver/zarr3/codec/registry.h"
#include "tensorstore/index.h"
#include "tensorstore/index_interval.h"
#include "tensorstore/internal/async_write_array.h"
#include "tensorstore/internal/cache/cache.h"
#include "tensorstore/internal/chunk_grid_specification.h"
#include "tensorstore/internal/global_initializer.h"
#include "tensorstore/internal/intrusive_ptr.h"
#include "tensorstore/internal/json_binding/json_binding.h"
#include "tensorstore/internal/json_binding/std_array.h"
#include "tensorstore/internal/lexicographical_grid_index_key.h"
#include "tensorstore/kvstore/spec.h"
#include "tensorstore/kvstore/zarr3_sharding_indexed/key.h"
#include "tensorstore/kvstore/zarr3_sharding_indexed/shard_format.h"
#include "tensorstore/kvstore/zarr3_sharding_indexed/zarr3_sharding_indexed.h"
#include "tensorstore/rank.h"
#include "tensorstore/util/executor.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
namespace internal_zarr3 {
absl::Status SubChunkRankMismatch(span<const Index> sub_chunk_shape,
DimensionIndex outer_rank) {
return absl::InvalidArgumentError(tensorstore::StrCat(
"sharding_indexed sub-chunk shape of ", sub_chunk_shape,
" is not compatible with array of rank ", outer_rank));
}
absl::Status SubChunkShapeMismatch(span<const Index> sub_chunk_shape,
span<const Index> chunk_shape) {
return absl::InvalidArgumentError(tensorstore::StrCat(
"sharding_indexed sub-chunk shape of ", sub_chunk_shape,
" does not evenly divide chunk shape of ", chunk_shape));
}
namespace {
class ShardingIndexedCodec : public ZarrShardingCodec {
public:
explicit ShardingIndexedCodec(
internal::ChunkGridSpecification&& sub_chunk_grid)
: sub_chunk_grid_(std::move(sub_chunk_grid)) {}
class State : public ZarrShardingCodec::PreparedState,
public internal::LexicographicalGridIndexKeyParser {
public:
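    // EncodeArray/DecodeArray are not expected to be invoked for the sharding
    // state; sub-chunk data is accessed through the sharded kvstore adapter
    // returned by GetSubChunkKvstore below.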
absl::Status EncodeArray(SharedArrayView<const void> decoded,
riegeli::Writer& writer) const final {
return absl::InternalError("");
}
Result<SharedArray<const void>> DecodeArray(
span<const Index> decoded_shape, riegeli::Reader& reader) const final {
return absl::InternalError("");
}
kvstore::DriverPtr GetSubChunkKvstore(
kvstore::DriverPtr parent, std::string parent_key,
const Executor& executor,
internal::CachePool::WeakPtr cache_pool) const override {
zarr3_sharding_indexed::ShardedKeyValueStoreParameters params;
params.base_kvstore = std::move(parent);
params.base_kvstore_path = std::move(parent_key);
params.executor = executor;
params.cache_pool = std::move(cache_pool);
params.index_params = shard_index_params_;
return zarr3_sharding_indexed::GetShardedKeyValueStore(std::move(params));
}
const LexicographicalGridIndexKeyParser& GetSubChunkStorageKeyParser()
const final {
return *this;
}
std::string FormatKey(span<const Index> grid_indices) const final {
return zarr3_sharding_indexed::IndicesToKey(grid_indices);
}
bool ParseKey(std::string_view key, span<Index> grid_indices) const final {
return zarr3_sharding_indexed::KeyToIndices(key, grid_indices);
}
Index MinGridIndexForLexicographicalOrder(
DimensionIndex dim, IndexInterval grid_interval) const final {
return 0;
}
internal::IntrusivePtr<const ZarrShardingCodec> parent_codec_;
std::vector<Index> sub_chunk_grid_shape_;
ZarrCodecChain::PreparedState::Ptr codec_state_;
zarr3_sharding_indexed::ShardIndexParameters shard_index_params_;
};
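  // Validates that `decoded_shape` is evenly divided by the sub-chunk shape and
  // builds the prepared state: sub-chunk grid shape, prepared sub-chunk codec
  // chain, and shard index parameters.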
Result<ZarrArrayToBytesCodec::PreparedState::Ptr> Prepare(
span<const Index> decoded_shape) const final {
span<const Index> sub_chunk_shape = sub_chunk_grid_.components[0].shape();
if (decoded_shape.size() != sub_chunk_shape.size()) {
return SubChunkRankMismatch(sub_chunk_shape, decoded_shape.size());
}
auto state = internal::MakeIntrusivePtr<State>();
state->parent_codec_.reset(this);
auto& sub_chunk_grid_shape = state->sub_chunk_grid_shape_;
sub_chunk_grid_shape.resize(decoded_shape.size());
for (DimensionIndex i = 0; i < sub_chunk_shape.size(); ++i) {
if (decoded_shape[i] % sub_chunk_shape[i] != 0) {
return SubChunkShapeMismatch(sub_chunk_shape, decoded_shape);
}
const int64_t grid_size = decoded_shape[i] / sub_chunk_shape[i];
sub_chunk_grid_shape[i] = grid_size;
}
TENSORSTORE_ASSIGN_OR_RETURN(
state->codec_state_, sub_chunk_codec_chain_->Prepare(sub_chunk_shape));
state->sub_chunk_grid = &sub_chunk_grid_;
state->sub_chunk_codec_chain = sub_chunk_codec_chain_.get();
state->sub_chunk_codec_state = state->codec_state_.get();
state->shard_index_params_.index_location = index_location_;
TENSORSTORE_RETURN_IF_ERROR(state->shard_index_params_.Initialize(
*index_codec_chain_, sub_chunk_grid_shape));
return {std::in_place, std::move(state)};
}
internal::ChunkGridSpecification sub_chunk_grid_;
ZarrCodecChain::Ptr sub_chunk_codec_chain_;
ZarrCodecChain::Ptr index_codec_chain_;
ShardIndexLocation index_location_;
};
}
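// Merges constraints from another "sharding_indexed" spec, failing on
// incompatible chunk shapes, index codecs, sub-chunk codecs, or index
// locations.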
absl::Status ShardingIndexedCodecSpec::MergeFrom(const ZarrCodecSpec& other,
bool strict) {
using Self = ShardingIndexedCodecSpec;
const auto& other_options = static_cast<const Self&>(other).options;
TENSORSTORE_RETURN_IF_ERROR(MergeConstraint<&Options::sub_chunk_shape>(
"chunk_shape", options, other_options));
TENSORSTORE_RETURN_IF_ERROR(
internal_zarr3::MergeZarrCodecSpecs(options.index_codecs,
other_options.index_codecs, strict),
tensorstore::MaybeAnnotateStatus(_, "Incompatible \"index_codecs\""));
TENSORSTORE_RETURN_IF_ERROR(
internal_zarr3::MergeZarrCodecSpecs(
options.sub_chunk_codecs, other_options.sub_chunk_codecs, strict),
tensorstore::MaybeAnnotateStatus(_, "Incompatible sub-chunk \"codecs\""));
TENSORSTORE_RETURN_IF_ERROR(MergeConstraint<&Options::index_location>(
"index_location", options, other_options));
return absl::OkStatus();
}
absl::Status ShardingIndexedCodecSpec::MergeSubChunkCodecsFrom(
const ZarrCodecChainSpec& other, bool strict) {
if (!options.sub_chunk_codecs) {
options.sub_chunk_codecs = other;
return absl::OkStatus();
}
return options.sub_chunk_codecs->MergeFrom(other, strict);
}
ZarrCodecSpec::Ptr ShardingIndexedCodecSpec::Clone() const {
return internal::MakeIntrusivePtr<ShardingIndexedCodecSpec>(*this);
}
const ZarrCodecChainSpec* ShardingIndexedCodecSpec::GetSubChunkCodecs() const {
return options.sub_chunk_codecs ? &*options.sub_chunk_codecs : nullptr;
}
absl::Status ShardingIndexedCodecSpec::GetDecodedChunkLayout(
const ArrayDataTypeAndShapeInfo& array_info,
ArrayCodecChunkLayoutInfo& decoded) const {
ArrayDataTypeAndShapeInfo sub_chunk_info;
if (options.sub_chunk_shape &&
!RankConstraint::Implies(options.sub_chunk_shape->size(),
array_info.rank)) {
return SubChunkRankMismatch(*options.sub_chunk_shape, array_info.rank);
}
sub_chunk_info.dtype = array_info.dtype;
sub_chunk_info.rank = array_info.rank;
if (options.sub_chunk_shape) {
std::copy(options.sub_chunk_shape->begin(), options.sub_chunk_shape->end(),
sub_chunk_info.shape.emplace().begin());
}
if (options.sub_chunk_codecs) {
TENSORSTORE_RETURN_IF_ERROR(options.sub_chunk_codecs->GetDecodedChunkLayout(
sub_chunk_info, decoded));
}
return absl::OkStatus();
}
namespace {
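// Index codec chain used when "index_codecs" is unspecified: the default
// "bytes" codec followed by a crc32c checksum.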
ZarrCodecChainSpec DefaultIndexCodecChainSpec() {
ZarrCodecChainSpec codecs;
codecs.array_to_bytes = DefaultBytesCodec();
codecs.bytes_to_bytes.push_back(
internal::MakeIntrusivePtr<const Crc32cCodecSpec>());
return codecs;
}
}
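// Resolves the spec against the decoded array parameters: determines the
// sub-chunk shape, builds the sub-chunk grid, and resolves the sub-chunk and
// index codec chains, optionally recording the fully-resolved spec.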
Result<ZarrArrayToBytesCodec::Ptr> ShardingIndexedCodecSpec::Resolve(
ArrayCodecResolveParameters&& decoded, BytesCodecResolveParameters& encoded,
ZarrArrayToBytesCodecSpec::Ptr* resolved_spec) const {
ShardingIndexedCodecSpec::Options* resolved_options = nullptr;
if (resolved_spec) {
auto* resolved_spec_ptr = new ShardingIndexedCodecSpec;
resolved_options = &resolved_spec_ptr->options;
resolved_spec->reset(resolved_spec_ptr);
}
span<const Index> sub_chunk_shape;
if (options.sub_chunk_shape) {
sub_chunk_shape = *options.sub_chunk_shape;
} else if (decoded.read_chunk_shape) {
sub_chunk_shape =
span<const Index>(decoded.read_chunk_shape->data(), decoded.rank);
} else {
return absl::InvalidArgumentError("\"chunk_shape\" must be specified");
}
if (sub_chunk_shape.size() != decoded.rank) {
return SubChunkRankMismatch(sub_chunk_shape, decoded.rank);
}
internal::ChunkGridSpecification::ComponentList components;
TENSORSTORE_ASSIGN_OR_RETURN(
auto broadcast_fill_value,
BroadcastArray(decoded.fill_value, BoxView<>(sub_chunk_shape.size())));
components.emplace_back(
internal::AsyncWriteArray::Spec{std::move(broadcast_fill_value),
Box<>(sub_chunk_shape.size())},
std::vector<Index>(sub_chunk_shape.begin(), sub_chunk_shape.end()));
components.back().array_spec.fill_value_comparison_kind =
EqualityComparisonKind::identical;
auto codec = internal::MakeIntrusivePtr<ShardingIndexedCodec>(
internal::ChunkGridSpecification(std::move(components)));
codec->index_location_ =
options.index_location.value_or(ShardIndexLocation::kEnd);
if (resolved_options) {
resolved_options->sub_chunk_shape = codec->sub_chunk_grid_.chunk_shape;
resolved_options->index_location = codec->index_location_;
}
auto set_up_codecs =
[&](const ZarrCodecChainSpec& sub_chunk_codecs) -> absl::Status {
ArrayCodecResolveParameters sub_chunk_decoded;
sub_chunk_decoded.dtype = decoded.dtype;
sub_chunk_decoded.rank = decoded.rank;
sub_chunk_decoded.fill_value = std::move(decoded.fill_value);
if (decoded.read_chunk_shape) {
std::copy_n(decoded.read_chunk_shape->begin(), decoded.rank,
sub_chunk_decoded.read_chunk_shape.emplace().begin());
}
if (decoded.codec_chunk_shape) {
std::copy_n(decoded.codec_chunk_shape->begin(), decoded.rank,
sub_chunk_decoded.codec_chunk_shape.emplace().begin());
}
if (decoded.inner_order) {
std::copy_n(decoded.inner_order->begin(), decoded.rank,
sub_chunk_decoded.inner_order.emplace().begin());
}
TENSORSTORE_ASSIGN_OR_RETURN(
codec->sub_chunk_codec_chain_,
sub_chunk_codecs.Resolve(
std::move(sub_chunk_decoded), encoded,
resolved_options ? &resolved_options->sub_chunk_codecs.emplace()
: nullptr));
return absl::OkStatus();
};
TENSORSTORE_RETURN_IF_ERROR(
set_up_codecs(options.sub_chunk_codecs ? *options.sub_chunk_codecs
: ZarrCodecChainSpec{}),
tensorstore::MaybeAnnotateStatus(_, "Error resolving sub-chunk codecs"));
auto set_up_index_codecs =
[&](const ZarrCodecChainSpec& index_codecs) -> absl::Status {
TENSORSTORE_ASSIGN_OR_RETURN(
codec->index_codec_chain_,
zarr3_sharding_indexed::InitializeIndexCodecChain(
index_codecs, sub_chunk_shape.size(),
resolved_options ? &resolved_options->index_codecs.emplace()
: nullptr));
return absl::OkStatus();
};
TENSORSTORE_RETURN_IF_ERROR(
set_up_index_codecs(options.index_codecs ? *options.index_codecs
: DefaultIndexCodecChainSpec()),
tensorstore::MaybeAnnotateStatus(_, "Error resolving index_codecs"));
return {std::in_place, std::move(codec)};
}
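// Registers the "sharding_indexed" codec and its JSON binder ("chunk_shape",
// "codecs", "index_codecs", "index_location").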
TENSORSTORE_GLOBAL_INITIALIZER {
using Self = ShardingIndexedCodecSpec;
using Options = Self::Options;
namespace jb = ::tensorstore::internal_json_binding;
RegisterCodec<Self>(
"sharding_indexed",
jb::Projection<&Self::options>(jb::Sequence(
jb::Member("chunk_shape", jb::Projection<&Options::sub_chunk_shape>(
OptionalIfConstraintsBinder(
jb::Array(jb::Integer<Index>(1))))),
jb::Member("index_codecs", jb::Projection<&Options::index_codecs>(
OptionalIfConstraintsBinder())),
jb::Member("codecs", jb::Projection<&Options::sub_chunk_codecs>(
OptionalIfConstraintsBinder())),
jb::Member(
"index_location",
jb::Projection<&Options::index_location>(
[](auto is_loading, const auto& options, auto* obj, auto* j) {
if constexpr (!is_loading) {
if (!options.constraints &&
*obj == ShardIndexLocation::kEnd) {
return absl::OkStatus();
}
}
return jb::Validate([](const auto& options, auto* obj) {
if (!options.constraints) {
if (!obj->has_value()) *obj = ShardIndexLocation::kEnd;
}
return absl::OkStatus();
})(is_loading, options, obj, j);
}))
)));
}
}
} | #include <stdint.h>
#include <utility>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "tensorstore/array.h"
#include "tensorstore/data_type.h"
#include "tensorstore/driver/zarr3/codec/codec_chain_spec.h"
#include "tensorstore/driver/zarr3/codec/codec_spec.h"
#include "tensorstore/driver/zarr3/codec/codec_test_util.h"
#include "tensorstore/internal/json_gtest.h"
#include "tensorstore/util/status_testutil.h"
namespace {
using ::tensorstore::MatchesJson;
using ::tensorstore::MatchesStatus;
using ::tensorstore::internal_zarr3::ArrayCodecResolveParameters;
using ::tensorstore::internal_zarr3::BytesCodecResolveParameters;
using ::tensorstore::internal_zarr3::CodecSpecRoundTripTestParams;
using ::tensorstore::internal_zarr3::TestCodecSpecRoundTrip;
using ::tensorstore::internal_zarr3::ZarrCodecChainSpec;
TEST(ShardingIndexedTest, Basic) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto codec, ZarrCodecChainSpec::FromJson(
{{{"name", "sharding_indexed"},
{"configuration",
{
{"chunk_shape", {2, 3}},
{"codecs",
{{
{"name", "bytes"},
{"configuration", {{"endian", "little"}}},
}}},
{"index_codecs",
{
{
{"name", "bytes"},
{"configuration", {{"endian", "little"}}},
},
{
{"name", "crc32c"},
},
}},
}}}}));
}
TEST(ShardingIndexedTest, InvalidBytesToBytes) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto spec, ZarrCodecChainSpec::FromJson({
{{"name", "sharding_indexed"},
{"configuration",
{
{"chunk_shape", {2, 3}},
{"codecs",
{{
{"name", "bytes"},
{"configuration", {{"endian", "little"}}},
}}},
{"index_codecs",
{
{
{"name", "bytes"},
{"configuration", {{"endian", "little"}}},
},
{
{"name", "crc32c"},
},
}},
}}},
{
{"name", "gzip"},
{"configuration", {{"level", 5}}},
},
}));
ArrayCodecResolveParameters decoded_params;
decoded_params.dtype = tensorstore::dtype_v<uint32_t>;
decoded_params.rank = 2;
decoded_params.fill_value = tensorstore::MakeScalarArray<uint32_t>(42);
BytesCodecResolveParameters encoded_params;
EXPECT_THAT(
spec.Resolve(std::move(decoded_params), encoded_params, nullptr),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Sharding codec .* is not compatible with subsequent bytes "
"-> bytes .*"));
}
TEST(ShardingIndexedTest, DefaultIndexLocation) {
CodecSpecRoundTripTestParams p;
p.resolve_params.rank = 2;
p.orig_spec = {
{{"name", "sharding_indexed"},
{"configuration",
{
{"chunk_shape", {2, 3}},
{"codecs",
{{
{"name", "bytes"},
{"configuration", {{"endian", "little"}}},
}}},
{"index_codecs",
{
{
{"name", "bytes"},
{"configuration", {{"endian", "little"}}},
},
{
{"name", "crc32c"},
},
}},
}}},
};
p.expected_spec = {
{{"name", "sharding_indexed"},
{"configuration",
{
{"chunk_shape", {2, 3}},
{"codecs",
{{
{"name", "bytes"},
{"configuration", {{"endian", "little"}}},
}}},
{"index_location", "end"},
{"index_codecs",
{
{
{"name", "bytes"},
{"configuration", {{"endian", "little"}}},
},
{
{"name", "crc32c"},
},
}},
}}},
};
p.to_json_options.constraints = true;
TestCodecSpecRoundTrip(p);
p.expected_spec = {
{{"name", "sharding_indexed"},
{"configuration",
{
{"chunk_shape", {2, 3}},
{"codecs",
{{
{"name", "bytes"},
{"configuration", {{"endian", "little"}}},
}}},
{"index_codecs",
{
{
{"name", "bytes"},
{"configuration", {{"endian", "little"}}},
},
{
{"name", "crc32c"},
},
}},
}}},
};
p.to_json_options.constraints = false;
TestCodecSpecRoundTrip(p);
}
TEST(ShardingIndexedTest, IndexLocationEndNotStored) {
ArrayCodecResolveParameters p;
p.dtype = tensorstore::dtype_v<uint16_t>;
p.rank = 2;
EXPECT_THAT(TestCodecSpecResolve(
::nlohmann::json::array_t{
{{"name", "sharding_indexed"},
{"configuration",
{
{"chunk_shape", {2, 3}},
{"codecs",
{{
{"name", "bytes"},
{"configuration", {{"endian", "little"}}},
}}},
{"index_codecs",
{
{
{"name", "bytes"},
{"configuration", {{"endian", "little"}}},
},
{
{"name", "crc32c"},
},
}},
{"index_location", "end"},
}}}},
p,
false),
::testing::Optional(MatchesJson(::nlohmann::json::array_t{
{{"name", "sharding_indexed"},
{"configuration",
{
{"chunk_shape", {2, 3}},
{"codecs",
{{
{"name", "bytes"},
{"configuration", {{"endian", "little"}}},
}}},
{"index_codecs",
{
{
{"name", "bytes"},
{"configuration", {{"endian", "little"}}},
},
{
{"name", "crc32c"},
},
}},
}}}})));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/driver/zarr3/codec/sharding_indexed.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/driver/zarr3/codec/sharding_indexed_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
549e11be-6e45-424a-9e28-1f27ebc88b6f | cpp | google/cel-cpp | attribute_utility | eval/eval/attribute_utility.cc | eval/eval/attribute_utility_test.cc | #include "eval/eval/attribute_utility.h"
#include <cstdint>
#include <string>
#include <utility>
#include "absl/status/statusor.h"
#include "absl/types/optional.h"
#include "absl/types/span.h"
#include "base/attribute.h"
#include "base/attribute_set.h"
#include "base/function_descriptor.h"
#include "base/function_result.h"
#include "base/function_result_set.h"
#include "base/internal/unknown_set.h"
#include "common/casting.h"
#include "common/value.h"
#include "eval/eval/attribute_trail.h"
#include "eval/internal/errors.h"
#include "internal/status_macros.h"
namespace google::api::expr::runtime {
using ::cel::AttributeSet;
using ::cel::Cast;
using ::cel::ErrorValue;
using ::cel::FunctionResult;
using ::cel::FunctionResultSet;
using ::cel::InstanceOf;
using ::cel::UnknownValue;
using ::cel::Value;
using ::cel::base_internal::UnknownSet;
using Accumulator = AttributeUtility::Accumulator;
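// Returns true if the attribute trail fully matches any configured
// missing-attribute pattern.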
bool AttributeUtility::CheckForMissingAttribute(
const AttributeTrail& trail) const {
if (trail.empty()) {
return false;
}
for (const auto& pattern : missing_attribute_patterns_) {
if (pattern.IsMatch(trail.attribute()) ==
cel::AttributePattern::MatchType::FULL) {
return true;
}
}
return false;
}
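// Returns true if the trail matches any unknown pattern; partial matches count
// only when `use_partial` is set.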
bool AttributeUtility::CheckForUnknown(const AttributeTrail& trail,
bool use_partial) const {
if (trail.empty()) {
return false;
}
for (const auto& pattern : unknown_patterns_) {
auto current_match = pattern.IsMatch(trail.attribute());
if (current_match == cel::AttributePattern::MatchType::FULL ||
(use_partial &&
current_match == cel::AttributePattern::MatchType::PARTIAL)) {
return true;
}
}
return false;
}
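// Merges the attribute sets and function result sets of all unknown values in
// `args` into a single UnknownValue; returns nullopt if no argument is
// unknown.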
absl::optional<UnknownValue> AttributeUtility::MergeUnknowns(
absl::Span<const cel::Value> args) const {
absl::optional<UnknownSet> result_set;
for (const auto& value : args) {
if (!value->Is<cel::UnknownValue>()) continue;
if (!result_set.has_value()) {
result_set.emplace();
}
const auto& current_set = value.GetUnknown();
cel::base_internal::UnknownSetAccess::Add(
*result_set, UnknownSet(current_set.attribute_set(),
current_set.function_result_set()));
}
if (!result_set.has_value()) {
return absl::nullopt;
}
return value_factory_.CreateUnknownValue(
result_set->unknown_attributes(), result_set->unknown_function_results());
}
UnknownValue AttributeUtility::MergeUnknownValues(
const UnknownValue& left, const UnknownValue& right) const {
AttributeSet attributes;
FunctionResultSet function_results;
attributes.Add(left.attribute_set());
function_results.Add(left.function_result_set());
attributes.Add(right.attribute_set());
function_results.Add(right.function_result_set());
return value_factory_.CreateUnknownValue(std::move(attributes),
std::move(function_results));
}
AttributeSet AttributeUtility::CheckForUnknowns(
absl::Span<const AttributeTrail> args, bool use_partial) const {
AttributeSet attribute_set;
for (const auto& trail : args) {
if (CheckForUnknown(trail, use_partial)) {
attribute_set.Add(trail.attribute());
}
}
return attribute_set;
}
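// Combines unknowns identified from the attribute trails with unknown values
// already present in the arguments.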
absl::optional<UnknownValue> AttributeUtility::IdentifyAndMergeUnknowns(
absl::Span<const cel::Value> args, absl::Span<const AttributeTrail> attrs,
bool use_partial) const {
absl::optional<UnknownSet> result_set;
cel::AttributeSet attr_set = CheckForUnknowns(attrs, use_partial);
if (!attr_set.empty()) {
result_set.emplace(std::move(attr_set));
}
absl::optional<UnknownValue> arg_unknowns = MergeUnknowns(args);
if (!result_set.has_value()) {
return arg_unknowns;
}
if (arg_unknowns.has_value()) {
cel::base_internal::UnknownSetAccess::Add(
*result_set, UnknownSet((*arg_unknowns).attribute_set(),
(*arg_unknowns).function_result_set()));
}
return value_factory_.CreateUnknownValue(
result_set->unknown_attributes(), result_set->unknown_function_results());
}
UnknownValue AttributeUtility::CreateUnknownSet(cel::Attribute attr) const {
return value_factory_.CreateUnknownValue(AttributeSet({std::move(attr)}));
}
absl::StatusOr<ErrorValue> AttributeUtility::CreateMissingAttributeError(
const cel::Attribute& attr) const {
CEL_ASSIGN_OR_RETURN(std::string message, attr.AsString());
return value_factory_.CreateErrorValue(
cel::runtime_internal::CreateMissingAttributeError(message));
}
UnknownValue AttributeUtility::CreateUnknownSet(
const cel::FunctionDescriptor& fn_descriptor, int64_t expr_id,
absl::Span<const cel::Value> args) const {
return value_factory_.CreateUnknownValue(
FunctionResultSet(FunctionResult(fn_descriptor, expr_id)));
}
void AttributeUtility::Add(Accumulator& a, const cel::UnknownValue& v) const {
a.attribute_set_.Add(v.attribute_set());
a.function_result_set_.Add(v.function_result_set());
}
void AttributeUtility::Add(Accumulator& a, const AttributeTrail& attr) const {
a.attribute_set_.Add(attr.attribute());
}
void Accumulator::Add(const UnknownValue& value) {
unknown_present_ = true;
parent_.Add(*this, value);
}
void Accumulator::Add(const AttributeTrail& attr) { parent_.Add(*this, attr); }
void Accumulator::MaybeAdd(const Value& v) {
if (InstanceOf<UnknownValue>(v)) {
Add(Cast<UnknownValue>(v));
}
}
bool Accumulator::IsEmpty() const {
return !unknown_present_ && attribute_set_.empty() &&
function_result_set_.empty();
}
cel::UnknownValue Accumulator::Build() && {
return parent_.value_manager().CreateUnknownValue(
std::move(attribute_set_), std::move(function_result_set_));
}
} | #include "eval/eval/attribute_utility.h"
#include <vector>
#include "base/attribute_set.h"
#include "base/type_provider.h"
#include "common/type_factory.h"
#include "common/value_manager.h"
#include "common/values/legacy_value_manager.h"
#include "eval/public/cel_attribute.h"
#include "eval/public/cel_value.h"
#include "eval/public/unknown_attribute_set.h"
#include "eval/public/unknown_set.h"
#include "extensions/protobuf/memory_manager.h"
#include "internal/testing.h"
namespace google::api::expr::runtime {
using ::cel::AttributeSet;
using ::cel::UnknownValue;
using ::cel::Value;
using ::cel::extensions::ProtoMemoryManagerRef;
using ::testing::Eq;
using ::testing::SizeIs;
using ::testing::UnorderedPointwise;
class AttributeUtilityTest : public ::testing::Test {
public:
AttributeUtilityTest()
: value_factory_(ProtoMemoryManagerRef(&arena_),
cel::TypeProvider::Builtin()) {}
protected:
google::protobuf::Arena arena_;
cel::common_internal::LegacyValueManager value_factory_;
};
TEST_F(AttributeUtilityTest, UnknownsUtilityCheckUnknowns) {
std::vector<CelAttributePattern> unknown_patterns = {
CelAttributePattern("unknown0", {CreateCelAttributeQualifierPattern(
CelValue::CreateInt64(1))}),
CelAttributePattern("unknown0", {CreateCelAttributeQualifierPattern(
CelValue::CreateInt64(2))}),
CelAttributePattern("unknown1", {}),
CelAttributePattern("unknown2", {}),
};
std::vector<CelAttributePattern> missing_attribute_patterns;
AttributeUtility utility(unknown_patterns, missing_attribute_patterns,
value_factory_);
ASSERT_FALSE(utility.CheckForUnknown(AttributeTrail(), true));
ASSERT_FALSE(utility.CheckForUnknown(AttributeTrail(), false));
AttributeTrail unknown_trail0("unknown0");
{ ASSERT_FALSE(utility.CheckForUnknown(unknown_trail0, false)); }
{ ASSERT_TRUE(utility.CheckForUnknown(unknown_trail0, true)); }
{
ASSERT_TRUE(utility.CheckForUnknown(
unknown_trail0.Step(
CreateCelAttributeQualifier(CelValue::CreateInt64(1))),
false));
}
{
ASSERT_TRUE(utility.CheckForUnknown(
unknown_trail0.Step(
CreateCelAttributeQualifier(CelValue::CreateInt64(1))),
true));
}
}
TEST_F(AttributeUtilityTest, UnknownsUtilityMergeUnknownsFromValues) {
std::vector<CelAttributePattern> unknown_patterns;
std::vector<CelAttributePattern> missing_attribute_patterns;
CelAttribute attribute0("unknown0", {});
CelAttribute attribute1("unknown1", {});
AttributeUtility utility(unknown_patterns, missing_attribute_patterns,
value_factory_);
UnknownValue unknown_set0 =
value_factory_.CreateUnknownValue(AttributeSet({attribute0}));
UnknownValue unknown_set1 =
value_factory_.CreateUnknownValue(AttributeSet({attribute1}));
std::vector<cel::Value> values = {
unknown_set0,
unknown_set1,
value_factory_.CreateBoolValue(true),
value_factory_.CreateIntValue(1),
};
absl::optional<UnknownValue> unknown_set = utility.MergeUnknowns(values);
ASSERT_TRUE(unknown_set.has_value());
EXPECT_THAT((*unknown_set).attribute_set(),
UnorderedPointwise(
Eq(), std::vector<CelAttribute>{attribute0, attribute1}));
}
TEST_F(AttributeUtilityTest, UnknownsUtilityCheckForUnknownsFromAttributes) {
std::vector<CelAttributePattern> unknown_patterns = {
CelAttributePattern("unknown0",
{CelAttributeQualifierPattern::CreateWildcard()}),
};
std::vector<CelAttributePattern> missing_attribute_patterns;
AttributeTrail trail0("unknown0");
AttributeTrail trail1("unknown1");
CelAttribute attribute1("unknown1", {});
UnknownSet unknown_set1(UnknownAttributeSet({attribute1}));
AttributeUtility utility(unknown_patterns, missing_attribute_patterns,
value_factory_);
UnknownSet unknown_attr_set(utility.CheckForUnknowns(
{
AttributeTrail(),
trail0.Step(CreateCelAttributeQualifier(CelValue::CreateInt64(1))),
trail0.Step(CreateCelAttributeQualifier(CelValue::CreateInt64(2))),
},
false));
UnknownSet unknown_set(unknown_set1, unknown_attr_set);
ASSERT_THAT(unknown_set.unknown_attributes(), SizeIs(3));
}
TEST_F(AttributeUtilityTest, UnknownsUtilityCheckForMissingAttributes) {
std::vector<CelAttributePattern> unknown_patterns;
std::vector<CelAttributePattern> missing_attribute_patterns;
AttributeTrail trail("destination");
trail =
trail.Step(CreateCelAttributeQualifier(CelValue::CreateStringView("ip")));
AttributeUtility utility0(unknown_patterns, missing_attribute_patterns,
value_factory_);
EXPECT_FALSE(utility0.CheckForMissingAttribute(trail));
missing_attribute_patterns.push_back(CelAttributePattern(
"destination",
{CreateCelAttributeQualifierPattern(CelValue::CreateStringView("ip"))}));
AttributeUtility utility1(unknown_patterns, missing_attribute_patterns,
value_factory_);
EXPECT_TRUE(utility1.CheckForMissingAttribute(trail));
}
TEST_F(AttributeUtilityTest, CreateUnknownSet) {
AttributeTrail trail("destination");
trail =
trail.Step(CreateCelAttributeQualifier(CelValue::CreateStringView("ip")));
std::vector<CelAttributePattern> empty_patterns;
AttributeUtility utility(empty_patterns, empty_patterns, value_factory_);
UnknownValue set = utility.CreateUnknownSet(trail.attribute());
ASSERT_THAT(set.attribute_set(), SizeIs(1));
ASSERT_OK_AND_ASSIGN(auto elem, set.attribute_set().begin()->AsString());
EXPECT_EQ(elem, "destination.ip");
}
} | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/eval/eval/attribute_utility.cc | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/eval/eval/attribute_utility_test.cc | 4552db5798fb0853b131b783d8875794334fae7f |
5d214a45-f9ca-4faa-9496-7df736893bdf | cpp | google/tensorstore | std_variant | tensorstore/internal/json_binding/std_variant.cc | tensorstore/internal/json_binding/std_variant_test.cc | #include <stddef.h>
#include <string>
#include "absl/status/status.h"
#include "tensorstore/util/span.h"
namespace tensorstore {
namespace internal_json_binding {
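// Builds the error returned when none of a std::variant's alternative binders
// matched, concatenating each alternative's failure message.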
absl::Status GetVariantErrorStatus(span<const absl::Status> status_values) {
std::string error = "No matching value binder: ";
for (size_t i = 0; i < status_values.size(); ++i) {
if (i != 0) error += "; ";
error += status_values[i].message();
}
return absl::InvalidArgumentError(error);
}
}
} | #include "tensorstore/internal/json_binding/std_variant.h"
#include <string>
#include <type_traits>
#include <utility>
#include <variant>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include <nlohmann/json.hpp>
#include "tensorstore/internal/json_binding/bindable.h"
#include "tensorstore/internal/json_binding/gtest.h"
#include "tensorstore/internal/json_binding/json_binding.h"
#include "tensorstore/internal/json_gtest.h"
#include "tensorstore/json_serialization_options_base.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/status_testutil.h"
namespace jb = ::tensorstore::internal_json_binding;
namespace {
using ::tensorstore::MatchesStatus;
TEST(JsonBindingTest, VariantDefaultBinder) {
tensorstore::TestJsonBinderRoundTrip<std::variant<int, std::string>>({
{3, ::nlohmann::json(3)},
{"abc", ::nlohmann::json("abc")},
});
}
TEST(JsonBindingTest, VariantDefaultBinderError) {
EXPECT_THAT(
(jb::FromJson<std::variant<int, std::string>>(::nlohmann::json(false))),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"No matching value binder: "
"Expected integer in the range .*, but received: false; "
"Expected string, but received: false"));
}
TEST(JsonBindingTest, VariantExplicitBinder) {
auto binder = jb::Object(jb::Variant(jb::Member("a"), jb::Member("b")));
tensorstore::TestJsonBinderRoundTrip<std::variant<int, std::string>>(
{
{3, {{"a", 3}}},
{"abc", {{"b", "abc"}}},
},
binder);
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/json_binding/std_variant.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/json_binding/std_variant_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
2486c2e3-6592-4428-a86c-aeecaa457f0c | cpp | tensorflow/tensorflow | lru_cache | third_party/xla/xla/pjrt/lru_cache.h | third_party/xla/xla/pjrt/lru_cache_test.cc | #ifndef XLA_PJRT_LRU_CACHE_H_
#define XLA_PJRT_LRU_CACHE_H_
#include <optional>
#include <unordered_map>
#include "absl/container/node_hash_map.h"
#include "tsl/platform/logging.h"
namespace xla {
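// LRU cache mapping Key -> Value. Multiple LRUCache instances may share one
// LRUList, in which case the capacity bound applies to the total number of
// entries across all caches sharing that list.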
template <typename Key, typename Value,
typename Hash = typename absl::node_hash_map<Key, Value>::hasher,
typename Eq = typename absl::node_hash_map<Key, Value>::key_equal>
class LRUCache {
private:
struct LRUListEntry {
LRUListEntry* next;
LRUListEntry* prev;
};
public:
class LRUList {
public:
explicit LRUList(int capacity) : capacity_(capacity) {
head_.next = &head_;
head_.prev = &head_;
}
~LRUList() {
CHECK(head_.next == &head_);
CHECK(head_.prev == &head_);
}
LRUList(const LRUList&) = delete;
LRUList(LRUList&&) = delete;
LRUList& operator=(const LRUList&) = delete;
LRUList& operator=(LRUList&&) = delete;
int Capacity() const { return capacity_; }
int Size() const { return size_; }
void Clear();
private:
friend class LRUCache;
int capacity_;
int size_ = 0;
LRUListEntry head_;
};
explicit LRUCache(LRUList* lru_list) : lru_list_(lru_list) {}
~LRUCache();
LRUCache(const LRUCache&) = delete;
LRUCache(LRUCache&&) = delete;
LRUCache& operator=(const LRUCache&) = delete;
LRUCache& operator=(LRUCache&&) = delete;
Value GetOrCreateIfAbsent(const Key& key,
const std::function<Value(const Key&)>& factory);
void Remove(const Key& key);
void Clear();
int Size() const { return entries_.size(); }
int Capacity() const { return lru_list_->Capacity(); }
auto begin() const { return entries_.begin(); }
auto end() const { return entries_.end(); }
private:
LRUList* lru_list_;
struct Entry : public LRUListEntry {
Entry() = default;
const Key* key;
LRUCache* container;
std::optional<Value> value;
};
std::unordered_map<Key, Entry, Hash, Eq> entries_;
};
template <typename Key, typename Value, typename Hash, typename Eq>
void LRUCache<Key, Value, Hash, Eq>::LRUList::Clear() {
while (head_.next != &head_) {
static_cast<Entry*>(head_.next)->container->Clear();
}
size_ = 0;
}
template <typename Key, typename Value, typename Hash, typename Eq>
void LRUCache<Key, Value, Hash, Eq>::Clear() {
for (auto& e : entries_) {
LRUListEntry* l = &e.second;
l->next->prev = l->prev;
l->prev->next = l->next;
--lru_list_->size_;
}
entries_.clear();
}
template <typename Key, typename Value, typename Hash, typename Eq>
LRUCache<Key, Value, Hash, Eq>::~LRUCache() {
Clear();
}
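// Removes `key` from the cache and the shared LRU list; `key` must be present.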
template <typename Key, typename Value, typename Hash, typename Eq>
void LRUCache<Key, Value, Hash, Eq>::Remove(const Key& key) {
LRUListEntry* l = &entries_[key];
l->next->prev = l->prev;
l->prev->next = l->next;
--lru_list_->size_;
entries_.erase(key);
}
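// Returns the value for `key`, creating it with `factory` if absent. The entry
// is moved to the most-recently-used position; if the shared list exceeds its
// capacity, the least-recently-used entry (possibly owned by another cache) is
// evicted.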
template <typename Key, typename Value, typename Hash, typename Eq>
Value LRUCache<Key, Value, Hash, Eq>::GetOrCreateIfAbsent(
const Key& key, const std::function<Value(const Key&)>& factory) {
auto [it, inserted] = entries_.try_emplace(key);
Entry& entry = it->second;
if (inserted) {
entry.key = &it->first;
entry.value = factory(*entry.key);
++lru_list_->size_;
} else {
entry.prev->next = entry.next;
entry.next->prev = entry.prev;
}
LRUListEntry& lru_head = lru_list_->head_;
entry.container = this;
entry.prev = lru_head.prev;
entry.next = &lru_head;
lru_head.prev->next = &entry;
lru_head.prev = &entry;
Value v = *entry.value;
if (lru_list_->size_ > lru_list_->capacity_) {
Entry* to_remove = static_cast<Entry*>(lru_head.next);
to_remove->next->prev = &lru_head;
lru_head.next = to_remove->next;
to_remove->container->entries_.extract(*to_remove->key);
--lru_list_->size_;
}
return v;
}
}
#endif | #include "xla/pjrt/lru_cache.h"
#include <random>
#include "xla/test.h"
namespace xla {
namespace {
TEST(LRUCache, Basics) {
LRUCache<int, int>::LRUList list(3);
LRUCache<int, int> cache(&list);
EXPECT_EQ(3, cache.Capacity());
EXPECT_EQ(0, cache.Size());
EXPECT_EQ(0, cache.GetOrCreateIfAbsent(0, [](int) { return 0; }));
EXPECT_EQ(1, cache.Size());
EXPECT_EQ(1, cache.GetOrCreateIfAbsent(1, [](int) { return 1; }));
EXPECT_EQ(2, cache.Size());
EXPECT_EQ(2, cache.GetOrCreateIfAbsent(2, [](int) { return 2; }));
EXPECT_EQ(3, cache.Size());
EXPECT_EQ(0, cache.GetOrCreateIfAbsent(0, [](int) { return 3; }));
EXPECT_EQ(3, cache.Size());
EXPECT_EQ(4, cache.GetOrCreateIfAbsent(3, [](int) { return 4; }));
EXPECT_EQ(3, cache.Size());
EXPECT_EQ(2, cache.GetOrCreateIfAbsent(2, [](int) { return 5; }));
EXPECT_EQ(3, cache.Size());
EXPECT_EQ(6, cache.GetOrCreateIfAbsent(1, [](int) { return 6; }));
EXPECT_EQ(3, cache.Size());
cache.Clear();
EXPECT_EQ(0, cache.Size());
EXPECT_EQ(6, cache.GetOrCreateIfAbsent(1, [](int) { return 6; }));
EXPECT_EQ(1, cache.Size());
}
TEST(LRUCache, SharedLRUList) {
LRUCache<int, int>::LRUList list(2);
LRUCache<int, int> cache1(&list);
LRUCache<int, int> cache2(&list);
EXPECT_EQ(2, list.Capacity());
EXPECT_EQ(0, cache1.Size());
EXPECT_EQ(0, cache2.Size());
EXPECT_EQ(0, cache1.GetOrCreateIfAbsent(0, [](int) { return 0; }));
EXPECT_EQ(1, list.Size());
EXPECT_EQ(1, cache1.Size());
EXPECT_EQ(0, cache2.Size());
EXPECT_EQ(1, cache2.GetOrCreateIfAbsent(1, [](int) { return 1; }));
EXPECT_EQ(2, list.Size());
EXPECT_EQ(1, cache1.Size());
EXPECT_EQ(1, cache2.Size());
EXPECT_EQ(2, cache1.GetOrCreateIfAbsent(2, [](int) { return 2; }));
EXPECT_EQ(2, list.Size());
EXPECT_EQ(1, cache1.Size());
EXPECT_EQ(1, cache2.Size());
EXPECT_EQ(1, cache2.GetOrCreateIfAbsent(1, [](int) { return -1; }));
EXPECT_EQ(2, list.Size());
EXPECT_EQ(1, cache1.Size());
EXPECT_EQ(1, cache2.Size());
cache1.Clear();
EXPECT_EQ(1, list.Size());
EXPECT_EQ(0, cache1.Size());
EXPECT_EQ(1, cache2.Size());
EXPECT_EQ(1, cache2.GetOrCreateIfAbsent(1, [](int) { return 4; }));
EXPECT_EQ(1, list.Size());
EXPECT_EQ(0, cache1.Size());
EXPECT_EQ(1, cache2.Size());
EXPECT_EQ(7, cache1.GetOrCreateIfAbsent(7, [](int) { return 7; }));
EXPECT_EQ(2, list.Size());
EXPECT_EQ(1, cache1.Size());
EXPECT_EQ(1, cache2.Size());
list.Clear();
EXPECT_EQ(0, list.Size());
EXPECT_EQ(0, cache1.Size());
EXPECT_EQ(0, cache2.Size());
EXPECT_EQ(2, cache1.GetOrCreateIfAbsent(2, [](int) { return 2; }));
}
TEST(LRUCache, RandomInsertions) {
LRUCache<int, int>::LRUList list(7);
LRUCache<int, int> cache(&list);
std::random_device rng;
std::uniform_int_distribution<int> dist(0, 100);
for (int i = 0; i < 1000; ++i) {
EXPECT_LE(cache.Size(), std::min(cache.Capacity(), i));
int key = dist(rng);
int k = -1;
int v = cache.GetOrCreateIfAbsent(key, [&](int k_arg) {
CHECK_EQ(k_arg, key);
k = k_arg;
return k_arg * 37;
});
EXPECT_TRUE(k == -1 || k == key);
EXPECT_EQ(v, key * 37);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/pjrt/lru_cache.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/pjrt/lru_cache_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
5e23e48e-e613-4ffe-b6df-e833fb616742 | cpp | tensorflow/tensorflow | simplify_ici_dummy_variables_pass | tensorflow/core/common_runtime/simplify_ici_dummy_variables_pass.cc | tensorflow/core/common_runtime/simplify_ici_dummy_variables_pass_test.cc | #include "tensorflow/core/common_runtime/simplify_ici_dummy_variables_pass.h"
#include <optional>
#include <string>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_split.h"
#include "absl/strings/string_view.h"
#include "xla/tsl/util/device_name_utils.h"
#include "tensorflow/core/common_runtime/function_utils.h"
#include "tensorflow/core/common_runtime/optimization_registry.h"
#include "tensorflow/core/config/flag_defs.h"
#include "tensorflow/core/config/flags.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/graph/graph_def_builder.h"
#include "tensorflow/core/platform/bfloat16.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/util/device_name_utils.h"
#include "tensorflow/core/util/dump_graph.h"
#include "tsl/platform/errors.h"
namespace tensorflow {
namespace {
constexpr absl::string_view kTpuExecute = "TPUExecute";
constexpr absl::string_view kParallelExecuteIds = "_parallel_execution_ids";
const char kICIWeightDistributionMlirBridgeMarker[] =
"ici_weight_distribution_mlir_bridge_marker";
std::string GetNewOpName(std::string op_name, int index, int task_id) {
return absl::StrCat(op_name, "_ici_specific_index_", std::to_string(index),
"_task_id_", std::to_string(task_id));
}
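// Returns the TPUExecute nodes whose _parallel_execution_ids attribute places
// them on a non-zero replica; sets `is_spmd` if any node carries a non-zero
// parallel execution id.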
std::vector<Node*> GetNonMainReplicaIciTPUExecuteNodes(Graph* graph,
bool& is_spmd) {
std::vector<Node*> tpu_nodes;
for (Node* node : graph->nodes()) {
if (node->type_string() == kTpuExecute &&
HasNodeAttr(node->def(), kParallelExecuteIds)) {
auto parallel_exec_ids = node->attrs().Find(kParallelExecuteIds)->s();
std::vector<std::string> group_vec =
absl::StrSplit(parallel_exec_ids, ',');
if (group_vec.empty()) return tpu_nodes;
std::vector<std::string> replica_vec = absl::StrSplit(group_vec[0], ':');
int replica_id = std::stoi(replica_vec[1]);
if (replica_id != 0) tpu_nodes.push_back(node);
if (group_vec.size() > 1) {
std::vector<std::string> parallel_vec =
absl::StrSplit(group_vec[1], ':');
int parallel_id = std::stoi(parallel_vec[1]);
if (parallel_id != 0) is_spmd = true;
}
}
}
return tpu_nodes;
}
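// Replaces the edge from `old_src_node` into `dst_node` with an edge from
// output 0 of `new_src_node` to `input_index`.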
void RedirectEdge(Graph* graph, Node* old_src_node, Node* dst_node,
Node* new_src_node, int input_index) {
  const Edge* delete_edge = nullptr;
for (auto edge : dst_node->in_edges()) {
if (edge->src() == old_src_node) {
delete_edge = edge;
break;
}
}
if (delete_edge == nullptr) return;
graph->RemoveEdge(delete_edge);
graph->AddEdge(new_src_node, 0, dst_node, input_index);
}
string GetHostDeviceName(Node* tpu_node) {
auto device_name = tpu_node->requested_device();
if (device_name.empty()) device_name = tpu_node->assigned_device_name();
DeviceNameUtils::ParsedName parsed_device_name;
DeviceNameUtils::ParseFullName(device_name, &parsed_device_name);
string host_device_name = DeviceNameUtils::FullName(
parsed_device_name.job, parsed_device_name.replica,
parsed_device_name.task, "CPU", 0);
return host_device_name;
}
std::optional<std::vector<int>> GetOutputShapeVec(Node* node) {
auto output_shapes = node->attrs().Find("_output_shapes");
if (output_shapes == nullptr) return std::nullopt;
auto output_shape = output_shapes->list().shape()[0];
std::vector<int> output_shape_vec;
output_shape_vec.reserve(output_shape.dim_size());
for (auto i = 0; i < output_shape.dim_size(); i++) {
output_shape_vec.push_back(output_shape.dim()[i].size());
}
return output_shape_vec;
}
int GetTPUTaskId(Node* tpu_node) {
auto device_name = tpu_node->requested_device();
if (device_name.empty()) device_name = tpu_node->assigned_device_name();
DeviceNameUtils::ParsedName parsed_device_name;
DeviceNameUtils::ParseFullName(device_name, &parsed_device_name);
return parsed_device_name.task;
}
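// Builds Const (shape), Const (zero scalar) and Fill nodes on the host CPU
// device of `tpu_node`, producing a zero tensor with the same shape and dtype
// as the dummy-variable input; returns nullptr if the shape or dtype is
// unsupported.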
Node* BuildFillOp(GraphDefBuilder::Options& bopts, Node* tpu_node,
Node* in_node, int input_index, string host_device_name) {
auto output_shape_vec = GetOutputShapeVec(in_node);
if (!output_shape_vec.has_value()) return nullptr;
auto dtype = in_node->attrs().Find("T")->type();
int tpu_task_id = GetTPUTaskId(tpu_node);
TensorShape tensor_shape;
tensor_shape.AddDim(output_shape_vec.value().size());
Tensor const_op_shape_tensor(DT_INT32, tensor_shape);
for (int i = 0; i < output_shape_vec.value().size(); i++) {
const_op_shape_tensor.flat<int>()(i) = output_shape_vec.value()[i];
}
std::string const_1_name = GetNewOpName("const_1", input_index, tpu_task_id);
Node* fill_dim_input =
ops::SourceOp("Const", bopts.WithName(const_1_name)
.WithAttr("dtype", DT_INT32)
.WithAttr("value", const_op_shape_tensor));
TensorShape fill_dim_output_shape;
fill_dim_output_shape.AddDim(output_shape_vec.value().size());
fill_dim_input->AddAttr("_output_shapes",
std::vector<TensorShape>{fill_dim_output_shape});
std::string const_2_name = GetNewOpName("const_2", input_index, tpu_task_id);
auto scalar_tensor = Tensor(dtype, {});
if (dtype == DT_FLOAT) {
scalar_tensor.scalar<float>()() = 0;
} else if (dtype == DT_BFLOAT16) {
scalar_tensor.scalar<bfloat16>()() = bfloat16(0);
} else {
    LOG(ERROR) << "Unsupported data type: " << DataTypeString(dtype);
return nullptr;
}
Node* fill_value_input =
ops::SourceOp("Const", bopts.WithName(const_2_name)
.WithAttr("dtype", dtype)
.WithAttr("value", scalar_tensor));
TensorShape fill_value_output_shape;
fill_value_input->AddAttr("_output_shapes",
std::vector<TensorShape>{fill_value_output_shape});
std::string fill_name = GetNewOpName("fill", input_index, tpu_task_id);
Node* new_fill =
ops::BinaryOp("Fill", fill_dim_input, fill_value_input,
bopts.WithName(fill_name).WithAttr("T", dtype));
TensorShape new_output_shape;
for (auto output_shape : output_shape_vec.value()) {
new_output_shape.AddDim(output_shape);
}
new_fill->AddAttr("_output_shapes",
std::vector<TensorShape>{new_output_shape});
new_fill->AddAttr("_xla_inferred_shapes",
std::vector<TensorShape>{new_output_shape});
fill_dim_input->set_requested_device(host_device_name);
fill_value_input->set_requested_device(host_device_name);
new_fill->set_requested_device(host_device_name);
return new_fill;
}
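// For every non-main-replica TPUExecute node, replaces the ICI-marked
// dummy-variable input at `input_index` with a per-host Fill of zeros, reusing
// a single Fill node per host device.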
absl::Status ReplaceIciDummyVariables(Graph* graph, int input_index,
std::vector<Node*> tpu_nodes,
GraphDefBuilder::Options& bopts) {
absl::flat_hash_map<std::string, Node*> device_to_node_map;
for (Node* tpu_node : tpu_nodes) {
Node* in_node;
TF_RETURN_IF_ERROR(tpu_node->input_node(input_index, &in_node));
if (!in_node->attrs().Find(kICIWeightDistributionMlirBridgeMarker)) {
continue;
}
string host_device_name = GetHostDeviceName(tpu_node);
if (device_to_node_map.contains(host_device_name)) {
RedirectEdge(graph, in_node, tpu_node,
device_to_node_map[host_device_name], input_index);
continue;
}
Node* new_fill =
BuildFillOp(bopts, tpu_node, in_node, input_index, host_device_name);
if (new_fill == nullptr) continue;
device_to_node_map[host_device_name] = new_fill;
RedirectEdge(graph, in_node, tpu_node, device_to_node_map[host_device_name],
input_index);
}
return absl::OkStatus();
}
}
bool ShouldRunPass(const GraphOptimizationPassOptions& options) {
if (!flags::Global().enable_tf2min_ici_weight.value()) {
VLOG(1) << "SimplifyIciDummyVariablesPass is disabled.";
return false;
}
VLOG(1) << "SimplifyIciDummyVariablesPass is enabled.";
if (options.graph == nullptr) {
LOG(INFO) << "No graph in simplify_ici_dummy_variables_pass.";
return false;
}
return true;
}
Status SimplifyIciDummyVariablesPass::Run(
const GraphOptimizationPassOptions& options) {
if (!ShouldRunPass(options)) {
return absl::OkStatus();
}
Graph* graph = options.graph->get();
VLOG(1) << DumpGraphToFile("before_simplify_ici_dummy_variables_pass", *graph,
options.flib_def);
absl::Status status;
GraphDefBuilder::Options bopts(graph, &status);
if (!status.ok()) {
LOG(ERROR) << "GraphDefBuilder::Option failed to initialize.";
return status;
}
bool is_spmd = false;
std::vector<Node*> tpu_nodes =
GetNonMainReplicaIciTPUExecuteNodes(graph, is_spmd);
if (!is_spmd) {
VLOG(1) << "Not SPMD case, skip SimplifyIciDummyVariablesPass.";
return absl::OkStatus();
}
if (tpu_nodes.empty()) {
VLOG(1) << "tpu_nodes is empty, skip SimplifyIciDummyVariablesPass.";
return absl::OkStatus();
}
for (int i = 0; i < tpu_nodes[0]->num_inputs(); ++i) {
auto replace_status = ReplaceIciDummyVariables(graph, i, tpu_nodes, bopts);
if (!replace_status.ok()) {
LOG(ERROR) << "Replace ici dummy variables failed.";
return replace_status;
}
}
RemoveDeadNodes(graph);
VLOG(1) << DumpGraphToFile("after_simplify_ici_dummy_variables_pass", *graph,
options.flib_def);
return absl::OkStatus();
}
REGISTER_OPTIMIZATION(OptimizationPassRegistry::PRE_PLACEMENT, 49,
SimplifyIciDummyVariablesPass);
} | #include "tensorflow/core/common_runtime/simplify_ici_dummy_variables_pass.h"
#include <memory>
#include <string>
#include "absl/status/status.h"
#include "tensorflow/cc/framework/scope.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tensorflow/core/common_runtime/graph_constructor.h"
#include "tensorflow/core/common_runtime/graph_def_builder_util.h"
#include "tensorflow/core/common_runtime/optimization_registry.h"
#include "tensorflow/core/config/flag_defs.h"
#include "tensorflow/core/config/flags.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/resource_loader.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/test.h"
#include "tsl/platform/test.h"
namespace tensorflow {
Node* GetNode(const Graph& graph, const std::string& name) {
for (Node* node : graph.nodes()) {
if (node->name() == name) return node;
}
return nullptr;
}
std::string TestDataPath() {
return tensorflow::GetDataDependencyFilepath(
"tensorflow/core/common_runtime/testdata/");
}
TEST(SimplifyIciDummyVariablesPassTest, flag_is_false) {
flags::Global().enable_tf2min_ici_weight.reset(false);
auto graph = std::make_unique<Graph>(OpRegistry::Global());
std::string graph_path =
TestDataPath() + "simplify_ici_dummy_variables_pass_before.pbtxt";
tensorflow::GraphDef graph_def;
absl::Status load_graph_status =
ReadTextProto(tensorflow::Env::Default(), graph_path, &graph_def);
EXPECT_EQ(load_graph_status.ok(), true);
TF_EXPECT_OK(ConvertGraphDefToGraph(GraphConstructorOptions(), graph_def,
graph.get()));
GraphOptimizationPassOptions options;
options.graph = &graph;
SimplifyIciDummyVariablesPass pass;
TF_ASSERT_OK(pass.Run(options));
Node* fill_1_dim = GetNode(*graph, "const_1_ici_specific_index_0_task_id_2");
Node* fill_1_value =
GetNode(*graph, "const_2_ici_specific_index_0_task_id_2");
Node* fill_1 = GetNode(*graph, "fill_ici_specific_index_0_task_id_2");
EXPECT_EQ(fill_1_dim, nullptr);
EXPECT_EQ(fill_1_value, nullptr);
EXPECT_EQ(fill_1, nullptr);
Node* fill_2_dim = GetNode(*graph, "const_1_ici_specific_index_1_task_id_2");
Node* fill_2_value =
GetNode(*graph, "const_2_ici_specific_index_1_task_id_2");
Node* fill_2 = GetNode(*graph, "fill_ici_specific_index_1_task_id_2");
EXPECT_EQ(fill_2_dim, nullptr);
EXPECT_EQ(fill_2_value, nullptr);
EXPECT_EQ(fill_2, nullptr);
}
TEST(SimplifyIciDummyVariablesPassTest, replace_dummy_variable) {
flags::Global().enable_tf2min_ici_weight.reset(true);
auto graph = std::make_unique<Graph>(OpRegistry::Global());
std::string graph_path =
TestDataPath() + "simplify_ici_dummy_variables_pass_before.pbtxt";
tensorflow::GraphDef graph_def;
  absl::Status load_graph_status =
ReadTextProto(tensorflow::Env::Default(), graph_path, &graph_def);
EXPECT_EQ(load_graph_status.ok(), true);
TF_EXPECT_OK(ConvertGraphDefToGraph(GraphConstructorOptions(), graph_def,
graph.get()));
GraphOptimizationPassOptions options;
options.graph = &graph;
SimplifyIciDummyVariablesPass pass;
TF_ASSERT_OK(pass.Run(options));
Node* fill_1_dim = GetNode(*graph, "const_1_ici_specific_index_0_task_id_2");
Node* fill_1_value =
GetNode(*graph, "const_2_ici_specific_index_0_task_id_2");
Node* fill_1 = GetNode(*graph, "fill_ici_specific_index_0_task_id_2");
EXPECT_NE(fill_1_dim, nullptr);
EXPECT_NE(fill_1_value, nullptr);
EXPECT_NE(fill_1, nullptr);
EXPECT_EQ(fill_1_dim->requested_device(),
"/job:tpu_host_worker/replica:0/task:2/device:CPU:0");
EXPECT_EQ(fill_1_value->requested_device(),
"/job:tpu_host_worker/replica:0/task:2/device:CPU:0");
EXPECT_EQ(fill_1->requested_device(),
"/job:tpu_host_worker/replica:0/task:2/device:CPU:0");
Node* fill_2_dim = GetNode(*graph, "const_1_ici_specific_index_1_task_id_2");
Node* fill_2_value =
GetNode(*graph, "const_2_ici_specific_index_1_task_id_2");
Node* fill_2 = GetNode(*graph, "fill_ici_specific_index_1_task_id_2");
EXPECT_NE(fill_2_dim, nullptr);
EXPECT_NE(fill_2_value, nullptr);
EXPECT_NE(fill_2, nullptr);
EXPECT_EQ(fill_2_dim->requested_device(),
"/job:tpu_host_worker/replica:0/task:2/device:CPU:0");
EXPECT_EQ(fill_2_value->requested_device(),
"/job:tpu_host_worker/replica:0/task:2/device:CPU:0");
EXPECT_EQ(fill_2->requested_device(),
"/job:tpu_host_worker/replica:0/task:2/device:CPU:0");
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/simplify_ici_dummy_variables_pass.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/simplify_ici_dummy_variables_pass_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
4a4e2c3c-500f-4632-a6de-198ec052515a | cpp | tensorflow/tensorflow | inputbuffer | third_party/xla/xla/tsl/lib/io/inputbuffer.cc | third_party/xla/xla/tsl/lib/io/inputbuffer_test.cc | #include "xla/tsl/lib/io/inputbuffer.h"
#include <algorithm>
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
namespace tsl {
namespace io {
InputBuffer::InputBuffer(RandomAccessFile* file, size_t buffer_bytes)
: file_(file),
file_pos_(0),
size_(buffer_bytes),
buf_(new char[size_]),
pos_(buf_),
limit_(buf_) {}
InputBuffer::~InputBuffer() { delete[] buf_; }
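// Reads up to size_ bytes starting at file_pos_ into the buffer and resets the
// read position.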
absl::Status InputBuffer::FillBuffer() {
absl::string_view data;
absl::Status s = file_->Read(file_pos_, size_, &data, buf_);
if (data.data() != buf_) {
memmove(buf_, data.data(), data.size());
}
pos_ = buf_;
limit_ = pos_ + data.size();
file_pos_ += data.size();
return s;
}
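// Reads one line, excluding the terminating '\n' and any trailing '\r'; an
// unterminated final line is still returned with OK status.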
template <typename T>
absl::Status InputBuffer::ReadLine(T* result) {
result->clear();
absl::Status s;
do {
size_t buf_remain = limit_ - pos_;
char* newline = static_cast<char*>(memchr(pos_, '\n', buf_remain));
if (newline != nullptr) {
size_t result_len = newline - pos_;
result->append(pos_, result_len);
pos_ = newline + 1;
if (!result->empty() && result->back() == '\r') {
result->resize(result->size() - 1);
}
return absl::OkStatus();
}
if (buf_remain > 0) result->append(pos_, buf_remain);
s = FillBuffer();
DCHECK_EQ(pos_, buf_);
} while (limit_ != buf_);
if (!result->empty() && result->back() == '\r') {
result->resize(result->size() - 1);
}
if (errors::IsOutOfRange(s) && !result->empty()) {
return absl::OkStatus();
}
return s;
}
template absl::Status InputBuffer::ReadLine<std::string>(std::string* result);
template absl::Status InputBuffer::ReadLine<tstring>(tstring* result);
absl::Status InputBuffer::ReadNBytes(int64_t bytes_to_read,
std::string* result) {
result->clear();
if (bytes_to_read < 0) {
return errors::InvalidArgument("Can't read a negative number of bytes: ",
bytes_to_read);
}
result->resize(bytes_to_read);
size_t bytes_read = 0;
absl::Status status = ReadNBytes(bytes_to_read, &(*result)[0], &bytes_read);
if (bytes_read < bytes_to_read) result->resize(bytes_read);
return status;
}
absl::Status InputBuffer::ReadNBytes(int64_t bytes_to_read, char* result,
size_t* bytes_read) {
if (bytes_to_read < 0) {
return errors::InvalidArgument("Can't read a negative number of bytes: ",
bytes_to_read);
}
absl::Status status;
*bytes_read = 0;
while (*bytes_read < static_cast<size_t>(bytes_to_read)) {
if (pos_ == limit_) {
status = FillBuffer();
if (limit_ == buf_) {
break;
}
}
const int64_t bytes_to_copy =
std::min<int64_t>(limit_ - pos_, bytes_to_read - *bytes_read);
memcpy(result + *bytes_read, pos_, bytes_to_copy);
pos_ += bytes_to_copy;
*bytes_read += bytes_to_copy;
}
if (errors::IsOutOfRange(status) &&
(*bytes_read == static_cast<size_t>(bytes_to_read))) {
return absl::OkStatus();
}
return status;
}
absl::Status InputBuffer::ReadVarint32Fallback(uint32* result) {
absl::Status s = ReadVarintFallback(result, core::kMaxVarint32Bytes);
if (errors::IsDataLoss(s)) {
return errors::DataLoss("Stored data is too large to be a varint32.");
}
return s;
}
absl::Status InputBuffer::ReadVarint64Fallback(uint64* result) {
absl::Status s = ReadVarintFallback(result, core::kMaxVarint64Bytes);
if (errors::IsDataLoss(s)) {
return errors::DataLoss("Stored data is too large to be a varint64.");
}
return s;
}
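// Decodes a base-128 varint one byte at a time, returning DataLoss if more
// than max_bytes bytes are consumed.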
template <typename T>
absl::Status InputBuffer::ReadVarintFallback(T* result, int max_bytes) {
uint8 scratch = 0;
auto* p = reinterpret_cast<char*>(&scratch);
size_t unused_bytes_read = 0;
*result = 0;
for (int index = 0; index < max_bytes; index++) {
int shift = 7 * index;
TF_RETURN_IF_ERROR(ReadNBytes(1, p, &unused_bytes_read));
*result |= (static_cast<T>(scratch) & 127) << shift;
if (!(scratch & 128)) return absl::OkStatus();
}
return errors::DataLoss("Stored data longer than ", max_bytes, " bytes.");
}
absl::Status InputBuffer::SkipNBytes(int64_t bytes_to_skip) {
if (bytes_to_skip < 0) {
return errors::InvalidArgument("Can only skip forward, not ",
bytes_to_skip);
}
int64_t bytes_skipped = 0;
absl::Status s;
while (bytes_skipped < bytes_to_skip) {
if (pos_ == limit_) {
s = FillBuffer();
if (limit_ == buf_) {
break;
}
}
const int64_t bytes_to_advance =
std::min<int64_t>(limit_ - pos_, bytes_to_skip - bytes_skipped);
bytes_skipped += bytes_to_advance;
pos_ += bytes_to_advance;
}
if (errors::IsOutOfRange(s) && bytes_skipped == bytes_to_skip) {
return absl::OkStatus();
}
return s;
}
absl::Status InputBuffer::Seek(int64_t position) {
if (position < 0) {
return errors::InvalidArgument("Seeking to a negative position: ",
position);
}
const int64_t bufpos = file_pos_ - static_cast<int64_t>(limit_ - buf_);
if (position >= bufpos && position < file_pos_) {
pos_ = buf_ + (position - bufpos);
DCHECK(pos_ >= buf_ && pos_ < limit_);
} else {
pos_ = limit_ = buf_;
file_pos_ = position;
}
return absl::OkStatus();
}
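// Prefetches up to `bytes_to_read` bytes into the buffer when they fit,
// compacting any unread buffered data to the front first.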
absl::Status InputBuffer::Hint(int64_t bytes_to_read) {
if (bytes_to_read < 0) {
return errors::InvalidArgument("Can't read a negative number of bytes: ",
bytes_to_read);
}
if (bytes_to_read > size_) {
return absl::OkStatus();
}
const int64_t bytes_remain_in_buf = static_cast<int64_t>(limit_ - pos_);
if (bytes_to_read <= bytes_remain_in_buf) {
return absl::OkStatus();
}
memmove(buf_, pos_, bytes_remain_in_buf);
pos_ = buf_;
limit_ = buf_ + bytes_remain_in_buf;
bytes_to_read -= bytes_remain_in_buf;
absl::string_view data;
absl::Status s = file_->Read(file_pos_, bytes_to_read, &data, limit_);
if (data.data() != limit_) {
memmove(limit_, data.data(), data.size());
}
limit_ += data.size();
file_pos_ += data.size();
if (errors::IsOutOfRange(s) && data.size() == bytes_to_read) {
return absl::OkStatus();
} else {
return s;
}
}
}
} | #include "xla/tsl/lib/io/inputbuffer.h"
#include <vector>
#include "xla/tsl/lib/core/status_test_util.h"
#include "tsl/platform/coding.h"
#include "tsl/platform/env.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/status.h"
#include "tsl/platform/str_util.h"
#include "tsl/platform/strcat.h"
#include "tsl/platform/test.h"
namespace tsl {
namespace {
static std::vector<int> BufferSizes() {
return {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
12, 13, 14, 15, 16, 17, 18, 19, 20, 65536};
}
TEST(InputBuffer, ReadLine_Empty) {
Env* env = Env::Default();
string fname;
ASSERT_TRUE(env->LocalTempFilename(&fname));
TF_ASSERT_OK(WriteStringToFile(env, fname, ""));
for (auto buf_size : BufferSizes()) {
std::unique_ptr<RandomAccessFile> file;
TF_CHECK_OK(env->NewRandomAccessFile(fname, &file));
string line;
io::InputBuffer in(file.get(), buf_size);
EXPECT_TRUE(errors::IsOutOfRange(in.ReadLine(&line)));
}
}
TEST(InputBuffer, ReadLine1) {
Env* env = Env::Default();
string fname;
ASSERT_TRUE(env->LocalTempFilename(&fname));
TF_CHECK_OK(
WriteStringToFile(env, fname, "line one\nline two\nline three\n"));
for (auto buf_size : BufferSizes()) {
std::unique_ptr<RandomAccessFile> file;
TF_CHECK_OK(env->NewRandomAccessFile(fname, &file));
string line;
io::InputBuffer in(file.get(), buf_size);
TF_CHECK_OK(in.ReadLine(&line));
EXPECT_EQ(line, "line one");
TF_CHECK_OK(in.ReadLine(&line));
EXPECT_EQ(line, "line two");
TF_CHECK_OK(in.ReadLine(&line));
EXPECT_EQ(line, "line three");
EXPECT_TRUE(errors::IsOutOfRange(in.ReadLine(&line)));
EXPECT_TRUE(errors::IsOutOfRange(in.ReadLine(&line)));
}
}
TEST(InputBuffer, ReadLine_NoTrailingNewLine) {
Env* env = Env::Default();
string fname;
ASSERT_TRUE(env->LocalTempFilename(&fname));
TF_ASSERT_OK(WriteStringToFile(env, fname, "line one\nline two\nline three"));
for (auto buf_size : BufferSizes()) {
std::unique_ptr<RandomAccessFile> file;
TF_CHECK_OK(env->NewRandomAccessFile(fname, &file));
string line;
io::InputBuffer in(file.get(), buf_size);
TF_CHECK_OK(in.ReadLine(&line));
EXPECT_EQ(line, "line one");
TF_CHECK_OK(in.ReadLine(&line));
EXPECT_EQ(line, "line two");
TF_CHECK_OK(in.ReadLine(&line));
EXPECT_EQ(line, "line three");
EXPECT_TRUE(errors::IsOutOfRange(in.ReadLine(&line)));
EXPECT_TRUE(errors::IsOutOfRange(in.ReadLine(&line)));
}
}
TEST(InputBuffer, ReadLine_EmptyLines) {
Env* env = Env::Default();
string fname;
ASSERT_TRUE(env->LocalTempFilename(&fname));
TF_CHECK_OK(
WriteStringToFile(env, fname, "line one\n\n\nline two\nline three"));
for (auto buf_size : BufferSizes()) {
std::unique_ptr<RandomAccessFile> file;
TF_CHECK_OK(env->NewRandomAccessFile(fname, &file));
string line;
io::InputBuffer in(file.get(), buf_size);
TF_CHECK_OK(in.ReadLine(&line));
EXPECT_EQ(line, "line one");
TF_CHECK_OK(in.ReadLine(&line));
EXPECT_EQ(line, "");
TF_CHECK_OK(in.ReadLine(&line));
EXPECT_EQ(line, "");
TF_CHECK_OK(in.ReadLine(&line));
EXPECT_EQ(line, "line two");
TF_CHECK_OK(in.ReadLine(&line));
EXPECT_EQ(line, "line three");
EXPECT_TRUE(errors::IsOutOfRange(in.ReadLine(&line)));
EXPECT_TRUE(errors::IsOutOfRange(in.ReadLine(&line)));
}
}
TEST(InputBuffer, ReadLine_CRLF) {
Env* env = Env::Default();
string fname;
ASSERT_TRUE(env->LocalTempFilename(&fname));
TF_ASSERT_OK(WriteStringToFile(env, fname,
"line one\r\n\r\n\r\nline two\r\nline three"));
for (auto buf_size : BufferSizes()) {
std::unique_ptr<RandomAccessFile> file;
TF_CHECK_OK(env->NewRandomAccessFile(fname, &file));
string line;
io::InputBuffer in(file.get(), buf_size);
TF_CHECK_OK(in.ReadLine(&line));
EXPECT_EQ(line, "line one");
TF_CHECK_OK(in.ReadLine(&line));
EXPECT_EQ(line, "");
TF_CHECK_OK(in.ReadLine(&line));
EXPECT_EQ(line, "");
TF_CHECK_OK(in.ReadLine(&line));
EXPECT_EQ(line, "line two");
TF_CHECK_OK(in.ReadLine(&line));
EXPECT_EQ(line, "line three");
EXPECT_TRUE(errors::IsOutOfRange(in.ReadLine(&line)));
EXPECT_TRUE(errors::IsOutOfRange(in.ReadLine(&line)));
}
}
TEST(InputBuffer, ReadNBytes) {
Env* env = Env::Default();
string fname;
ASSERT_TRUE(env->LocalTempFilename(&fname));
TF_ASSERT_OK(WriteStringToFile(env, fname, "0123456789"));
for (auto buf_size : BufferSizes()) {
std::unique_ptr<RandomAccessFile> file;
TF_CHECK_OK(env->NewRandomAccessFile(fname, &file));
string read;
io::InputBuffer in(file.get(), buf_size);
EXPECT_EQ(0, in.Tell());
TF_CHECK_OK(in.ReadNBytes(3, &read));
EXPECT_EQ(read, "012");
EXPECT_EQ(3, in.Tell());
TF_CHECK_OK(in.ReadNBytes(0, &read));
EXPECT_EQ(read, "");
EXPECT_EQ(3, in.Tell());
TF_CHECK_OK(in.ReadNBytes(4, &read));
EXPECT_EQ(read, "3456");
EXPECT_EQ(7, in.Tell());
TF_CHECK_OK(in.ReadNBytes(0, &read));
EXPECT_EQ(read, "");
EXPECT_EQ(7, in.Tell());
EXPECT_TRUE(errors::IsOutOfRange(in.ReadNBytes(5, &read)));
EXPECT_EQ(read, "789");
EXPECT_EQ(10, in.Tell());
EXPECT_TRUE(errors::IsOutOfRange(in.ReadNBytes(5, &read)));
EXPECT_EQ(read, "");
EXPECT_EQ(10, in.Tell());
TF_CHECK_OK(in.ReadNBytes(0, &read));
EXPECT_EQ(read, "");
EXPECT_EQ(10, in.Tell());
}
size_t bytes_read;
for (auto buf_size : BufferSizes()) {
std::unique_ptr<RandomAccessFile> file;
TF_CHECK_OK(env->NewRandomAccessFile(fname, &file));
char read[5];
io::InputBuffer in(file.get(), buf_size);
EXPECT_EQ(0, in.Tell());
TF_ASSERT_OK(in.ReadNBytes(3, read, &bytes_read));
EXPECT_EQ(absl::string_view(read, 3), "012");
EXPECT_EQ(3, in.Tell());
TF_ASSERT_OK(in.ReadNBytes(0, read, &bytes_read));
EXPECT_EQ(absl::string_view(read, 3), "012");
EXPECT_EQ(3, in.Tell());
TF_ASSERT_OK(in.ReadNBytes(4, read, &bytes_read));
EXPECT_EQ(absl::string_view(read, 4), "3456");
EXPECT_EQ(7, in.Tell());
TF_ASSERT_OK(in.ReadNBytes(0, read, &bytes_read));
EXPECT_EQ(absl::string_view(read, 4), "3456");
EXPECT_EQ(7, in.Tell());
EXPECT_TRUE(errors::IsOutOfRange(in.ReadNBytes(5, read, &bytes_read)));
EXPECT_EQ(absl::string_view(read, 3), "789");
EXPECT_EQ(10, in.Tell());
EXPECT_TRUE(errors::IsOutOfRange(in.ReadNBytes(5, read, &bytes_read)));
EXPECT_EQ(absl::string_view(read, 3), "789");
EXPECT_EQ(10, in.Tell());
TF_ASSERT_OK(in.ReadNBytes(0, read, &bytes_read));
EXPECT_EQ(absl::string_view(read, 3), "789");
EXPECT_EQ(10, in.Tell());
}
}
TEST(InputBuffer, SkipNBytes) {
Env* env = Env::Default();
string fname;
ASSERT_TRUE(env->LocalTempFilename(&fname));
TF_ASSERT_OK(WriteStringToFile(env, fname, "0123456789"));
for (auto buf_size : BufferSizes()) {
std::unique_ptr<RandomAccessFile> file;
TF_CHECK_OK(env->NewRandomAccessFile(fname, &file));
string read;
io::InputBuffer in(file.get(), buf_size);
EXPECT_EQ(0, in.Tell());
TF_CHECK_OK(in.SkipNBytes(3));
EXPECT_EQ(3, in.Tell());
TF_CHECK_OK(in.SkipNBytes(0));
EXPECT_EQ(3, in.Tell());
TF_CHECK_OK(in.ReadNBytes(2, &read));
EXPECT_EQ(read, "34");
EXPECT_EQ(5, in.Tell());
TF_CHECK_OK(in.SkipNBytes(0));
EXPECT_EQ(5, in.Tell());
TF_CHECK_OK(in.SkipNBytes(2));
EXPECT_EQ(7, in.Tell());
TF_CHECK_OK(in.ReadNBytes(1, &read));
EXPECT_EQ(read, "7");
EXPECT_EQ(8, in.Tell());
EXPECT_TRUE(errors::IsOutOfRange(in.SkipNBytes(5)));
EXPECT_EQ(10, in.Tell());
EXPECT_TRUE(errors::IsOutOfRange(in.SkipNBytes(5)));
EXPECT_EQ(10, in.Tell());
EXPECT_TRUE(errors::IsOutOfRange(in.ReadNBytes(5, &read)));
EXPECT_EQ(read, "");
EXPECT_EQ(10, in.Tell());
}
}
TEST(InputBuffer, Seek) {
Env* env = Env::Default();
string fname;
ASSERT_TRUE(env->LocalTempFilename(&fname));
TF_ASSERT_OK(WriteStringToFile(env, fname, "0123456789"));
for (auto buf_size : BufferSizes()) {
std::unique_ptr<RandomAccessFile> file;
TF_CHECK_OK(env->NewRandomAccessFile(fname, &file));
string read;
io::InputBuffer in(file.get(), buf_size);
TF_CHECK_OK(in.ReadNBytes(3, &read));
EXPECT_EQ(read, "012");
TF_CHECK_OK(in.ReadNBytes(3, &read));
EXPECT_EQ(read, "345");
TF_CHECK_OK(in.Seek(0));
TF_CHECK_OK(in.ReadNBytes(3, &read));
EXPECT_EQ(read, "012");
TF_CHECK_OK(in.Seek(3));
TF_CHECK_OK(in.ReadNBytes(4, &read));
EXPECT_EQ(read, "3456");
TF_CHECK_OK(in.Seek(4));
TF_CHECK_OK(in.ReadNBytes(4, &read));
EXPECT_EQ(read, "4567");
TF_CHECK_OK(in.Seek(1 << 25));
EXPECT_TRUE(errors::IsOutOfRange(in.ReadNBytes(1, &read)));
EXPECT_TRUE(absl::StrContains(in.Seek(-1).ToString(), "negative position"));
}
}
TEST(InputBuffer, ReadVarint32) {
Env* env = Env::Default();
string fname;
ASSERT_TRUE(env->LocalTempFilename(&fname));
std::vector<uint32> data;
uint32 i = 0;
for (; i < (1U << 10); i += 1) data.push_back(i);
for (; i < (1U << 15); i += 5) data.push_back(i);
for (; i < (1U << 31); i += 132817) data.push_back(i);
data.push_back(std::numeric_limits<uint32>::max());
{
std::unique_ptr<WritableFile> file;
TF_CHECK_OK(env->NewWritableFile(fname, &file));
string varint;
for (uint32 number : data) {
varint.clear();
core::PutVarint32(&varint, number);
TF_CHECK_OK(file->Append(absl::string_view(varint)));
}
}
for (auto buf_size : BufferSizes()) {
std::unique_ptr<RandomAccessFile> file;
TF_CHECK_OK(env->NewRandomAccessFile(fname, &file));
io::InputBuffer in(file.get(), buf_size);
uint32 result = 0;
for (uint32 expected : data) {
TF_ASSERT_OK(in.ReadVarint32(&result));
EXPECT_EQ(expected, result);
}
EXPECT_TRUE(errors::IsOutOfRange(in.ReadVarint32(&result)));
}
}
TEST(InputBuffer, ReadVarint64) {
Env* env = Env::Default();
string fname;
ASSERT_TRUE(env->LocalTempFilename(&fname));
std::vector<uint64> data;
uint64 i = 0;
for (; i < (1U << 10); i += 1) data.push_back(i);
for (; i < (1U << 15); i += 5) data.push_back(i);
for (; i < (1U << 31); i += 164817) data.push_back(i);
for (; i < (1ULL << 63); i += 16481797854795663UL) data.push_back(i);
data.push_back(std::numeric_limits<uint64>::max());
{
std::unique_ptr<WritableFile> file;
TF_CHECK_OK(env->NewWritableFile(fname, &file));
string varint;
for (uint64 number : data) {
varint.clear();
core::PutVarint64(&varint, number);
TF_CHECK_OK(file->Append(absl::string_view(varint)));
}
}
for (auto buf_size : BufferSizes()) {
std::unique_ptr<RandomAccessFile> file;
TF_CHECK_OK(env->NewRandomAccessFile(fname, &file));
io::InputBuffer in(file.get(), buf_size);
uint64 result = 0;
for (uint64 expected : data) {
TF_ASSERT_OK(in.ReadVarint64(&result));
EXPECT_EQ(expected, result);
}
EXPECT_TRUE(errors::IsOutOfRange(in.ReadVarint64(&result)));
}
}
TEST(InputBuffer, Hint) {
Env* env = Env::Default();
string fname;
ASSERT_TRUE(env->LocalTempFilename(&fname));
TF_ASSERT_OK(WriteStringToFile(env, fname, "0123456789"));
for (auto buf_size : BufferSizes()) {
std::unique_ptr<RandomAccessFile> file;
TF_CHECK_OK(env->NewRandomAccessFile(fname, &file));
string read;
io::InputBuffer in(file.get(), buf_size);
TF_CHECK_OK(in.ReadNBytes(3, &read));
EXPECT_EQ(read, "012");
TF_CHECK_OK(in.Hint(4));
TF_CHECK_OK(in.ReadNBytes(3, &read));
EXPECT_EQ(read, "345");
TF_CHECK_OK(in.Hint(1));
TF_CHECK_OK(in.ReadNBytes(3, &read));
EXPECT_EQ(read, "678");
TF_CHECK_OK(in.Seek(0));
TF_CHECK_OK(in.Hint(7));
TF_CHECK_OK(in.ReadNBytes(3, &read));
EXPECT_EQ(read, "012");
TF_CHECK_OK(in.ReadNBytes(4, &read));
EXPECT_EQ(read, "3456");
TF_CHECK_OK(in.Hint(2));
TF_CHECK_OK(in.Seek(4));
TF_CHECK_OK(in.ReadNBytes(4, &read));
EXPECT_EQ(read, "4567");
TF_CHECK_OK(in.Seek(0));
TF_CHECK_OK(in.Hint(1 << 25));
TF_CHECK_OK(in.Seek(1 << 25));
EXPECT_TRUE(errors::IsOutOfRange(in.Hint(1)));
EXPECT_TRUE(errors::IsInvalidArgument(in.Hint(-1)));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/lib/io/inputbuffer.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/lib/io/inputbuffer_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
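For orientation on the InputBuffer API paired with its tests in the row above, a minimal usage sketch follows; it is not part of the linked commit, and the helper name PrintLines, the 4096-byte buffer, and the file name are illustrative assumptions. It reads a file line by line and treats OUT_OF_RANGE from ReadLine as end-of-file, matching the behaviour the tests exercise.

#include <memory>
#include <string>

#include "absl/status/status.h"
#include "xla/tsl/lib/io/inputbuffer.h"
#include "tsl/platform/env.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"

// Logs every line of `fname`; OUT_OF_RANGE from ReadLine simply marks EOF.
absl::Status PrintLines(const std::string& fname) {
  tsl::Env* env = tsl::Env::Default();
  std::unique_ptr<tsl::RandomAccessFile> file;
  TF_RETURN_IF_ERROR(env->NewRandomAccessFile(fname, &file));
  tsl::io::InputBuffer in(file.get(), /*buffer_bytes=*/4096);
  std::string line;
  absl::Status s = in.ReadLine(&line);
  while (s.ok()) {
    LOG(INFO) << line;
    s = in.ReadLine(&line);
  }
  return tsl::errors::IsOutOfRange(s) ? absl::OkStatus() : s;
}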
6e6903fa-08d5-4c96-8b91-e495d0ae1ec7 | cpp | tensorflow/tensorflow | op_stats_to_pod_viewer | tensorflow/core/profiler/convert/op_stats_to_pod_viewer.cc | tensorflow/core/profiler/convert/op_stats_to_pod_viewer_test.cc | #include "tensorflow/core/profiler/convert/op_stats_to_pod_viewer.h"
#include <utility>
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/profiler/convert/op_stats_to_pod_stats.h"
#include "tensorflow/core/profiler/protobuf/pod_stats.pb.h"
#include "tensorflow/core/profiler/protobuf/steps_db.pb.h"
#include "tensorflow/core/profiler/utils/diagnostics.h"
namespace tensorflow {
namespace profiler {
namespace {
PodStatsSequence ConvertOpStatsToPodStatsSequence(const OpStats& op_stats,
PodStatsDatabase pod_stats) {
PodStatsSequence result_db;
int i = 0;
for (const auto& step_sequence : op_stats.step_db().step_sequence()) {
PodStatsMap* pod_stats_map = result_db.add_pod_stats_map();
pod_stats_map->set_step_num(step_sequence.step_num());
for (const auto& entry : step_sequence.step_info_per_core()) {
PodStatsRecord& record =
(*pod_stats_map->mutable_pod_stats_per_core())[entry.first];
DCHECK_LE(i, pod_stats.pod_stats_record_size());
record = std::move(*pod_stats.mutable_pod_stats_record(i++));
}
}
return result_db;
}
}
PodViewerDatabase ConvertOpStatsToPodViewer(const OpStats& op_stats) {
PodViewerDatabase database;
database.set_device_type(op_stats.run_environment().device_type());
PodStatsDatabase pod_stats = ConvertOpStatsToPodStats(op_stats);
database.mutable_step_breakdown_events()->Swap(
pod_stats.mutable_step_breakdown_events());
*database.mutable_pod_stats_sequence() =
ConvertOpStatsToPodStatsSequence(op_stats, std::move(pod_stats));
PopulateStepDiagnostics(op_stats, database.mutable_diagnostics());
return database;
}
}
} | #include "tensorflow/core/profiler/convert/op_stats_to_pod_viewer.h"
#include "google/protobuf/any.pb.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/profiler/protobuf/diagnostics.pb.h"
#include "tensorflow/core/profiler/protobuf/op_stats.pb.h"
#include "tensorflow/core/profiler/protobuf/pod_stats.pb.h"
#include "tensorflow/core/profiler/protobuf/steps_db.pb.h"
#include "tensorflow/core/profiler/utils/diagnostics.h"
#include "tensorflow/core/profiler/utils/event_span.h"
#include "tensorflow/core/profiler/utils/math_utils.h"
namespace tensorflow {
namespace profiler {
namespace {
const double kMaxError = 1e-6;
constexpr int kStepNum = 2;
constexpr int kCoreId = 1001;
constexpr int kStepTimePs = 1000;
constexpr int kHostComputePs = 50;
constexpr int kHostCompilePs = 50;
constexpr int kHostToHostPs = 50;
constexpr int kHostToDevicePs = 50;
constexpr int kHostPreparePs = 50;
constexpr int kDeviceCollectivePs = 350;
constexpr int kHostWaitInputPs = 50;
constexpr int kDeviceToDevicePs = 50;
constexpr int kDeviceToHostPs = 50;
constexpr int kDeviceCompute32Ps = 50;
constexpr int kDeviceCompute16Ps = 50;
constexpr int kDeviceWaitDevicePs = 50;
constexpr int kDeviceWaitHostPs = 50;
constexpr int kUnknownTimePs = 50;
static constexpr char kHostname[] = "host:123";
void CreateOpStats(OpStats* op_stats) {
PerCoreStepInfo* info = op_stats->mutable_step_db()->add_step_sequence();
info->set_step_num(kStepNum);
StepInfoResult& step_info = (*info->mutable_step_info_per_core())[kCoreId];
step_info.set_step_num(kStepNum);
step_info.set_duration_ps(kStepTimePs);
GenericStepBreakdown breakdown;
auto& type_ps = *breakdown.mutable_type_ps();
type_ps[HOST_COMPUTE] = kHostComputePs;
type_ps[HOST_COMPILE] = kHostCompilePs;
type_ps[HOST_TO_HOST] = kHostToHostPs;
type_ps[HOST_TO_DEVICE] = kHostToDevicePs;
type_ps[HOST_PREPARE] = kHostPreparePs;
type_ps[DEVICE_COLLECTIVES] = kDeviceCollectivePs;
type_ps[HOST_WAIT_INPUT] = kHostWaitInputPs;
type_ps[DEVICE_TO_DEVICE] = kDeviceToDevicePs;
type_ps[DEVICE_TO_HOST] = kDeviceToHostPs;
type_ps[DEVICE_COMPUTE_32] = kDeviceCompute32Ps;
type_ps[DEVICE_COMPUTE_16] = kDeviceCompute16Ps;
type_ps[DEVICE_WAIT_DEVICE] = kDeviceWaitDevicePs;
type_ps[DEVICE_WAIT_HOST] = kDeviceWaitHostPs;
type_ps[UNKNOWN_TIME] = kUnknownTimePs;
step_info.mutable_step_breakdown()->PackFrom(breakdown);
CoreDetails& details = (*op_stats->mutable_core_id_to_details())[kCoreId];
details.set_hostname(kHostname);
}
TEST(OpStatsToPodViewer, GpuPodViewer) {
OpStats op_stats;
CreateOpStats(&op_stats);
PodViewerDatabase pod_viewer_db = ConvertOpStatsToPodViewer(op_stats);
EXPECT_EQ(1, pod_viewer_db.pod_stats_sequence().pod_stats_map_size());
const PodStatsMap& pod_stats_map =
pod_viewer_db.pod_stats_sequence().pod_stats_map(0);
EXPECT_EQ(kStepNum, pod_stats_map.step_num());
const PodStatsRecord& record = pod_stats_map.pod_stats_per_core().at(kCoreId);
EXPECT_EQ(kStepNum, record.step_num());
EXPECT_EQ(kHostname, record.host_name());
EXPECT_NEAR(tsl::profiler::PicoToMicro(kStepTimePs),
record.total_duration_us(), kMaxError);
const auto& breakdown = record.step_breakdown_us();
EXPECT_NEAR(
tsl::profiler::PicoToMicro(kDeviceCompute32Ps + kDeviceCompute16Ps),
breakdown.at(kDeviceCompute), kMaxError);
EXPECT_NEAR(
tsl::profiler::PicoToMicro(kDeviceToDevicePs + kDeviceWaitDevicePs),
breakdown.at(kDeviceToDevice), kMaxError);
EXPECT_NEAR(tsl::profiler::PicoToMicro(kDeviceCollectivePs),
breakdown.at(kDeviceCollectives), kMaxError);
EXPECT_NEAR(tsl::profiler::PicoToMicro(kHostComputePs),
breakdown.at(kHostCompute), kMaxError);
EXPECT_NEAR(tsl::profiler::PicoToMicro(kHostPreparePs),
breakdown.at(kHostPrepare), kMaxError);
EXPECT_NEAR(tsl::profiler::PicoToMicro(kHostWaitInputPs + kHostToDevicePs +
kDeviceWaitHostPs),
breakdown.at(kInput), kMaxError);
EXPECT_NEAR(tsl::profiler::PicoToMicro(kDeviceToHostPs),
breakdown.at(kOutput), kMaxError);
EXPECT_NEAR(tsl::profiler::PicoToMicro(kHostCompilePs),
breakdown.at(kCompile), kMaxError);
EXPECT_NEAR(tsl::profiler::PicoToMicro(kUnknownTimePs),
breakdown.at(kAllOthers), kMaxError);
EXPECT_EQ(GetGenericEventTypeStr(kDeviceCollectives), record.bottleneck());
}
TEST(OpStatsToPodViewer, Diagnostics) {
OpStats op_stats;
op_stats.mutable_step_db()->set_use_incomplete_step(true);
PodViewerDatabase pod_viewer_db = ConvertOpStatsToPodViewer(op_stats);
EXPECT_EQ(1, pod_viewer_db.diagnostics().warnings_size());
EXPECT_EQ(kErrorIncompleteStep, pod_viewer_db.diagnostics().warnings(0));
}
TEST(OpStatsToPodViewer, DeviceType) {
OpStats op_stats;
op_stats.mutable_run_environment()->set_device_type("GPU");
PodViewerDatabase pod_viewer_db = ConvertOpStatsToPodViewer(op_stats);
EXPECT_EQ("GPU", pod_viewer_db.device_type());
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/profiler/convert/op_stats_to_pod_viewer.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/profiler/convert/op_stats_to_pod_viewer_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
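As a quick illustration of the conversion entry point above, here is a small sketch; it is not from the linked commit, and BuildExamplePodViewer plus the populated fields are placeholder choices. Setting the device type and the incomplete-step flag is enough for ConvertOpStatsToPodViewer to emit both a device type and a diagnostics warning.

#include "tensorflow/core/profiler/convert/op_stats_to_pod_viewer.h"
#include "tensorflow/core/profiler/protobuf/op_stats.pb.h"

namespace tensorflow {
namespace profiler {

PodViewerDatabase BuildExamplePodViewer() {
  OpStats op_stats;
  op_stats.mutable_run_environment()->set_device_type("GPU");
  // Marking the step database incomplete produces a warning in diagnostics.
  op_stats.mutable_step_db()->set_use_incomplete_step(true);
  return ConvertOpStatsToPodViewer(op_stats);
}

}  // namespace profiler
}  // namespace tensorflow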
d68cdea6-84b3-4713-87b5-65ec06ff8085 | cpp | tensorflow/tensorflow | nccl_clique_key | third_party/xla/xla/service/gpu/runtime/nccl_clique_key.cc | third_party/xla/xla/service/gpu/runtime/nccl_clique_key_test.cc | #include "xla/service/gpu/runtime/nccl_clique_key.h"
#include <algorithm>
#include <cstdint>
#include <optional>
#include <string>
#include <string_view>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_join.h"
#include "absl/types/span.h"
#include "xla/service/global_device_id.h"
#include "tsl/platform/logging.h"
namespace xla::gpu {
NcclCliqueKey::NcclCliqueKey(
std::vector<GlobalDeviceId> devices, NcclStreamId stream_id,
AsyncStreamKind stream_kind,
std::vector<std::vector<GlobalDeviceId>> participant_groups)
: devices_(std::move(devices)),
stream_id_(stream_id),
stream_kind_(stream_kind),
participant_groups_(std::move(participant_groups)) {
for (std::vector<GlobalDeviceId>& group : participant_groups_) {
absl::c_sort(group);
}
auto compare_groups = [](const std::vector<GlobalDeviceId>& lhs,
const std::vector<GlobalDeviceId>& rhs) {
CHECK(!lhs.empty());
CHECK(!rhs.empty());
return lhs[0] < rhs[0];
};
absl::c_sort(participant_groups_, compare_groups);
}
absl::Span<const GlobalDeviceId> NcclCliqueKey::devices() const {
return devices_;
}
NcclStreamId NcclCliqueKey::stream_id() const { return stream_id_; }
std::optional<int64_t> NcclCliqueKey::rank(GlobalDeviceId id) const {
if (auto it = absl::c_find(devices_, id); it != devices_.end()) {
return it - devices_.begin();
}
return std::nullopt;
}
bool NcclCliqueKey::IsSubsetOf(const NcclCliqueKey& other) const {
return stream_id_ == other.stream_id_ &&
absl::c_all_of(devices_, [&](GlobalDeviceId id) {
return absl::c_linear_search(other.devices_, id);
});
}
std::string NcclCliqueKey::ToString() const {
std::string group_string = "";
if (!participant_groups_.empty()) {
std::vector<std::string> values;
values.reserve(participant_groups_.size());
for (const auto& group : participant_groups_) {
values.push_back("[" + GlobalDeviceIdsToString(group) + "]");
}
group_string = absl::StrFormat("; groups=[%s]", absl::StrJoin(values, ","));
}
return absl::StrFormat("devices=[%s]; stream=%d%s",
GlobalDeviceIdsToString(devices_), stream_id_.value(),
group_string);
}
bool operator==(const NcclCliqueKey& a, const NcclCliqueKey& b) {
return a.devices_ == b.devices_ && a.stream_id_ == b.stream_id_ &&
a.participant_groups_ == b.participant_groups_;
}
bool operator<(const NcclCliqueKey& a, const NcclCliqueKey& b) {
if (a.devices_.size() < b.devices_.size()) return true;
if (b.devices_.size() < a.devices_.size()) return false;
if (a.devices_ < b.devices_) return true;
if (b.devices_ < a.devices_) return false;
return a.stream_id_.value() < b.stream_id_.value();
}
bool operator>(const NcclCliqueKey& a, const NcclCliqueKey& b) {
if (a.devices_.size() > b.devices_.size()) return true;
if (b.devices_.size() > a.devices_.size()) return false;
if (a.devices_ > b.devices_) return true;
if (b.devices_ > a.devices_) return false;
return a.stream_id_.value() < b.stream_id_.value();
}
NcclCliqueId::NcclCliqueId() { std::fill(data_.begin(), data_.end(), 0); }
NcclCliqueId::NcclCliqueId(char bytes[kSize]) {
std::copy(bytes, bytes + kSize, data_.data());
}
absl::StatusOr<NcclCliqueId> NcclCliqueId::FromString(std::string_view str) {
if (str.size() != kSize) {
return absl::InvalidArgumentError(
absl::StrFormat("Invalid NCCL clique id size: %d , expected %d bytes",
str.size(), kSize));
}
char bytes[kSize];
std::copy(str.data(), str.data() + kSize, bytes);
return NcclCliqueId(bytes);
}
absl::Span<const char> NcclCliqueId::data() const { return data_; }
std::string NcclCliqueId::ToString() const {
return std::string(data_.data(), data_.size());
}
} | #include "xla/service/gpu/runtime/nccl_clique_key.h"
#include <algorithm>
#include <array>
#include <cstdint>
#include <cstring>
#include <functional>
#include <optional>
#include <vector>
#include "absl/container/btree_map.h"
#include "absl/status/status.h"
#include "xla/service/global_device_id.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/test.h"
namespace xla::gpu {
using ::tsl::testing::StatusIs;
static NcclCliqueKey GetBaseCliqueKey() {
return NcclCliqueKey({GlobalDeviceId(0), GlobalDeviceId(1)}, NcclStreamId(0),
AsyncStreamKind::kCollective,
std::vector<std::vector<GlobalDeviceId>>{
{GlobalDeviceId(0), GlobalDeviceId(1)},
{GlobalDeviceId(2), GlobalDeviceId(3)}});
}
TEST(NcclCliqueKeyTest, IsSubsetOf) {
GlobalDeviceId id0 = GlobalDeviceId(0);
GlobalDeviceId id1 = GlobalDeviceId(1);
GlobalDeviceId id2 = GlobalDeviceId(2);
GlobalDeviceId id3 = GlobalDeviceId(3);
NcclCliqueKey key0({id0, id1}, NcclStreamId(0));
NcclCliqueKey key1({id0, id1, id2, id3}, NcclStreamId(0));
NcclCliqueKey key2({id0, id1, id2, id3}, NcclStreamId(1));
NcclCliqueKey key3({id1, id2, id3}, NcclStreamId(0));
EXPECT_TRUE(key0.IsSubsetOf(key1));
EXPECT_FALSE(key0.IsSubsetOf(key2));
EXPECT_FALSE(key0.IsSubsetOf(key3));
}
TEST(NcclCliqueKeyTest, Compare) {
GlobalDeviceId id0 = GlobalDeviceId(0);
GlobalDeviceId id1 = GlobalDeviceId(1);
GlobalDeviceId id2 = GlobalDeviceId(2);
GlobalDeviceId id3 = GlobalDeviceId(3);
NcclCliqueKey key0({id0, id1}, NcclStreamId(0));
NcclCliqueKey key1({id1, id2, id3}, NcclStreamId(0));
NcclCliqueKey key2({id1, id2, id3}, NcclStreamId(1));
EXPECT_LT(key0, key1);
EXPECT_GT(key1, key0);
EXPECT_LT(key1, key2);
}
TEST(NcclCliqueKeyTest, CompareWithParticipantGroups) {
GlobalDeviceId id0 = GlobalDeviceId(0);
GlobalDeviceId id1 = GlobalDeviceId(1);
GlobalDeviceId id2 = GlobalDeviceId(2);
GlobalDeviceId id3 = GlobalDeviceId(3);
NcclCliqueKey key0({id0, id1}, NcclStreamId(0), AsyncStreamKind::kCollective,
std::vector<std::vector<GlobalDeviceId>>{{id0, id1}});
NcclCliqueKey key1(
{id0, id1}, NcclStreamId(0), AsyncStreamKind::kCollective,
std::vector<std::vector<GlobalDeviceId>>{{id0, id1}, {id2, id3}});
EXPECT_FALSE(key0 == key1);
NcclCliqueKey key0_nogroups({id0, id1}, NcclStreamId(0));
NcclCliqueKey key1_nogroups({id0, id1}, NcclStreamId(0));
EXPECT_EQ(key0_nogroups, key1_nogroups);
}
TEST(NcclCliqueKeyTest, CompareWithPermutedParticipantGroups) {
GlobalDeviceId id0 = GlobalDeviceId(0);
GlobalDeviceId id1 = GlobalDeviceId(1);
GlobalDeviceId id2 = GlobalDeviceId(2);
GlobalDeviceId id3 = GlobalDeviceId(3);
NcclCliqueKey key0(
{id0, id1}, NcclStreamId(0), AsyncStreamKind::kCollective,
std::vector<std::vector<GlobalDeviceId>>{{id3, id2}, {id0, id1}});
NcclCliqueKey key1(
{id0, id1}, NcclStreamId(0), AsyncStreamKind::kCollective,
std::vector<std::vector<GlobalDeviceId>>{{id0, id1}, {id2, id3}});
EXPECT_EQ(key0, key1);
NcclCliqueKey key_other(
{id0, id1}, NcclStreamId(0), AsyncStreamKind::kCollective,
std::vector<std::vector<GlobalDeviceId>>{{id0, id2}, {id1, id3}});
EXPECT_FALSE(key0 == key_other);
}
TEST(NcclCliqueKeyTest, BtreeIterationOrder) {
GlobalDeviceId id0 = GlobalDeviceId(0);
GlobalDeviceId id1 = GlobalDeviceId(1);
GlobalDeviceId id2 = GlobalDeviceId(2);
GlobalDeviceId id3 = GlobalDeviceId(3);
NcclCliqueKey key0({id0, id2}, NcclStreamId(0));
NcclCliqueKey key1({id0, id1, id2, id3}, NcclStreamId(0));
absl::btree_map<NcclCliqueKey, int64_t, std::greater<NcclCliqueKey>> map;
map[key0] = 0;
map[key1] = 1;
EXPECT_EQ(map.begin()->first, key1);
}
TEST(NcclCliqueKeyGettersTest, Devices) {
EXPECT_THAT(
GetBaseCliqueKey().devices(),
::testing::UnorderedElementsAre(GlobalDeviceId(0), GlobalDeviceId(1)));
}
TEST(NcclCliqueKeyGettersTest, Rank) {
auto key = GetBaseCliqueKey();
EXPECT_EQ(key.rank(GlobalDeviceId(0)), 0);
EXPECT_EQ(key.rank(GlobalDeviceId(1)), 1);
EXPECT_EQ(key.rank(GlobalDeviceId(2)), std::nullopt);
EXPECT_EQ(key.rank(GlobalDeviceId(3)), std::nullopt);
}
TEST(NcclCliqueKeyGettersTest, StreamId) {
EXPECT_EQ(GetBaseCliqueKey().stream_id(), NcclStreamId(0));
}
TEST(NcclCliqueKeyGetterTest, ToString) {
EXPECT_EQ(GetBaseCliqueKey().ToString(),
"devices=[0,1]; stream=0; groups=[[0,1],[2,3]]");
}
TEST(NcclCliqueIdGettersTest, Data) {
std::array<char, 128> id;
std::fill(id.begin(), id.end(), 0x01);
NcclCliqueId clique_id(id.data());
EXPECT_EQ(std::memcmp(clique_id.data().data(), id.data(), 128), 0);
}
TEST(NcclCliqueIdStringTest, ToString) {
std::array<char, 128> id;
std::fill(id.begin(), id.end(), 0x01);
NcclCliqueId clique_id(id.data());
for (int i = 0; i < 128; ++i) {
EXPECT_THAT(clique_id.ToString().substr(i, 1), "\x1");
}
}
TEST(NcclCliqueIdStringTest, FromInvalidString) {
EXPECT_THAT(NcclCliqueId::FromString("123"),
StatusIs(absl::StatusCode::kInvalidArgument));
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/runtime/nccl_clique_key.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/runtime/nccl_clique_key_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
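A short sketch of how the clique-key relations exercised above fit together; it is not from the linked commit, and the function name and device ids are illustrative. It builds a two-device and a four-device key on the same stream and checks rank lookup, subset containment, and ordering.

#include "tsl/platform/logging.h"
#include "xla/service/global_device_id.h"
#include "xla/service/gpu/runtime/nccl_clique_key.h"

namespace xla::gpu {

void CliqueKeyExample() {
  const NcclCliqueKey two_dev({GlobalDeviceId(0), GlobalDeviceId(1)},
                              NcclStreamId(0));
  const NcclCliqueKey four_dev({GlobalDeviceId(0), GlobalDeviceId(1),
                                GlobalDeviceId(2), GlobalDeviceId(3)},
                               NcclStreamId(0));
  // Device 1 is the second participant of the two-device clique.
  CHECK_EQ(two_dev.rank(GlobalDeviceId(1)).value(), 1);
  // {0,1} is contained in {0,1,2,3} on the same stream.
  CHECK(two_dev.IsSubsetOf(four_dev));
  // Keys compare by device count first, so the smaller clique sorts first.
  CHECK(two_dev < four_dev);
}

}  // namespace xla::gpu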
0ecbd68f-03dc-483c-bf7d-7c2e09656484 | cpp | tensorflow/tensorflow | eager_op_rewrite_registry | tensorflow/core/common_runtime/eager/eager_op_rewrite_registry.cc | tensorflow/core/common_runtime/eager/eager_op_rewrite_registry_test.cc | #include "tensorflow/core/common_runtime/eager/eager_op_rewrite_registry.h"
#include <memory>
#include <utility>
namespace tensorflow {
EagerOpRewriteRegistry* EagerOpRewriteRegistry::Global() {
static EagerOpRewriteRegistry* global_rewrite_registry =
new EagerOpRewriteRegistry;
return global_rewrite_registry;
}
void EagerOpRewriteRegistry::Register(Phase phase, int32_t ordinal,
std::unique_ptr<EagerOpRewrite> pass) {
auto it_rewrites = rewrites_[phase].cbegin();
for (; it_rewrites != rewrites_[phase].cend(); ++it_rewrites) {
if (it_rewrites->second == ordinal) {
TF_CHECK_OK(errors::AlreadyExists(
"Attempting to register Eager Rewriter ", pass->GetDebugInfo().name,
" for phase ", phase, " using ordinal ", ordinal,
" already occupied by Rewriter ",
it_rewrites->first->GetDebugInfo().name));
}
if (it_rewrites->second > ordinal) {
break;
}
}
rewrites_[phase].emplace(it_rewrites,
std::make_pair(std::move(pass), ordinal));
}
Status EagerOpRewriteRegistry::RunRewrite(
Phase phase, EagerOperation* orig_op,
std::unique_ptr<EagerOperation>* out_op) {
EagerOperation* pre_op = orig_op;
for (auto it_rewrites = rewrites_[phase].cbegin();
it_rewrites != rewrites_[phase].cend(); ++it_rewrites) {
TF_RETURN_IF_ERROR(it_rewrites->first->Run(pre_op, out_op));
if (*out_op != nullptr) {
pre_op = out_op->get();
}
}
return absl::OkStatus();
}
} | #include "tensorflow/core/common_runtime/eager/eager_op_rewrite_registry.h"
#include <memory>
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
class TestEagerOpRewrite : public EagerOpRewrite {
public:
TestEagerOpRewrite(string name, string file, string line)
: EagerOpRewrite(name, file, line),
executor_(false, true) {}
static int count_;
EagerExecutor executor_;
Status Run(EagerOperation* orig_op,
std::unique_ptr<tensorflow::EagerOperation>* out_op) override {
++count_;
tensorflow::EagerOperation* op =
new tensorflow::EagerOperation(&orig_op->EagerContext());
TF_RETURN_IF_ERROR(op->Reset("NoOp", nullptr, false, &executor_));
out_op->reset(op);
return absl::OkStatus();
}
};
int TestEagerOpRewrite::count_ = 0;
REGISTER_REWRITE(EagerOpRewriteRegistry::PRE_EXECUTION, 10000,
TestEagerOpRewrite);
REGISTER_REWRITE(EagerOpRewriteRegistry::PRE_EXECUTION, 10001,
TestEagerOpRewrite);
TEST(EagerOpRewriteRegistryTest, RegisterRewritePass) {
EXPECT_EQ(0, TestEagerOpRewrite::count_);
StaticDeviceMgr device_mgr(DeviceFactory::NewDevice(
"CPU", {}, "/job:localhost/replica:0/task:0/device:CPU:0"));
tensorflow::EagerContext* ctx = new tensorflow::EagerContext(
SessionOptions(),
tensorflow::ContextDevicePlacementPolicy::DEVICE_PLACEMENT_SILENT, false,
&device_mgr, false, nullptr, nullptr, nullptr,
true);
EagerOperation orig_op(ctx);
std::unique_ptr<tensorflow::EagerOperation> out_op;
EXPECT_EQ(absl::OkStatus(),
EagerOpRewriteRegistry::Global()->RunRewrite(
EagerOpRewriteRegistry::PRE_EXECUTION, &orig_op, &out_op));
EXPECT_EQ(2, TestEagerOpRewrite::count_);
EXPECT_EQ("NoOp", out_op->Name());
ctx->Unref();
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/eager/eager_op_rewrite_registry.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/eager/eager_op_rewrite_registry_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
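To show how a pass plugs into the registry above, a minimal rewrite sketch follows; it is not from the linked commit, and LoggingRewrite plus the ordinal 20000 are assumptions (the ordinal only has to be unused for the phase). Leaving *out_op unset makes RunRewrite keep the original operation.

#include <memory>

#include "tensorflow/core/common_runtime/eager/eager_op_rewrite_registry.h"
#include "tensorflow/core/platform/logging.h"

namespace tensorflow {

class LoggingRewrite : public EagerOpRewrite {
 public:
  LoggingRewrite(string name, string file, string line)
      : EagerOpRewrite(name, file, line) {}

  Status Run(EagerOperation* orig_op,
             std::unique_ptr<EagerOperation>* out_op) override {
    // Observe the op but do not replace it; *out_op stays null.
    VLOG(1) << "Eager op about to execute: " << orig_op->Name();
    return absl::OkStatus();
  }
};

// Registered for the PRE_EXECUTION phase, after lower ordinals run.
REGISTER_REWRITE(EagerOpRewriteRegistry::PRE_EXECUTION, 20000, LoggingRewrite);

}  // namespace tensorflow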
8c9e068c-a6d4-43d8-b79a-da70f3a1b567 | cpp | tensorflow/tensorflow | cosine | tensorflow/lite/experimental/shlo/ops/cosine.cc | tensorflow/lite/experimental/shlo/ops/cosine_test.cc | #include "tensorflow/lite/experimental/shlo/ops/cosine.h"
#include <cmath>
#include "absl/status/status.h"
#include "tensorflow/lite/experimental/shlo/bf16.h"
#include "tensorflow/lite/experimental/shlo/dispatch.h"
#include "tensorflow/lite/experimental/shlo/f16.h"
#include "tensorflow/lite/experimental/shlo/ops/unary_elementwise.h"
#include "tensorflow/lite/experimental/shlo/ops/util.h"
#include "tensorflow/lite/experimental/shlo/tensor.h"
namespace shlo_ref {
struct Cosine {
template <class T>
T operator()(T v) const {
return std::cos(v);
}
};
template <>
F16 Cosine::operator()<F16>(F16 val) const {
return F16(operator()(static_cast<float>(val)));
}
template <>
BF16 Cosine::operator()<BF16>(BF16 val) const {
return BF16(operator()(static_cast<float>(val)));
}
CosineOp Create(CosineOp::Attributes) { return {}; }
absl::Status Prepare(CosineOp& op, const Tensor& input, Tensor& output) {
SHLO_REF_RETURN_ON_ERROR(Propagate(input.shape(), output.shape()));
SHLO_REF_RETURN_ON_ERROR(CheckSupportedTypes(
CheckCtx("cosine"), input, IsFloatTensor, IsQuantizedPerTensorTensor));
SHLO_REF_RETURN_ON_ERROR(
CheckSameBaselineType(CheckCtx("cosine"), input, output));
return absl::OkStatus();
}
absl::Status Evaluate(CosineOp& op, const Tensor& input, Tensor& output) {
Cosine cosine;
if (input.IsPerTensorQuantized()) {
DISPATCH_QUANTIZED(
detail::DequantizeOpQuantizePerTensor,
input.quantized_per_tensor_element_type().StorageType(),
input.quantized_per_tensor_element_type().ExpressedType(), cosine,
input, output)
} else if (IsFloatTensor(input)) {
DISPATCH_FLOAT(detail::EvaluateNoQuantization, input.tensor_element_type(),
cosine, input, output);
}
return absl::FailedPreconditionError("Unsupported tensor type.");
}
} | #include "tensorflow/lite/experimental/shlo/ops/cosine.h"
#include <cmath>
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/experimental/shlo/bf16.h"
#include "tensorflow/lite/experimental/shlo/f16.h"
#include "tensorflow/lite/experimental/shlo/ops/test_util.h"
#include "tensorflow/lite/experimental/shlo/ops/unary_elementwise_test_util.h"
#include "tensorflow/lite/experimental/shlo/quantize.h"
#include "tensorflow/lite/experimental/shlo/quantized_tensor_element_type.h"
#include "tensorflow/lite/experimental/shlo/shape.h"
#include "tensorflow/lite/experimental/shlo/status_matcher.h"
#include "tensorflow/lite/experimental/shlo/tensor.h"
using testing::ElementsAreArray;
using testing::NanSensitiveFloatEq;
using testing::Pointwise;
namespace shlo_ref {
template <>
struct ParamName<CosineOp> {
static std::string Get() { return "Cosine"; }
};
namespace {
struct Cosine {
template <class T>
T operator()(T v) const {
return std::cos(v);
}
} cosine_ref;
template <>
F16 Cosine::operator()<F16>(F16 val) const {
return F16(operator()(static_cast<float>(val)));
}
template <>
BF16 Cosine::operator()<BF16>(BF16 val) const {
return BF16(operator()(static_cast<float>(val)));
}
INSTANTIATE_TYPED_TEST_SUITE_P(Cosine, UnaryElementwiseOpShapePropagationTest,
CosineOp, TestParamNames);
INSTANTIATE_TYPED_TEST_SUITE_P(
Cosine, UnaryElementwiseSameBaselineElementTypeConstraintTest,
UnaryElementwiseConstraint1Types<CosineOp>, TestParamNames);
using UnsupportedTypes =
WithOpTypes<CosineOp, ConcatTypes<BoolTestType, IntTestTypes,
PerAxisQuantizedTestTypes>>;
INSTANTIATE_TYPED_TEST_SUITE_P(Cosine, UnaryElementwiseUnsupportedTypeTest,
UnsupportedTypes, TestParamNames);
template <class T>
struct CosineTest : ::testing::Test {};
TYPED_TEST_SUITE(CosineTest, FloatTestTypes, TestParamNames);
TYPED_TEST(CosineTest, FloatTensorsWork) {
using StorageT = typename TypeParam::StorageT;
const Shape shape({2, 3, 4});
Vector<StorageT> input_data = RandomBuffer<TypeParam::kStorage>(shape);
Vector<StorageT> output_data(shape.NumElements());
Tensor input_tensor{
.type = TensorType{.shape = shape, .element_type = TypeParam::kStorage},
.data = input_data.data()};
Tensor output_tensor{
.type = TensorType{.shape = shape, .element_type = TypeParam::kStorage},
.data = output_data.data()};
Vector<StorageT> expected_data(shape.NumElements());
absl::c_transform(input_data, expected_data.begin(), cosine_ref);
auto op = Create(CosineOp::Attributes{});
ASSERT_OK(Prepare(op, input_tensor, output_tensor));
ASSERT_OK(Evaluate(op, input_tensor, output_tensor));
EXPECT_THAT(output_data, Pointwise(NanSensitiveFloatEq(), expected_data));
}
template <class T>
struct QuantizedCosineTest : ::testing::Test {};
TYPED_TEST_SUITE(QuantizedCosineTest, QuantizedTestTypes, TestParamNames);
TYPED_TEST(QuantizedCosineTest, PerTensorWorks) {
using StorageT = typename TypeParam::StorageT;
using ExpressedT = typename TypeParam::ExpressedT;
const Shape shape({2, 3, 4});
Vector<StorageT> input_data = RandomBuffer<TypeParam::kStorage>(shape);
Vector<StorageT> output_data(shape.NumElements());
const ExpressedT scale = static_cast<ExpressedT>(1.5);
const StorageT zero_point = static_cast<StorageT>(5);
const QuantizedElementTypePerTensor tensor_type =
QuantizedElementTypePerTensor(TypeParam::kStorage, zero_point,
TypeParam::kExpressed, scale);
Tensor input_tensor{
.type = QuantizedPerTensorTensorType{.shape = shape,
.element_type = tensor_type},
.data = input_data.data()};
Tensor output_tensor{
.type = QuantizedPerTensorTensorType{.shape = shape,
.element_type = tensor_type},
.data = output_data.data()};
Vector<StorageT> expected_data(shape.NumElements());
absl::c_transform(
input_data, expected_data.begin(), [zero_point, scale](auto v) {
const ExpressedT dequantized_input = Dequantize(v, zero_point, scale);
const ExpressedT dequantized_res = cosine_ref(dequantized_input);
return Quantize<TypeParam::kStorage, TypeParam::kExpressed>(
dequantized_res, zero_point, static_cast<ExpressedT>(1.) / scale);
});
auto op = Create(CosineOp::Attributes{});
ASSERT_OK(Prepare(op, input_tensor, output_tensor));
ASSERT_OK(Evaluate(op, input_tensor, output_tensor));
EXPECT_THAT(output_data, ElementsAreArray(expected_data));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/shlo/ops/cosine.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/shlo/ops/cosine_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
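A minimal end-to-end sketch of the Create/Prepare/Evaluate flow above on a float32 tensor; it is not from the linked commit, and RunCosine, the 2x2 shape, and the choice of DataType::kF32 are assumptions.

#include <vector>

#include "absl/status/status.h"
#include "tensorflow/lite/experimental/shlo/data_type.h"
#include "tensorflow/lite/experimental/shlo/ops/cosine.h"
#include "tensorflow/lite/experimental/shlo/shape.h"
#include "tensorflow/lite/experimental/shlo/tensor.h"

namespace shlo_ref {

absl::Status RunCosine() {
  const Shape shape({2, 2});
  std::vector<float> input = {0.0f, 1.0f, 2.0f, 3.0f};
  std::vector<float> output(shape.NumElements());
  Tensor input_tensor{
      .type = TensorType{.shape = shape, .element_type = DataType::kF32},
      .data = input.data()};
  Tensor output_tensor{
      .type = TensorType{.shape = shape, .element_type = DataType::kF32},
      .data = output.data()};
  auto op = Create(CosineOp::Attributes{});
  // Prepare validates shapes/types; Evaluate writes cos(input) into `output`.
  if (auto s = Prepare(op, input_tensor, output_tensor); !s.ok()) return s;
  return Evaluate(op, input_tensor, output_tensor);
}

}  // namespace shlo_ref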
79cf9640-53d5-4de9-930d-2b402a98d224 | cpp | tensorflow/tensorflow | rpc_collective_executor_mgr | tensorflow/core/distributed_runtime/rpc_collective_executor_mgr.cc | tensorflow/core/distributed_runtime/rpc_collective_executor_mgr_test.cc | #include "tensorflow/core/distributed_runtime/rpc_collective_executor_mgr.h"
#include "tensorflow/core/common_runtime/base_collective_executor.h"
#include "tensorflow/core/common_runtime/collective_executor_mgr.h"
#include "tensorflow/core/common_runtime/collective_rma_local.h"
#include "tensorflow/core/distributed_runtime/collective_param_resolver_distributed.h"
#include "tensorflow/core/distributed_runtime/collective_rma_distributed.h"
#include "tensorflow/core/distributed_runtime/device_resolver_distributed.h"
#include "tensorflow/core/distributed_runtime/worker_cache.h"
#include "tensorflow/core/lib/random/random.h"
namespace tensorflow {
RpcCollectiveExecutorMgr::RpcCollectiveExecutorMgr(
const ConfigProto& config, const DeviceMgr* dev_mgr,
std::unique_ptr<DeviceResolverDistributed> dev_resolver,
std::unique_ptr<CollectiveParamResolverDistributed> param_resolver,
std::unique_ptr<NcclCommunicatorInterface> nccl_communicator,
WorkerCacheInterface* worker_cache, const string& task_name)
: CollectiveExecutorMgr(config, dev_mgr, std::move(dev_resolver),
std::move(param_resolver),
std::move(nccl_communicator)),
worker_cache_(worker_cache),
task_name_(task_name) {
group_leader_ = (task_name == config.experimental().collective_group_leader())
? ""
: config.experimental().collective_group_leader();
}
RpcCollectiveExecutorMgr::~RpcCollectiveExecutorMgr() {
for (auto it : sequence_table_) {
delete it.second;
}
}
CollectiveExecutor* RpcCollectiveExecutorMgr::Create(int64_t step_id) {
CollectiveRemoteAccessDistributed* rma =
new CollectiveRemoteAccessDistributed(dev_mgr_, dev_resolver_.get(),
work_queue_, worker_cache_, step_id,
task_name_);
return new BaseCollectiveExecutor(this, rma, step_id, dev_mgr_, work_queue_);
}
namespace {
static const int64_t kStepIdMask = (((1uLL << 56) - 1) | (1uLL << 56));
int64_t NewRandomStepId() {
int64_t step_id = random::New64();
step_id &= kStepIdMask;
return step_id;
}
}
void RpcCollectiveExecutorMgr::RefreshStepIdSequenceAsync(
int64_t graph_key, const StatusCallback& done) {
if (group_leader_.empty()) {
mutex_lock l(sequence_mu_);
GraphKeySequence* gks = nullptr;
auto it = sequence_table_.find(graph_key);
if (it == sequence_table_.end()) {
gks = new GraphKeySequence(graph_key);
sequence_table_[graph_key] = gks;
} else {
gks = it->second;
}
gks->next_step_id_ = NewRandomStepId();
done(absl::OkStatus());
} else {
WorkerInterface* wi = worker_cache_->GetOrCreateWorker(group_leader_);
GetStepSequenceRequest* req = new GetStepSequenceRequest;
GetStepSequenceResponse* resp = new GetStepSequenceResponse;
req->add_graph_key(graph_key);
wi->GetStepSequenceAsync(
req, resp, [this, req, resp, done](const Status& s) {
if (!s.ok()) {
LOG(ERROR) << "Bad response [" << s
<< "] from GetStepSequenceAsync call to "
<< group_leader_;
done(s);
} else {
done(UpdateStepSequences(*resp));
}
delete req;
delete resp;
});
}
}
void RpcCollectiveExecutorMgr::GetStepSequenceAsync(
const GetStepSequenceRequest* request, GetStepSequenceResponse* response,
const StatusCallback& done) {
if (!group_leader_.empty()) {
LOG(ERROR) << "GetStepSequence called at non-group-leader";
done(errors::Internal("GetStepSequenceAsync called at non-group-leader"));
} else {
mutex_lock l(sequence_mu_);
for (int64_t graph_key : request->graph_key()) {
auto it = sequence_table_.find(graph_key);
GraphKeySequence* gks = nullptr;
if (it == sequence_table_.end()) {
gks = new GraphKeySequence(graph_key);
gks->next_step_id_ = NewRandomStepId();
sequence_table_[graph_key] = gks;
} else {
gks = it->second;
}
StepSequence* ss = response->add_step_sequence();
ss->set_graph_key(graph_key);
ss->set_next_step_id(gks->next_step_id_);
}
done(absl::OkStatus());
}
}
Status RpcCollectiveExecutorMgr::UpdateStepSequences(
const GetStepSequenceResponse& resp) {
mutex_lock l(sequence_mu_);
for (const StepSequence& ss : resp.step_sequence()) {
GraphKeySequence* gks = nullptr;
auto it = sequence_table_.find(ss.graph_key());
if (it == sequence_table_.end()) {
gks = new GraphKeySequence(ss.graph_key());
sequence_table_[ss.graph_key()] = gks;
} else {
gks = it->second;
}
gks->next_step_id_ = ss.next_step_id();
}
return absl::OkStatus();
}
int64_t RpcCollectiveExecutorMgr::NextStepId(int64_t graph_key) {
mutex_lock l(sequence_mu_);
auto it = sequence_table_.find(graph_key);
if (it != sequence_table_.end()) {
return it->second->next_step_id_;
}
return CollectiveExecutor::kInvalidId;
}
void RpcCollectiveExecutorMgr::RetireStepId(int64_t graph_key,
int64_t step_id) {
mutex_lock l(sequence_mu_);
auto it = sequence_table_.find(graph_key);
if (it != sequence_table_.end()) {
if (step_id == it->second->next_step_id_) {
it->second->next_step_id_ = (it->second->next_step_id_ + 1) & kStepIdMask;
} else {
it->second->next_step_id_ = CollectiveExecutor::kInvalidId;
}
} else {
LOG(ERROR) << "Failed to find graph_key " << graph_key << " to retire.";
}
}
std::unique_ptr<RpcCollectiveExecutorMgr> CreateProdRpcCollectiveExecutorMgr(
const ConfigProto& config, const DeviceMgr* device_mgr,
std::unique_ptr<NcclCommunicatorInterface> nccl_communicator,
WorkerCacheInterface* worker_cache, const string& default_worker_name) {
auto dev_resolver = std::make_unique<DeviceResolverDistributed>(device_mgr);
auto param_resolver = std::make_unique<CollectiveParamResolverDistributed>(
config, device_mgr, dev_resolver.get(), nccl_communicator.get(),
worker_cache, default_worker_name);
return std::make_unique<RpcCollectiveExecutorMgr>(
config, device_mgr, std::move(dev_resolver), std::move(param_resolver),
std::move(nccl_communicator), worker_cache, default_worker_name);
}
} | #include "tensorflow/core/distributed_runtime/rpc_collective_executor_mgr.h"
#include <stdlib.h>
#include <string>
#include <vector>
#include "tensorflow/core/common_runtime/device.h"
#include "tensorflow/core/common_runtime/device_factory.h"
#include "tensorflow/core/common_runtime/device_mgr.h"
#include "tensorflow/core/distributed_runtime/collective_param_resolver_distributed.h"
#include "tensorflow/core/distributed_runtime/device_resolver_distributed.h"
#include "tensorflow/core/framework/collective.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/nccl/collective_communicator.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/protobuf/worker.pb.h"
#include "tensorflow/core/public/session_options.h"
namespace tensorflow {
#define NUM_DEVS 3
class RpcCollectiveExecutorMgrTest : public ::testing::Test {
protected:
RpcCollectiveExecutorMgrTest() {
string task_name = "/job:localhost/replica:0/task:0";
SessionOptions options;
options.config.mutable_experimental()->set_collective_group_leader(
task_name);
WorkerCacheInterface* worker_cache = nullptr;
auto* device_count = options.config.mutable_device_count();
device_count->insert({"CPU", NUM_DEVS});
std::vector<std::unique_ptr<Device>> devices;
TF_CHECK_OK(DeviceFactory::AddDevices(options, task_name, &devices));
device_mgr_ = std::make_unique<StaticDeviceMgr>(std::move(devices));
std::unique_ptr<DeviceResolverDistributed> dr(
new DeviceResolverDistributed(device_mgr_.get()));
std::unique_ptr<CollectiveParamResolverDistributed> cpr(
new CollectiveParamResolverDistributed(
options.config, device_mgr_.get(), dr.get(),
nullptr, worker_cache, task_name));
cme_.reset(new RpcCollectiveExecutorMgr(
options.config, device_mgr_.get(), std::move(dr), std::move(cpr),
MaybeCreateNcclCommunicator(options.config), worker_cache, task_name));
}
std::unique_ptr<RpcCollectiveExecutorMgr> cme_;
std::unique_ptr<DeviceMgr> device_mgr_;
};
TEST_F(RpcCollectiveExecutorMgrTest, FindOrCreate) {
CollectiveExecutor::Handle* h =
new CollectiveExecutor::Handle(cme_->FindOrCreate(1), true);
EXPECT_TRUE(h->get());
CollectiveExecutor::Handle* h2 =
new CollectiveExecutor::Handle(cme_->FindOrCreate(1), true);
EXPECT_EQ(h->get(), h2->get());
CollectiveExecutor* ce = h->get();
delete h;
delete h2;
CollectiveExecutor* ce2 = cme_->FindOrCreate(1);
EXPECT_EQ(ce, ce2);
ce2->Unref();
cme_->Cleanup(1);
}
TEST_F(RpcCollectiveExecutorMgrTest, NextStepId) {
int64_t x = cme_->NextStepId(7);
EXPECT_EQ(x, CollectiveExecutor::kInvalidId);
{
Notification note;
Status status;
cme_->RefreshStepIdSequenceAsync(7,
[this, &status, ¬e](const Status& s) {
status = s;
note.Notify();
});
EXPECT_TRUE(status.ok());
}
x = cme_->NextStepId(7);
EXPECT_NE(x, CollectiveExecutor::kInvalidId);
EXPECT_EQ(x, cme_->NextStepId(7));
EXPECT_EQ(x, cme_->NextStepId(7));
cme_->RetireStepId(6, x);
EXPECT_EQ(x, cme_->NextStepId(7));
cme_->RetireStepId(7, x);
int64_t y = cme_->NextStepId(7);
EXPECT_EQ((x + 1) & (((1uLL << 56) - 1) | (1uLL << 56)), y);
{
Notification note;
Status status;
cme_->RefreshStepIdSequenceAsync(7,
[this, &status, ¬e](const Status& s) {
status = s;
note.Notify();
});
note.WaitForNotification();
EXPECT_TRUE(status.ok());
}
int64_t z = cme_->NextStepId(7);
EXPECT_NE(y, z);
EXPECT_GT(llabs(y - z), 3);
}
TEST_F(RpcCollectiveExecutorMgrTest, GetStepSequence) {
int64_t x = cme_->NextStepId(3);
EXPECT_EQ(x, CollectiveExecutor::kInvalidId);
int64_t y = cme_->NextStepId(4);
EXPECT_EQ(y, CollectiveExecutor::kInvalidId);
GetStepSequenceRequest request;
GetStepSequenceResponse response;
request.add_graph_key(3);
request.add_graph_key(4);
{
Notification note;
Status status;
cme_->GetStepSequenceAsync(&request, &response,
[this, &status, ¬e](const Status& s) {
status = s;
note.Notify();
});
note.WaitForNotification();
EXPECT_TRUE(status.ok());
}
ASSERT_EQ(2, response.step_sequence_size());
std::unordered_map<int64_t, int64_t> values;
for (const auto& ss : response.step_sequence()) {
values[ss.graph_key()] = ss.next_step_id();
}
EXPECT_NE(values[3], CollectiveExecutor::kInvalidId);
EXPECT_NE(values[4], CollectiveExecutor::kInvalidId);
response.Clear();
{
Notification note;
Status status;
cme_->GetStepSequenceAsync(&request, &response,
[this, &status, ¬e](const Status& s) {
status = s;
note.Notify();
});
note.WaitForNotification();
EXPECT_TRUE(status.ok());
}
ASSERT_EQ(2, response.step_sequence_size());
for (const auto& ss : response.step_sequence()) {
EXPECT_EQ(values[ss.graph_key()], ss.next_step_id());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/distributed_runtime/rpc_collective_executor_mgr.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/distributed_runtime/rpc_collective_executor_mgr_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
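A sketch of the step-id handshake exercised by the tests above, written against an already-constructed manager; it is not from the linked commit, and AcquireStepId, the header paths, and the error handling are assumptions. RefreshStepIdSequenceAsync has to finish before NextStepId can return a valid id.

#include <cstdint>

#include "tensorflow/core/distributed_runtime/rpc_collective_executor_mgr.h"
#include "tensorflow/core/platform/notification.h"
#include "tensorflow/core/platform/status.h"

namespace tensorflow {

int64_t AcquireStepId(RpcCollectiveExecutorMgr* mgr, int64_t graph_key) {
  Notification done;
  Status status;
  // Fetch (or locally generate) a fresh step-id sequence for this graph key.
  mgr->RefreshStepIdSequenceAsync(graph_key, [&](const Status& s) {
    status = s;
    done.Notify();
  });
  done.WaitForNotification();
  TF_CHECK_OK(status);
  // The returned id stays stable until RetireStepId is called for it.
  return mgr->NextStepId(graph_key);
}

}  // namespace tensorflow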
0f006b4a-ddcc-4597-8679-08dd5fc18a8a | cpp | google/cel-cpp | cel_number | eval/public/cel_number.cc | eval/public/cel_number_test.cc | #include "eval/public/cel_number.h"
#include "eval/public/cel_value.h"
namespace google::api::expr::runtime {
absl::optional<CelNumber> GetNumberFromCelValue(const CelValue& value) {
if (int64_t val; value.GetValue(&val)) {
return CelNumber(val);
} else if (uint64_t val; value.GetValue(&val)) {
return CelNumber(val);
} else if (double val; value.GetValue(&val)) {
return CelNumber(val);
}
return absl::nullopt;
}
} | #include "eval/public/cel_number.h"
#include <cstdint>
#include <limits>
#include "absl/types/optional.h"
#include "eval/public/cel_value.h"
#include "internal/testing.h"
namespace google::api::expr::runtime {
namespace {
using ::testing::Optional;
TEST(CelNumber, GetNumberFromCelValue) {
EXPECT_THAT(GetNumberFromCelValue(CelValue::CreateDouble(1.1)),
Optional(CelNumber::FromDouble(1.1)));
EXPECT_THAT(GetNumberFromCelValue(CelValue::CreateInt64(1)),
Optional(CelNumber::FromDouble(1.0)));
EXPECT_THAT(GetNumberFromCelValue(CelValue::CreateUint64(1)),
Optional(CelNumber::FromDouble(1.0)));
EXPECT_EQ(GetNumberFromCelValue(CelValue::CreateDuration(absl::Seconds(1))),
absl::nullopt);
}
}
} | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/eval/public/cel_number.cc | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/eval/public/cel_number_test.cc | 4552db5798fb0853b131b783d8875794334fae7f |
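Building on the helper above, a small sketch of cross-type numeric comparison; it is not from the linked commit, and NumericallyEqual is an illustrative name. It treats CreateInt64(1), CreateUint64(1), and CreateDouble(1.0) as equal, while non-numeric values never match.

#include "absl/types/optional.h"
#include "eval/public/cel_number.h"
#include "eval/public/cel_value.h"

namespace google::api::expr::runtime {

// True only when both values are numeric and numerically equal.
bool NumericallyEqual(const CelValue& lhs, const CelValue& rhs) {
  absl::optional<CelNumber> l = GetNumberFromCelValue(lhs);
  absl::optional<CelNumber> r = GetNumberFromCelValue(rhs);
  return l.has_value() && r.has_value() && *l == *r;
}

}  // namespace google::api::expr::runtime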
8fe449ec-1b51-4dea-84dc-1db9f8e42aac | cpp | tensorflow/tensorflow | memory_usage_monitor | tensorflow/lite/profiling/memory_usage_monitor.cc | tensorflow/lite/profiling/memory_usage_monitor_test.cc | #include "tensorflow/lite/profiling/memory_usage_monitor.h"
#include <memory>
#include <utility>
#include "absl/synchronization/notification.h"
#include "absl/time/time.h"
#include "tensorflow/lite/logger.h"
#include "tensorflow/lite/minimal_logging.h"
#include "tensorflow/lite/profiling/memory_info.h"
namespace tflite {
namespace profiling {
namespace memory {
constexpr float MemoryUsageMonitor::kInvalidMemUsageMB;
MemoryUsageMonitor::MemoryUsageMonitor(int sampling_interval_ms,
std::unique_ptr<Sampler> sampler)
: sampler_(std::move(sampler)),
is_supported_(false),
sampling_interval_(absl::Milliseconds(sampling_interval_ms)) {
is_supported_ = (sampler_ != nullptr && sampler_->IsSupported());
if (!is_supported_) {
TFLITE_LOG(TFLITE_LOG_INFO,
"Getting memory usage isn't supported on this platform!\n");
return;
}
}
void MemoryUsageMonitor::Start() {
if (!is_supported_) return;
if (check_memory_thd_ != nullptr) {
TFLITE_LOG(TFLITE_LOG_INFO, "Memory monitoring has already started!\n");
return;
}
stop_signal_ = std::make_unique<absl::Notification>();
check_memory_thd_ = std::make_unique<std::thread>(([this]() {
while (true) {
const auto mem_info = sampler_->GetMemoryUsage();
if (mem_info.mem_footprint_kb > peak_mem_footprint_kb_) {
peak_mem_footprint_kb_ = mem_info.mem_footprint_kb;
}
if (stop_signal_->HasBeenNotified()) break;
sampler_->SleepFor(sampling_interval_);
}
}));
}
void MemoryUsageMonitor::Stop() {
if (!is_supported_) return;
if (check_memory_thd_ == nullptr) {
TFLITE_LOG(TFLITE_LOG_INFO,
"Memory monitoring hasn't started yet or has stopped!\n");
return;
}
StopInternal();
}
void MemoryUsageMonitor::StopInternal() {
if (check_memory_thd_ == nullptr) return;
stop_signal_->Notify();
if (check_memory_thd_ != nullptr) {
check_memory_thd_->join();
}
stop_signal_.reset(nullptr);
check_memory_thd_.reset(nullptr);
}
}
}
} | #include "tensorflow/lite/profiling/memory_usage_monitor.h"
#include <memory>
#include <gtest/gtest.h>
#include "absl/time/clock.h"
#include "absl/time/time.h"
#include "tensorflow/lite/profiling/memory_info.h"
namespace tflite {
namespace profiling {
namespace memory {
class MemoryUsageNotSupportedSampler : public MemoryUsageMonitor::Sampler {
public:
bool IsSupported() override { return false; }
};
TEST(MemoryUsageMonitor, NotSupported) {
MemoryUsageMonitor monitor1(50, std::unique_ptr<MemoryUsageMonitor::Sampler>(
new MemoryUsageNotSupportedSampler()));
EXPECT_FLOAT_EQ(MemoryUsageMonitor::kInvalidMemUsageMB,
monitor1.GetPeakMemUsageInMB());
MemoryUsageMonitor monitor2(50, nullptr);
EXPECT_FLOAT_EQ(MemoryUsageMonitor::kInvalidMemUsageMB,
monitor2.GetPeakMemUsageInMB());
}
class MemoryUsageMonitorTest : public ::testing::Test {
protected:
class FakeMemoryUsageSampler : public MemoryUsageMonitor::Sampler {
public:
explicit FakeMemoryUsageSampler(int64_t* num_sleeps)
: sleep_cnt_(num_sleeps) {}
bool IsSupported() override { return true; }
MemoryUsage GetMemoryUsage() override {
MemoryUsage result;
result.mem_footprint_kb = 5 * ((*sleep_cnt_) + 1) * 1024;
return result;
}
void SleepFor(const absl::Duration& duration) override {
(*sleep_cnt_)++;
absl::SleepFor(duration);
}
private:
int64_t* const sleep_cnt_ = nullptr;
};
void SetUp() override {
monitor_ = std::make_unique<MemoryUsageMonitor>(
50, std::unique_ptr<MemoryUsageMonitor::Sampler>(
new FakeMemoryUsageSampler(&num_sleeps_)));
}
int64_t num_sleeps_ = 0;
std::unique_ptr<MemoryUsageMonitor> monitor_ = nullptr;
};
TEST_F(MemoryUsageMonitorTest, StartAndStop) {
monitor_->Start();
monitor_->Stop();
EXPECT_FLOAT_EQ(5.0 * (num_sleeps_ + 1), monitor_->GetPeakMemUsageInMB());
}
TEST_F(MemoryUsageMonitorTest, NoStartAndStop) {
monitor_->Stop();
EXPECT_FLOAT_EQ(MemoryUsageMonitor::kInvalidMemUsageMB,
monitor_->GetPeakMemUsageInMB());
}
TEST_F(MemoryUsageMonitorTest, StartAndNoStop) {
monitor_->Start();
EXPECT_FLOAT_EQ(MemoryUsageMonitor::kInvalidMemUsageMB,
monitor_->GetPeakMemUsageInMB());
}
TEST_F(MemoryUsageMonitorTest, StopFirst) {
monitor_->Stop();
EXPECT_FLOAT_EQ(MemoryUsageMonitor::kInvalidMemUsageMB,
monitor_->GetPeakMemUsageInMB());
monitor_->Start();
EXPECT_FLOAT_EQ(MemoryUsageMonitor::kInvalidMemUsageMB,
monitor_->GetPeakMemUsageInMB());
}
TEST_F(MemoryUsageMonitorTest, MultiStartAndStops) {
monitor_->Start();
monitor_->Start();
monitor_->Stop();
monitor_->Stop();
EXPECT_FLOAT_EQ(5.0 * (num_sleeps_ + 1), monitor_->GetPeakMemUsageInMB());
}
TEST_F(MemoryUsageMonitorTest, StartStopPairs) {
monitor_->Start();
monitor_->Stop();
EXPECT_FLOAT_EQ(5.0 * (num_sleeps_ + 1), monitor_->GetPeakMemUsageInMB());
monitor_->Start();
absl::SleepFor(absl::Milliseconds(100));
monitor_->Stop();
EXPECT_GE(num_sleeps_, 1);
EXPECT_FLOAT_EQ(5.0 * (num_sleeps_ + 1), monitor_->GetPeakMemUsageInMB());
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/profiling/memory_usage_monitor.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/profiling/memory_usage_monitor_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
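A usage sketch for the monitor above; it is not from the linked commit, and PeakMemoryMB with a generic workload parameter is an assumption (construction is left to the caller, so no sampler choice is implied). Start and Stop bracket the measured code, and GetPeakMemUsageInMB reports the peak, or kInvalidMemUsageMB on unsupported platforms.

#include "tensorflow/lite/profiling/memory_usage_monitor.h"

namespace tflite::profiling::memory {

template <typename Workload>
float PeakMemoryMB(MemoryUsageMonitor& monitor, Workload&& workload) {
  monitor.Start();   // Spawns the background sampling thread.
  workload();        // Run the code whose footprint is being measured.
  monitor.Stop();    // Joins the thread; the peak value is now final.
  return monitor.GetPeakMemUsageInMB();
}

}  // namespace tflite::profiling::memory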
7f15f528-9a85-447a-add7-29c1b1ef15f7 | cpp | tensorflow/tensorflow | net | third_party/xla/third_party/tsl/tsl/platform/windows/net.cc | third_party/xla/third_party/tsl/tsl/platform/net_test.cc | #include "tsl/platform/net.h"
#include <sys/types.h>
#include <winsock2.h>
#include <cstdlib>
#include <unordered_set>
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/windows/error_windows.h"
#undef ERROR
namespace tsl {
namespace internal {
namespace {
bool IsPortAvailable(int* port, bool is_tcp) {
const int protocol = is_tcp ? IPPROTO_TCP : 0;
SOCKET sock = socket(AF_INET, is_tcp ? SOCK_STREAM : SOCK_DGRAM, protocol);
struct sockaddr_in addr;
int addr_len = static_cast<int>(sizeof(addr));
int actual_port;
CHECK_GE(*port, 0);
CHECK_LE(*port, 65535);
if (sock == INVALID_SOCKET) {
LOG(ERROR) << "socket() failed: "
<< tsl::internal::WindowsWSAGetLastErrorMessage();
return false;
}
const int one = 1;
int result = setsockopt(sock, SOL_SOCKET, SO_REUSEADDR,
reinterpret_cast<const char*>(&one), sizeof(one));
if (result == SOCKET_ERROR) {
LOG(ERROR) << "setsockopt() failed: "
<< tsl::internal::WindowsWSAGetLastErrorMessage();
closesocket(sock);
return false;
}
addr.sin_family = AF_INET;
addr.sin_addr.s_addr = INADDR_ANY;
addr.sin_port = htons((uint16_t)*port);
result = bind(sock, (struct sockaddr*)&addr, sizeof(addr));
if (result == SOCKET_ERROR) {
LOG(WARNING) << "bind(port=" << *port << ") failed: "
<< tsl::internal::WindowsWSAGetLastErrorMessage();
closesocket(sock);
return false;
}
result = getsockname(sock, (struct sockaddr*)&addr, &addr_len);
if (result == SOCKET_ERROR) {
LOG(WARNING) << "getsockname() failed: "
<< tsl::internal::WindowsWSAGetLastErrorMessage();
closesocket(sock);
return false;
}
CHECK_LE(addr_len, sizeof(addr));
actual_port = ntohs(addr.sin_port);
CHECK_GT(actual_port, 0);
if (*port == 0) {
*port = actual_port;
} else {
CHECK_EQ(*port, actual_port);
}
closesocket(sock);
return true;
}
const int kNumRandomPortsToPick = 100;
const int kMaximumTrials = 1000;
}
int PickUnusedPortOrDie() {
WSADATA wsaData;
if (WSAStartup(MAKEWORD(2, 2), &wsaData) != NO_ERROR) {
LOG(ERROR) << "Error at WSAStartup()";
return false;
}
static std::unordered_set<int> chosen_ports;
bool is_tcp = true;
int trial = 0;
while (true) {
int port;
trial++;
CHECK_LE(trial, kMaximumTrials)
<< "Failed to pick an unused port for testing.";
if (trial == 1) {
port = GetCurrentProcessId() % (65536 - 30000) + 30000;
} else if (trial <= kNumRandomPortsToPick) {
port = rand() % (65536 - 30000) + 30000;
} else {
port = 0;
}
if (chosen_ports.find(port) != chosen_ports.end()) {
continue;
}
if (!IsPortAvailable(&port, is_tcp)) {
continue;
}
CHECK_GT(port, 0);
if (!IsPortAvailable(&port, !is_tcp)) {
is_tcp = !is_tcp;
continue;
}
chosen_ports.insert(port);
WSACleanup();
return port;
}
return 0;
}
}
} | #include "tsl/platform/net.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/test.h"
namespace tsl {
namespace internal {
TEST(Net, PickUnusedPortOrDie) {
int port0 = PickUnusedPortOrDie();
int port1 = PickUnusedPortOrDie();
CHECK_GE(port0, 0);
CHECK_LT(port0, 65536);
CHECK_GE(port1, 0);
CHECK_LT(port1, 65536);
CHECK_NE(port0, port1);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/third_party/tsl/tsl/platform/windows/net.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/third_party/tsl/tsl/platform/net_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
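A minimal sketch of the port picker above; it is not from the linked commit, and the log message is arbitrary. PickUnusedPortOrDie returns a port that is currently free for both TCP and UDP, which makes it suitable for spinning up throwaway test servers.

#include "tsl/platform/logging.h"
#include "tsl/platform/net.h"

int main() {
  // Picks a port that is free for both protocols at the time of the call.
  const int port = tsl::internal::PickUnusedPortOrDie();
  LOG(INFO) << "Binding the test server to localhost:" << port;
  return 0;
}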
cf851bbb-4aa8-47fa-9034-b88394650fd3 | cpp | tensorflow/tensorflow | reduce_window | tensorflow/compiler/mlir/lite/stablehlo/transforms/legalize_hlo_conversions/reduce_window.cc | tensorflow/lite/kernels/reduce_window_test.cc | #include "tensorflow/compiler/mlir/lite/stablehlo/transforms/legalize_hlo_conversions/reduce_window.h"
#include <cstdint>
#include <optional>
#include <string>
#include <tuple>
#include <vector>
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/Sequence.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/Casting.h"
#include "mlir/Dialect/Arith/IR/Arith.h"
#include "mlir/IR/Attributes.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/BuiltinTypeInterfaces.h"
#include "mlir/IR/IRMapping.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/Matchers.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Support/LogicalResult.h"
#include "mlir/Transforms/DialectConversion.h"
#include "tensorflow/compiler/mlir/lite/ir/tfl_ops.h"
#include "tensorflow/compiler/mlir/lite/stablehlo/transforms/legalize_hlo_conversions/op_util_common.h"
#include "tensorflow/compiler/mlir/lite/stablehlo/transforms/legalize_hlo_conversions/reduce_window_util.h"
#include "tensorflow/compiler/mlir/lite/stablehlo/transforms/legalize_hlo_conversions/util.h"
#include "xla/mlir_hlo/mhlo/IR/hlo_ops.h"
namespace mlir::odml {
namespace {
using TFLPoolAttrsT = std::tuple<IntegerAttr, IntegerAttr, IntegerAttr,
IntegerAttr, StringAttr, StringAttr>;
bool AreDilationsSupported(const ReduceWindowView& op) {
auto is_one = [](int64_t v) { return v == 1; };
return llvm::all_of(op.BaseDilations(), is_one) &&
llvm::all_of(op.WindowDilations(), is_one);
}
bool IsRankSupported(const ReduceWindowView& op) { return op.Rank() == 4; }
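// Builds a ReduceWindowView and guesses its layout. Returns nullopt when the
// op is not rank 4, has non-trivial dilations, or pads the batch/feature
// dimensions, since such ops cannot be mapped onto TFLite pooling ops.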
std::optional<std::tuple<ReduceWindowView, Layout>> GetViewIfAttrsSupported(
mhlo::ReduceWindowOp op) {
const ReduceWindowView view(op);
if (!IsRankSupported(view)) {
return std::nullopt;
}
if (!AreDilationsSupported(view)) {
return std::nullopt;
}
auto opt_layout = view.GuessLayout();
if (!opt_layout.has_value()) {
return std::nullopt;
}
auto layout = opt_layout.value();
const int64_t batch = layout.SpecialDim1();
if (!view.Paddings()[batch].Trivial()) {
return std::nullopt;
}
const int64_t chan = layout.SpecialDim2();
if (!view.Paddings()[chan].Trivial()) {
return std::nullopt;
}
return std::tuple(view, layout);
}
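// Conversion-target legality hooks registered by
// PopulateLegalizeReduceWindowPatterns below. Returning std::nullopt defers
// the legality decision instead of marking the ops definitively legal or
// illegal.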
std::optional<bool> IsReduceWindowLegal(mhlo::ReduceWindowOp op) {
return std::nullopt;
}
std::optional<bool> IsDivideLegal(mhlo::DivOp op) { return std::nullopt; }
Layout TFLNativePoolingLayout(int64_t rank) {
return Layout(0, rank - 1, llvm::to_vector(llvm::seq<int64_t>(1, rank - 1)));
}
bool IsCstFloatZero(Value val) {
DenseFPElementsAttr initial_value;
return matchPattern(val, m_Constant(&initial_value)) &&
initial_value.getNumElements() == 1 &&
initial_value.getValues<APFloat>()[0].isZero();
}
bool IsCstIntZero(Value val) {
DenseIntElementsAttr initial_value;
return matchPattern(val, m_Constant(&initial_value)) &&
initial_value.getNumElements() == 1 &&
initial_value.getValues<APInt>()[0].isZero();
}
llvm::SmallVector<int64_t> Permute(llvm::ArrayRef<int64_t> data,
llvm::ArrayRef<int64_t> perm) {
llvm::SmallVector<int64_t> res(data.size());
for (int i = 0; i < data.size(); ++i) {
res[i] = data[perm[i]];
}
return res;
}
Value TransposeTensor(OpBuilder& b, Value tensor,
llvm::SmallVector<int64_t> perm) {
const int64_t perm_size = perm.size();
auto perm_attr_type = RankedTensorType::get({perm_size}, b.getI64Type());
auto perm_attr = DenseIntElementsAttr::get(perm_attr_type, perm);
return b.create<mhlo::TransposeOp>(tensor.getLoc(), tensor, perm_attr);
}
DenseIntElementsAttr BuildDenseI64(OpBuilder& b, ArrayRef<int64_t> shape,
ArrayRef<int64_t> data) {
return DenseIntElementsAttr::get(RankedTensorType::get(shape, b.getI64Type()),
data);
}
DenseIntElementsAttr BuildDenseI64(OpBuilder& b, ArrayRef<int64_t> data) {
const int64_t dim = data.size();
return BuildDenseI64(b, {dim}, data);
}
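// Returns the single input and init value of the reduce_window, or nullopt if
// the op has more than one result, input, or init value, or an init value with
// more than one element.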
std::optional<std::tuple<Value, Value>> GetInputAndInitIfValid(
mhlo::ReduceWindowOp op) {
if (op->getNumResults() != 1) {
return std::nullopt;
}
if (op.getInputs().size() > 1) {
return std::nullopt;
}
if (op.getInitValues().size() > 1) {
return std::nullopt;
}
auto init_val = op.getInitValues().front();
if (llvm::dyn_cast<ShapedType>(init_val.getType()).getNumElements() != 1) {
return std::nullopt;
}
return std::tuple(op.getInputs().front(), op.getInitValues().front());
}
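// Maps HLO window paddings to a TFLite padding string: "VALID" if every
// spatial padding is trivial, "SAME" if each padded spatial dim matches the
// SAME-padding formula, and nullopt otherwise.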
std::optional<std::string> GetTFLPadding(ArrayRef<DimPadding> paddings,
ArrayRef<int64_t> window_strides,
ArrayRef<int64_t> in_shape,
ArrayRef<int64_t> window_dims) {
const int64_t rank = paddings.size();
std::string tfl_padding = "VALID";
for (int i = 1; i < rank - 1; ++i) {
const auto& dim_pad = paddings[i];
if (dim_pad.Trivial()) {
continue;
}
if (!IsSamePaddingOnDim(in_shape[i], 1, window_strides[i], window_dims[i],
dim_pad)) {
return std::nullopt;
}
tfl_padding = "SAME";
}
return tfl_padding;
}
TFLPoolAttrsT BuildTFLPoolAttrs(OpBuilder& b, const ReduceWindowView& view,
StringRef padding) {
const int32_t filter_h = view.WindowDims()[1];
auto filter_h_attr = b.getI32IntegerAttr(filter_h);
const int32_t filter_w = view.WindowDims()[2];
auto filter_w_attr = b.getI32IntegerAttr(filter_w);
const int32_t stride_h = view.WindowStrides()[1];
auto stride_h_attr = b.getI32IntegerAttr(stride_h);
const int32_t stride_w = view.WindowStrides()[2];
auto stride_w_attr = b.getI32IntegerAttr(stride_w);
auto padding_attr = b.getStringAttr(padding);
auto faf_attr = b.getStringAttr("NONE");
return std::tuple(filter_h_attr, filter_w_attr, stride_h_attr, stride_w_attr,
padding_attr, faf_attr);
}
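// Prepare-phase pattern: when a reduce_window is not already in TFLite's
// native pooling layout (batch first, feature last), wrap it in transposes and
// permute its window attributes so the legalization patterns below can match.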
class RelayoutReduceWindow : public OpRewritePattern<mhlo::ReduceWindowOp> {
public:
using OpRewritePattern::OpRewritePattern;
LogicalResult matchAndRewrite(mhlo::ReduceWindowOp op,
PatternRewriter& rewriter) const final;
};
LogicalResult RelayoutReduceWindow::matchAndRewrite(
mhlo::ReduceWindowOp op, PatternRewriter& rewriter) const {
auto opt_view = GetViewIfAttrsSupported(op);
if (!opt_view.has_value()) {
return rewriter.notifyMatchFailure(
op, "Reduce window attributes not supported.");
}
const auto [view, layout] = opt_view.value();
auto opt_input_and_init = GetInputAndInitIfValid(op);
if (!opt_input_and_init.has_value()) {
return rewriter.notifyMatchFailure(
op, "Reduce window has wrong number of inputs or init values.");
}
auto [input, init_val] = opt_input_and_init.value();
const auto target_layout = TFLNativePoolingLayout(view.Rank());
if (layout == target_layout) {
return rewriter.notifyMatchFailure(
op, "Reduce window does not need layout change");
}
llvm::SmallVector<int64_t> perm_for_inputs =
layout.GetPermForReLayout(target_layout);
auto paddings = view.Paddings();
llvm::SmallVector<int64_t> new_paddings(paddings.size() * 2);
for (int i = 0; i < new_paddings.size() / 2; ++i) {
const auto& dim_pad = paddings[perm_for_inputs[i]];
new_paddings[2 * i] = dim_pad.Lo();
new_paddings[2 * i + 1] = dim_pad.Hi();
}
const int64_t new_paddings_size = paddings.size();
auto new_paddings_type =
RankedTensorType::get({new_paddings_size, 2}, rewriter.getI64Type());
auto new_paddings_attr =
DenseIntElementsAttr::get(new_paddings_type, new_paddings);
llvm::SmallVector<int64_t> new_window_dims =
Permute(view.WindowDims(), perm_for_inputs);
auto new_window_dims_attr = BuildDenseI64(rewriter, new_window_dims);
llvm::SmallVector<int64_t> new_window_strides =
Permute(view.WindowStrides(), perm_for_inputs);
auto new_window_strides_attr = BuildDenseI64(rewriter, new_window_strides);
llvm::SmallVector<int64_t> perm_for_outputs =
target_layout.GetPermForReLayout(layout);
auto cur_out_type = llvm::dyn_cast<ShapedType>(op.getResult(0).getType());
llvm::SmallVector<int64_t> new_rw_out_shape =
layout.PermuteShape(target_layout, cur_out_type.getShape());
auto new_out_type = cur_out_type.clone(new_rw_out_shape);
auto new_input = TransposeTensor(rewriter, input, perm_for_inputs);
auto new_rw = rewriter.create<mhlo::ReduceWindowOp>(
op.getLoc(), new_out_type, new_input, init_val, new_window_dims_attr,
new_window_strides_attr, BuildDenseI64(rewriter, view.BaseDilations()),
BuildDenseI64(rewriter, view.WindowDilations()), new_paddings_attr);
IRMapping ir_map;
op.getBody().cloneInto(&new_rw.getBody(), ir_map);
auto new_output =
TransposeTensor(rewriter, new_rw.getResult(0), perm_for_outputs);
rewriter.replaceOp(op, new_output);
return success();
}
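// Legalizes a reduce_window that computes an inclusive prefix sum (add body,
// zero init, a window spanning one whole dimension with low padding N - 1 on
// that axis) into tfl.cumsum along that axis.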
class LegalizeCumSum : public OpConversionPattern<mhlo::ReduceWindowOp> {
public:
using OpConversionPattern::OpConversionPattern;
LogicalResult matchAndRewrite(
mhlo::ReduceWindowOp op, OpAdaptor adaptor,
ConversionPatternRewriter& rewriter) const final;
};
LogicalResult LegalizeCumSum::matchAndRewrite(
mhlo::ReduceWindowOp op, OpAdaptor adaptor,
ConversionPatternRewriter& rewriter) const {
auto opt_input_init = GetInputAndInitIfValid(op);
if (!opt_input_init.has_value()) {
return rewriter.notifyMatchFailure(op,
"Must have 1 input, init and result.");
}
auto [input, init] = opt_input_init.value();
if (failed(MatchBinaryReduceFunction<mhlo::AddOp>(op.getBody()))) {
return rewriter.notifyMatchFailure(op, "Requires scalar add in region.");
}
if (!IsCstFloatZero(init) && !IsCstIntZero(init)) {
return rewriter.notifyMatchFailure(op, "Requires 0 for init value.");
}
const ReduceWindowView view(op);
auto trivial = [](int64_t v) { return v == 1; };
const bool trivial_window_dilate =
llvm::all_of(view.WindowDilations(), trivial);
const bool trivial_base_dilate = llvm::all_of(view.BaseDilations(), trivial);
const bool trivial_stride = llvm::all_of(view.WindowStrides(), trivial);
if (!trivial_window_dilate || !trivial_stride || !trivial_base_dilate) {
return rewriter.notifyMatchFailure(
op, "Requires trivial strides and dilations attributes.");
}
auto input_type = llvm::cast<ShapedType>(input.getType());
if (view.WindowDims().size() != input_type.getRank()) {
return rewriter.notifyMatchFailure(op, "Splat window dims not supported.");
}
int64_t axis = -1;
for (auto [ind, val] : llvm::enumerate(view.WindowDims())) {
if (val == 1) {
continue;
}
if (axis != -1) {
return rewriter.notifyMatchFailure(op, "Multiple non 1 dimensions.");
}
if (val != input_type.getShape()[ind]) {
return rewriter.notifyMatchFailure(
op, "Axis dimension requires size be same as input shape's.");
}
axis = ind;
}
if (axis == -1) {
return rewriter.notifyMatchFailure(op, "Could not identify axis.");
}
const int64_t axis_size = input_type.getShape()[axis];
for (const auto& [ind, dim_pad] : llvm::enumerate(view.Paddings())) {
if (dim_pad.Hi() != 0) {
return rewriter.notifyMatchFailure(op, "Has non trivial high padding.");
}
if (ind != axis) {
if (!dim_pad.Trivial()) {
return rewriter.notifyMatchFailure(
op, "Has non trivial padding on non axis dim.");
}
} else {
if (dim_pad.Lo() != axis_size - 1) {
return rewriter.notifyMatchFailure(
op, "Requires low padding on axis dim to be N - 1.");
}
}
}
auto axis_cst_attr = DenseIntElementsAttr::get(
RankedTensorType::get({}, rewriter.getI32Type()),
static_cast<int32_t>(axis));
auto axis_cst =
rewriter.create<arith::ConstantOp>(op->getLoc(), axis_cst_attr);
auto tfl_exclusive_attr = rewriter.getBoolAttr(false);
auto tfl_reverse_attr = rewriter.getBoolAttr(false);
rewriter.replaceOpWithNewOp<TFL::CumsumOp>(op, op->getResultTypes()[0], input,
axis_cst, tfl_exclusive_attr,
tfl_reverse_attr);
return success();
}
bool isFloatMinusInfinity(Value value) {
DenseFPElementsAttr float_value;
if (!matchPattern(value, m_Constant(&float_value))) {
return false;
}
if (float_value.getNumElements() != 1) {
return false;
}
APFloat element = float_value.getValues<APFloat>()[0];
return element.isInfinity() && element.isNegative();
}
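// Legalizes a floating-point reduce_window with a max body and -inf init value
// into tfl.max_pool_2d, materializing an explicit tfl.pad_v2 when the padding
// is neither VALID nor SAME.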
class LegalizeMaxPool : public OpConversionPattern<mhlo::ReduceWindowOp> {
public:
using OpConversionPattern::OpConversionPattern;
LogicalResult matchAndRewrite(
mhlo::ReduceWindowOp op, OpAdaptor adaptor,
ConversionPatternRewriter& rewriter) const final;
private:
TFL::PadV2Op BuildExplicitPadOp(mhlo::ReduceWindowOp op, const Layout& layout,
const ShapedType& input_type,
const ShapedType& output_type, Value input,
Value init, const ReduceWindowView& view,
PatternRewriter& rewriter) const;
};
TFL::PadV2Op LegalizeMaxPool::BuildExplicitPadOp(
mhlo::ReduceWindowOp op, const Layout& layout, const ShapedType& input_type,
const ShapedType& output_type, Value input, Value init,
const ReduceWindowView& view, PatternRewriter& rewriter) const {
std::vector<int64_t> shape = {layout.Rank(), layout.NumSpatials()};
llvm::SmallVector<int64_t, 8> padding_values;
for (auto& padding : view.Paddings()) {
padding_values.push_back(padding.Lo());
padding_values.push_back(padding.Hi());
}
auto padding_dense_attr = mlir::DenseElementsAttr::get(
mlir::RankedTensorType::get(shape, rewriter.getIntegerType(64)),
llvm::ArrayRef<int64_t>(padding_values));
auto padding_values_op =
rewriter.create<arith::ConstantOp>(op.getLoc(), padding_dense_attr);
llvm::SmallVector<int64_t, 4> pad_output_shape_vector;
pad_output_shape_vector.push_back(input_type.getDimSize(0));
pad_output_shape_vector.push_back(input_type.getDimSize(1) +
view.Paddings()[1].Lo() +
view.Paddings()[1].Hi());
pad_output_shape_vector.push_back(input_type.getDimSize(2) +
view.Paddings()[2].Lo() +
view.Paddings()[2].Hi());
pad_output_shape_vector.push_back(input_type.getDimSize(3));
auto pad_output_type = mlir::RankedTensorType::get(
pad_output_shape_vector, output_type.getElementType());
return rewriter.create<TFL::PadV2Op>(op.getLoc(), pad_output_type, input,
padding_values_op, init);
}
LogicalResult LegalizeMaxPool::matchAndRewrite(
mhlo::ReduceWindowOp op, OpAdaptor adaptor,
ConversionPatternRewriter& rewriter) const {
const auto opt_view = GetViewIfAttrsSupported(op);
if (!opt_view.has_value()) {
return rewriter.notifyMatchFailure(op, "Reduce window is not valid.");
}
const auto [view, layout] = opt_view.value();
if (layout != TFLNativePoolingLayout(layout.Rank())) {
return rewriter.notifyMatchFailure(op, "Not tfl standard layout.");
}
if (failed(MatchBinaryReduceFunction<mhlo::MaxOp>(op.getBody()))) {
return rewriter.notifyMatchFailure(op, "Must be a max pool.");
}
auto type = mlir::dyn_cast<ShapedType>(op.getResult(0).getType());
if (!mlir::isa<FloatType>(type.getElementType())) {
return rewriter.notifyMatchFailure(op, "Not a floating point pool.");
}
auto opt_inputs_and_init = GetInputAndInitIfValid(op);
if (!opt_inputs_and_init.has_value()) {
return rewriter.notifyMatchFailure(op, "Too many inputs or inits.");
}
auto [input, init] = opt_inputs_and_init.value();
auto input_type = llvm::dyn_cast<ShapedType>(input.getType());
if (!isFloatMinusInfinity(init)) {
return rewriter.notifyMatchFailure(op, "Init not minus infinity.");
}
auto opt_tfl_padding =
GetTFLPadding(view.Paddings(), view.WindowStrides(),
input_type.getShape(), view.WindowDims());
Value max_pool_input;
std::string tfl_padding_attr;
if (opt_tfl_padding.has_value()) {
max_pool_input = input;
tfl_padding_attr = opt_tfl_padding.value();
} else {
max_pool_input = BuildExplicitPadOp(op, layout, input_type, type, input,
init, view, rewriter);
tfl_padding_attr = "VALID";
}
auto [fh, fw, sh, sw, p, faf] =
BuildTFLPoolAttrs(rewriter, view, tfl_padding_attr);
rewriter.replaceOpWithNewOp<TFL::MaxPool2DOp>(op, type, max_pool_input, p, sw,
sh, fw, fh, faf);
return success();
}
void ReplaceWithAvgPool(mhlo::DivOp op, Value rw_lhs_input,
const ReduceWindowView& lhs_view,
llvm::StringRef padding, PatternRewriter& rewriter,
mhlo::TransposeOp opt_final_tpose) {
Type out_type =
opt_final_tpose ? opt_final_tpose.getOperand().getType() : op.getType();
auto [fh, fw, sh, sw, p, faf] =
BuildTFLPoolAttrs(rewriter, lhs_view, padding);
Value final_op = rewriter.create<TFL::AveragePool2DOp>(
op->getLoc(), out_type, rw_lhs_input, fh, fw, p, sh, sw, faf);
if (opt_final_tpose) {
final_op = rewriter
.create<mhlo::TransposeOp>(final_op.getLoc(), final_op,
opt_final_tpose.getPermutation())
.getResult();
}
rewriter.replaceOp(op, final_op);
}
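// Walks up the use-def chain through any of the given op types and returns the
// first value whose defining op is not one of them.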
template <typename... Tys>
Value RecursivelyWalkUp(Value op) {
while (llvm::isa_and_nonnull<Tys...>(op.getDefiningOp())) {
Operation* producer = op.getDefiningOp();
op = producer->getOperand(0);
}
return op;
}
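// Legalizes the "sum pool divided by window size" idiom into
// tfl.average_pool_2d. The divisor may be a splat constant equal to the window
// size (VALID padding only) or a second reduce_window summing a tensor of ones
// with the same window configuration. The raised pattern benefit (10) makes
// this run ahead of plainer div rewrites.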
class LegalizeAvgPool : public OpConversionPattern<mhlo::DivOp> {
public:
using OpConversionPattern::OpConversionPattern;
explicit LegalizeAvgPool(MLIRContext* context)
: OpConversionPattern<mhlo::DivOp>(context, 10) {}
LogicalResult matchAndRewrite(
mhlo::DivOp op, OpAdaptor adaptor,
ConversionPatternRewriter& rewriter) const final;
};
LogicalResult LegalizeAvgPool::matchAndRewrite(
mhlo::DivOp div_op, OpAdaptor adaptor,
ConversionPatternRewriter& rewriter) const {
auto div_lhs = div_op.getLhs();
mhlo::TransposeOp opt_final_tpose;
if (auto div_lhs_op = div_lhs.getDefiningOp()) {
opt_final_tpose = llvm::dyn_cast_or_null<mhlo::TransposeOp>(div_lhs_op);
}
auto rw_lhs_val = RecursivelyWalkUp<mhlo::TransposeOp>(div_lhs);
auto rw_lhs =
llvm::dyn_cast_or_null<mhlo::ReduceWindowOp>(rw_lhs_val.getDefiningOp());
if (!rw_lhs) {
return rewriter.notifyMatchFailure(
div_op, "Could not match lhs of div on reduce window.");
}
const auto opt_rw_lhs_view = GetViewIfAttrsSupported(rw_lhs);
if (!opt_rw_lhs_view.has_value()) {
return rewriter.notifyMatchFailure(div_op, "Lhs rw is not valid.");
}
const auto [rw_lhs_view, rw_lhs_layout] = opt_rw_lhs_view.value();
if (rw_lhs_layout != TFLNativePoolingLayout(rw_lhs_layout.Rank())) {
return rewriter.notifyMatchFailure(
div_op, "Lhs reduce window not tfl standard layout.");
}
if (failed(MatchBinaryReduceFunction<mhlo::AddOp>(rw_lhs.getBody()))) {
return rewriter.notifyMatchFailure(div_op,
"Failed to match rw lhs binary func.");
}
auto opt_rw_lhs_input_and_init = GetInputAndInitIfValid(rw_lhs);
if (!opt_rw_lhs_input_and_init.has_value()) {
return rewriter.notifyMatchFailure(
div_op, "Lhs reduce window has wrong number of inputs or init values.");
}
auto [rw_lhs_input, rw_lhs_init_val] = opt_rw_lhs_input_and_init.value();
auto rw_lhs_input_type = llvm::dyn_cast<ShapedType>(rw_lhs_input.getType());
auto rw_lhs_type =
mlir::dyn_cast<RankedTensorType>(rw_lhs.getResult(0).getType());
if (!mlir::isa<FloatType>(rw_lhs_type.getElementType())) {
return rewriter.notifyMatchFailure(div_op,
"Reduce window lhs most be float type.");
}
if (!IsCstFloatZero(rw_lhs_init_val)) {
return rewriter.notifyMatchFailure(
div_op, "Reduce window lhs init value is not zero.");
}
auto opt_tfl_padding =
GetTFLPadding(rw_lhs_view.Paddings(), rw_lhs_view.WindowStrides(),
rw_lhs_input_type.getShape(), rw_lhs_view.WindowDims());
if (!opt_tfl_padding.has_value()) {
return rewriter.notifyMatchFailure(div_op,
"Padding must be VALID or SAME.");
}
const auto& tfl_padding = opt_tfl_padding.value();
{
DenseFPElementsAttr divisor;
auto div_rhs = RecursivelyWalkUp<mhlo::BroadcastInDimOp, mhlo::TransposeOp>(
div_op.getRhs());
if (matchPattern(div_rhs, m_Constant(&divisor))) {
if (!divisor.isSplat()) {
return failure();
}
if (!divisor.getSplatValue<APFloat>().isExactlyValue(
rw_lhs_view.WindowSize())) {
return rewriter.notifyMatchFailure(
div_op, "Rhs splat const is not equal to window size.");
}
if (tfl_padding != "VALID") {
return rewriter.notifyMatchFailure(div_op,
"Matching on rhs splat const where "
"rw lhs has non-trivial padding.");
}
ReplaceWithAvgPool(div_op, rw_lhs_input, rw_lhs_view, tfl_padding,
rewriter, opt_final_tpose);
return success();
}
}
{
Value divisor = RecursivelyWalkUp<mhlo::BroadcastInDimOp, mhlo::ReshapeOp,
mhlo::TransposeOp>(div_op.getRhs());
auto rw_rhs =
dyn_cast_or_null<mhlo::ReduceWindowOp>(divisor.getDefiningOp());
if (!rw_rhs) {
return rewriter.notifyMatchFailure(
div_op, "Rhs of div op is not a reduce window.");
}
const auto opt_rw_rhs_view = GetViewIfAttrsSupported(rw_rhs);
if (!opt_rw_rhs_view.has_value()) {
return rewriter.notifyMatchFailure(div_op, "Rhs rw is not valid.");
}
const auto [rw_rhs_view, rw_rhs_layout] = opt_rw_rhs_view.value();
if (rw_rhs_layout != TFLNativePoolingLayout(rw_rhs_layout.Rank())) {
return rewriter.notifyMatchFailure(
div_op, "Rhs reduce window not tfl standard layout.");
}
if (failed(MatchBinaryReduceFunction<mhlo::AddOp>(rw_rhs.getBody()))) {
return rewriter.notifyMatchFailure(
div_op, "Rhs rw body function is not an add op.");
}
auto opt_rw_rhs_input_and_init = GetInputAndInitIfValid(rw_rhs);
if (!opt_rw_rhs_input_and_init.has_value()) {
return rewriter.notifyMatchFailure(
div_op,
"Rhs reduce window has wrong number of inputs or init values.");
}
auto [rw_rhs_input, rw_rhs_init_val] = opt_rw_rhs_input_and_init.value();
if (!IsCstFloatZero(rw_rhs_init_val)) {
return rewriter.notifyMatchFailure(div_op,
"Rhs rw init vals is not zero.");
}
rw_rhs_input = RecursivelyWalkUp<mhlo::BroadcastInDimOp, mhlo::TransposeOp>(
rw_rhs_input);
DenseFPElementsAttr rhs_input_data;
if (!matchPattern(rw_rhs_input, m_Constant(&rhs_input_data)) ||
!rhs_input_data.isSplat() ||
!rhs_input_data.getSplatValue<APFloat>().isExactlyValue(1.0)) {
return rewriter.notifyMatchFailure(div_op,
"Rw rhs input is not splat of 1.0.");
}
if (rw_lhs.getWindowDimensions() != rw_rhs.getWindowDimensions() ||
rw_lhs.getWindowStrides() != rw_rhs.getWindowStrides() ||
rw_lhs.getPadding() != rw_rhs.getPadding()) {
return rewriter.notifyMatchFailure(
div_op, "Lhs rw and Rhs rw do not have the same config.");
}
ReplaceWithAvgPool(div_op, rw_lhs_input, rw_lhs_view, tfl_padding, rewriter,
opt_final_tpose);
return success();
}
return failure();
}
}
void PopulateLegalizeReduceWindowPatterns(MLIRContext* ctx,
RewritePatternSet& patterns,
ConversionTarget& target) {
patterns.add<LegalizeAvgPool, LegalizeMaxPool, LegalizeCumSum>(ctx);
target.addDynamicallyLegalOp<mhlo::ReduceWindowOp>(IsReduceWindowLegal);
target.addDynamicallyLegalOp<mhlo::DivOp>(IsDivideLegal);
}
void PopulatePrepareReduceWindowPatterns(MLIRContext* ctx,
RewritePatternSet& patterns) {
patterns.add<RelayoutReduceWindow>(ctx);
}
} | #include <cstdint>
#include <functional>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/algorithm/container.h"
#include "absl/types/span.h"
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace {
using testing::ElementsAre;
template <class T>
struct TensorTypeFor;
#define TENSOR_TYPE_ASSOC(CPP_TYPE, TENSORTYPE_VALUE) \
template <> \
struct TensorTypeFor<CPP_TYPE> { \
static constexpr TensorType value = TENSORTYPE_VALUE; \
};
TENSOR_TYPE_ASSOC(int8_t, TensorType_INT8);
TENSOR_TYPE_ASSOC(int16_t, TensorType_INT16);
TENSOR_TYPE_ASSOC(int32_t, TensorType_INT32);
TENSOR_TYPE_ASSOC(int64_t, TensorType_INT64);
TENSOR_TYPE_ASSOC(uint8_t, TensorType_UINT8);
TENSOR_TYPE_ASSOC(uint16_t, TensorType_UINT16);
TENSOR_TYPE_ASSOC(uint32_t, TensorType_UINT32);
TENSOR_TYPE_ASSOC(uint64_t, TensorType_UINT64);
TENSOR_TYPE_ASSOC(float, TensorType_FLOAT32);
static_assert(sizeof(float) == 4, "float type is expected to be 32 bit long");
TENSOR_TYPE_ASSOC(double, TensorType_FLOAT64);
static_assert(sizeof(double) == 8, "double type is expected to be 64 bit long");
template <class Container>
int32_t intsize(const Container& c) {
return static_cast<int32_t>(c.size());
}
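// Single-op model that builds and runs a REDUCE_WINDOW (ADD) op with constant
// window shape, strides, dilations, and init value. (Despite the name, it
// exercises reduce_window rather than dilate.)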
template <class T>
class DilateOpModel : public SingleOpModel {
static constexpr TensorType kTensorType = TensorTypeFor<T>::value;
public:
void SetInput(absl::Span<const int32_t> shape,
absl::Span<const T> data = {}) {
input_shape_.assign(shape.begin(), shape.end());
if (data.empty()) {
input_data_.resize(absl::c_accumulate(shape, 1, std::multiplies<int>()));
absl::c_iota(input_data_, 1);
} else {
input_data_.assign(data.begin(), data.end());
}
}
void SetWindowShape(absl::Span<const int64_t> shape) {
window_shape_data_.assign(shape.begin(), shape.end());
}
void SetWindowStrides(absl::Span<const int64_t> strides) {
window_strides_data_.assign(strides.begin(), strides.end());
}
void SetWindowDilations(absl::Span<const int64_t> dilations) {
window_dilations_data_.assign(dilations.begin(), dilations.end());
}
void SetInitValue(const T& val) { init_value_data_ = val; }
void Build() {
input_ = AddInput({kTensorType, input_shape_});
init_value_ = AddConstInput(kTensorType, {init_value_data_}, {1});
window_shape_ = AddConstInput(TensorType_INT64, window_shape_data_,
{intsize(window_shape_data_)});
window_strides_ = AddConstInput(TensorType_INT64, window_strides_data_,
{intsize(window_strides_data_)});
window_dilations_ = AddConstInput(TensorType_INT64, window_dilations_data_,
{intsize(window_dilations_data_)});
output_ = AddOutput(kTensorType);
SetBuiltinOp(
BuiltinOperator_REDUCE_WINDOW, BuiltinOptions2_ReduceWindowOptions,
CreateReduceWindowOptions(builder_, ReduceWindowFunction_ADD).Union());
BuildInterpreter({input_shape_});
PopulateTensor(input_, input_data_);
}
TfLiteStatus BuildAndInvoke() {
Build();
return Invoke();
}
absl::Span<const T> GetOutputData() {
return absl::Span<const T>(interpreter_->typed_tensor<T>(output_),
GetTensorSize(output_));
}
absl::Span<const int> GetOutputShape() {
const TfLiteIntArray& shape = *(interpreter_->tensor(output_)->dims);
return absl::Span<const int>(shape.data, shape.size);
}
const std::vector<T>& GetInput() const { return input_data_; }
const std::vector<int32_t>& GetInputShape() const { return input_shape_; }
const std::vector<int64_t>& GetWindowShape() const {
return window_shape_data_;
}
const std::vector<int64_t>& GetWindowStrides() const {
return window_strides_data_;
}
const std::vector<int64_t>& GetWindowDilations() const {
return window_dilations_data_;
}
const T& GetInitValue() const { return init_value_data_; }
protected:
int input_ = -1;
int window_shape_ = -1;
int window_strides_ = -1;
int window_dilations_ = -1;
int init_value_ = -1;
int output_ = -1;
std::vector<T> input_data_;
T init_value_data_;
std::vector<int32_t> input_shape_;
std::vector<int64_t> window_shape_data_;
std::vector<int64_t> window_strides_data_;
std::vector<int64_t> window_dilations_data_;
};
template <class StorageType>
class ReduceWindowTest : public testing::Test {
protected:
DilateOpModel<StorageType> model_;
};
using TestList =
testing::Types<int8_t, int16_t, int32_t, int64_t, uint8_t, float, double>;
TYPED_TEST_SUITE(ReduceWindowTest, TestList);
TYPED_TEST(ReduceWindowTest, FullWindow) {
auto& model = this->model_;
model.SetInput({3, 3});
model.SetWindowShape({3, 3});
model.SetWindowStrides({1, 1});
model.SetWindowDilations({1, 1});
model.SetInitValue(0);
EXPECT_EQ(this->model_.BuildAndInvoke(), kTfLiteOk);
EXPECT_THAT(this->model_.GetOutputShape(), ElementsAre(1, 1));
EXPECT_THAT(this->model_.GetOutputData(), ElementsAre(45));
}
TYPED_TEST(ReduceWindowTest, NoDilation) {
auto& model = this->model_;
model.SetInput({3, 3});
model.SetWindowShape({2, 2});
model.SetWindowStrides({1, 1});
model.SetWindowDilations({1, 1});
model.SetInitValue(0);
EXPECT_EQ(this->model_.BuildAndInvoke(), kTfLiteOk);
EXPECT_THAT(this->model_.GetOutputShape(), ElementsAre(2, 2));
EXPECT_THAT(this->model_.GetOutputData(), ElementsAre(12, 16, 24, 28));
}
TYPED_TEST(ReduceWindowTest, FullWindowWithDilation) {
auto& model = this->model_;
model.SetInput({3, 3});
model.SetWindowShape({2, 2});
model.SetWindowStrides({1, 1});
model.SetWindowDilations({2, 2});
model.SetInitValue(0);
EXPECT_EQ(this->model_.BuildAndInvoke(), kTfLiteOk);
EXPECT_THAT(this->model_.GetOutputShape(), ElementsAre(1, 1));
EXPECT_THAT(this->model_.GetOutputData(), ElementsAre(20));
}
TYPED_TEST(ReduceWindowTest, WithDilation) {
auto& model = this->model_;
model.SetInput({4, 4});
model.SetWindowShape({2, 2});
model.SetWindowStrides({1, 1});
model.SetWindowDilations({2, 2});
model.SetInitValue(0);
EXPECT_EQ(this->model_.BuildAndInvoke(), kTfLiteOk);
EXPECT_THAT(this->model_.GetOutputShape(), ElementsAre(2, 2));
EXPECT_THAT(this->model_.GetOutputData(), ElementsAre(24, 28, 40, 44));
}
TYPED_TEST(ReduceWindowTest, WithStrides) {
auto& model = this->model_;
model.SetInput({4, 4});
model.SetWindowShape({2, 2});
model.SetWindowStrides({2, 2});
model.SetWindowDilations({1, 1});
model.SetInitValue(0);
EXPECT_EQ(this->model_.BuildAndInvoke(), kTfLiteOk);
EXPECT_THAT(this->model_.GetOutputShape(), ElementsAre(2, 2));
EXPECT_THAT(this->model_.GetOutputData(), ElementsAre(14, 22, 46, 54));
}
TYPED_TEST(ReduceWindowTest, WithDilationAndStrides) {
auto& model = this->model_;
model.SetInput({5, 5});
model.SetWindowShape({2, 2});
model.SetWindowStrides({2, 2});
model.SetWindowDilations({2, 2});
model.SetInitValue(2);
EXPECT_EQ(this->model_.BuildAndInvoke(), kTfLiteOk);
EXPECT_THAT(this->model_.GetOutputShape(), ElementsAre(2, 2));
EXPECT_THAT(this->model_.GetOutputData(), ElementsAre(30, 38, 70, 78));
}
TYPED_TEST(ReduceWindowTest, OutputShapeRoundingIsCorrect) {
auto& model = this->model_;
model.SetInput({1, 64, 114, 114});
model.SetWindowShape({1, 1, 3, 3});
model.SetWindowStrides({1, 1, 2, 2});
model.SetWindowDilations({1, 1, 1, 1});
model.SetInitValue(2);
EXPECT_EQ(this->model_.BuildAndInvoke(), kTfLiteOk);
EXPECT_THAT(this->model_.GetOutputShape(), ElementsAre(1, 64, 56, 56));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/lite/stablehlo/transforms/legalize_hlo_conversions/reduce_window.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/reduce_window_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
a9a26e45-6caf-4a8e-84d6-4f99d3d89f0a | cpp | tensorflow/tensorflow | while_loop_constant_sinking | third_party/xla/xla/service/while_loop_constant_sinking.cc | third_party/xla/xla/service/while_loop_constant_sinking_test.cc | #include "xla/service/while_loop_constant_sinking.h"
#include "absl/algorithm/container.h"
#include "absl/container/inlined_vector.h"
#include "xla/service/while_util.h"
#include "xla/shape_util.h"
#include "xla/util.h"
namespace xla {
namespace {
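// Replaces all uses of `old_instr` with `new_instr`, except the use feeding
// element `tuple_index` of the while-body root tuple, which must stay so the
// tuple element remains loop invariant.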
absl::Status ReplaceUsesWhileKeepingLoopInvariance(
HloInstruction* old_instr, HloInstruction* new_instr,
HloInstruction* while_body_root, int64_t tuple_index) {
CHECK_EQ(while_body_root->opcode(), HloOpcode::kTuple);
std::vector<HloInstruction*> users;
users.reserve(old_instr->user_count());
absl::c_copy(old_instr->users(), std::back_inserter(users));
for (auto* user : users) {
for (int64_t i = 0, e = user->operand_count(); i < e; i++) {
if (user->operand(i) == old_instr &&
!(user == while_body_root && i == tuple_index)) {
TF_RETURN_IF_ERROR(user->ReplaceOperandWith(i, new_instr));
}
}
}
return absl::OkStatus();
}
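// Clones a constant, or a broadcast of a constant, into `computation`.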
HloInstruction* CloneHelper(const HloInstruction* instruction,
HloComputation* computation) {
if (instruction->opcode() == HloOpcode::kConstant) {
return computation->AddInstruction(instruction->Clone(".sunk"));
}
if (instruction->opcode() == HloOpcode::kBroadcast) {
return computation->AddInstruction(instruction->CloneWithNewOperands(
instruction->shape(),
{CloneHelper(instruction->operand(0), computation)}));
}
LOG(FATAL) << "Unexpected instruction.";
}
}
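// Sinks loop-invariant constants (and, if enabled, broadcasts of constants)
// into the while body and condition: extra uses of the corresponding
// get-tuple-elements are redirected to freshly cloned constants, while the use
// that feeds the body root tuple is preserved to keep the element invariant.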
absl::StatusOr<bool> WhileLoopConstantSinking::TrySinkingConstantsIntoWhileLoop(
HloInstruction* while_instr) {
HloComputation* while_cond = while_instr->while_condition();
HloComputation* while_body = while_instr->while_body();
const HloInstruction& init_value = *while_instr->operand(0);
if (init_value.opcode() != HloOpcode::kTuple) {
return false;
}
bool changed = false;
absl::flat_hash_map<int64_t, absl::InlinedVector<HloInstruction*, 1>>
conditional_gte_index_to_insts =
WhileUtil::GetGTEsMapForWhileConditional(*while_cond);
std::vector<HloInstruction*> invariant_body_gtes =
WhileUtil::GetInvariantGTEsForWhileBody(*while_body);
for (HloInstruction* invariant_body_gte : invariant_body_gtes) {
int64_t index = invariant_body_gte->tuple_index();
const HloInstruction& invariant_value = *init_value.operand(index);
if (invariant_value.opcode() != HloOpcode::kConstant &&
(!sink_broadcast_of_constants_ ||
invariant_value.opcode() != HloOpcode::kBroadcast ||
invariant_value.operand(0)->opcode() != HloOpcode::kConstant)) {
continue;
}
if (sink_only_scalar_constants_) {
if (!ShapeUtil::IsScalar(init_value.operand(index)->shape())) {
continue;
}
}
if (invariant_body_gte->user_count() > 1) {
HloInstruction* constant_instr =
CloneHelper(&invariant_value, while_body);
TF_RETURN_IF_ERROR(ReplaceUsesWhileKeepingLoopInvariance(
invariant_body_gte, constant_instr, while_body->root_instruction(),
index));
changed = true;
}
auto it = conditional_gte_index_to_insts.find(index);
if (it == conditional_gte_index_to_insts.end()) {
continue;
}
for (HloInstruction* invariant_cond_gte : it->second) {
if (invariant_cond_gte->user_count() > 0) {
HloInstruction* constant_instr =
CloneHelper(&invariant_value, while_cond);
TF_RETURN_IF_ERROR(
invariant_cond_gte->ReplaceAllUsesWith(constant_instr));
changed = true;
}
}
}
return changed;
}
absl::StatusOr<bool> WhileLoopConstantSinking::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
VLOG(2) << "HLO module before WhileLoopConstantSinking:";
XLA_VLOG_LINES(2, module->ToString());
bool changed = false;
std::vector<HloInstruction*> while_instrs;
for (auto* comp : module->MakeNonfusionComputations(execution_threads)) {
absl::c_copy_if(comp->instructions(), std::back_inserter(while_instrs),
HloPredicateIsOp<HloOpcode::kWhile>);
}
for (HloInstruction* while_instr : while_instrs) {
TF_ASSIGN_OR_RETURN(bool result,
TrySinkingConstantsIntoWhileLoop(while_instr));
changed |= result;
}
if (changed) {
VLOG(2) << "HLO module after WhileLoopConstantSinking:";
XLA_VLOG_LINES(2, module->ToString());
} else {
VLOG(2) << "HLO module unchanged after WhileLoopConstantSinking";
}
return changed;
}
} | #include "xla/service/while_loop_constant_sinking.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/test.h"
#include "xla/tests/hlo_test_base.h"
namespace xla {
namespace {
namespace op = xla::testing::opcode_matchers;
using ::testing::_;
using WhileLoopConstantSinkingTest = HloTestBase;
TEST_F(WhileLoopConstantSinkingTest, SinkOneConstant) {
const char* const hlo_string = R"(
HloModule ModuleWithWhile
body {
p_body = (f32[2],f32[2]) parameter(0)
p_body.0 = f32[2] get-tuple-element((f32[2],f32[2]) p_body), index=0
p_body.1 = f32[2] get-tuple-element((f32[2],f32[2]) p_body), index=1
add.0 = f32[2] add(p_body.0, p_body.1)
ROOT root = (f32[2],f32[2]) tuple(add.0, p_body.1)
}
condition {
p_cond = (f32[2],f32[2]) parameter(0)
ROOT result = pred[] constant(true)
}
ENTRY entry {
const_0 = f32[2] constant({1, 2})
const_1 = f32[2] constant({2, 1})
while_init = (f32[2],f32[2]) tuple(const_0, const_1)
ROOT while = (f32[2],f32[2]) while(while_init), condition=condition, body=body
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
      WhileLoopConstantSinking(/*sink_broadcast_of_constants=*/false,
                               /*sink_only_scalar_constants=*/true)
.Run(module.get()));
ASSERT_FALSE(changed);
TF_ASSERT_OK_AND_ASSIGN(
      changed, WhileLoopConstantSinking(/*sink_broadcast_of_constants=*/false,
                                        /*sink_only_scalar_constants=*/false)
.Run(module.get()));
ASSERT_TRUE(changed);
auto* while_body = module->GetComputationWithName("body");
EXPECT_THAT(while_body->root_instruction(),
op::Tuple(op::Add(_, op::Constant()), _));
}
TEST_F(WhileLoopConstantSinkingTest, SinkBroadcastOfConstant) {
const char* const hlo_string = R"(
HloModule ModuleWithWhile
body {
p_body = (f32[16],f32[16]) parameter(0)
p_body.0 = get-tuple-element(p_body), index=0
p_body.1 = get-tuple-element(p_body), index=1
add.0 = add(p_body.0, p_body.1)
ROOT root = tuple(add.0, p_body.1)
}
condition {
p_cond = (f32[16],f32[16]) parameter(0)
ROOT result = pred[] constant(true)
}
ENTRY entry {
const_0 = f32[] constant(1)
const_1 = f32[] constant(2)
broadcast_0 = f32[16] broadcast(const_0), dimensions={}
broadcast_1 = f32[16] broadcast(const_1), dimensions={}
while_init = tuple(broadcast_0, broadcast_1)
ROOT while = while(while_init), condition=condition, body=body
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
      WhileLoopConstantSinking(/*sink_broadcast_of_constants=*/false)
.Run(module.get()));
ASSERT_FALSE(changed);
TF_ASSERT_OK_AND_ASSIGN(
      changed, WhileLoopConstantSinking(/*sink_broadcast_of_constants=*/true)
.Run(module.get()));
ASSERT_TRUE(changed);
auto* while_body = module->GetComputationWithName("body");
EXPECT_THAT(while_body->root_instruction(),
op::Tuple(op::Add(_, op::Broadcast(op::Constant())), _));
}
TEST_F(WhileLoopConstantSinkingTest, KeepConstantsLoopInvariant) {
const char* const hlo_string = R"(
HloModule ModuleWithWhile
body {
p_body = (f32[2],f32[2],f32[2]) parameter(0)
p_body.0 = f32[2] get-tuple-element((f32[2],f32[2],f32[2]) p_body), index=0
p_body.1 = f32[2] get-tuple-element((f32[2],f32[2],f32[2]) p_body), index=1
p_body.2 = f32[2] get-tuple-element((f32[2],f32[2],f32[2]) p_body), index=2
add.0 = f32[2] add(p_body.1, p_body.2)
ROOT root = (f32[2],f32[2],f32[2]) tuple(add.0, p_body.1, p_body.2)
}
condition {
p_cond = (f32[2],f32[2],f32[2]) parameter(0)
ROOT result = pred[] constant(true)
}
ENTRY entry {
const_0 = f32[2] constant({1, 2})
const_1 = f32[2] constant({2, 1})
const_2 = f32[2] constant({3, 1})
while_init = (f32[2],f32[2],f32[2]) tuple(const_0, const_1, const_2)
ROOT while = (f32[2],f32[2],f32[2]) while(while_init), condition=condition, body=body
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed,
WhileLoopConstantSinking{}.Run(module.get()));
ASSERT_TRUE(changed);
auto* while_body = module->GetComputationWithName("body");
EXPECT_THAT(while_body->root_instruction(),
op::Tuple(op::Add(op::Constant(), op::Constant()),
op::GetTupleElement(op::Parameter(0)),
op::GetTupleElement(op::Parameter(0))));
}
TEST_F(WhileLoopConstantSinkingTest, TupleShapedConstants) {
const char* const hlo_string = R"(
HloModule ModuleWithWhile
body {
p_b = (f32[2],(f32[2],f32[2])) parameter(0)
p_b.0 = f32[2] get-tuple-element((f32[2],(f32[2],f32[2])) p_b), index=0
p_b.1 = (f32[2],f32[2]) get-tuple-element((f32[2],(f32[2],f32[2])) p_b), index=1
p_b.1.1 = f32[2] get-tuple-element(p_b.1), index=0
ROOT root = (f32[2],(f32[2],f32[2])) tuple(p_b.1.1, p_b.1)
}
condition {
p_cond = (f32[2],(f32[2],f32[2])) parameter(0)
ROOT result = pred[] constant(true)
}
ENTRY entry {
const_0 = f32[2] constant({1, 2})
const_1 = (f32[2], f32[2]) constant(({2, 1},{3,1}))
while_init = (f32[2],(f32[2],f32[2])) tuple(const_0, const_1)
ROOT while = (f32[2],(f32[2],f32[2])) while(while_init), condition=condition, body=body
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed,
WhileLoopConstantSinking{}.Run(module.get()));
ASSERT_TRUE(changed);
auto* while_body = module->GetComputationWithName("body");
EXPECT_THAT(while_body->root_instruction(),
op::Tuple(op::GetTupleElement(op::Constant(), 0),
op::GetTupleElement(op::Parameter(0))));
}
TEST_F(WhileLoopConstantSinkingTest, DuplicateGTEs) {
const char* const hlo_string = R"(
HloModule ModuleWithWhile
body {
p_b = (f32[2],f32[2],f32[2]) parameter(0)
p_b.1 = f32[2] get-tuple-element((f32[2],f32[2],f32[2]) p_b), index=1
p_b.2 = f32[2] get-tuple-element((f32[2],f32[2],f32[2]) p_b), index=2
p_b.2.dup = f32[2] get-tuple-element((f32[2],f32[2],f32[2]) p_b), index=2
add.0 = f32[2] add(p_b.1, p_b.2.dup)
ROOT root = (f32[2],f32[2],f32[2]) tuple(add.0, p_b.1, p_b.2)
}
condition {
p_cond = (f32[2],f32[2],f32[2]) parameter(0)
ROOT result = pred[] constant(true)
}
ENTRY entry {
const_0 = f32[2] constant({1, 2})
const_1 = f32[2] constant({2, 1})
const_2 = f32[2] constant({3, 1})
while_init = (f32[2],f32[2],f32[2]) tuple(const_0, const_1, const_2)
ROOT while = (f32[2],f32[2],f32[2]) while(while_init), condition=condition, body=body
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed,
WhileLoopConstantSinking{}.Run(module.get()));
ASSERT_TRUE(changed);
auto* while_body = module->GetComputationWithName("body");
EXPECT_THAT(while_body->root_instruction(),
op::Tuple(op::Add(op::Constant(), ::testing::Not(op::Constant())),
op::GetTupleElement(op::Parameter(0)),
op::GetTupleElement(op::Parameter(0))));
}
TEST_F(WhileLoopConstantSinkingTest, DontCreateDeadConstant) {
const char* const hlo_string = R"(
HloModule ModuleWithWhile
body {
p_body = (f32[2],f32[2]) parameter(0)
p_body.0 = f32[2] get-tuple-element((f32[2],f32[2]) p_body), index=0
p_body.1 = f32[2] get-tuple-element((f32[2],f32[2]) p_body), index=1
token0 = token[] after-all()
outfeed = token[] outfeed(p_body.0, token0)
ROOT root = (f32[2],f32[2],f32[2]) tuple(p_body.0, p_body.1, p_body.1)
}
condition {
p_cond = (f32[2],f32[2]) parameter(0)
ROOT result = pred[] constant(true)
}
ENTRY entry {
const_0 = f32[2] constant({1, 2})
const_1 = f32[2] constant({2, 1})
while_init = (f32[2],f32[2]) tuple(const_0, const_1)
ROOT while = (f32[2],f32[2],f32[2]) while(while_init), condition=condition,
body=body
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed,
WhileLoopConstantSinking{}.Run(module.get()));
ASSERT_TRUE(changed);
auto* while_body = module->GetComputationWithName("body");
EXPECT_THAT(while_body->root_instruction(),
op::Tuple(op::GetTupleElement(), op::GetTupleElement(),
op::GetTupleElement()));
for (const HloInstruction* inst : while_body->instructions()) {
if (inst->opcode() == HloOpcode::kConstant) {
EXPECT_GT(inst->user_count(), 0);
}
}
}
TEST_F(WhileLoopConstantSinkingTest, ConditionalSinkConstant) {
const char* const hlo_string = R"(
HloModule ModuleWithWhile
body {
p_body = (f32[],f32[]) parameter(0)
p_body.0 = f32[] get-tuple-element((f32[],f32[]) p_body), index=0
const = f32[] constant(1)
add = f32[] add(p_body.0, const)
p_body.1 = f32[] get-tuple-element((f32[],f32[]) p_body), index=1
ROOT root = (f32[],f32[]) tuple(add, p_body.1)
}
condition {
p_cond = (f32[],f32[]) parameter(0)
p_cond.0 = f32[] get-tuple-element((f32[],f32[]) p_cond), index=0
p_cond.1 = f32[] get-tuple-element((f32[],f32[]) p_cond), index=1
ROOT result = pred[] compare(p_cond.0, p_cond.1), direction=LT
}
ENTRY entry {
const_0 = f32[] constant(0)
const_1 = f32[] constant(10)
while_init = (f32[],f32[]) tuple(const_0, const_1)
ROOT while = (f32[],f32[]) while(while_init), condition=condition, body=body
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed,
WhileLoopConstantSinking{}.Run(module.get()));
ASSERT_TRUE(changed);
auto* while_condition = module->GetComputationWithName("condition");
EXPECT_THAT(while_condition->root_instruction(), op::Lt(_, op::Constant()));
}
TEST_F(WhileLoopConstantSinkingTest, ConditionalTupleShapedConstants) {
const char* const hlo_string = R"(
HloModule ModuleWithWhile
body {
p_b = (f32[],(f32[],f32[])) parameter(0)
p_b.0 = f32[] get-tuple-element((f32[],(f32[],f32[])) p_b), index=0
p_b.1 = (f32[],f32[]) get-tuple-element((f32[],(f32[],f32[])) p_b), index=1
p_b.1.0 = f32[] get-tuple-element((f32[],f32[]) p_b.1), index=0
add = f32[] add(p_b.0, p_b.1.0)
ROOT root = (f32[],(f32[],f32[])) tuple(add, p_b.1)
}
condition {
p_c = (f32[],(f32[],f32[])) parameter(0)
p_c.0 = f32[] get-tuple-element((f32[],(f32[],f32[])) p_c), index=0
p_c.1 = (f32[],f32[]) get-tuple-element((f32[],(f32[],f32[])) p_c), index=1
p_c.1.1 = f32[] get-tuple-element((f32[],f32[]) p_c.1), index=1
ROOT result = pred[] compare(p_c.0, p_c.1.1), direction=LT
}
ENTRY entry {
const_0 = f32[] constant(0)
const_1 = (f32[], f32[]) constant((1, 10))
while_init = (f32[],(f32[],f32[])) tuple(const_0, const_1)
ROOT while = (f32[],(f32[],f32[])) while(while_init), condition=condition, body=body
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed,
WhileLoopConstantSinking{}.Run(module.get()));
ASSERT_TRUE(changed);
auto* while_condition = module->GetComputationWithName("condition");
EXPECT_THAT(while_condition->root_instruction(),
op::Lt(_, op::GetTupleElement(op::Constant())));
}
TEST_F(WhileLoopConstantSinkingTest, ConditionalDontCreateDeadConstant) {
const char* const hlo_string = R"(
HloModule ModuleWithWhile
body {
p_body = (f32[],f32[],f32[]) parameter(0)
p_body.0 = f32[] get-tuple-element((f32[],f32[],f32[]) p_body), index=0
const = f32[] constant(1)
add = f32[] add(p_body.0, const)
p_body.1 = f32[] get-tuple-element((f32[],f32[],f32[]) p_body), index=1
p_body.2 = f32[] get-tuple-element((f32[],f32[],f32[]) p_body), index=2
ROOT root = (f32[],f32[],f32[]) tuple(add, p_body.1, p_body.2)
}
condition {
p_cond = (f32[],f32[],f32[]) parameter(0)
p_cond.0 = f32[] get-tuple-element((f32[],f32[],f32[]) p_cond), index=0
p_cond.1 = f32[] get-tuple-element((f32[],f32[],f32[]) p_cond), index=1
p_cond.2 = f32[] get-tuple-element((f32[],f32[],f32[]) p_cond), index=2
ROOT result = pred[] compare(p_cond.0, p_cond.1), direction=LT
}
ENTRY entry {
const_0 = f32[] constant(0)
const_1 = f32[] constant(10)
const_2 = f32[] constant(12)
while_init = (f32[],f32[],f32[]) tuple(const_0, const_1, const_2)
ROOT while = (f32[],f32[],f32[]) while(while_init), condition=condition, body=body
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed,
WhileLoopConstantSinking{}.Run(module.get()));
ASSERT_TRUE(changed);
auto* while_condition = module->GetComputationWithName("condition");
EXPECT_THAT(while_condition->root_instruction(), op::Lt(_, op::Constant()));
for (const HloInstruction* inst : while_condition->instructions()) {
if (inst->opcode() == HloOpcode::kConstant) {
EXPECT_GT(inst->user_count(), 0);
}
}
}
TEST_F(WhileLoopConstantSinkingTest, ConditionalMultipleSameIndexGTEs) {
const char* const hlo_string = R"(
HloModule ModuleWithWhile
body {
p_body = (f32[],f32[],f32[]) parameter(0)
p_body.0 = f32[] get-tuple-element((f32[],f32[],f32[]) p_body), index=0
const = f32[] constant(1)
add.0 = f32[] add(p_body.0, const)
p_body.1 = f32[] get-tuple-element((f32[],f32[],f32[]) p_body), index=1
add.1 = f32[] add(p_body.1, const)
p_body.2 = f32[] get-tuple-element((f32[],f32[],f32[]) p_body), index=2
ROOT root = (f32[],f32[],f32[]) tuple(add.0, add.1, p_body.2)
}
condition {
p_cond = (f32[],f32[],f32[]) parameter(0)
p_cond.0 = f32[] get-tuple-element((f32[],f32[],f32[]) p_cond), index=0
p_cond.2 = f32[] get-tuple-element((f32[],f32[],f32[]) p_cond), index=2
lt.0 = pred[] compare(p_cond.0, p_cond.2), direction=LT
p_cond.1 = f32[] get-tuple-element((f32[],f32[],f32[]) p_cond), index=1
p_cond.2.c = f32[] get-tuple-element((f32[],f32[],f32[]) p_cond), index=2
lt.1 = pred[] compare(p_cond.1, p_cond.2.c), direction=LT
ROOT result = pred[] and(lt.0, lt.1)
}
ENTRY entry {
const_0 = f32[] constant(0)
const_1 = f32[] constant(0)
const_2 = f32[] constant(12)
while_init = (f32[],f32[],f32[]) tuple(const_0, const_1, const_2)
ROOT while = (f32[],f32[],f32[]) while(while_init), condition=condition, body=body
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed,
WhileLoopConstantSinking{}.Run(module.get()));
ASSERT_TRUE(changed);
auto* while_condition = module->GetComputationWithName("condition");
EXPECT_THAT(while_condition->root_instruction(),
op::And(op::Lt(_, op::Constant()), op::Lt(_, op::Constant())));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/while_loop_constant_sinking.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/while_loop_constant_sinking_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
6ff23afd-0656-478f-be98-99d1185d6c17 | cpp | tensorflow/tensorflow | in_place_dynamic_update_slice | third_party/xla/xla/service/gpu/fusions/legacy/in_place_dynamic_update_slice.cc | third_party/xla/xla/service/gpu/fusions/legacy/in_place_dynamic_update_slice_test.cc | #include "xla/service/gpu/fusions/legacy/in_place_dynamic_update_slice.h"
#include <optional>
#include <utility>
#include <vector>
#include "absl/status/status.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/IR/IRBuilder.h"
#include "mlir/IR/MLIRContext.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/service/gpu/elemental_ir_emitter.h"
#include "xla/service/gpu/ir_emitter_context.h"
#include "xla/service/gpu/launch_dimensions.h"
#include "xla/service/gpu/model/indexing_map.h"
#include "xla/service/llvm_ir/dynamic_update_slice_util.h"
#include "xla/service/llvm_ir/fused_ir_emitter.h"
#include "xla/service/llvm_ir/ir_array.h"
namespace xla {
namespace gpu {
namespace {
constexpr int kDUSUpdateIndex = 1;
}
LaunchDimensions InPlaceDynamicUpdateSliceFusion::launch_dimensions() const {
  const auto& update_shape = dus_ops_.front().GetOperand(kDUSUpdateIndex).shape();
return CalculateLaunchDimensions(update_shape, analysis_.device_info());
}
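// Only the update operand of the dynamic-update-slice has a thread-id to input
// mapping; the base operand is updated in place and is not indexed by the
// emitted kernel.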
std::optional<IndexingMap>
InPlaceDynamicUpdateSliceFusion::ComputeThreadIdToInputIndexing(
int64_t root_index, int64_t hero_operand_index,
mlir::MLIRContext* mlir_context) const {
if (hero_operand_index != kDUSUpdateIndex) {
return std::nullopt;
}
auto launch_dims = launch_dimensions();
const auto& update_shape =
dus_ops_.front().GetOperand(kDUSUpdateIndex).shape();
return GetDefaultThreadIdIndexingMap(launch_dims, 1,
update_shape, mlir_context);
}
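// Emits the in-place DUS kernel: each output is reinterpreted with the shape
// of its dynamic-update-slice, fusion parameters are bound to generators that
// read the kernel inputs, and the shared parallel DUS emitter writes the
// updates in place.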
absl::Status InPlaceDynamicUpdateSliceFusion::EmitKernel(
IrEmitterContext& ir_emitter_context, const HloFusionInstruction& fusion,
const LaunchDimensions& launch_dims, std::vector<llvm_ir::IrArray> inputs,
std::vector<llvm_ir::IrArray> outputs, llvm::IRBuilder<>* builder) const {
for (auto [op, output] : llvm::zip(dus_ops_, outputs)) {
output = output.CastToShape(op.shape(), builder);
}
auto* fused_computation = fusion.fused_instructions_computation();
GpuElementalIrEmitter elemental_emitter(ir_emitter_context, builder);
FusedIrEmitter fused_emitter(elemental_emitter);
for (auto [index, input] : llvm::enumerate(inputs)) {
auto fused_operand = fused_computation->parameter_instruction(index);
fused_emitter.BindGenerator(
*fused_operand, [input = input, builder,
fused_operand](const llvm_ir::IrArray::Index& index) {
return input.EmitReadArrayElement(index, builder,
fused_operand->name());
});
}
std::vector<std::pair<const HloInstruction*, const llvm_ir::IrArray>>
dus_and_output_array;
dus_and_output_array.reserve(dus_ops_.size());
for (auto [op, output] : llvm::zip(dus_ops_, outputs)) {
dus_and_output_array.push_back(std::make_pair(&op.instruction(), output));
}
return llvm_ir::EmitParallelFusedDynamicUpdateSliceInPlace(
fused_computation, dus_and_output_array, &fused_emitter, launch_dims,
builder);
}
}
} | #include "xla/service/gpu/fusions/legacy/in_place_dynamic_update_slice.h"
#include <optional>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "mlir/IR/MLIRContext.h"
#include "xla/service/gpu/fusions/fusions.h"
#include "xla/service/gpu/gpu_device_info_for_tests.h"
#include "xla/service/gpu/hlo_fusion_analysis.h"
#include "xla/service/gpu/model/indexing_map_serialization.h"
#include "xla/service/gpu/model/indexing_test_utils.h"
#include "xla/stream_executor/device_description.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
namespace {
class InPlaceDynamicUpdateSliceFusionTest : public HloTestBase {
protected:
DebugOptions GetDebugOptionsForTest() override {
auto opts = HloTestBase::GetDebugOptionsForTest();
opts.set_xla_gpu_mlir_emitter_level(0);
return opts;
}
mlir::MLIRContext mlir_context_;
stream_executor::DeviceDescription device_info_ =
TestGpuDeviceInfo::RTXA6000DeviceInfo();
};
TEST_F(InPlaceDynamicUpdateSliceFusionTest, ThreadIndexing) {
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(R"(
HloModule module
fused_computation {
in = f32[20,30] parameter(0)
updates = f32[5,6] parameter(1)
i0 = s32[] parameter(2)
i1 = s32[] parameter(3)
ROOT updated = f32[20,30] dynamic-update-slice(in, updates, i0, i1)
}
ENTRY entry {
in = f32[20,30] parameter(0)
updates = f32[5,6] parameter(1)
i0 = s32[] constant(2)
i1 = s32[] constant(3)
ROOT fusion = f32[20,30] fusion(in, updates, i0, i1), kind=kLoop, calls=fused_computation
}
)"));
auto* root = module->entry_computation()->root_instruction();
auto analysis_fused = HloFusionAnalysis::Create(*root, device_info_);
auto emitter =
GetFusionEmitter(PreBufferAssignmentFusionInfo{analysis_fused});
auto fusion = dynamic_cast<InPlaceDynamicUpdateSliceFusion*>(emitter.get());
ASSERT_NE(fusion, nullptr);
auto thread_id_update_indexing = fusion->ComputeThreadIdToInputIndexing(
      /*root_index=*/0, /*hero_operand_index=*/1, &mlir_context_);
EXPECT_THAT(ToString(*thread_id_update_indexing,
{"th_x", "th_y", "th_z", "bl_x", "bl_y", "bl_z"},
{"chunk_id", "unroll_id"}, {}),
MatchIndexingString(R"(
(th_x, th_y, th_z, bl_x, bl_y, bl_z)[chunk_id, unroll_id] -> (
th_x floordiv 6, th_x mod 6),
domain:
th_x in [0, 29],
th_y in [0, 0],
th_z in [0, 0],
bl_x in [0, 0],
bl_y in [0, 0],
bl_z in [0, 0],
chunk_id in [0, 0],
unroll_id in [0, 0]
)"));
auto thread_id_dst_indexing = fusion->ComputeThreadIdToInputIndexing(
      /*root_index=*/0, /*hero_operand_index=*/0, &mlir_context_);
EXPECT_THAT(thread_id_dst_indexing, ::testing::Eq(std::nullopt));
}
TEST_F(InPlaceDynamicUpdateSliceFusionTest, ProduceConsumerFusion) {
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(R"(
HloModule m
fused_computation.1 {
param_0 = bf16[1,2,5,1,2] parameter(0)
bitcast = bf16[1,5,1,2,2] bitcast(param_0)
param_1 = bf16[1,1,1,2,2] parameter(1)
param_2 = s32[] parameter(2)
param_3 = s32[] parameter(3)
ROOT dynamic-update-slice = bf16[1,5,1,2,2] dynamic-update-slice(bitcast, param_1, param_2, param_3, param_2, param_2, param_2)
}
ENTRY entry_computation {
param_0.2 = bf16[1,2,5,1,2] parameter(3)
param_1.2 = bf16[1,1,1,2,2] parameter(0)
param_2.2 = s32[] parameter(1)
param_3.2 = s32[] parameter(2)
fusion = bf16[1,5,1,2,2] fusion(param_0.2, param_1.2, param_2.2, param_3.2), kind=kLoop, calls=fused_computation.1
ROOT bitcast.1 = bf16[1,2,5,1,2] bitcast(fusion)
}
)"));
auto* root = module->entry_computation()->root_instruction();
auto analysis_fused =
HloFusionAnalysis::Create(*root->operand(0), *root, device_info_);
auto emitter =
GetFusionEmitter(PreBufferAssignmentFusionInfo{analysis_fused});
auto fusion = dynamic_cast<InPlaceDynamicUpdateSliceFusion*>(emitter.get());
ASSERT_NE(fusion, nullptr);
  EXPECT_EQ(fusion->launch_dimensions().launch_bound(), 4);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/fusions/legacy/in_place_dynamic_update_slice.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/fusions/legacy/in_place_dynamic_update_slice_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
3d19f3e5-b0b2-4ec8-b860-1a7e27e52728 | cpp | tensorflow/tensorflow | partial_tensor_shape | tensorflow/core/framework/partial_tensor_shape.h | tensorflow/core/framework/partial_tensor_shape_test.cc | #ifndef TENSORFLOW_CORE_FRAMEWORK_PARTIAL_TENSOR_SHAPE_H_
#define TENSORFLOW_CORE_FRAMEWORK_PARTIAL_TENSOR_SHAPE_H_
#include "tensorflow/core/framework/tensor_shape.h"
#endif | #include "tensorflow/core/framework/partial_tensor_shape.h"
#include <limits>
#include "xla/tsl/lib/core/status_test_util.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/status_matchers.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
TEST(PartialTensorShapeTest, Default) {
const PartialTensorShape s;
EXPECT_EQ(s.dims(), -1);
EXPECT_TRUE(s.unknown_rank());
}
TEST(PartialTensorShapeTest, Concatenate) {
const PartialTensorShape s({10, 5});
ASSERT_EQ(2, s.dims());
EXPECT_EQ(10, s.dim_size(0));
EXPECT_EQ(5, s.dim_size(1));
EXPECT_EQ(50, s.num_elements());
const auto s1 = s.Concatenate(s);
ASSERT_EQ(4, s1.dims());
EXPECT_EQ(10, s1.dim_size(0));
EXPECT_EQ(5, s1.dim_size(1));
EXPECT_EQ(10, s1.dim_size(2));
EXPECT_EQ(5, s1.dim_size(3));
EXPECT_EQ(50 * 50, s1.num_elements());
const auto s2 = s.Concatenate(-1);
const auto s3 = s2.Concatenate(0);
ASSERT_EQ(3, s2.dims());
ASSERT_EQ(4, s3.dims());
EXPECT_EQ(10, s2.dim_size(0));
EXPECT_EQ(10, s3.dim_size(0));
EXPECT_EQ(5, s2.dim_size(1));
EXPECT_EQ(5, s3.dim_size(1));
EXPECT_EQ(-1, s2.dim_size(2));
EXPECT_EQ(-1, s3.dim_size(2));
EXPECT_EQ(0, s3.dim_size(3));
EXPECT_EQ(-1, s2.num_elements());
EXPECT_EQ(-1, s3.num_elements());
const auto s4 = s.Concatenate(PartialTensorShape());
EXPECT_EQ(-1, s4.dims());
EXPECT_EQ(-1, s4.num_elements());
}
TEST(PartialTensorShapeTest, ConcatenateWithStatus) {
PartialTensorShape s({10, 5, 20});
PartialTensorShape s2;
Status status = s.ConcatenateWithStatus(400, &s2);
EXPECT_TRUE(status.ok());
EXPECT_EQ(s2.num_elements(), 400000);
EXPECT_EQ(s2.dims(), 4);
PartialTensorShape s3;
status = s2.ConcatenateWithStatus(-10, &s3);
EXPECT_TRUE(status.ok());
EXPECT_EQ(s3.num_elements(), -1);
EXPECT_EQ(s3.dims(), 5);
PartialTensorShape s4;
status = s.ConcatenateWithStatus(s, &s4);
EXPECT_TRUE(status.ok());
EXPECT_EQ(s4.num_elements(), 1000000);
EXPECT_EQ(s4.dims(), 6);
PartialTensorShape s5;
status = s5.ConcatenateWithStatus(s5, &s4);
EXPECT_TRUE(status.ok());
}
TEST(PartialTensorShapeTest, PartialTensorShapeIsValid) {
PartialTensorShape s({10, 5, 20});
EXPECT_TRUE(s.IsValid());
PartialTensorShape s2({-1, 5, 20});
EXPECT_TRUE(s2.IsValid());
PartialTensorShape s3;
EXPECT_FALSE(s3.IsValid());
PartialTensorShape s4(s3.AsProto());
EXPECT_FALSE(s4.IsValid());
}
TEST(PartialTensorShapeTest, InvalidShapeProto) {
TensorShapeProto proto;
EXPECT_TRUE(PartialTensorShape::IsValid(proto));
proto.add_dim()->set_size(357);
proto.add_dim()->set_size(982);
EXPECT_TRUE(PartialTensorShape::IsValid(proto));
proto.Clear();
proto.add_dim()->set_size(0);
proto.add_dim()->set_size(-1);
EXPECT_TRUE(PartialTensorShape::IsValid(proto));
proto.Clear();
proto.set_unknown_rank(true);
EXPECT_TRUE(PartialTensorShape::IsValid(proto));
proto.add_dim()->set_size(1);
EXPECT_FALSE(PartialTensorShape::IsValid(proto));
proto.Clear();
proto.add_dim()->set_size(-2);
EXPECT_FALSE(PartialTensorShape::IsValid(proto));
}
TEST(PartialTensorShapeTest, PartialTensorShapeIsValidShape) {
PartialTensorShape s;
TensorShapeProto proto = s.AsProto();
TF_EXPECT_OK(PartialTensorShape::IsValidShape(proto));
proto.add_dim()->set_size(1);
EXPECT_THAT(PartialTensorShape::IsValidShape(proto),
testing::StatusIs(
error::Code::INVALID_ARGUMENT,
::testing::ContainsRegex(
"An unknown shape must not have any dimensions set.")));
proto.set_unknown_rank(false);
proto.add_dim()->set_size(-1);
proto.add_dim()->set_size(-2);
EXPECT_THAT(PartialTensorShape::IsValidShape(proto),
testing::StatusIs(error::Code::INVALID_ARGUMENT,
::testing::ContainsRegex(
"has dimensions with values below -1")));
EXPECT_THAT(TensorShape::IsValidShape(proto),
testing::StatusIs(
error::Code::INVALID_ARGUMENT,
::testing::ContainsRegex("Shape.*is not fully defined")));
}
TEST(PartialTensorShapeTest, BuildPartialTensorShape) {
PartialTensorShape s;
TensorShapeProto sp = s.AsProto();
PartialTensorShape s2;
TF_EXPECT_OK(PartialTensorShape::BuildPartialTensorShape(sp, &s2));
EXPECT_EQ(s2.AsProto().DebugString(), sp.DebugString());
PartialTensorShape s3({-1, 5, 10});
TensorShapeProto sp3 = s3.AsProto();
PartialTensorShape s4;
TF_EXPECT_OK(PartialTensorShape::BuildPartialTensorShape(sp3, &s4));
EXPECT_EQ(s4.AsProto().DebugString(), sp3.DebugString());
sp3.add_dim()->set_size(std::numeric_limits<int64_t>::max());
EXPECT_THAT(
PartialTensorShape::BuildPartialTensorShape(sp3, &s4),
testing::StatusIs(error::Code::INVALID_ARGUMENT,
::testing::ContainsRegex(
"Encountered overflow when multiplying shape")));
}
TEST(PartialTensorShapeTest, PartialShapeFullyDefined) {
const PartialTensorShape a({-1, 0, 1});
const PartialTensorShape b({1, 0, 1});
const PartialTensorShape c({-1, -1, 1});
const PartialTensorShape d({1, 0});
const PartialTensorShape e({});
const PartialTensorShape f;
EXPECT_FALSE(a.IsFullyDefined());
EXPECT_FALSE(c.IsFullyDefined());
EXPECT_TRUE(b.IsFullyDefined());
EXPECT_TRUE(d.IsFullyDefined());
EXPECT_TRUE(e.IsFullyDefined());
EXPECT_FALSE(f.IsFullyDefined());
}
TEST(PartialTensorShapeTest, ToTensorShape) {
const PartialTensorShape a({});
const PartialTensorShape b({1, 0});
const PartialTensorShape c({-1, 0});
const PartialTensorShape d;
TensorShape full;
EXPECT_TRUE(a.AsTensorShape(&full));
EXPECT_EQ(full.dims(), 0);
EXPECT_TRUE(b.AsTensorShape(&full));
EXPECT_EQ(full.dims(), 2);
EXPECT_EQ(full.dim_size(0), 1);
EXPECT_EQ(full.dim_size(1), 0);
EXPECT_FALSE(c.AsTensorShape(&full));
EXPECT_FALSE(d.AsTensorShape(&full));
}
TEST(PartialTensorShapeTest, PartialShapeIdenticalTo) {
const PartialTensorShape a({-1, 0, 1});
const PartialTensorShape b({1, 0, 1});
const PartialTensorShape c({-1, -1, 1});
const PartialTensorShape d({1, 0});
const PartialTensorShape e({-1, 0, 2});
const PartialTensorShape f({});
const PartialTensorShape g;
std::vector<PartialTensorShape> shapes = {a, b, c, d, e, f, g};
for (int i = 0; i < shapes.size(); ++i) {
for (int j = 0; j <= i; ++j) {
if (i == j) {
EXPECT_TRUE(shapes[i].IsIdenticalTo(shapes[j]));
} else {
EXPECT_FALSE(shapes[i].IsIdenticalTo(shapes[j]));
}
}
}
}
TEST(PartialTensorShapeTest, PartialShapeCompatibleWith) {
const PartialTensorShape a({-1, 0, 1});
const PartialTensorShape b({1, 0, 1});
const PartialTensorShape c({-1, -1, 1});
const PartialTensorShape d({1, 0});
const PartialTensorShape e({-1, 0, 2});
const PartialTensorShape f({});
const PartialTensorShape g;
EXPECT_TRUE(f.IsCompatibleWith(f));
EXPECT_TRUE(a.IsCompatibleWith(b));
EXPECT_TRUE(a.IsCompatibleWith(a));
EXPECT_TRUE(b.IsCompatibleWith(b));
EXPECT_TRUE(a.IsCompatibleWith(c));
EXPECT_TRUE(b.IsCompatibleWith(c));
EXPECT_FALSE(a.IsCompatibleWith(d));
EXPECT_FALSE(b.IsCompatibleWith(d));
EXPECT_FALSE(c.IsCompatibleWith(d));
EXPECT_FALSE(a.IsCompatibleWith(e));
EXPECT_FALSE(b.IsCompatibleWith(e));
EXPECT_FALSE(c.IsCompatibleWith(e));
EXPECT_FALSE(a.IsCompatibleWith(f));
EXPECT_FALSE(b.IsCompatibleWith(f));
EXPECT_FALSE(c.IsCompatibleWith(f));
EXPECT_TRUE(a.IsCompatibleWith(g));
EXPECT_TRUE(g.IsCompatibleWith(a));
EXPECT_TRUE(g.IsCompatibleWith(g));
}
TEST(PartialTensorShapeTest, ShapeCompatibleWith) {
const PartialTensorShape a({-1, 0, 1});
const PartialTensorShape unknown;
TensorShape b({0, 1});
TensorShape c({0, 0, 1});
TensorShape d({1, 0, 1});
TensorShape e({1, 1, 1});
EXPECT_FALSE(a.IsCompatibleWith(b));
EXPECT_TRUE(a.IsCompatibleWith(c));
EXPECT_TRUE(a.IsCompatibleWith(d));
EXPECT_FALSE(a.IsCompatibleWith(e));
EXPECT_TRUE(unknown.IsCompatibleWith(b));
EXPECT_TRUE(unknown.IsCompatibleWith(c));
EXPECT_TRUE(unknown.IsCompatibleWith(d));
EXPECT_TRUE(unknown.IsCompatibleWith(e));
}
TEST(PartialTensorShapeTest, PartialShapeMergeWith) {
const PartialTensorShape a({-1, 0, 1});
const PartialTensorShape b({1, 0, 1});
const PartialTensorShape c({-1, -1, 1});
const PartialTensorShape d({1, 0});
const PartialTensorShape e;
PartialTensorShape test;
EXPECT_EQ(absl::OkStatus(), a.MergeWith(a, &test));
EXPECT_EQ(test.dims(), 3);
EXPECT_EQ(test.dim_size(0), -1);
EXPECT_EQ(test.dim_size(1), 0);
EXPECT_EQ(test.dim_size(2), 1);
test = PartialTensorShape();
EXPECT_EQ(absl::OkStatus(), a.MergeWith(b, &test));
EXPECT_EQ(test.dims(), 3);
EXPECT_EQ(test.dim_size(0), 1);
EXPECT_EQ(test.dim_size(1), 0);
EXPECT_EQ(test.dim_size(2), 1);
test = PartialTensorShape();
EXPECT_TRUE(errors::IsInvalidArgument(a.MergeWith(d, &test)));
test = PartialTensorShape();
EXPECT_EQ(absl::OkStatus(), a.MergeWith(c, &test));
EXPECT_EQ(test.dims(), 3);
EXPECT_EQ(test.dim_size(0), -1);
EXPECT_EQ(test.dim_size(1), 0);
EXPECT_EQ(test.dim_size(2), 1);
test = PartialTensorShape();
EXPECT_EQ(absl::OkStatus(), c.MergeWith(a, &test));
EXPECT_EQ(test.dims(), 3);
EXPECT_EQ(test.dim_size(0), -1);
EXPECT_EQ(test.dim_size(1), 0);
EXPECT_EQ(test.dim_size(2), 1);
test = PartialTensorShape();
EXPECT_EQ(absl::OkStatus(), a.MergeWith(e, &test));
EXPECT_EQ(test.dims(), 3);
EXPECT_EQ(test.dim_size(0), -1);
EXPECT_EQ(test.dim_size(1), 0);
EXPECT_EQ(test.dim_size(2), 1);
test = PartialTensorShape();
EXPECT_EQ(absl::OkStatus(), e.MergeWith(a, &test));
EXPECT_EQ(test.dims(), 3);
EXPECT_EQ(test.dim_size(0), -1);
EXPECT_EQ(test.dim_size(1), 0);
EXPECT_EQ(test.dim_size(2), 1);
}
TEST(PartialTensorShapeTest, PartialShapeMergeWithInvalidData) {
PartialTensorShape a = PartialTensorShape({-1, 0, 1});
const PartialTensorShape b({-1, 0, 2});
const PartialTensorShape c({1, -1, 3});
const PartialTensorShape d({-1, std::numeric_limits<int64_t>::max(), -1});
EXPECT_THAT(a.MergeWith(b, &a),
testing::StatusIs(
error::Code::INTERNAL,
::testing::ContainsRegex("Cannot output result to itself")));
EXPECT_THAT(b.MergeWith(c, &a),
testing::StatusIs(error::Code::INVALID_ARGUMENT,
::testing::ContainsRegex(
"Incompatible shapes during merge")));
EXPECT_THAT(c.MergeWith(d, &a),
testing::StatusIs(error::Code::INVALID_ARGUMENT,
::testing::ContainsRegex(
"Encountered overflow when multiplying")));
}
TEST(PartialTensorShapeTest, MakePartialShapeEmpty) {
const int64_t dims[1] = {};
PartialTensorShape shape;
EXPECT_FALSE(shape.IsFullyDefined());
TF_ASSERT_OK(PartialTensorShape::MakePartialShape(dims, 0, &shape));
EXPECT_TRUE(shape.IsFullyDefined());
}
TEST(PartialTensorShapeTest, MakePartialShapeFull) {
const int64_t dims[3] = {7, -1, 2};
PartialTensorShape shape;
TF_ASSERT_OK(PartialTensorShape::MakePartialShape(dims, 3, &shape));
ASSERT_EQ(shape.dims(), 3);
for (int i = 0; i < 3; i++) {
EXPECT_EQ(shape.dim_size(i), dims[i]);
}
}
TEST(PartialTensorShapeTest, MakePartialShapeInvalid) {
const int64_t dims[3] = {7, -2, 2};
PartialTensorShape shape;
EXPECT_EQ(error::INVALID_ARGUMENT,
PartialTensorShape::MakePartialShape(dims, 3, &shape).code());
}
TEST(PartialTensorShapeUtilsTest, PartialShapeListString) {
PartialTensorShape s({2, 5, 20});
EXPECT_EQ(PartialTensorShapeUtils::PartialShapeListString({s}), "[[2,5,20]]");
PartialTensorShape s2;
PartialTensorShape s3({-1, -1, 10});
EXPECT_EQ(PartialTensorShapeUtils::PartialShapeListString({s, s2, s3}),
"[[2,5,20], <unknown>, [?,?,10]]");
}
TEST(PartialTensorShapeUtilsTest, PartialShapeAreCompatible) {
PartialTensorShape s1a({-1, 5, 20});
PartialTensorShape s1b({2, 5, 20});
PartialTensorShape s2a({-1, -1, 20});
PartialTensorShape s2b({5, 10, 20});
EXPECT_TRUE(PartialTensorShapeUtils::AreCompatible({s1a}, {s1b}));
EXPECT_TRUE(PartialTensorShapeUtils::AreCompatible({s1b}, {s1a}));
EXPECT_TRUE(PartialTensorShapeUtils::AreCompatible({s1a, s2b}, {s1b, s2b}));
EXPECT_FALSE(PartialTensorShapeUtils::AreCompatible({s1a}, {s2a, s1a}));
EXPECT_FALSE(PartialTensorShapeUtils::AreCompatible({s1a, s1b}, {s2a, s2b}));
}
TEST(PartialTensorShapeUtilsTest, PartialShapeAreIdentical) {
PartialTensorShape s1a({-1, 5, 20});
PartialTensorShape s1b({2, 5, 20});
PartialTensorShape s1c({-1, 5, 20});
PartialTensorShape s2a({-1, -1, 20});
PartialTensorShape s2b({5, 10, 20});
EXPECT_TRUE(PartialTensorShapeUtils::AreIdentical({s1a}, {s1a}));
EXPECT_TRUE(PartialTensorShapeUtils::AreIdentical({s1a, s1b}, {s1c, s1b}));
EXPECT_TRUE(PartialTensorShapeUtils::AreIdentical({s1c}, {s1a}));
EXPECT_FALSE(PartialTensorShapeUtils::AreIdentical({s1a}, {s1b}));
EXPECT_FALSE(PartialTensorShapeUtils::AreIdentical({s1a, s2b}, {s1b, s2b}));
EXPECT_FALSE(PartialTensorShapeUtils::AreIdentical({s1a}, {s2a, s1a}));
EXPECT_FALSE(PartialTensorShapeUtils::AreIdentical({s1a, s1b}, {s2a, s2b}));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/framework/partial_tensor_shape.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/framework/partial_tensor_shape_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
ac538130-9704-4263-b4e3-40f5340a8cbc | cpp | tensorflow/tensorflow | device_set | tensorflow/core/common_runtime/device_set.cc | tensorflow/core/common_runtime/device_set_test.cc | #include "tensorflow/core/common_runtime/device_set.h"
#include <algorithm>
#include <set>
#include <utility>
#include <vector>
#include "tensorflow/core/common_runtime/device.h"
#include "tensorflow/core/common_runtime/device_factory.h"
#include "tensorflow/core/lib/core/stringpiece.h"
#include "tensorflow/core/lib/gtl/map_util.h"
namespace tensorflow {
DeviceSet::DeviceSet() = default;
DeviceSet::~DeviceSet() = default;
void DeviceSet::AddDevice(Device* device) {
mutex_lock l(devices_mu_);
devices_.push_back(device);
prioritized_devices_.clear();
prioritized_device_types_.clear();
for (const string& name :
DeviceNameUtils::GetNamesForDeviceMappings(device->parsed_name())) {
device_by_name_.insert({name, device});
}
matching_device_cache_.clear();
}
void DeviceSet::FindMatchingDevices(const DeviceNameUtils::ParsedName& spec,
std::vector<Device*>* devices) const {
{
mutex_lock l(devices_mu_);
auto match = matching_device_cache_.find(spec);
if (match != matching_device_cache_.end()) {
      *devices = match->second;
      // Cache hit: return the previously computed match list instead of
      // recomputing it below.
      return;
}
}
devices->clear();
for (Device* d : devices_) {
if (DeviceNameUtils::IsCompleteSpecification(spec, d->parsed_name())) {
devices->push_back(d);
}
}
mutex_lock l(devices_mu_);
matching_device_cache_.insert({spec, *devices});
}
Device* DeviceSet::FindDeviceByName(const string& name) const {
return gtl::FindPtrOrNull(device_by_name_, name);
}
int DeviceSet::DeviceTypeOrder(const DeviceType& d) {
return DeviceFactory::DevicePriority(d.type_string());
}
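// Orders device types by registration priority: a higher DevicePriority()
// value sorts first, and ties are broken by type name so the ordering is
// deterministic.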
static bool DeviceTypeComparator(const DeviceType& a, const DeviceType& b) {
auto a_priority = DeviceSet::DeviceTypeOrder(a);
auto b_priority = DeviceSet::DeviceTypeOrder(b);
if (a_priority != b_priority) {
return a_priority > b_priority;
}
return StringPiece(a.type()) < StringPiece(b.type());
}
std::vector<DeviceType> DeviceSet::PrioritizedDeviceTypeList() const {
std::vector<DeviceType> result;
std::set<string> seen;
for (Device* d : devices_) {
const auto& t = d->device_type();
if (seen.insert(t).second) {
result.emplace_back(t);
}
}
std::sort(result.begin(), result.end(), DeviceTypeComparator);
return result;
}
void DeviceSet::SortPrioritizedDeviceTypeVector(
PrioritizedDeviceTypeVector* vector) {
if (vector == nullptr) return;
auto device_sort = [](const PrioritizedDeviceTypeVector::value_type& a,
const PrioritizedDeviceTypeVector::value_type& b) {
if (a.second != b.second) {
return a.second > b.second;
}
return DeviceTypeComparator(a.first, b.first);
};
std::sort(vector->begin(), vector->end(), device_sort);
}
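// Sorts (device, priority) pairs in place: explicit priority first
// (descending), then device-type registration priority, then local devices
// before remote ones, and finally device name as a stable tie-breaker.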
void DeviceSet::SortPrioritizedDeviceVector(PrioritizedDeviceVector* vector) {
auto device_sort = [](const std::pair<Device*, int32>& a,
const std::pair<Device*, int32>& b) {
if (a.second != b.second) {
return a.second > b.second;
}
const string& a_type_name = a.first->device_type();
const string& b_type_name = b.first->device_type();
if (a_type_name != b_type_name) {
auto a_priority = DeviceFactory::DevicePriority(a_type_name);
auto b_priority = DeviceFactory::DevicePriority(b_type_name);
if (a_priority != b_priority) {
return a_priority > b_priority;
}
}
if (a.first->IsLocal() != b.first->IsLocal()) {
return a.first->IsLocal();
}
return StringPiece(a.first->name()) < StringPiece(b.first->name());
};
std::sort(vector->begin(), vector->end(), device_sort);
}
namespace {
void UpdatePrioritizedVectors(
const std::vector<Device*>& devices,
PrioritizedDeviceVector* prioritized_devices,
PrioritizedDeviceTypeVector* prioritized_device_types) {
if (prioritized_devices->size() != devices.size()) {
for (Device* d : devices) {
prioritized_devices->emplace_back(
d, DeviceSet::DeviceTypeOrder(DeviceType(d->device_type())));
}
DeviceSet::SortPrioritizedDeviceVector(prioritized_devices);
}
if (prioritized_device_types != nullptr &&
prioritized_device_types->size() != devices.size()) {
std::set<DeviceType> seen;
for (const std::pair<Device*, int32>& p : *prioritized_devices) {
DeviceType t(p.first->device_type());
if (seen.insert(t).second) {
prioritized_device_types->emplace_back(t, p.second);
}
}
}
}
}
const PrioritizedDeviceVector& DeviceSet::prioritized_devices() const {
mutex_lock l(devices_mu_);
UpdatePrioritizedVectors(devices_, &prioritized_devices_,
nullptr);
return prioritized_devices_;
}
const PrioritizedDeviceTypeVector& DeviceSet::prioritized_device_types() const {
mutex_lock l(devices_mu_);
UpdatePrioritizedVectors(devices_, &prioritized_devices_,
&prioritized_device_types_);
return prioritized_device_types_;
}
} | #include "tensorflow/core/common_runtime/device_set.h"
#include <memory>
#include <vector>
#include "tensorflow/core/common_runtime/device_factory.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
static Device* Dev(const char* type, const char* name) {
class FakeDevice : public Device {
public:
explicit FakeDevice(const DeviceAttributes& attr) : Device(nullptr, attr) {}
Status Sync() override { return absl::OkStatus(); }
Allocator* GetAllocator(AllocatorAttributes) override { return nullptr; }
};
DeviceAttributes attr;
attr.set_name(name);
attr.set_device_type(type);
return new FakeDevice(attr);
}
class DeviceSetTest : public ::testing::Test {
public:
Device* AddDevice(const char* type, const char* name) {
Device* d = Dev(type, name);
owned_.emplace_back(d);
devices_.AddDevice(d);
return d;
}
const DeviceSet& device_set() const { return devices_; }
std::vector<DeviceType> types() const {
return devices_.PrioritizedDeviceTypeList();
}
private:
DeviceSet devices_;
std::vector<std::unique_ptr<Device>> owned_;
};
class DummyFactory : public DeviceFactory {
public:
Status ListPhysicalDevices(std::vector<string>* devices) override {
return absl::OkStatus();
}
Status CreateDevices(const SessionOptions& options, const string& name_prefix,
std::vector<std::unique_ptr<Device>>* devices) override {
return absl::OkStatus();
}
};
REGISTER_LOCAL_DEVICE_FACTORY("d1", DummyFactory);
REGISTER_LOCAL_DEVICE_FACTORY("d2", DummyFactory, 51);
REGISTER_LOCAL_DEVICE_FACTORY("d3", DummyFactory, 49);
TEST_F(DeviceSetTest, PrioritizedDeviceTypeList) {
EXPECT_EQ(50, DeviceSet::DeviceTypeOrder(DeviceType("d1")));
EXPECT_EQ(51, DeviceSet::DeviceTypeOrder(DeviceType("d2")));
EXPECT_EQ(49, DeviceSet::DeviceTypeOrder(DeviceType("d3")));
EXPECT_EQ(std::vector<DeviceType>{}, types());
AddDevice("d1", "/job:a/replica:0/task:0/device:d1:0");
EXPECT_EQ(std::vector<DeviceType>{DeviceType("d1")}, types());
AddDevice("d1", "/job:a/replica:0/task:0/device:d1:1");
EXPECT_EQ(std::vector<DeviceType>{DeviceType("d1")}, types());
AddDevice("d2", "/job:a/replica:0/task:0/device:d2:0");
EXPECT_EQ((std::vector<DeviceType>{DeviceType("d2"), DeviceType("d1")}),
types());
AddDevice("d3", "/job:a/replica:0/task:0/device:d3:0");
EXPECT_EQ((std::vector<DeviceType>{
DeviceType("d2"),
DeviceType("d1"),
DeviceType("d3"),
}),
types());
}
TEST_F(DeviceSetTest, prioritized_devices) {
Device* d1 = AddDevice("d1", "/job:a/replica:0/task:0/device:d1:0");
Device* d2 = AddDevice("d2", "/job:a/replica:0/task:0/device:d2:0");
EXPECT_EQ(device_set().prioritized_devices(),
(PrioritizedDeviceVector{std::make_pair(d2, 51),
std::make_pair(d1, 50)}));
Device* d3 = AddDevice("d3", "/job:a/replica:0/task:0/device:d3:0");
EXPECT_EQ(
device_set().prioritized_devices(),
(PrioritizedDeviceVector{std::make_pair(d2, 51), std::make_pair(d1, 50),
std::make_pair(d3, 49)}));
}
TEST_F(DeviceSetTest, prioritized_device_types) {
AddDevice("d1", "/job:a/replica:0/task:0/device:d1:0");
AddDevice("d2", "/job:a/replica:0/task:0/device:d2:0");
EXPECT_EQ(
device_set().prioritized_device_types(),
(PrioritizedDeviceTypeVector{std::make_pair(DeviceType("d2"), 51),
std::make_pair(DeviceType("d1"), 50)}));
AddDevice("d3", "/job:a/replica:0/task:0/device:d3:0");
EXPECT_EQ(
device_set().prioritized_device_types(),
(PrioritizedDeviceTypeVector{std::make_pair(DeviceType("d2"), 51),
std::make_pair(DeviceType("d1"), 50),
std::make_pair(DeviceType("d3"), 49)}));
}
TEST_F(DeviceSetTest, SortPrioritizedDeviceVector) {
Device* d1_0 = AddDevice("d1", "/job:a/replica:0/task:0/device:d1:0");
Device* d2_0 = AddDevice("d2", "/job:a/replica:0/task:0/device:d2:0");
Device* d3_0 = AddDevice("d3", "/job:a/replica:0/task:0/device:d3:0");
Device* d1_1 = AddDevice("d1", "/job:a/replica:0/task:0/device:d1:1");
Device* d2_1 = AddDevice("d2", "/job:a/replica:0/task:0/device:d2:1");
Device* d3_1 = AddDevice("d3", "/job:a/replica:0/task:0/device:d3:1");
PrioritizedDeviceVector sorted{
std::make_pair(d3_1, 30), std::make_pair(d1_0, 10),
std::make_pair(d2_0, 20), std::make_pair(d3_0, 30),
std::make_pair(d1_1, 20), std::make_pair(d2_1, 10)};
device_set().SortPrioritizedDeviceVector(&sorted);
EXPECT_EQ(sorted, (PrioritizedDeviceVector{
std::make_pair(d3_0, 30), std::make_pair(d3_1, 30),
std::make_pair(d2_0, 20), std::make_pair(d1_1, 20),
std::make_pair(d2_1, 10), std::make_pair(d1_0, 10)}));
}
TEST_F(DeviceSetTest, SortPrioritizedDeviceTypeVector) {
PrioritizedDeviceTypeVector sorted{std::make_pair(DeviceType("d3"), 20),
std::make_pair(DeviceType("d1"), 20),
std::make_pair(DeviceType("d2"), 30)};
device_set().SortPrioritizedDeviceTypeVector(&sorted);
EXPECT_EQ(sorted, (PrioritizedDeviceTypeVector{
std::make_pair(DeviceType("d2"), 30),
std::make_pair(DeviceType("d1"), 20),
std::make_pair(DeviceType("d3"), 20)}));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/device_set.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/device_set_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
3e506691-0765-4cfa-bb60-211272e20c60 | cpp | google/quiche | hpack_output_stream | quiche/http2/hpack/hpack_output_stream.cc | quiche/http2/hpack/hpack_output_stream_test.cc | #include "quiche/http2/hpack/hpack_output_stream.h"
#include <cstddef>
#include <cstdint>
#include <string>
#include <utility>
#include "absl/strings/string_view.h"
#include "quiche/http2/hpack/hpack_constants.h"
#include "quiche/common/platform/api/quiche_logging.h"
namespace spdy {
HpackOutputStream::HpackOutputStream() : bit_offset_(0) {}
HpackOutputStream::~HpackOutputStream() = default;
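// Appends the low `bit_size` bits of `bits` to the stream, most significant
// bit first. Bits that do not fit in the current partially filled byte spill
// over into a newly appended byte; `bit_offset_` tracks how many bits of the
// last byte are in use.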
void HpackOutputStream::AppendBits(uint8_t bits, size_t bit_size) {
QUICHE_DCHECK_GT(bit_size, 0u);
QUICHE_DCHECK_LE(bit_size, 8u);
QUICHE_DCHECK_EQ(bits >> bit_size, 0);
size_t new_bit_offset = bit_offset_ + bit_size;
if (bit_offset_ == 0) {
QUICHE_DCHECK_LE(bit_size, 8u);
buffer_.append(1, bits << (8 - bit_size));
} else if (new_bit_offset <= 8) {
buffer_.back() |= bits << (8 - new_bit_offset);
} else {
buffer_.back() |= bits >> (new_bit_offset - 8);
buffer_.append(1, bits << (16 - new_bit_offset));
}
bit_offset_ = new_bit_offset % 8;
}
void HpackOutputStream::AppendPrefix(HpackPrefix prefix) {
AppendBits(prefix.bits, prefix.bit_size);
}
void HpackOutputStream::AppendBytes(absl::string_view buffer) {
QUICHE_DCHECK_EQ(bit_offset_, 0u);
buffer_.append(buffer.data(), buffer.size());
}
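// Encodes `I` with the HPACK prefixed-integer scheme (RFC 7541, Section 5.1),
// using the 8 - bit_offset_ bits remaining in the current byte as the prefix.
// Values that do not fit in the prefix are continued in 7-bit groups, least
// significant group first, with the high bit marking continuation.
// Illustrative example (RFC 7541, C.1.2): with a 5-bit prefix, 1337 is
// encoded as the filled prefix (31) followed by the bytes 154 and 10.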
void HpackOutputStream::AppendUint32(uint32_t I) {
size_t N = 8 - bit_offset_;
uint8_t max_first_byte = static_cast<uint8_t>((1 << N) - 1);
if (I < max_first_byte) {
AppendBits(static_cast<uint8_t>(I), N);
} else {
AppendBits(max_first_byte, N);
I -= max_first_byte;
while ((I & ~0x7f) != 0) {
buffer_.append(1, (I & 0x7f) | 0x80);
I >>= 7;
}
AppendBits(static_cast<uint8_t>(I), 8);
}
QUICHE_DCHECK_EQ(bit_offset_, 0u);
}
std::string* HpackOutputStream::MutableString() {
QUICHE_DCHECK_EQ(bit_offset_, 0u);
return &buffer_;
}
std::string HpackOutputStream::TakeString() {
QUICHE_DCHECK_EQ(bit_offset_, 0u);
std::string out = std::move(buffer_);
buffer_ = {};
bit_offset_ = 0;
return out;
}
std::string HpackOutputStream::BoundedTakeString(size_t max_size) {
if (buffer_.size() > max_size) {
std::string overflow = buffer_.substr(max_size);
buffer_.resize(max_size);
std::string out = std::move(buffer_);
buffer_ = std::move(overflow);
return out;
} else {
return TakeString();
}
}
} | #include "quiche/http2/hpack/hpack_output_stream.h"
#include <cstdint>
#include <string>
#include "quiche/common/platform/api/quiche_test.h"
namespace spdy {
namespace {
TEST(HpackOutputStreamTest, AppendBits) {
HpackOutputStream output_stream;
std::string expected_str;
output_stream.AppendBits(0x1, 1);
expected_str.append(1, 0x00);
expected_str.back() |= (0x1 << 7);
output_stream.AppendBits(0x0, 1);
output_stream.AppendBits(0x3, 2);
*expected_str.rbegin() |= (0x3 << 4);
output_stream.AppendBits(0x0, 2);
output_stream.AppendBits(0x7, 3);
*expected_str.rbegin() |= (0x7 >> 1);
expected_str.append(1, 0x00);
expected_str.back() |= (0x7 << 7);
output_stream.AppendBits(0x0, 7);
std::string str = output_stream.TakeString();
EXPECT_EQ(expected_str, str);
}
std::string EncodeUint32(uint8_t N, uint32_t I) {
HpackOutputStream output_stream;
if (N < 8) {
output_stream.AppendBits(0x00, 8 - N);
}
output_stream.AppendUint32(I);
std::string str = output_stream.TakeString();
return str;
}
TEST(HpackOutputStreamTest, OneByteIntegersEightBitPrefix) {
EXPECT_EQ(std::string("\x00", 1), EncodeUint32(8, 0x00));
EXPECT_EQ("\x7f", EncodeUint32(8, 0x7f));
EXPECT_EQ("\xfe", EncodeUint32(8, 0xfe));
}
TEST(HpackOutputStreamTest, TwoByteIntegersEightBitPrefix) {
EXPECT_EQ(std::string("\xff\x00", 2), EncodeUint32(8, 0xff));
EXPECT_EQ("\xff\x01", EncodeUint32(8, 0x0100));
EXPECT_EQ("\xff\x7f", EncodeUint32(8, 0x017e));
}
TEST(HpackOutputStreamTest, ThreeByteIntegersEightBitPrefix) {
EXPECT_EQ("\xff\x80\x01", EncodeUint32(8, 0x017f));
EXPECT_EQ("\xff\x80\x1e", EncodeUint32(8, 0x0fff));
EXPECT_EQ("\xff\xff\x7f", EncodeUint32(8, 0x40fe));
}
TEST(HpackOutputStreamTest, FourByteIntegersEightBitPrefix) {
EXPECT_EQ("\xff\x80\x80\x01", EncodeUint32(8, 0x40ff));
EXPECT_EQ("\xff\x80\xfe\x03", EncodeUint32(8, 0xffff));
EXPECT_EQ("\xff\xff\xff\x7f", EncodeUint32(8, 0x002000fe));
}
TEST(HpackOutputStreamTest, FiveByteIntegersEightBitPrefix) {
EXPECT_EQ("\xff\x80\x80\x80\x01", EncodeUint32(8, 0x002000ff));
EXPECT_EQ("\xff\x80\xfe\xff\x07", EncodeUint32(8, 0x00ffffff));
EXPECT_EQ("\xff\xff\xff\xff\x7f", EncodeUint32(8, 0x100000fe));
}
TEST(HpackOutputStreamTest, SixByteIntegersEightBitPrefix) {
EXPECT_EQ("\xff\x80\x80\x80\x80\x01", EncodeUint32(8, 0x100000ff));
EXPECT_EQ("\xff\x80\xfe\xff\xff\x0f", EncodeUint32(8, 0xffffffff));
}
TEST(HpackOutputStreamTest, OneByteIntegersOneToSevenBitPrefixes) {
EXPECT_EQ(std::string("\x00", 1), EncodeUint32(7, 0x00));
EXPECT_EQ(std::string("\x00", 1), EncodeUint32(6, 0x00));
EXPECT_EQ(std::string("\x00", 1), EncodeUint32(5, 0x00));
EXPECT_EQ(std::string("\x00", 1), EncodeUint32(4, 0x00));
EXPECT_EQ(std::string("\x00", 1), EncodeUint32(3, 0x00));
EXPECT_EQ(std::string("\x00", 1), EncodeUint32(2, 0x00));
EXPECT_EQ(std::string("\x00", 1), EncodeUint32(1, 0x00));
EXPECT_EQ("\x7e", EncodeUint32(7, 0x7e));
EXPECT_EQ("\x3e", EncodeUint32(6, 0x3e));
EXPECT_EQ("\x1e", EncodeUint32(5, 0x1e));
EXPECT_EQ("\x0e", EncodeUint32(4, 0x0e));
EXPECT_EQ("\x06", EncodeUint32(3, 0x06));
EXPECT_EQ("\x02", EncodeUint32(2, 0x02));
EXPECT_EQ(std::string("\x00", 1), EncodeUint32(1, 0x00));
}
TEST(HpackOutputStreamTest, TwoByteIntegersOneToSevenBitPrefixes) {
EXPECT_EQ(std::string("\x7f\x00", 2), EncodeUint32(7, 0x7f));
EXPECT_EQ(std::string("\x3f\x00", 2), EncodeUint32(6, 0x3f));
EXPECT_EQ(std::string("\x1f\x00", 2), EncodeUint32(5, 0x1f));
EXPECT_EQ(std::string("\x0f\x00", 2), EncodeUint32(4, 0x0f));
EXPECT_EQ(std::string("\x07\x00", 2), EncodeUint32(3, 0x07));
EXPECT_EQ(std::string("\x03\x00", 2), EncodeUint32(2, 0x03));
EXPECT_EQ(std::string("\x01\x00", 2), EncodeUint32(1, 0x01));
EXPECT_EQ("\x7f\x7f", EncodeUint32(7, 0xfe));
EXPECT_EQ("\x3f\x7f", EncodeUint32(6, 0xbe));
EXPECT_EQ("\x1f\x7f", EncodeUint32(5, 0x9e));
EXPECT_EQ("\x0f\x7f", EncodeUint32(4, 0x8e));
EXPECT_EQ("\x07\x7f", EncodeUint32(3, 0x86));
EXPECT_EQ("\x03\x7f", EncodeUint32(2, 0x82));
EXPECT_EQ("\x01\x7f", EncodeUint32(1, 0x80));
}
TEST(HpackOutputStreamTest, ThreeByteIntegersOneToSevenBitPrefixes) {
EXPECT_EQ("\x7f\x80\x01", EncodeUint32(7, 0xff));
EXPECT_EQ("\x3f\x80\x01", EncodeUint32(6, 0xbf));
EXPECT_EQ("\x1f\x80\x01", EncodeUint32(5, 0x9f));
EXPECT_EQ("\x0f\x80\x01", EncodeUint32(4, 0x8f));
EXPECT_EQ("\x07\x80\x01", EncodeUint32(3, 0x87));
EXPECT_EQ("\x03\x80\x01", EncodeUint32(2, 0x83));
EXPECT_EQ("\x01\x80\x01", EncodeUint32(1, 0x81));
EXPECT_EQ("\x7f\xff\x7f", EncodeUint32(7, 0x407e));
EXPECT_EQ("\x3f\xff\x7f", EncodeUint32(6, 0x403e));
EXPECT_EQ("\x1f\xff\x7f", EncodeUint32(5, 0x401e));
EXPECT_EQ("\x0f\xff\x7f", EncodeUint32(4, 0x400e));
EXPECT_EQ("\x07\xff\x7f", EncodeUint32(3, 0x4006));
EXPECT_EQ("\x03\xff\x7f", EncodeUint32(2, 0x4002));
EXPECT_EQ("\x01\xff\x7f", EncodeUint32(1, 0x4000));
}
TEST(HpackOutputStreamTest, FourByteIntegersOneToSevenBitPrefixes) {
EXPECT_EQ("\x7f\x80\x80\x01", EncodeUint32(7, 0x407f));
EXPECT_EQ("\x3f\x80\x80\x01", EncodeUint32(6, 0x403f));
EXPECT_EQ("\x1f\x80\x80\x01", EncodeUint32(5, 0x401f));
EXPECT_EQ("\x0f\x80\x80\x01", EncodeUint32(4, 0x400f));
EXPECT_EQ("\x07\x80\x80\x01", EncodeUint32(3, 0x4007));
EXPECT_EQ("\x03\x80\x80\x01", EncodeUint32(2, 0x4003));
EXPECT_EQ("\x01\x80\x80\x01", EncodeUint32(1, 0x4001));
EXPECT_EQ("\x7f\xff\xff\x7f", EncodeUint32(7, 0x20007e));
EXPECT_EQ("\x3f\xff\xff\x7f", EncodeUint32(6, 0x20003e));
EXPECT_EQ("\x1f\xff\xff\x7f", EncodeUint32(5, 0x20001e));
EXPECT_EQ("\x0f\xff\xff\x7f", EncodeUint32(4, 0x20000e));
EXPECT_EQ("\x07\xff\xff\x7f", EncodeUint32(3, 0x200006));
EXPECT_EQ("\x03\xff\xff\x7f", EncodeUint32(2, 0x200002));
EXPECT_EQ("\x01\xff\xff\x7f", EncodeUint32(1, 0x200000));
}
TEST(HpackOutputStreamTest, FiveByteIntegersOneToSevenBitPrefixes) {
EXPECT_EQ("\x7f\x80\x80\x80\x01", EncodeUint32(7, 0x20007f));
EXPECT_EQ("\x3f\x80\x80\x80\x01", EncodeUint32(6, 0x20003f));
EXPECT_EQ("\x1f\x80\x80\x80\x01", EncodeUint32(5, 0x20001f));
EXPECT_EQ("\x0f\x80\x80\x80\x01", EncodeUint32(4, 0x20000f));
EXPECT_EQ("\x07\x80\x80\x80\x01", EncodeUint32(3, 0x200007));
EXPECT_EQ("\x03\x80\x80\x80\x01", EncodeUint32(2, 0x200003));
EXPECT_EQ("\x01\x80\x80\x80\x01", EncodeUint32(1, 0x200001));
EXPECT_EQ("\x7f\xff\xff\xff\x7f", EncodeUint32(7, 0x1000007e));
EXPECT_EQ("\x3f\xff\xff\xff\x7f", EncodeUint32(6, 0x1000003e));
EXPECT_EQ("\x1f\xff\xff\xff\x7f", EncodeUint32(5, 0x1000001e));
EXPECT_EQ("\x0f\xff\xff\xff\x7f", EncodeUint32(4, 0x1000000e));
EXPECT_EQ("\x07\xff\xff\xff\x7f", EncodeUint32(3, 0x10000006));
EXPECT_EQ("\x03\xff\xff\xff\x7f", EncodeUint32(2, 0x10000002));
EXPECT_EQ("\x01\xff\xff\xff\x7f", EncodeUint32(1, 0x10000000));
}
TEST(HpackOutputStreamTest, SixByteIntegersOneToSevenBitPrefixes) {
EXPECT_EQ("\x7f\x80\x80\x80\x80\x01", EncodeUint32(7, 0x1000007f));
EXPECT_EQ("\x3f\x80\x80\x80\x80\x01", EncodeUint32(6, 0x1000003f));
EXPECT_EQ("\x1f\x80\x80\x80\x80\x01", EncodeUint32(5, 0x1000001f));
EXPECT_EQ("\x0f\x80\x80\x80\x80\x01", EncodeUint32(4, 0x1000000f));
EXPECT_EQ("\x07\x80\x80\x80\x80\x01", EncodeUint32(3, 0x10000007));
EXPECT_EQ("\x03\x80\x80\x80\x80\x01", EncodeUint32(2, 0x10000003));
EXPECT_EQ("\x01\x80\x80\x80\x80\x01", EncodeUint32(1, 0x10000001));
EXPECT_EQ("\x7f\x80\xff\xff\xff\x0f", EncodeUint32(7, 0xffffffff));
EXPECT_EQ("\x3f\xc0\xff\xff\xff\x0f", EncodeUint32(6, 0xffffffff));
EXPECT_EQ("\x1f\xe0\xff\xff\xff\x0f", EncodeUint32(5, 0xffffffff));
EXPECT_EQ("\x0f\xf0\xff\xff\xff\x0f", EncodeUint32(4, 0xffffffff));
EXPECT_EQ("\x07\xf8\xff\xff\xff\x0f", EncodeUint32(3, 0xffffffff));
EXPECT_EQ("\x03\xfc\xff\xff\xff\x0f", EncodeUint32(2, 0xffffffff));
EXPECT_EQ("\x01\xfe\xff\xff\xff\x0f", EncodeUint32(1, 0xffffffff));
}
TEST(HpackOutputStreamTest, AppendUint32PreservesUpperBits) {
HpackOutputStream output_stream;
output_stream.AppendBits(0x7f, 7);
output_stream.AppendUint32(0x01);
std::string str = output_stream.TakeString();
EXPECT_EQ(std::string("\xff\x00", 2), str);
}
TEST(HpackOutputStreamTest, AppendBytes) {
HpackOutputStream output_stream;
output_stream.AppendBytes("buffer1");
output_stream.AppendBytes("buffer2");
std::string str = output_stream.TakeString();
EXPECT_EQ("buffer1buffer2", str);
}
TEST(HpackOutputStreamTest, BoundedTakeString) {
HpackOutputStream output_stream;
output_stream.AppendBytes("buffer12");
output_stream.AppendBytes("buffer456");
std::string str = output_stream.BoundedTakeString(9);
EXPECT_EQ("buffer12b", str);
output_stream.AppendBits(0x7f, 7);
output_stream.AppendUint32(0x11);
str = output_stream.BoundedTakeString(9);
EXPECT_EQ("uffer456\xff", str);
str = output_stream.BoundedTakeString(9);
EXPECT_EQ("\x10", str);
}
TEST(HpackOutputStreamTest, MutableString) {
HpackOutputStream output_stream;
output_stream.AppendBytes("1");
output_stream.MutableString()->append("2");
output_stream.AppendBytes("foo");
output_stream.MutableString()->append("bar");
std::string str = output_stream.TakeString();
EXPECT_EQ("12foobar", str);
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/http2/hpack/hpack_output_stream.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/http2/hpack/hpack_output_stream_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
fc8bfaa4-1e18-4a7e-9928-d841f1cf4f11 | cpp | tensorflow/tensorflow | stablehlo_reduce_window_test_util | tensorflow/lite/kernels/stablehlo_reduce_window_test_util.h | tensorflow/lite/kernels/stablehlo_reduce_window_test_util_test.cc | #ifndef TENSORFLOW_LITE_KERNELS_STABLEHLO_REDUCE_WINDOW_TEST_UTIL_H_
#define TENSORFLOW_LITE_KERNELS_STABLEHLO_REDUCE_WINDOW_TEST_UTIL_H_
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <functional>
#include <initializer_list>
#include <numeric>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
namespace tflite {
namespace reduce_window {
namespace reference {
constexpr int kMaxDims = 6;
template <class T>
struct Tensor {
std::vector<int64_t> shape;
std::vector<T> data;
static Tensor<T> FromShape(std::vector<int64_t> shape,
const T init_value = 0) {
Tensor tensor{std::move(shape)};
tensor.data.resize(tensor.size(), init_value);
return tensor;
}
template <class I>
static Tensor<T> iota(std::initializer_list<I> shape) {
Tensor<T> tensor;
tensor.shape.assign(shape.begin(), shape.end());
tensor.data.resize(absl::c_accumulate(shape, 1, std::multiplies<>()));
absl::c_iota(tensor.data, 1);
return tensor;
}
int64_t size() const {
return absl::c_accumulate(shape, 1, std::multiplies<>());
}
std::vector<int64_t> Strides() const {
std::vector<int64_t> strides(kMaxDims, 0);
if (!shape.empty()) {
strides[shape.size() - 1] = 1;
for (size_t i = shape.size() - 1; i > 0; --i) {
strides[i - 1] = shape[i] * strides[i];
}
}
return strides;
}
};
inline std::vector<int64_t> ExtendToMaxDim(std::vector<int64_t> vec,
int64_t val = 0) {
vec.resize(kMaxDims, val);
return vec;
}
inline std::vector<int64_t> DilateShape(std::vector<int64_t> shape,
const std::vector<int64_t> dilations) {
for (size_t i = 0; i < shape.size(); ++i) {
shape[i] = (shape[i] - 1) * dilations[i] + 1;
}
if (absl::c_any_of(shape, [](auto s) { return s <= 0; })) {
absl::c_fill(shape, 0);
}
return shape;
}
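// Spreads `input` out by inserting (dilation - 1) copies of `padding_value`
// between adjacent elements along each axis. Illustrative example: dilating
// {1, 2, 3} by 2 yields {1, p, 2, p, 3}, of length (3 - 1) * 2 + 1 = 5.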
template <class T>
Tensor<T> Dilate(const Tensor<T>& input, const std::vector<int64_t>& dilations,
const T padding_value) {
Tensor<T> output =
Tensor<T>::FromShape(DilateShape(input.shape, dilations), padding_value);
if (absl::c_all_of(output.shape, [](auto s) { return s == 0; })) {
return output;
}
const std::vector<int64_t> strides = input.Strides();
const std::vector<int64_t> output_strides = output.Strides();
const std::vector<int64_t> safe_dilations = ExtendToMaxDim(dilations);
const std::vector<int64_t> safe_input_shape = ExtendToMaxDim(input.shape);
int a = 0;
do {
int b = 0;
do {
int c = 0;
do {
int d = 0;
do {
int e = 0;
do {
int f = 0;
do {
const int i_idx = a * strides[0] + b * strides[1] +
c * strides[2] + d * strides[3] +
e * strides[4] + f * strides[5];
const int o_idx = a * safe_dilations[0] * output_strides[0] +
b * safe_dilations[1] * output_strides[1] +
c * safe_dilations[2] * output_strides[2] +
d * safe_dilations[3] * output_strides[3] +
e * safe_dilations[4] * output_strides[4] +
f * safe_dilations[5] * output_strides[5];
output.data[o_idx] = input.data[i_idx];
} while (++f < safe_input_shape[5]);
} while (++e < safe_input_shape[4]);
} while (++d < safe_input_shape[3]);
} while (++c < safe_input_shape[2]);
} while (++b < safe_input_shape[1]);
} while (++a < safe_input_shape[0]);
return output;
}
inline std::vector<int64_t> PadCropShape(std::vector<int64_t> shape,
const std::vector<int64_t> padding) {
for (size_t i = 0; i < shape.size(); ++i) {
shape[i] = shape[i] + padding[2 * i] + padding[2 * i + 1];
}
if (absl::c_any_of(shape, [](auto s) { return s <= 0; })) {
absl::c_fill(shape, 0);
}
return shape;
}
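// `padding` holds interleaved (low, high) edge paddings per dimension. Pad
// applies only the non-negative entries, filling new elements with
// `padding_value`; negative entries (cropping) are ignored here and handled
// by Crop below.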
template <class T>
Tensor<T> Pad(const Tensor<T>& input, const std::vector<int64_t>& padding,
const T padding_value) {
std::vector<int64_t> safe_padding(kMaxDims * 2, 0);
absl::c_transform(padding, safe_padding.begin(),
[](int64_t p) { return std::max<int64_t>(p, 0); });
Tensor<T> output = Tensor<T>::FromShape(
PadCropShape(input.shape, safe_padding), padding_value);
if (absl::c_all_of(output.shape, [](auto s) { return s == 0; })) {
return output;
}
const std::vector<int64_t> strides = input.Strides();
const std::vector<int64_t> output_strides = output.Strides();
const std::vector<int64_t> safe_input_shape = ExtendToMaxDim(input.shape);
int a = 0;
do {
int b = 0;
do {
int c = 0;
do {
int d = 0;
do {
int e = 0;
do {
int f = 0;
do {
const int i_idx = a * strides[0] + b * strides[1] +
c * strides[2] + d * strides[3] +
e * strides[4] + f * strides[5];
const int o_idx = (a + safe_padding[0]) * output_strides[0] +
(b + safe_padding[2]) * output_strides[1] +
(c + safe_padding[4]) * output_strides[2] +
(d + safe_padding[6]) * output_strides[3] +
(e + safe_padding[8]) * output_strides[4] +
(f + safe_padding[10]) * output_strides[5];
output.data[o_idx] = input.data[i_idx];
} while (++f < safe_input_shape[5]);
} while (++e < safe_input_shape[4]);
} while (++d < safe_input_shape[3]);
} while (++c < safe_input_shape[2]);
} while (++b < safe_input_shape[1]);
} while (++a < safe_input_shape[0]);
return output;
}
template <class T>
Tensor<T> Crop(const Tensor<T>& input, const std::vector<int64_t>& cropping) {
std::vector<int64_t> safe_cropping(kMaxDims * 2, 0);
absl::c_transform(cropping, safe_cropping.begin(),
[](int64_t p) { return std::min<int64_t>(p, 0); });
Tensor<T> output =
Tensor<T>::FromShape(PadCropShape(input.shape, safe_cropping));
if (absl::c_all_of(output.shape, [](auto s) { return s == 0; })) {
return output;
}
const std::vector<int64_t> strides = input.Strides();
const std::vector<int64_t> output_strides = output.Strides();
const std::vector<int64_t> safe_output_shape = ExtendToMaxDim(output.shape);
int a = 0;
do {
int b = 0;
do {
int c = 0;
do {
int d = 0;
do {
int e = 0;
do {
int f = 0;
do {
const int i_idx = (a - safe_cropping[0]) * strides[0] +
(b - safe_cropping[2]) * strides[1] +
(c - safe_cropping[4]) * strides[2] +
(d - safe_cropping[6]) * strides[3] +
(e - safe_cropping[8]) * strides[4] +
(f - safe_cropping[10]) * strides[5];
const int o_idx = a * output_strides[0] + b * output_strides[1] +
c * output_strides[2] + d * output_strides[3] +
e * output_strides[4] + f * output_strides[5];
output.data[o_idx] = input.data[i_idx];
} while (++f < safe_output_shape[5]);
} while (++e < safe_output_shape[4]);
} while (++d < safe_output_shape[3]);
} while (++c < safe_output_shape[2]);
} while (++b < safe_output_shape[1]);
} while (++a < safe_output_shape[0]);
return output;
}
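// Copies one window out of `input`: element (a, b, ...) of the result is read
// from `input` at window_offset[i] + index[i] * window_dilations[i] along each
// axis, so window dilation skips over elements of the base tensor.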
template <class T>
Tensor<T> WindowCopy(const Tensor<T>& input,
const std::vector<int64_t>& window_dimensions,
const std::vector<int64_t>& window_dilations,
const std::vector<int64_t>& window_offset) {
Tensor<T> output = Tensor<T>::FromShape(window_dimensions);
const std::vector<int64_t> safe_window_dimensions =
ExtendToMaxDim(window_dimensions);
const std::vector<int64_t> safe_window_dilations =
ExtendToMaxDim(window_dilations, 1);
const std::vector<int64_t> safe_window_offset = ExtendToMaxDim(window_offset);
const std::vector<int64_t> strides = input.Strides();
const std::vector<int64_t> output_strides = output.Strides();
int a = 0;
do {
int b = 0;
do {
int c = 0;
do {
int d = 0;
do {
int e = 0;
do {
int f = 0;
do {
const int i_idx =
(a * safe_window_dilations[0] + safe_window_offset[0]) *
strides[0] +
(b * safe_window_dilations[1] + safe_window_offset[1]) *
strides[1] +
(c * safe_window_dilations[2] + safe_window_offset[2]) *
strides[2] +
(d * safe_window_dilations[3] + safe_window_offset[3]) *
strides[3] +
(e * safe_window_dilations[4] + safe_window_offset[4]) *
strides[4] +
(f * safe_window_dilations[5] + safe_window_offset[5]) *
strides[5];
const int o_idx = a * output_strides[0] + b * output_strides[1] +
c * output_strides[2] + d * output_strides[3] +
e * output_strides[4] + f * output_strides[5];
output.data[o_idx] = input.data[i_idx];
} while (++f < safe_window_dimensions[5]);
} while (++e < safe_window_dimensions[4]);
} while (++d < safe_window_dimensions[3]);
} while (++c < safe_window_dimensions[2]);
} while (++b < safe_window_dimensions[1]);
} while (++a < safe_window_dimensions[0]);
return output;
}
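// Output shape of a reduce_window: with base = PadCrop(Dilate(shape)) and
// dilated_window = Dilate(window_dimensions, window_dilations), each dimension
// is (base[i] - dilated_window[i]) / window_strides[i] + 1 when the window
// fits, and 0 otherwise. Illustrative example: base 10, window 2, stride 1
// gives (10 - 2) / 1 + 1 = 9.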
inline std::vector<int64_t> ReduceWindowShape(
std::vector<int64_t> shape, const std::vector<int64_t>& base_dilations,
const std::vector<int64_t>& padding,
const std::vector<int64_t>& window_dimensions,
const std::vector<int64_t>& window_dilations,
const std::vector<int64_t>& window_strides) {
const std::vector<int64_t> base_shape =
PadCropShape(DilateShape(shape, base_dilations), padding);
const std::vector<int64_t> dilated_window_dimensions =
DilateShape(window_dimensions, window_dilations);
shape.assign(base_shape.size(), 0);
for (int i = 0; i < base_shape.size(); ++i) {
if (base_shape[i] >= dilated_window_dimensions[i]) {
shape[i] =
(base_shape[i] - dilated_window_dimensions[i]) / window_strides[i] +
1;
}
}
return shape;
}
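// Reference reduce_window: dilate the input with `base_dilations`, apply the
// positive paddings, crop the negative ones, then slide a (possibly dilated)
// window across the result with `window_strides`, folding each window into a
// single value with `body`, seeded by `init_value`. Illustrative example:
// reducing a 3x3 iota tensor (1..9) with a 2x2 window, unit strides and
// dilations, no padding and std::plus<> yields the 2x2 output {12, 16, 24, 28}.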
template <class T, class F>
Tensor<T> ReduceWindow(const Tensor<T>& input,
const std::vector<int64_t>& base_dilations,
const std::vector<int64_t>& padding, const T& init_value,
const std::vector<int64_t>& window_dimensions,
const std::vector<int64_t>& window_dilations,
const std::vector<int64_t>& window_strides, F&& body) {
Tensor<T> output = Tensor<T>::FromShape(
ReduceWindowShape(input.shape, base_dilations, padding, window_dimensions,
window_dilations, window_strides),
init_value);
if (output.data.empty()) {
return output;
}
const std::vector<int64_t> safe_output_shape = ExtendToMaxDim(output.shape);
const std::vector<int64_t> safe_window_strides =
ExtendToMaxDim(window_strides);
const std::vector<int64_t> output_strides = output.Strides();
const Tensor<T> dilated = Dilate<T>(input, base_dilations, init_value);
const Tensor<T> padded = Pad<T>(dilated, padding, init_value);
const Tensor<T> base = Crop<T>(padded, padding);
std::vector<int64_t> output_offsets(6, 0);
std::vector<int64_t> window_offsets(6, 0);
do {
output_offsets[1] = 0;
window_offsets[1] = 0;
do {
output_offsets[2] = 0;
window_offsets[2] = 0;
do {
output_offsets[3] = 0;
window_offsets[3] = 0;
do {
output_offsets[4] = 0;
window_offsets[4] = 0;
do {
output_offsets[5] = 0;
window_offsets[5] = 0;
do {
const int64_t o_idx = output_offsets[0] * output_strides[0] +
output_offsets[1] * output_strides[1] +
output_offsets[2] * output_strides[2] +
output_offsets[3] * output_strides[3] +
output_offsets[4] * output_strides[4] +
output_offsets[5] * output_strides[5];
const Tensor<T> window = WindowCopy(
base, window_dimensions, window_dilations, window_offsets);
if (window.data.empty()) {
output.data[o_idx] = init_value;
} else {
output.data[o_idx] = std::accumulate(
window.data.begin(), window.data.end(), init_value, body);
}
window_offsets[5] += safe_window_strides[5];
} while (++output_offsets[5] < safe_output_shape[5]);
window_offsets[4] += safe_window_strides[4];
} while (++output_offsets[4] < safe_output_shape[4]);
window_offsets[3] += safe_window_strides[3];
} while (++output_offsets[3] < safe_output_shape[3]);
window_offsets[2] += safe_window_strides[2];
} while (++output_offsets[2] < safe_output_shape[2]);
window_offsets[1] += safe_window_strides[1];
} while (++output_offsets[1] < safe_output_shape[1]);
window_offsets[0] += safe_window_strides[0];
} while (++output_offsets[0] < safe_output_shape[0]);
return output;
}
}
}
}
#endif | #include "tensorflow/lite/kernels/stablehlo_reduce_window_test_util.h"
#include <functional>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
namespace tflite::reduce_window::reference {
namespace {
using ::testing::ElementsAre;
using ::testing::ElementsAreArray;
TEST(ReferenceTest, DilateWorks) {
reference::Tensor<int> input = reference::Tensor<int>::iota({3, 3});
reference::Tensor<int> output =
reference::Dilate(input, {2, 3}, -1);
EXPECT_THAT(output.data, ElementsAreArray({
1, -1, -1, 2, -1, -1, 3,
-1, -1, -1, -1, -1, -1, -1,
4, -1, -1, 5, -1, -1, 6,
-1, -1, -1, -1, -1, -1, -1,
7, -1, -1, 8, -1, -1, 9
}));
}
TEST(ReferenceTest, PadWorks) {
reference::Tensor<int> input = reference::Tensor<int>::iota({3, 3});
reference::Tensor<int> output =
reference::Pad(input, {1, 2, 3, 4}, -1);
EXPECT_THAT(output.data,
ElementsAreArray({
-1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
-1, -1, -1, 1, 2, 3, -1, -1, -1, -1,
-1, -1, -1, 4, 5, 6, -1, -1, -1, -1,
-1, -1, -1, 7, 8, 9, -1, -1, -1, -1,
-1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
-1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
}));
}
TEST(ReferenceTest, PadIgnoresNegativeValues) {
reference::Tensor<int> input = reference::Tensor<int>::iota({3, 3});
reference::Tensor<int> output =
reference::Pad(input, {-1, -1, -1, -1}, -1);
EXPECT_THAT(output.data, ElementsAreArray({1, 2, 3, 4, 5, 6, 7, 8, 9}));
}
TEST(ReferenceTest, CropWorks) {
reference::Tensor<int> input =
reference::Tensor<int>::iota({6, 10});
reference::Tensor<int> output =
reference::Crop(input, {-4, -1, -2, -3});
EXPECT_THAT(output.data, ElementsAreArray({43, 44, 45, 46, 47}));
}
TEST(ReferenceTest, CropIgnoresPositiveValues) {
reference::Tensor<int> input = reference::Tensor<int>::iota({3, 3});
reference::Tensor<int> output =
reference::Crop(input, {0, 0, 0, 0});
EXPECT_THAT(output.data, ElementsAreArray({1, 2, 3, 4, 5, 6, 7, 8, 9}));
}
TEST(ReferenceTest, WindowCopyWorks) {
reference::Tensor<int> input = reference::Tensor<int>::iota({6, 4});
EXPECT_THAT(reference::WindowCopy(input, {2, 2},
{2, 2},
{2, 1})
.data,
ElementsAreArray({10, 12, 18, 20}));
}
TEST(ReferenceTest, RandomJaxReference0) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{2, 1},
{1, -1, 0, 0},
0,
{1, 2},
{2, 2},
{1, 1},
std::plus<>());
EXPECT_THAT(res.shape, ElementsAre(19, 8));
EXPECT_THAT(
res.data,
ElementsAreArray(
{0, 0, 0, 0, 0, 0, 0, 0, 4, 6, 8, 10, 12, 14, 16, 18,
0, 0, 0, 0, 0, 0, 0, 0, 24, 26, 28, 30, 32, 34, 36, 38,
0, 0, 0, 0, 0, 0, 0, 0, 44, 46, 48, 50, 52, 54, 56, 58,
0, 0, 0, 0, 0, 0, 0, 0, 64, 66, 68, 70, 72, 74, 76, 78,
0, 0, 0, 0, 0, 0, 0, 0, 84, 86, 88, 90, 92, 94, 96, 98,
0, 0, 0, 0, 0, 0, 0, 0, 104, 106, 108, 110, 112, 114, 116, 118,
0, 0, 0, 0, 0, 0, 0, 0, 124, 126, 128, 130, 132, 134, 136, 138,
0, 0, 0, 0, 0, 0, 0, 0, 144, 146, 148, 150, 152, 154, 156, 158,
0, 0, 0, 0, 0, 0, 0, 0, 164, 166, 168, 170, 172, 174, 176, 178,
0, 0, 0, 0, 0, 0, 0, 0}));
}
TEST(ReferenceTest, RandomJaxReference1) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{1, 2},
{2, -1, 1, 0},
0,
{1, 2},
{1, 2},
{2, 1},
std::plus<>());
EXPECT_THAT(res.shape, ElementsAre(6, 18));
EXPECT_THAT(
res.data,
ElementsAreArray({0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 3, 0, 5, 0, 7, 0, 9, 0, 11,
0, 13, 0, 15, 0, 17, 0, 19, 0, 43, 0, 45, 0, 47,
0, 49, 0, 51, 0, 53, 0, 55, 0, 57, 0, 59, 0, 83,
0, 85, 0, 87, 0, 89, 0, 91, 0, 93, 0, 95, 0, 97,
0, 99, 0, 123, 0, 125, 0, 127, 0, 129, 0, 131, 0, 133,
0, 135, 0, 137, 0, 139, 0, 163, 0, 165, 0, 167, 0, 169,
0, 171, 0, 173, 0, 175, 0, 177, 0, 179}));
}
TEST(ReferenceTest, RandomJaxReference2) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{1, 1},
{2, -2, -2, 2},
-2147483647,
{2, 2},
{2, 2},
{1, 2},
[](auto a, auto b) { return a >= b ? a : b; });
EXPECT_THAT(res.shape, ElementsAre(8, 4));
EXPECT_THAT(res.data,
ElementsAreArray({5, 7, 9, 9, 15, 17, 19, 19, 25, 27, 29,
29, 35, 37, 39, 39, 45, 47, 49, 49, 55, 57,
59, 59, 65, 67, 69, 69, 75, 77, 79, 79}));
}
TEST(ReferenceTest, RandomJaxReference3) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{1, 2},
{0, 1, -1, 1},
-2147483647,
{1, 1},
{1, 2},
{2, 1},
[](auto a, auto b) { return a >= b ? a : b; });
EXPECT_THAT(res.shape, ElementsAre(6, 19));
EXPECT_THAT(
res.data,
ElementsAreArray(
{-2147483647, 2, -2147483647, 3, -2147483647,
4, -2147483647, 5, -2147483647, 6,
-2147483647, 7, -2147483647, 8, -2147483647,
9, -2147483647, 10, -2147483647, -2147483647,
22, -2147483647, 23, -2147483647, 24,
-2147483647, 25, -2147483647, 26, -2147483647,
27, -2147483647, 28, -2147483647, 29,
-2147483647, 30, -2147483647, -2147483647, 42,
-2147483647, 43, -2147483647, 44, -2147483647,
45, -2147483647, 46, -2147483647, 47,
-2147483647, 48, -2147483647, 49, -2147483647,
50, -2147483647, -2147483647, 62, -2147483647,
63, -2147483647, 64, -2147483647, 65,
-2147483647, 66, -2147483647, 67, -2147483647,
68, -2147483647, 69, -2147483647, 70,
-2147483647, -2147483647, 82, -2147483647, 83,
-2147483647, 84, -2147483647, 85, -2147483647,
86, -2147483647, 87, -2147483647, 88,
-2147483647, 89, -2147483647, 90, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647}));
}
TEST(ReferenceTest, RandomJaxReference4) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{1, 1},
{-2, -2, -1, -2},
0,
{1, 2},
{1, 2},
{2, 2},
std::plus<>());
EXPECT_THAT(res.shape, ElementsAre(3, 3));
EXPECT_THAT(res.data,
ElementsAreArray({46, 50, 54, 86, 90, 94, 126, 130, 134}));
}
TEST(ReferenceTest, RandomJaxReference5) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{1, 1},
{1, 2, 1, 1},
1,
{2, 1},
{2, 2},
{1, 2},
std::multiplies<>());
EXPECT_THAT(res.shape, ElementsAre(11, 6));
EXPECT_THAT(
res.data,
ElementsAreArray(
{1, 12, 14, 16, 18, 20, 1, 44, 96, 156, 224,
300, 1, 384, 476, 576, 684, 800, 1, 924, 1056, 1196,
1344, 1500, 1, 1664, 1836, 2016, 2204, 2400, 1, 2604, 2816,
3036, 3264, 3500, 1, 3744, 3996, 4256, 4524, 4800, 1, 5084,
5376, 5676, 5984, 6300, 1, 6624, 6956, 7296, 7644, 8000, 1,
82, 84, 86, 88, 90, 1, 92, 94, 96, 98, 100}));
}
TEST(ReferenceTest, RandomJaxReference6) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{2, 2},
{2, -1, 0, -2},
-2147483647,
{2, 1},
{2, 1},
{2, 1},
[](auto a, auto b) { return a >= b ? a : b; });
EXPECT_THAT(res.shape, ElementsAre(9, 17));
EXPECT_THAT(
res.data,
ElementsAreArray(
{1, -2147483647, 2, -2147483647, 3,
-2147483647, 4, -2147483647, 5, -2147483647,
6, -2147483647, 7, -2147483647, 8,
-2147483647, 9, 11, -2147483647, 12,
-2147483647, 13, -2147483647, 14, -2147483647,
15, -2147483647, 16, -2147483647, 17,
-2147483647, 18, -2147483647, 19, 21,
-2147483647, 22, -2147483647, 23, -2147483647,
24, -2147483647, 25, -2147483647, 26,
-2147483647, 27, -2147483647, 28, -2147483647,
29, 31, -2147483647, 32, -2147483647,
33, -2147483647, 34, -2147483647, 35,
-2147483647, 36, -2147483647, 37, -2147483647,
38, -2147483647, 39, 41, -2147483647,
42, -2147483647, 43, -2147483647, 44,
-2147483647, 45, -2147483647, 46, -2147483647,
47, -2147483647, 48, -2147483647, 49,
51, -2147483647, 52, -2147483647, 53,
-2147483647, 54, -2147483647, 55, -2147483647,
56, -2147483647, 57, -2147483647, 58,
-2147483647, 59, 61, -2147483647, 62,
-2147483647, 63, -2147483647, 64, -2147483647,
65, -2147483647, 66, -2147483647, 67,
-2147483647, 68, -2147483647, 69, 71,
-2147483647, 72, -2147483647, 73, -2147483647,
74, -2147483647, 75, -2147483647, 76,
-2147483647, 77, -2147483647, 78, -2147483647,
79, 81, -2147483647, 82, -2147483647,
83, -2147483647, 84, -2147483647, 85,
-2147483647, 86, -2147483647, 87, -2147483647,
88, -2147483647, 89}));
}
TEST(ReferenceTest, RandomJaxReference7) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{1, 1},
{-2, -2, 1, 0},
0,
{1, 1},
{1, 1},
{2, 1},
std::plus<>());
EXPECT_THAT(res.shape, ElementsAre(3, 11));
EXPECT_THAT(res.data,
ElementsAreArray({0, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30,
0, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50,
0, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70}));
}
TEST(ReferenceTest, RandomJaxReference8) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{1, 1},
{2, 1, -2, -2},
-2147483647,
{1, 2},
{1, 1},
{1, 2},
[](auto a, auto b) { return a >= b ? a : b; });
EXPECT_THAT(res.shape, ElementsAre(13, 3));
EXPECT_THAT(
res.data,
ElementsAreArray(
{-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, 4, 6, 8, 14,
16, 18, 24, 26, 28,
34, 36, 38, 44, 46,
48, 54, 56, 58, 64,
66, 68, 74, 76, 78,
84, 86, 88, 94, 96,
98, -2147483647, -2147483647, -2147483647}));
}
TEST(ReferenceTest, RandomJaxReference9) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{1, 2},
{-1, 2, -2, -2},
-2147483647,
{2, 2},
{2, 1},
{1, 2},
[](auto a, auto b) { return a >= b ? a : b; });
EXPECT_THAT(res.shape, ElementsAre(9, 7));
EXPECT_THAT(res.data, ElementsAreArray(
{32, 33, 34, 35, 36, 37, 38, 42, 43, 44, 45, 46, 47,
48, 52, 53, 54, 55, 56, 57, 58, 62, 63, 64, 65, 66,
67, 68, 72, 73, 74, 75, 76, 77, 78, 82, 83, 84, 85,
86, 87, 88, 92, 93, 94, 95, 96, 97, 98, 82, 83, 84,
85, 86, 87, 88, 92, 93, 94, 95, 96, 97, 98}));
}
TEST(ReferenceTest, RandomJaxReference10) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{2, 2},
{0, -1, 0, 2},
0,
{2, 2},
{1, 1},
{1, 2},
std::plus<>());
EXPECT_THAT(res.shape, ElementsAre(17, 10));
EXPECT_THAT(
res.data,
ElementsAreArray(
{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
17, 18, 19, 20, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22,
23, 24, 25, 26, 27, 28, 29, 30, 21, 22, 23, 24, 25, 26, 27, 28,
29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 31, 32, 33, 34,
35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50,
41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56,
57, 58, 59, 60, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62,
63, 64, 65, 66, 67, 68, 69, 70, 61, 62, 63, 64, 65, 66, 67, 68,
69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 71, 72, 73, 74,
75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90,
81, 82, 83, 84, 85, 86, 87, 88, 89, 90}));
}
TEST(ReferenceTest, RandomJaxReference11) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{1, 1},
{0, 0, 2, 0},
0,
{2, 1},
{2, 1},
{2, 2},
std::plus<>());
EXPECT_THAT(res.shape, ElementsAre(4, 6));
EXPECT_THAT(res.data,
ElementsAreArray({0, 22, 26, 30, 34, 38, 0, 62,
66, 70, 74, 78, 0, 102, 106, 110,
114, 118, 0, 142, 146, 150, 154, 158}));
}
TEST(ReferenceTest, RandomJaxReference12) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{2, 1},
{1, -2, 1, -2},
-2147483647,
{1, 1},
{2, 2},
{2, 2},
[](auto a, auto b) { return a >= b ? a : b; });
EXPECT_THAT(res.shape, ElementsAre(9, 5));
EXPECT_THAT(
res.data,
ElementsAreArray(
{-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647}));
}
TEST(ReferenceTest, RandomJaxReference13) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{1, 1},
{1, 2, 1, -2},
-2147483647,
{1, 1},
{1, 2},
{1, 2},
[](auto a, auto b) { return a >= b ? a : b; });
EXPECT_THAT(res.shape, ElementsAre(13, 5));
EXPECT_THAT(
res.data,
ElementsAreArray(
{-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, 2, 4, 6, 8,
-2147483647, 12, 14, 16, 18,
-2147483647, 22, 24, 26, 28,
-2147483647, 32, 34, 36, 38,
-2147483647, 42, 44, 46, 48,
-2147483647, 52, 54, 56, 58,
-2147483647, 62, 64, 66, 68,
-2147483647, 72, 74, 76, 78,
-2147483647, 82, 84, 86, 88,
-2147483647, 92, 94, 96, 98,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647}));
}
TEST(ReferenceTest, RandomJaxReference14) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{2, 1},
{1, 2, 1, -1},
1,
{1, 2},
{2, 1},
{2, 1},
std::multiplies<>());
EXPECT_THAT(res.shape, ElementsAre(11, 9));
EXPECT_THAT(res.data,
ElementsAreArray(
{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}));
}
TEST(ReferenceTest, RandomJaxReference15) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{1, 2},
{-2, -2, 1, 2},
2147483646,
{1, 2},
{2, 1},
{2, 2},
[](auto a, auto b) { return a <= b ? a : b; });
EXPECT_THAT(res.shape, ElementsAre(3, 11));
EXPECT_THAT(
res.data,
ElementsAreArray({21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 2147483646,
41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 2147483646,
61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 2147483646}));
}
TEST(ReferenceTest, RandomJaxReference16) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{1, 2},
{0, 0, 0, 0},
2147483646,
{2, 1},
{1, 1},
{2, 1},
[](auto a, auto b) { return a <= b ? a : b; });
EXPECT_THAT(res.shape, ElementsAre(5, 19));
EXPECT_THAT(res.data,
ElementsAreArray(
{1, 2147483646, 2, 2147483646, 3,
2147483646, 4, 2147483646, 5, 2147483646,
6, 2147483646, 7, 2147483646, 8,
2147483646, 9, 2147483646, 10, 21,
2147483646, 22, 2147483646, 23, 2147483646,
24, 2147483646, 25, 2147483646, 26,
2147483646, 27, 2147483646, 28, 2147483646,
29, 2147483646, 30, 41, 2147483646,
42, 2147483646, 43, 2147483646, 44,
2147483646, 45, 2147483646, 46, 2147483646,
47, 2147483646, 48, 2147483646, 49,
2147483646, 50, 61, 2147483646, 62,
2147483646, 63, 2147483646, 64, 2147483646,
65, 2147483646, 66, 2147483646, 67,
2147483646, 68, 2147483646, 69, 2147483646,
70, 81, 2147483646, 82, 2147483646,
83, 2147483646, 84, 2147483646, 85,
2147483646, 86, 2147483646, 87, 2147483646,
88, 2147483646, 89, 2147483646, 90}));
}
TEST(ReferenceTest, RandomJaxReference17) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{1, 2},
{2, -1, 2, 1},
2147483646,
{2, 2},
{1, 2},
{1, 1},
[](auto a, auto b) { return a <= b ? a : b; });
EXPECT_THAT(res.shape, ElementsAre(10, 20));
EXPECT_THAT(
res.data,
ElementsAreArray(
{2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
1, 2147483646, 1, 2147483646, 2,
2147483646, 3, 2147483646, 4, 2147483646,
5, 2147483646, 6, 2147483646, 7,
2147483646, 8, 2147483646, 9, 2147483646,
1, 2147483646, 1, 2147483646, 2,
2147483646, 3, 2147483646, 4, 2147483646,
5, 2147483646, 6, 2147483646, 7,
2147483646, 8, 2147483646, 9, 2147483646,
11, 2147483646, 11, 2147483646, 12,
2147483646, 13, 2147483646, 14, 2147483646,
15, 2147483646, 16, 2147483646, 17,
2147483646, 18, 2147483646, 19, 2147483646,
21, 2147483646, 21, 2147483646, 22,
2147483646, 23, 2147483646, 24, 2147483646,
25, 2147483646, 26, 2147483646, 27,
2147483646, 28, 2147483646, 29, 2147483646,
31, 2147483646, 31, 2147483646, 32,
2147483646, 33, 2147483646, 34, 2147483646,
35, 2147483646, 36, 2147483646, 37,
2147483646, 38, 2147483646, 39, 2147483646,
41, 2147483646, 41, 2147483646, 42,
2147483646, 43, 2147483646, 44, 2147483646,
45, 2147483646, 46, 2147483646, 47,
2147483646, 48, 2147483646, 49, 2147483646,
51, 2147483646, 51, 2147483646, 52,
2147483646, 53, 2147483646, 54, 2147483646,
55, 2147483646, 56, 2147483646, 57,
2147483646, 58, 2147483646, 59, 2147483646,
61, 2147483646, 61, 2147483646, 62,
2147483646, 63, 2147483646, 64, 2147483646,
65, 2147483646, 66, 2147483646, 67,
2147483646, 68, 2147483646, 69, 2147483646,
71, 2147483646, 71, 2147483646, 72,
2147483646, 73, 2147483646, 74, 2147483646,
75, 2147483646, 76, 2147483646, 77,
2147483646, 78, 2147483646, 79, 2147483646}));
}
TEST(ReferenceTest, RandomJaxReference18) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{1, 2},
{1, -2, -1, 0},
1,
{1, 1},
{1, 2},
{1, 1},
std::multiplies<>());
EXPECT_THAT(res.shape, ElementsAre(9, 18));
EXPECT_THAT(
res.data,
ElementsAreArray(
{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 2, 1, 3, 1, 4, 1, 5, 1, 6, 1, 7, 1, 8, 1, 9, 1, 10,
1, 12, 1, 13, 1, 14, 1, 15, 1, 16, 1, 17, 1, 18, 1, 19, 1, 20,
1, 22, 1, 23, 1, 24, 1, 25, 1, 26, 1, 27, 1, 28, 1, 29, 1, 30,
1, 32, 1, 33, 1, 34, 1, 35, 1, 36, 1, 37, 1, 38, 1, 39, 1, 40,
1, 42, 1, 43, 1, 44, 1, 45, 1, 46, 1, 47, 1, 48, 1, 49, 1, 50,
1, 52, 1, 53, 1, 54, 1, 55, 1, 56, 1, 57, 1, 58, 1, 59, 1, 60,
1, 62, 1, 63, 1, 64, 1, 65, 1, 66, 1, 67, 1, 68, 1, 69, 1, 70,
1, 72, 1, 73, 1, 74, 1, 75, 1, 76, 1, 77, 1, 78, 1, 79, 1, 80}));
}
TEST(ReferenceTest, RandomJaxReference19) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{1, 1},
{1, 0, 0, -1},
2147483646,
{2, 1},
{1, 1},
{1, 1},
[](auto a, auto b) { return a <= b ? a : b; });
EXPECT_THAT(res.shape, ElementsAre(10, 9));
EXPECT_THAT(res.data, ElementsAreArray(
{1, 2, 3, 4, 5, 6, 7, 8, 9, 1, 2, 3, 4,
5, 6, 7, 8, 9, 11, 12, 13, 14, 15, 16, 17, 18,
19, 21, 22, 23, 24, 25, 26, 27, 28, 29, 31, 32, 33,
34, 35, 36, 37, 38, 39, 41, 42, 43, 44, 45, 46, 47,
48, 49, 51, 52, 53, 54, 55, 56, 57, 58, 59, 61, 62,
63, 64, 65, 66, 67, 68, 69, 71, 72, 73, 74, 75, 76,
77, 78, 79, 81, 82, 83, 84, 85, 86, 87, 88, 89}));
}
TEST(ReferenceTest, RandomJaxReference20) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{2, 1},
{1, 2, 1, -1},
2147483646,
{2, 2},
{1, 1},
{2, 2},
[](auto a, auto b) { return a <= b ? a : b; });
EXPECT_THAT(res.shape, ElementsAre(11, 5));
EXPECT_THAT(
res.data,
ElementsAreArray(
{1, 2, 4, 6, 8, 11, 12,
14, 16, 18, 21, 22, 24, 26,
28, 31, 32, 34, 36, 38, 41,
42, 44, 46, 48, 51, 52, 54,
56, 58, 61, 62, 64, 66, 68,
71, 72, 74, 76, 78, 81, 82,
84, 86, 88, 91, 92, 94, 96,
98, 2147483646, 2147483646, 2147483646, 2147483646, 2147483646}));
}
TEST(ReferenceTest, RandomJaxReference21) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{1, 2},
{1, 0, 1, -1},
0,
{2, 2},
{1, 1},
{2, 2},
std::plus<>());
EXPECT_THAT(res.shape, ElementsAre(5, 9));
EXPECT_THAT(res.data,
ElementsAreArray({1, 2, 3, 4, 5, 6, 7, 8, 9,
32, 34, 36, 38, 40, 42, 44, 46, 48,
72, 74, 76, 78, 80, 82, 84, 86, 88,
112, 114, 116, 118, 120, 122, 124, 126, 128,
152, 154, 156, 158, 160, 162, 164, 166, 168}));
}
TEST(ReferenceTest, RandomJaxReference22) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{1, 2},
{-2, 2, -2, -2},
-2147483647,
{1, 2},
{1, 2},
{1, 2},
[](auto a, auto b) { return a >= b ? a : b; });
EXPECT_THAT(res.shape, ElementsAre(10, 7));
EXPECT_THAT(
res.data,
ElementsAreArray(
{23, 24, 25, 26, 27,
28, 29, 33, 34, 35,
36, 37, 38, 39, 43,
44, 45, 46, 47, 48,
49, 53, 54, 55, 56,
57, 58, 59, 63, 64,
65, 66, 67, 68, 69,
73, 74, 75, 76, 77,
78, 79, 83, 84, 85,
86, 87, 88, 89, 93,
94, 95, 96, 97, 98,
99, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647}));
}
TEST(ReferenceTest, RandomJaxReference23) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{1, 1},
{2, -2, 2, 0},
0,
{1, 2},
{1, 1},
{1, 1},
std::plus<>());
EXPECT_THAT(res.shape, ElementsAre(10, 11));
EXPECT_THAT(
res.data,
ElementsAreArray(
{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 3, 5, 7, 9,
11, 13, 15, 17, 19, 0, 11, 23, 25, 27, 29, 31, 33, 35,
37, 39, 0, 21, 43, 45, 47, 49, 51, 53, 55, 57, 59, 0,
31, 63, 65, 67, 69, 71, 73, 75, 77, 79, 0, 41, 83, 85,
87, 89, 91, 93, 95, 97, 99, 0, 51, 103, 105, 107, 109, 111,
113, 115, 117, 119, 0, 61, 123, 125, 127, 129, 131, 133, 135, 137,
139, 0, 71, 143, 145, 147, 149, 151, 153, 155, 157, 159}));
}
TEST(ReferenceTest, RandomJaxReference24) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{2, 1},
{2, 2, -2, -2},
0,
{2, 1},
{2, 2},
{2, 1},
std::plus<>());
EXPECT_THAT(res.shape, ElementsAre(11, 6));
EXPECT_THAT(
res.data,
ElementsAreArray({3, 4, 5, 6, 7, 8, 16, 18, 20, 22, 24,
26, 36, 38, 40, 42, 44, 46, 56, 58, 60, 62,
64, 66, 76, 78, 80, 82, 84, 86, 96, 98, 100,
102, 104, 106, 116, 118, 120, 122, 124, 126, 136, 138,
140, 142, 144, 146, 156, 158, 160, 162, 164, 166, 176,
178, 180, 182, 184, 186, 93, 94, 95, 96, 97, 98}));
}
TEST(ReferenceTest, RandomJaxReference25) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{2, 1},
{2, -1, 2, 2},
1,
{2, 1},
{1, 1},
{2, 1},
std::multiplies<>());
EXPECT_THAT(res.shape, ElementsAre(10, 14));
EXPECT_THAT(
res.data,
ElementsAreArray({1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 1, 1,
1, 1, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 1, 1,
1, 1, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 1, 1,
1, 1, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 1, 1,
1, 1, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 1, 1,
1, 1, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 1, 1,
1, 1, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 1, 1,
1, 1, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 1, 1,
1, 1, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 1, 1}));
}
TEST(ReferenceTest, RandomJaxReference26) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{2, 2},
{-1, 1, -1, -2},
-2147483647,
{2, 2},
{2, 2},
{1, 2},
[](auto a, auto b) { return a >= b ? a : b; });
EXPECT_THAT(res.shape, ElementsAre(17, 7));
EXPECT_THAT(
res.data,
ElementsAreArray(
{-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647}));
}
TEST(ReferenceTest, RandomJaxReference27) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{1, 1},
{1, -2, 2, -2},
0,
{2, 1},
{1, 2},
{1, 2},
std::plus<>());
EXPECT_THAT(res.shape, ElementsAre(8, 5));
EXPECT_THAT(res.data,
ElementsAreArray({0, 1, 3, 5, 7, 0, 12, 16, 20, 24,
0, 32, 36, 40, 44, 0, 52, 56, 60, 64,
0, 72, 76, 80, 84, 0, 92, 96, 100, 104,
0, 112, 116, 120, 124, 0, 132, 136, 140, 144}));
}
TEST(ReferenceTest, RandomJaxReference28) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{1, 1},
{-2, -2, 0, -2},
1,
{1, 1},
{2, 1},
{1, 1},
std::multiplies<>());
EXPECT_THAT(res.shape, ElementsAre(6, 8));
EXPECT_THAT(res.data, ElementsAreArray(
{21, 22, 23, 24, 25, 26, 27, 28, 31, 32, 33, 34,
35, 36, 37, 38, 41, 42, 43, 44, 45, 46, 47, 48,
51, 52, 53, 54, 55, 56, 57, 58, 61, 62, 63, 64,
65, 66, 67, 68, 71, 72, 73, 74, 75, 76, 77, 78}));
}
TEST(ReferenceTest, RandomJaxReference29) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{1, 2},
{-1, -1, 2, 0},
2147483646,
{1, 1},
{1, 1},
{2, 1},
[](auto a, auto b) { return a <= b ? a : b; });
EXPECT_THAT(res.shape, ElementsAre(4, 21));
EXPECT_THAT(res.data,
ElementsAreArray(
{2147483646, 2147483646, 11, 2147483646, 12,
2147483646, 13, 2147483646, 14, 2147483646,
15, 2147483646, 16, 2147483646, 17,
2147483646, 18, 2147483646, 19, 2147483646,
20, 2147483646, 2147483646, 31, 2147483646,
32, 2147483646, 33, 2147483646, 34,
2147483646, 35, 2147483646, 36, 2147483646,
37, 2147483646, 38, 2147483646, 39,
2147483646, 40, 2147483646, 2147483646, 51,
2147483646, 52, 2147483646, 53, 2147483646,
54, 2147483646, 55, 2147483646, 56,
2147483646, 57, 2147483646, 58, 2147483646,
59, 2147483646, 60, 2147483646, 2147483646,
71, 2147483646, 72, 2147483646, 73,
2147483646, 74, 2147483646, 75, 2147483646,
76, 2147483646, 77, 2147483646, 78,
2147483646, 79, 2147483646, 80}));
}
TEST(ReferenceTest, RandomJaxReference30) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{2, 1},
{-1, 1, -2, -1},
0,
{1, 1},
{1, 2},
{2, 2},
std::plus<>());
EXPECT_THAT(res.shape, ElementsAre(10, 4));
EXPECT_THAT(res.data,
ElementsAreArray({0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}));
}
TEST(ReferenceTest, RandomJaxReference31) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{1, 2},
{-2, 1, -1, -2},
-2147483647,
{1, 1},
{2, 2},
{2, 1},
[](auto a, auto b) { return a >= b ? a : b; });
EXPECT_THAT(res.shape, ElementsAre(5, 16));
EXPECT_THAT(
res.data,
ElementsAreArray(
{-2147483647, 22, -2147483647, 23, -2147483647,
24, -2147483647, 25, -2147483647, 26,
-2147483647, 27, -2147483647, 28, -2147483647,
29, -2147483647, 42, -2147483647, 43,
-2147483647, 44, -2147483647, 45, -2147483647,
46, -2147483647, 47, -2147483647, 48,
-2147483647, 49, -2147483647, 62, -2147483647,
63, -2147483647, 64, -2147483647, 65,
-2147483647, 66, -2147483647, 67, -2147483647,
68, -2147483647, 69, -2147483647, 82,
-2147483647, 83, -2147483647, 84, -2147483647,
85, -2147483647, 86, -2147483647, 87,
-2147483647, 88, -2147483647, 89, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647}));
}
TEST(ReferenceTest, RandomJaxReference32) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{2, 1},
{-1, 2, -1, 0},
0,
{2, 1},
{2, 2},
{2, 2},
std::plus<>());
EXPECT_THAT(res.shape, ElementsAre(9, 5));
EXPECT_THAT(res.data,
ElementsAreArray({0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}));
}
TEST(ReferenceTest, RandomJaxReference33) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{2, 2},
{1, -1, 2, 1},
-2147483647,
{2, 2},
{2, 2},
{1, 2},
[](auto a, auto b) { return a >= b ? a : b; });
EXPECT_THAT(res.shape, ElementsAre(17, 10));
EXPECT_THAT(
res.data,
ElementsAreArray(
{-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
11, 12, 13, 14, 15,
16, 17, 18, 19, 20,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
21, 22, 23, 24, 25,
26, 27, 28, 29, 30,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
31, 32, 33, 34, 35,
36, 37, 38, 39, 40,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
41, 42, 43, 44, 45,
46, 47, 48, 49, 50,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
51, 52, 53, 54, 55,
56, 57, 58, 59, 60,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
61, 62, 63, 64, 65,
66, 67, 68, 69, 70,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
71, 72, 73, 74, 75,
76, 77, 78, 79, 80,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
81, 82, 83, 84, 85,
86, 87, 88, 89, 90,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647}));
}
TEST(ReferenceTest, RandomJaxReference34) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{1, 1},
{0, 2, 2, 2},
0,
{1, 2},
{1, 2},
{1, 1},
std::plus<>());
EXPECT_THAT(res.shape, ElementsAre(12, 12));
EXPECT_THAT(
res.data,
ElementsAreArray(
{1, 2, 4, 6, 8, 10, 12, 14, 16, 18, 9, 10, 11, 12,
24, 26, 28, 30, 32, 34, 36, 38, 19, 20, 21, 22, 44, 46,
48, 50, 52, 54, 56, 58, 29, 30, 31, 32, 64, 66, 68, 70,
72, 74, 76, 78, 39, 40, 41, 42, 84, 86, 88, 90, 92, 94,
96, 98, 49, 50, 51, 52, 104, 106, 108, 110, 112, 114, 116, 118,
59, 60, 61, 62, 124, 126, 128, 130, 132, 134, 136, 138, 69, 70,
71, 72, 144, 146, 148, 150, 152, 154, 156, 158, 79, 80, 81, 82,
164, 166, 168, 170, 172, 174, 176, 178, 89, 90, 91, 92, 184, 186,
188, 190, 192, 194, 196, 198, 99, 100, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0}));
}
TEST(ReferenceTest, RandomJaxReference35) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{1, 2},
{1, 2, 1, -1},
0,
{2, 2},
{2, 1},
{2, 2},
std::plus<>());
EXPECT_THAT(res.shape, ElementsAre(6, 9));
EXPECT_THAT(
res.data,
ElementsAreArray({11, 12, 13, 14, 15, 16, 17, 18, 19, 42, 44,
46, 48, 50, 52, 54, 56, 58, 82, 84, 86, 88,
90, 92, 94, 96, 98, 122, 124, 126, 128, 130, 132,
134, 136, 138, 162, 164, 166, 168, 170, 172, 174, 176,
178, 91, 92, 93, 94, 95, 96, 97, 98, 99}));
}
TEST(ReferenceTest, RandomJaxReference36) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{2, 2},
{2, 2, 2, 1},
-2147483647,
{2, 1},
{2, 1},
{2, 1},
[](auto a, auto b) { return a >= b ? a : b; });
EXPECT_THAT(res.shape, ElementsAre(11, 22));
EXPECT_THAT(
res.data,
ElementsAreArray(
{-2147483647, -2147483647, 1, -2147483647, 2,
-2147483647, 3, -2147483647, 4, -2147483647,
5, -2147483647, 6, -2147483647, 7,
-2147483647, 8, -2147483647, 9, -2147483647,
10, -2147483647, -2147483647, -2147483647, 11,
-2147483647, 12, -2147483647, 13, -2147483647,
14, -2147483647, 15, -2147483647, 16,
-2147483647, 17, -2147483647, 18, -2147483647,
19, -2147483647, 20, -2147483647, -2147483647,
-2147483647, 21, -2147483647, 22, -2147483647,
23, -2147483647, 24, -2147483647, 25,
-2147483647, 26, -2147483647, 27, -2147483647,
28, -2147483647, 29, -2147483647, 30,
-2147483647, -2147483647, -2147483647, 31, -2147483647,
32, -2147483647, 33, -2147483647, 34,
-2147483647, 35, -2147483647, 36, -2147483647,
37, -2147483647, 38, -2147483647, 39,
-2147483647, 40, -2147483647, -2147483647, -2147483647,
41, -2147483647, 42, -2147483647, 43,
-2147483647, 44, -2147483647, 45, -2147483647,
46, -2147483647, 47, -2147483647, 48,
-2147483647, 49, -2147483647, 50, -2147483647,
-2147483647, -2147483647, 51, -2147483647, 52,
-2147483647, 53, -2147483647, 54, -2147483647,
55, -2147483647, 56, -2147483647, 57,
-2147483647, 58, -2147483647, 59, -2147483647,
60, -2147483647, -2147483647, -2147483647, 61,
-2147483647, 62, -2147483647, 63, -2147483647,
64, -2147483647, 65, -2147483647, 66,
-2147483647, 67, -2147483647, 68, -2147483647,
69, -2147483647, 70, -2147483647, -2147483647,
-2147483647, 71, -2147483647, 72, -2147483647,
73, -2147483647, 74, -2147483647, 75,
-2147483647, 76, -2147483647, 77, -2147483647,
78, -2147483647, 79, -2147483647, 80,
-2147483647, -2147483647, -2147483647, 81, -2147483647,
82, -2147483647, 83, -2147483647, 84,
-2147483647, 85, -2147483647, 86, -2147483647,
87, -2147483647, 88, -2147483647, 89,
-2147483647, 90, -2147483647, -2147483647, -2147483647,
91, -2147483647, 92, -2147483647, 93,
-2147483647, 94, -2147483647, 95, -2147483647,
96, -2147483647, 97, -2147483647, 98,
-2147483647, 99, -2147483647, 100, -2147483647,
-2147483647, -2147483647, 91, -2147483647, 92,
-2147483647, 93, -2147483647, 94, -2147483647,
95, -2147483647, 96, -2147483647, 97,
-2147483647, 98, -2147483647, 99, -2147483647,
100, -2147483647}));
}
TEST(ReferenceTest, RandomJaxReference37) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{2, 1},
{-2, 2, 1, 2},
-2147483647,
{2, 2},
{1, 2},
{1, 2},
[](auto a, auto b) { return a >= b ? a : b; });
EXPECT_THAT(res.shape, ElementsAre(18, 6));
EXPECT_THAT(
res.data,
ElementsAreArray(
{12, 14, 16, 18, 20,
20, 22, 24, 26, 28,
30, 30, 22, 24, 26,
28, 30, 30, 32, 34,
36, 38, 40, 40, 32,
34, 36, 38, 40, 40,
42, 44, 46, 48, 50,
50, 42, 44, 46, 48,
50, 50, 52, 54, 56,
58, 60, 60, 52, 54,
56, 58, 60, 60, 62,
64, 66, 68, 70, 70,
62, 64, 66, 68, 70,
70, 72, 74, 76, 78,
80, 80, 72, 74, 76,
78, 80, 80, 82, 84,
86, 88, 90, 90, 82,
84, 86, 88, 90, 90,
92, 94, 96, 98, 100,
100, 92, 94, 96, 98,
100, 100, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647}));
}
TEST(ReferenceTest, RandomJaxReference38) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{2, 1},
{0, -2, 1, 1},
-2147483647,
{1, 2},
{1, 1},
{1, 1},
[](auto a, auto b) { return a >= b ? a : b; });
EXPECT_THAT(res.shape, ElementsAre(17, 11));
EXPECT_THAT(
res.data,
ElementsAreArray(
{1, 2, 3, 4, 5,
6, 7, 8, 9, 10,
10, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, 11, 12, 13,
14, 15, 16, 17, 18,
19, 20, 20, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, 21,
22, 23, 24, 25, 26,
27, 28, 29, 30, 30,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, 31, 32, 33, 34,
35, 36, 37, 38, 39,
40, 40, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, 41, 42,
43, 44, 45, 46, 47,
48, 49, 50, 50, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
51, 52, 53, 54, 55,
56, 57, 58, 59, 60,
60, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, 61, 62, 63,
64, 65, 66, 67, 68,
69, 70, 70, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, 71,
72, 73, 74, 75, 76,
77, 78, 79, 80, 80,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, 81, 82, 83, 84,
85, 86, 87, 88, 89,
90, 90}));
}
TEST(ReferenceTest, RandomJaxReference39) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{2, 1},
{-1, -1, -2, 0},
0,
{2, 1},
{2, 1},
{1, 1},
std::plus<>());
EXPECT_THAT(res.shape, ElementsAre(15, 8));
EXPECT_THAT(
res.data,
ElementsAreArray(
{0, 0, 0, 0, 0, 0, 0, 0, 36, 38, 40, 42, 44, 46, 48,
50, 0, 0, 0, 0, 0, 0, 0, 0, 56, 58, 60, 62, 64, 66,
68, 70, 0, 0, 0, 0, 0, 0, 0, 0, 76, 78, 80, 82, 84,
86, 88, 90, 0, 0, 0, 0, 0, 0, 0, 0, 96, 98, 100, 102,
104, 106, 108, 110, 0, 0, 0, 0, 0, 0, 0, 0, 116, 118, 120,
122, 124, 126, 128, 130, 0, 0, 0, 0, 0, 0, 0, 0, 136, 138,
140, 142, 144, 146, 148, 150, 0, 0, 0, 0, 0, 0, 0, 0, 156,
158, 160, 162, 164, 166, 168, 170, 0, 0, 0, 0, 0, 0, 0, 0}));
}
TEST(ReferenceTest, RandomJaxReference40) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{2, 1},
{2, -1, -2, 2},
1,
{2, 1},
{1, 1},
{1, 2},
std::multiplies<>());
EXPECT_THAT(res.shape, ElementsAre(19, 5));
EXPECT_THAT(
res.data,
ElementsAreArray({1, 1, 1, 1, 1, 3, 5, 7, 9, 1, 3, 5, 7, 9,
1, 13, 15, 17, 19, 1, 13, 15, 17, 19, 1, 23, 25, 27,
29, 1, 23, 25, 27, 29, 1, 33, 35, 37, 39, 1, 33, 35,
37, 39, 1, 43, 45, 47, 49, 1, 43, 45, 47, 49, 1, 53,
55, 57, 59, 1, 53, 55, 57, 59, 1, 63, 65, 67, 69, 1,
63, 65, 67, 69, 1, 73, 75, 77, 79, 1, 73, 75, 77, 79,
1, 83, 85, 87, 89, 1, 83, 85, 87, 89, 1}));
}
TEST(ReferenceTest, RandomJaxReference41) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{2, 2},
{-1, 2, -2, 0},
-2147483647,
{2, 2},
{2, 2},
{1, 2},
[](auto a, auto b) { return a >= b ? a : b; });
EXPECT_THAT(res.shape, ElementsAre(18, 8));
EXPECT_THAT(
res.data,
ElementsAreArray(
{-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, 23, 24,
25, 26, 27, 28, 29,
30, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, 33,
34, 35, 36, 37, 38,
39, 40, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
43, 44, 45, 46, 47,
48, 49, 50, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, 53, 54, 55, 56,
57, 58, 59, 60, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, 63, 64, 65,
66, 67, 68, 69, 70,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, 73, 74,
75, 76, 77, 78, 79,
80, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, 83,
84, 85, 86, 87, 88,
89, 90, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
93, 94, 95, 96, 97,
98, 99, 100, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, 93, 94, 95, 96,
97, 98, 99, 100}));
}
TEST(ReferenceTest, RandomJaxReference42) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{2, 1},
{-2, -1, -1, 1},
1,
{2, 2},
{1, 1},
{1, 1},
std::multiplies<>());
EXPECT_THAT(res.shape, ElementsAre(15, 9));
EXPECT_THAT(
res.data,
ElementsAreArray(
{156, 182, 210, 240, 272, 306, 342, 380, 20, 506, 552,
600, 650, 702, 756, 812, 870, 30, 506, 552, 600, 650,
702, 756, 812, 870, 30, 1056, 1122, 1190, 1260, 1332, 1406,
1482, 1560, 40, 1056, 1122, 1190, 1260, 1332, 1406, 1482, 1560,
40, 1806, 1892, 1980, 2070, 2162, 2256, 2352, 2450, 50, 1806,
1892, 1980, 2070, 2162, 2256, 2352, 2450, 50, 2756, 2862, 2970,
3080, 3192, 3306, 3422, 3540, 60, 2756, 2862, 2970, 3080, 3192,
3306, 3422, 3540, 60, 3906, 4032, 4160, 4290, 4422, 4556, 4692,
4830, 70, 3906, 4032, 4160, 4290, 4422, 4556, 4692, 4830, 70,
5256, 5402, 5550, 5700, 5852, 6006, 6162, 6320, 80, 5256, 5402,
5550, 5700, 5852, 6006, 6162, 6320, 80, 6806, 6972, 7140, 7310,
7482, 7656, 7832, 8010, 90, 6806, 6972, 7140, 7310, 7482, 7656,
7832, 8010, 90}));
}
TEST(ReferenceTest, RandomJaxReference43) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{2, 2},
{1, 0, -2, 1},
-2147483647,
{2, 1},
{1, 1},
{1, 1},
[](auto a, auto b) { return a >= b ? a : b; });
EXPECT_THAT(res.shape, ElementsAre(19, 18));
EXPECT_THAT(
res.data,
ElementsAreArray(
{2, -2147483647, 3, -2147483647, 4, -2147483647, 5, -2147483647,
6, -2147483647, 7, -2147483647, 8, -2147483647, 9, -2147483647,
10, -2147483647, 2, -2147483647, 3, -2147483647, 4, -2147483647,
5, -2147483647, 6, -2147483647, 7, -2147483647, 8, -2147483647,
9, -2147483647, 10, -2147483647, 12, -2147483647, 13, -2147483647,
14, -2147483647, 15, -2147483647, 16, -2147483647, 17, -2147483647,
18, -2147483647, 19, -2147483647, 20, -2147483647, 12, -2147483647,
13, -2147483647, 14, -2147483647, 15, -2147483647, 16, -2147483647,
17, -2147483647, 18, -2147483647, 19, -2147483647, 20, -2147483647,
22, -2147483647, 23, -2147483647, 24, -2147483647, 25, -2147483647,
26, -2147483647, 27, -2147483647, 28, -2147483647, 29, -2147483647,
30, -2147483647, 22, -2147483647, 23, -2147483647, 24, -2147483647,
25, -2147483647, 26, -2147483647, 27, -2147483647, 28, -2147483647,
29, -2147483647, 30, -2147483647, 32, -2147483647, 33, -2147483647,
34, -2147483647, 35, -2147483647, 36, -2147483647, 37, -2147483647,
38, -2147483647, 39, -2147483647, 40, -2147483647, 32, -2147483647,
33, -2147483647, 34, -2147483647, 35, -2147483647, 36, -2147483647,
37, -2147483647, 38, -2147483647, 39, -2147483647, 40, -2147483647,
42, -2147483647, 43, -2147483647, 44, -2147483647, 45, -2147483647,
46, -2147483647, 47, -2147483647, 48, -2147483647, 49, -2147483647,
50, -2147483647, 42, -2147483647, 43, -2147483647, 44, -2147483647,
45, -2147483647, 46, -2147483647, 47, -2147483647, 48, -2147483647,
49, -2147483647, 50, -2147483647, 52, -2147483647, 53, -2147483647,
54, -2147483647, 55, -2147483647, 56, -2147483647, 57, -2147483647,
58, -2147483647, 59, -2147483647, 60, -2147483647, 52, -2147483647,
53, -2147483647, 54, -2147483647, 55, -2147483647, 56, -2147483647,
57, -2147483647, 58, -2147483647, 59, -2147483647, 60, -2147483647,
62, -2147483647, 63, -2147483647, 64, -2147483647, 65, -2147483647,
66, -2147483647, 67, -2147483647, 68, -2147483647, 69, -2147483647,
70, -2147483647, 62, -2147483647, 63, -2147483647, 64, -2147483647,
65, -2147483647, 66, -2147483647, 67, -2147483647, 68, -2147483647,
69, -2147483647, 70, -2147483647, 72, -2147483647, 73, -2147483647,
74, -2147483647, 75, -2147483647, 76, -2147483647, 77, -2147483647,
78, -2147483647, 79, -2147483647, 80, -2147483647, 72, -2147483647,
73, -2147483647, 74, -2147483647, 75, -2147483647, 76, -2147483647,
77, -2147483647, 78, -2147483647, 79, -2147483647, 80, -2147483647,
82, -2147483647, 83, -2147483647, 84, -2147483647, 85, -2147483647,
86, -2147483647, 87, -2147483647, 88, -2147483647, 89, -2147483647,
90, -2147483647, 82, -2147483647, 83, -2147483647, 84, -2147483647,
85, -2147483647, 86, -2147483647, 87, -2147483647, 88, -2147483647,
89, -2147483647, 90, -2147483647, 92, -2147483647, 93, -2147483647,
94, -2147483647, 95, -2147483647, 96, -2147483647, 97, -2147483647,
98, -2147483647, 99, -2147483647, 100, -2147483647}));
}
TEST(ReferenceTest, RandomJaxReference44) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{2, 1},
{0, -2, 2, -1},
-2147483647,
{1, 1},
{2, 2},
{1, 1},
[](auto a, auto b) { return a >= b ? a : b; });
EXPECT_THAT(res.shape, ElementsAre(17, 11));
EXPECT_THAT(
res.data,
ElementsAreArray(
{-2147483647, -2147483647, 1, 2, 3,
4, 5, 6, 7, 8,
9, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, 11,
12, 13, 14, 15, 16,
17, 18, 19, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, 21, 22, 23, 24,
25, 26, 27, 28, 29,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, 31, 32,
33, 34, 35, 36, 37,
38, 39, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
41, 42, 43, 44, 45,
46, 47, 48, 49, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, 51, 52, 53,
54, 55, 56, 57, 58,
59, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, 61,
62, 63, 64, 65, 66,
67, 68, 69, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, 71, 72, 73, 74,
75, 76, 77, 78, 79,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, 81, 82,
83, 84, 85, 86, 87,
88, 89}));
}
TEST(ReferenceTest, RandomJaxReference45) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{2, 1},
{0, -1, -2, -2},
1,
{1, 1},
{2, 1},
{1, 1},
std::multiplies<>());
EXPECT_THAT(res.shape, ElementsAre(18, 6));
EXPECT_THAT(
res.data,
ElementsAreArray({3, 4, 5, 6, 7, 8, 1, 1, 1, 1, 1, 1, 13, 14,
15, 16, 17, 18, 1, 1, 1, 1, 1, 1, 23, 24, 25, 26,
27, 28, 1, 1, 1, 1, 1, 1, 33, 34, 35, 36, 37, 38,
1, 1, 1, 1, 1, 1, 43, 44, 45, 46, 47, 48, 1, 1,
1, 1, 1, 1, 53, 54, 55, 56, 57, 58, 1, 1, 1, 1,
1, 1, 63, 64, 65, 66, 67, 68, 1, 1, 1, 1, 1, 1,
73, 74, 75, 76, 77, 78, 1, 1, 1, 1, 1, 1, 83, 84,
85, 86, 87, 88, 1, 1, 1, 1, 1, 1}));
}
TEST(ReferenceTest, RandomJaxReference46) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{2, 2},
{-1, 2, 0, -1},
1,
{2, 2},
{1, 1},
{2, 1},
std::multiplies<>());
EXPECT_THAT(res.shape, ElementsAre(10, 17));
EXPECT_THAT(
res.data,
ElementsAreArray(
{11, 12, 12, 13, 13, 14, 14, 15, 15, 16, 16, 17, 17, 18, 18, 19, 19,
21, 22, 22, 23, 23, 24, 24, 25, 25, 26, 26, 27, 27, 28, 28, 29, 29,
31, 32, 32, 33, 33, 34, 34, 35, 35, 36, 36, 37, 37, 38, 38, 39, 39,
41, 42, 42, 43, 43, 44, 44, 45, 45, 46, 46, 47, 47, 48, 48, 49, 49,
51, 52, 52, 53, 53, 54, 54, 55, 55, 56, 56, 57, 57, 58, 58, 59, 59,
61, 62, 62, 63, 63, 64, 64, 65, 65, 66, 66, 67, 67, 68, 68, 69, 69,
71, 72, 72, 73, 73, 74, 74, 75, 75, 76, 76, 77, 77, 78, 78, 79, 79,
81, 82, 82, 83, 83, 84, 84, 85, 85, 86, 86, 87, 87, 88, 88, 89, 89,
91, 92, 92, 93, 93, 94, 94, 95, 95, 96, 96, 97, 97, 98, 98, 99, 99,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}));
}
TEST(ReferenceTest, RandomJaxReference47) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{2, 1},
{0, -1, 0, 0},
0,
{1, 1},
{1, 1},
{1, 1},
std::plus<>());
EXPECT_THAT(res.shape, ElementsAre(18, 10));
EXPECT_THAT(
res.data,
ElementsAreArray(
{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 31, 32, 33, 34, 35, 36, 37, 38,
39, 40, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 41, 42, 43, 44, 45,
46, 47, 48, 49, 50, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 51, 52,
53, 54, 55, 56, 57, 58, 59, 60, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0}));
}
TEST(ReferenceTest, RandomJaxReference48) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{2, 1},
{-2, -1, 1, 2},
1,
{1, 2},
{2, 1},
{1, 2},
std::multiplies<>());
EXPECT_THAT(res.shape, ElementsAre(16, 6));
EXPECT_THAT(
res.data,
ElementsAreArray({11, 156, 210, 272, 342, 20, 1, 1, 1, 1, 1, 1,
21, 506, 600, 702, 812, 30, 1, 1, 1, 1, 1, 1,
31, 1056, 1190, 1332, 1482, 40, 1, 1, 1, 1, 1, 1,
41, 1806, 1980, 2162, 2352, 50, 1, 1, 1, 1, 1, 1,
51, 2756, 2970, 3192, 3422, 60, 1, 1, 1, 1, 1, 1,
61, 3906, 4160, 4422, 4692, 70, 1, 1, 1, 1, 1, 1,
71, 5256, 5550, 5852, 6162, 80, 1, 1, 1, 1, 1, 1,
81, 6806, 7140, 7482, 7832, 90, 1, 1, 1, 1, 1, 1}));
}
TEST(ReferenceTest, RandomJaxReference49) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{2, 2},
{0, 1, -2, 0},
2147483646,
{1, 1},
{2, 2},
{2, 1},
[](auto a, auto b) { return a <= b ? a : b; });
EXPECT_THAT(res.shape, ElementsAre(10, 17));
EXPECT_THAT(res.data,
ElementsAreArray(
{2, 2147483646, 3, 2147483646, 4,
2147483646, 5, 2147483646, 6, 2147483646,
7, 2147483646, 8, 2147483646, 9,
2147483646, 10, 12, 2147483646, 13,
2147483646, 14, 2147483646, 15, 2147483646,
16, 2147483646, 17, 2147483646, 18,
2147483646, 19, 2147483646, 20, 22,
2147483646, 23, 2147483646, 24, 2147483646,
25, 2147483646, 26, 2147483646, 27,
2147483646, 28, 2147483646, 29, 2147483646,
30, 32, 2147483646, 33, 2147483646,
34, 2147483646, 35, 2147483646, 36,
2147483646, 37, 2147483646, 38, 2147483646,
39, 2147483646, 40, 42, 2147483646,
43, 2147483646, 44, 2147483646, 45,
2147483646, 46, 2147483646, 47, 2147483646,
48, 2147483646, 49, 2147483646, 50,
52, 2147483646, 53, 2147483646, 54,
2147483646, 55, 2147483646, 56, 2147483646,
57, 2147483646, 58, 2147483646, 59,
2147483646, 60, 62, 2147483646, 63,
2147483646, 64, 2147483646, 65, 2147483646,
66, 2147483646, 67, 2147483646, 68,
2147483646, 69, 2147483646, 70, 72,
2147483646, 73, 2147483646, 74, 2147483646,
75, 2147483646, 76, 2147483646, 77,
2147483646, 78, 2147483646, 79, 2147483646,
80, 82, 2147483646, 83, 2147483646,
84, 2147483646, 85, 2147483646, 86,
2147483646, 87, 2147483646, 88, 2147483646,
89, 2147483646, 90, 92, 2147483646,
93, 2147483646, 94, 2147483646, 95,
2147483646, 96, 2147483646, 97, 2147483646,
98, 2147483646, 99, 2147483646, 100}));
}
TEST(ReferenceTest, RandomJaxReference50) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{2, 2},
{-1, -1, 1, 0},
2147483646,
{2, 1},
{1, 1},
{1, 2},
[](auto a, auto b) { return a <= b ? a : b; });
EXPECT_THAT(res.shape, ElementsAre(16, 10));
EXPECT_THAT(
res.data,
ElementsAreArray(
{2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646}));
}
TEST(ReferenceTest, RandomJaxReference51) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{2, 2},
{0, 2, -2, -1},
2147483646,
{2, 2},
{2, 2},
{1, 2},
[](auto a, auto b) { return a <= b ? a : b; });
EXPECT_THAT(res.shape, ElementsAre(19, 7));
EXPECT_THAT(res.data,
ElementsAreArray(
{2, 3, 4, 5, 6,
7, 8, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 12,
13, 14, 15, 16, 17,
18, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 22, 23,
24, 25, 26, 27, 28,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 32, 33, 34,
35, 36, 37, 38, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 42, 43, 44, 45,
46, 47, 48, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
52, 53, 54, 55, 56,
57, 58, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 62,
63, 64, 65, 66, 67,
68, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 72, 73,
74, 75, 76, 77, 78,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 82, 83, 84,
85, 86, 87, 88, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 92, 93, 94, 95,
96, 97, 98}));
}
TEST(ReferenceTest, RandomJaxReference52) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{1, 2},
{-2, 0, 1, 2},
0,
{1, 2},
{1, 1},
{1, 2},
std::plus<>());
EXPECT_THAT(res.shape, ElementsAre(8, 11));
EXPECT_THAT(res.data,
ElementsAreArray(
{21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 0, 31, 32, 33, 34,
35, 36, 37, 38, 39, 40, 0, 41, 42, 43, 44, 45, 46, 47, 48,
49, 50, 0, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 0, 61,
62, 63, 64, 65, 66, 67, 68, 69, 70, 0, 71, 72, 73, 74, 75,
76, 77, 78, 79, 80, 0, 81, 82, 83, 84, 85, 86, 87, 88, 89,
90, 0, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 0}));
}
TEST(ReferenceTest, RandomJaxReference53) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{2, 2},
{2, 1, 0, 2},
2147483646,
{2, 2},
{1, 1},
{2, 2},
[](auto a, auto b) { return a <= b ? a : b; });
EXPECT_THAT(res.shape, ElementsAre(11, 10));
EXPECT_THAT(res.data,
ElementsAreArray(
{2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
1, 2, 3, 4, 5,
6, 7, 8, 9, 10,
11, 12, 13, 14, 15,
16, 17, 18, 19, 20,
21, 22, 23, 24, 25,
26, 27, 28, 29, 30,
31, 32, 33, 34, 35,
36, 37, 38, 39, 40,
41, 42, 43, 44, 45,
46, 47, 48, 49, 50,
51, 52, 53, 54, 55,
56, 57, 58, 59, 60,
61, 62, 63, 64, 65,
66, 67, 68, 69, 70,
71, 72, 73, 74, 75,
76, 77, 78, 79, 80,
81, 82, 83, 84, 85,
86, 87, 88, 89, 90,
91, 92, 93, 94, 95,
96, 97, 98, 99, 100}));
}
TEST(ReferenceTest, RandomJaxReference54) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{2, 1},
{-2, 0, 0, 2},
-2147483647,
{1, 1},
{2, 2},
{2, 1},
[](auto a, auto b) { return a >= b ? a : b; });
EXPECT_THAT(res.shape, ElementsAre(9, 12));
EXPECT_THAT(
res.data,
ElementsAreArray(
{11, 12, 13, 14, 15, 16, 17, 18, 19, 20, -2147483647, -2147483647,
21, 22, 23, 24, 25, 26, 27, 28, 29, 30, -2147483647, -2147483647,
31, 32, 33, 34, 35, 36, 37, 38, 39, 40, -2147483647, -2147483647,
41, 42, 43, 44, 45, 46, 47, 48, 49, 50, -2147483647, -2147483647,
51, 52, 53, 54, 55, 56, 57, 58, 59, 60, -2147483647, -2147483647,
61, 62, 63, 64, 65, 66, 67, 68, 69, 70, -2147483647, -2147483647,
71, 72, 73, 74, 75, 76, 77, 78, 79, 80, -2147483647, -2147483647,
81, 82, 83, 84, 85, 86, 87, 88, 89, 90, -2147483647, -2147483647,
91, 92, 93, 94, 95, 96, 97, 98, 99, 100, -2147483647, -2147483647}));
}
TEST(ReferenceTest, RandomJaxReference55) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{2, 1},
{2, 1, -2, 2},
-2147483647,
{2, 1},
{2, 2},
{1, 2},
[](auto a, auto b) { return a >= b ? a : b; });
EXPECT_THAT(res.shape, ElementsAre(20, 5));
EXPECT_THAT(
res.data,
ElementsAreArray(
{3, 5, 7, 9, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
13, 15, 17, 19, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
23, 25, 27, 29, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
33, 35, 37, 39, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
43, 45, 47, 49, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
53, 55, 57, 59, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
63, 65, 67, 69, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
73, 75, 77, 79, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
83, 85, 87, 89, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
93, 95, 97, 99, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647}));
}
TEST(ReferenceTest, RandomJaxReference56) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{2, 1},
{0, 0, 0, 1},
1,
{2, 1},
{1, 2},
{1, 1},
std::multiplies<>());
EXPECT_THAT(res.shape, ElementsAre(18, 11));
EXPECT_THAT(
res.data,
ElementsAreArray(
{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 1, 11, 12, 13, 14, 15, 16,
17, 18, 19, 20, 1, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 1, 21,
22, 23, 24, 25, 26, 27, 28, 29, 30, 1, 21, 22, 23, 24, 25, 26, 27,
28, 29, 30, 1, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 1, 31, 32,
33, 34, 35, 36, 37, 38, 39, 40, 1, 41, 42, 43, 44, 45, 46, 47, 48,
49, 50, 1, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 1, 51, 52, 53,
54, 55, 56, 57, 58, 59, 60, 1, 51, 52, 53, 54, 55, 56, 57, 58, 59,
60, 1, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 1, 61, 62, 63, 64,
65, 66, 67, 68, 69, 70, 1, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80,
1, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 1, 81, 82, 83, 84, 85,
86, 87, 88, 89, 90, 1, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 1,
91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 1}));
}
TEST(ReferenceTest, RandomJaxReference57) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{2, 2},
{0, 0, -2, 2},
-2147483647,
{1, 2},
{1, 2},
{2, 2},
[](auto a, auto b) { return a >= b ? a : b; });
EXPECT_THAT(res.shape, ElementsAre(10, 9));
EXPECT_THAT(
res.data,
ElementsAreArray({3, 4, 5, 6, 7, 8, 9, 10, 10, 13, 14, 15, 16,
17, 18, 19, 20, 20, 23, 24, 25, 26, 27, 28, 29, 30,
30, 33, 34, 35, 36, 37, 38, 39, 40, 40, 43, 44, 45,
46, 47, 48, 49, 50, 50, 53, 54, 55, 56, 57, 58, 59,
60, 60, 63, 64, 65, 66, 67, 68, 69, 70, 70, 73, 74,
75, 76, 77, 78, 79, 80, 80, 83, 84, 85, 86, 87, 88,
89, 90, 90, 93, 94, 95, 96, 97, 98, 99, 100, 100}));
}
TEST(ReferenceTest, RandomJaxReference58) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{1, 2},
{-1, 2, 1, -2},
0,
{1, 1},
{2, 2},
{1, 2},
std::plus<>());
EXPECT_THAT(res.shape, ElementsAre(11, 9));
EXPECT_THAT(res.data,
ElementsAreArray(
{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}));
}
TEST(ReferenceTest, RandomJaxReference59) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{2, 2},
{2, -2, 2, 2},
2147483646,
{2, 2},
{1, 2},
{1, 2},
[](auto a, auto b) { return a <= b ? a : b; });
EXPECT_THAT(res.shape, ElementsAre(18, 11));
EXPECT_THAT(res.data,
ElementsAreArray(
{2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 1, 1, 2, 3,
4, 5, 6, 7, 8,
9, 10, 1, 1, 2,
3, 4, 5, 6, 7,
8, 9, 10, 11, 11,
12, 13, 14, 15, 16,
17, 18, 19, 20, 11,
11, 12, 13, 14, 15,
16, 17, 18, 19, 20,
21, 21, 22, 23, 24,
25, 26, 27, 28, 29,
30, 21, 21, 22, 23,
24, 25, 26, 27, 28,
29, 30, 31, 31, 32,
33, 34, 35, 36, 37,
38, 39, 40, 31, 31,
32, 33, 34, 35, 36,
37, 38, 39, 40, 41,
41, 42, 43, 44, 45,
46, 47, 48, 49, 50,
41, 41, 42, 43, 44,
45, 46, 47, 48, 49,
50, 51, 51, 52, 53,
54, 55, 56, 57, 58,
59, 60, 51, 51, 52,
53, 54, 55, 56, 57,
58, 59, 60, 61, 61,
62, 63, 64, 65, 66,
67, 68, 69, 70, 61,
61, 62, 63, 64, 65,
66, 67, 68, 69, 70,
71, 71, 72, 73, 74,
75, 76, 77, 78, 79,
80, 71, 71, 72, 73,
74, 75, 76, 77, 78,
79, 80, 81, 81, 82,
83, 84, 85, 86, 87,
88, 89, 90}));
}
TEST(ReferenceTest, RandomJaxReference60) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{1, 1},
{0, 2, -1, 0},
0,
{1, 2},
{2, 1},
{2, 2},
std::plus<>());
EXPECT_THAT(res.shape, ElementsAre(6, 4));
EXPECT_THAT(res.data,
ElementsAreArray({5, 9, 13, 17, 45, 49, 53, 57,
85, 89, 93, 97, 125, 129, 133, 137,
165, 169, 173, 177, 0, 0, 0, 0}));
}
TEST(ReferenceTest, RandomJaxReference61) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{2, 2},
{0, -1, 2, -1},
0,
{2, 1},
{1, 1},
{1, 1},
std::plus<>());
EXPECT_THAT(res.shape, ElementsAre(17, 20));
EXPECT_THAT(
res.data,
ElementsAreArray(
{0, 0, 1, 0, 2, 0, 3, 0, 4, 0, 5, 0, 6, 0, 7, 0, 8, 0,
9, 0, 0, 0, 11, 0, 12, 0, 13, 0, 14, 0, 15, 0, 16, 0, 17, 0,
18, 0, 19, 0, 0, 0, 11, 0, 12, 0, 13, 0, 14, 0, 15, 0, 16, 0,
17, 0, 18, 0, 19, 0, 0, 0, 21, 0, 22, 0, 23, 0, 24, 0, 25, 0,
26, 0, 27, 0, 28, 0, 29, 0, 0, 0, 21, 0, 22, 0, 23, 0, 24, 0,
25, 0, 26, 0, 27, 0, 28, 0, 29, 0, 0, 0, 31, 0, 32, 0, 33, 0,
34, 0, 35, 0, 36, 0, 37, 0, 38, 0, 39, 0, 0, 0, 31, 0, 32, 0,
33, 0, 34, 0, 35, 0, 36, 0, 37, 0, 38, 0, 39, 0, 0, 0, 41, 0,
42, 0, 43, 0, 44, 0, 45, 0, 46, 0, 47, 0, 48, 0, 49, 0, 0, 0,
41, 0, 42, 0, 43, 0, 44, 0, 45, 0, 46, 0, 47, 0, 48, 0, 49, 0,
0, 0, 51, 0, 52, 0, 53, 0, 54, 0, 55, 0, 56, 0, 57, 0, 58, 0,
59, 0, 0, 0, 51, 0, 52, 0, 53, 0, 54, 0, 55, 0, 56, 0, 57, 0,
58, 0, 59, 0, 0, 0, 61, 0, 62, 0, 63, 0, 64, 0, 65, 0, 66, 0,
67, 0, 68, 0, 69, 0, 0, 0, 61, 0, 62, 0, 63, 0, 64, 0, 65, 0,
66, 0, 67, 0, 68, 0, 69, 0, 0, 0, 71, 0, 72, 0, 73, 0, 74, 0,
75, 0, 76, 0, 77, 0, 78, 0, 79, 0, 0, 0, 71, 0, 72, 0, 73, 0,
74, 0, 75, 0, 76, 0, 77, 0, 78, 0, 79, 0, 0, 0, 81, 0, 82, 0,
83, 0, 84, 0, 85, 0, 86, 0, 87, 0, 88, 0, 89, 0, 0, 0, 81, 0,
82, 0, 83, 0, 84, 0, 85, 0, 86, 0, 87, 0, 88, 0, 89, 0}));
}
TEST(ReferenceTest, RandomJaxReference62) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{1, 1},
{-2, -1, 2, 0},
-2147483647,
{2, 1},
{2, 2},
{2, 1},
[](auto a, auto b) { return a >= b ? a : b; });
EXPECT_THAT(res.shape, ElementsAre(3, 12));
EXPECT_THAT(
res.data,
ElementsAreArray(
{-2147483647, -2147483647, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50,
-2147483647, -2147483647, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70,
-2147483647, -2147483647, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90}));
}
TEST(ReferenceTest, RandomJaxReference63) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{2, 1},
{-1, 0, 2, -2},
1,
{2, 1},
{2, 2},
{1, 1},
std::multiplies<>());
EXPECT_THAT(res.shape, ElementsAre(16, 10));
EXPECT_THAT(
res.data,
ElementsAreArray({1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 231, 264, 299, 336, 375, 416, 459, 504,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 651, 704, 759, 816, 875, 936, 999, 1064,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1271, 1344, 1419, 1496, 1575, 1656, 1739, 1824,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 2091, 2184, 2279, 2376, 2475, 2576, 2679, 2784,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 3111, 3224, 3339, 3456, 3575, 3696, 3819, 3944,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 4331, 4464, 4599, 4736, 4875, 5016, 5159, 5304,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 5751, 5904, 6059, 6216, 6375, 6536, 6699, 6864,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 7371, 7544, 7719, 7896, 8075, 8256, 8439, 8624}));
}
TEST(ReferenceTest, RandomJaxReference64) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{2, 1},
{1, 2, 0, -2},
-2147483647,
{2, 2},
{1, 2},
{2, 2},
[](auto a, auto b) { return a >= b ? a : b; });
EXPECT_THAT(res.shape, ElementsAre(11, 3));
EXPECT_THAT(res.data,
ElementsAreArray(
{3, 5, 7, 13, 15, 17, 23, 25, 27,
33, 35, 37, 43, 45, 47, 53, 55, 57,
63, 65, 67, 73, 75, 77, 83, 85, 87,
93, 95, 97, -2147483647, -2147483647, -2147483647}));
}
TEST(ReferenceTest, RandomJaxReference65) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{1, 2},
{-1, 0, 2, 0},
0,
{2, 1},
{1, 2},
{2, 2},
std::plus<>());
EXPECT_THAT(res.shape, ElementsAre(4, 11));
EXPECT_THAT(
res.data,
ElementsAreArray({0, 32, 34, 36, 38, 40, 42, 44, 46, 48, 50,
0, 72, 74, 76, 78, 80, 82, 84, 86, 88, 90,
0, 112, 114, 116, 118, 120, 122, 124, 126, 128, 130,
0, 152, 154, 156, 158, 160, 162, 164, 166, 168, 170}));
}
TEST(ReferenceTest, RandomJaxReference66) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{1, 2},
{0, 0, -1, -1},
0,
{2, 2},
{1, 1},
{2, 2},
std::plus<>());
EXPECT_THAT(res.shape, ElementsAre(5, 8));
EXPECT_THAT(
res.data,
ElementsAreArray({14, 16, 18, 20, 22, 24, 26, 28, 54, 56,
58, 60, 62, 64, 66, 68, 94, 96, 98, 100,
102, 104, 106, 108, 134, 136, 138, 140, 142, 144,
146, 148, 174, 176, 178, 180, 182, 184, 186, 188}));
}
TEST(ReferenceTest, RandomJaxReference67) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{1, 1},
{1, 0, 2, 2},
0,
{1, 2},
{1, 1},
{2, 1},
std::plus<>());
EXPECT_THAT(res.shape, ElementsAre(6, 13));
EXPECT_THAT(
res.data,
ElementsAreArray(
{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 11, 23, 25, 27, 29, 31, 33, 35, 37, 39, 20, 0,
0, 31, 63, 65, 67, 69, 71, 73, 75, 77, 79, 40, 0,
0, 51, 103, 105, 107, 109, 111, 113, 115, 117, 119, 60, 0,
0, 71, 143, 145, 147, 149, 151, 153, 155, 157, 159, 80, 0,
0, 91, 183, 185, 187, 189, 191, 193, 195, 197, 199, 100, 0}));
}
TEST(ReferenceTest, RandomJaxReference68) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{1, 2},
{2, 2, 1, -2},
0,
{2, 1},
{1, 2},
{1, 2},
std::plus<>());
EXPECT_THAT(res.shape, ElementsAre(13, 9));
EXPECT_THAT(res.data,
ElementsAreArray(
{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}));
}
TEST(ReferenceTest, RandomJaxReference69) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{2, 1},
{-2, 1, -2, -1},
-2147483647,
{2, 2},
{2, 2},
{2, 1},
[](auto a, auto b) { return a >= b ? a : b; });
EXPECT_THAT(res.shape, ElementsAre(8, 5));
EXPECT_THAT(
res.data,
ElementsAreArray({25, 26, 27, 28, 29, 35, 36, 37, 38, 39, 45, 46, 47, 48,
49, 55, 56, 57, 58, 59, 65, 66, 67, 68, 69, 75, 76, 77,
78, 79, 85, 86, 87, 88, 89, 95, 96, 97, 98, 99}));
}
TEST(ReferenceTest, RandomJaxReference70) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{1, 1},
{-1, -2, 0, 2},
2147483646,
{1, 2},
{1, 1},
{2, 2},
[](auto a, auto b) { return a <= b ? a : b; });
EXPECT_THAT(res.shape, ElementsAre(4, 6));
EXPECT_THAT(res.data, ElementsAreArray({11, 13, 15, 17, 19, 2147483646,
31, 33, 35, 37, 39, 2147483646,
51, 53, 55, 57, 59, 2147483646,
71, 73, 75, 77, 79, 2147483646}));
}
TEST(ReferenceTest, RandomJaxReference71) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{2, 1},
{1, 2, -2, 2},
-2147483647,
{2, 1},
{1, 1},
{1, 1},
[](auto a, auto b) { return a >= b ? a : b; });
EXPECT_THAT(res.shape, ElementsAre(21, 10));
EXPECT_THAT(
res.data,
ElementsAreArray(
{3, 4, 5, 6, 7,
8, 9, 10, -2147483647, -2147483647,
3, 4, 5, 6, 7,
8, 9, 10, -2147483647, -2147483647,
13, 14, 15, 16, 17,
18, 19, 20, -2147483647, -2147483647,
13, 14, 15, 16, 17,
18, 19, 20, -2147483647, -2147483647,
23, 24, 25, 26, 27,
28, 29, 30, -2147483647, -2147483647,
23, 24, 25, 26, 27,
28, 29, 30, -2147483647, -2147483647,
33, 34, 35, 36, 37,
38, 39, 40, -2147483647, -2147483647,
33, 34, 35, 36, 37,
38, 39, 40, -2147483647, -2147483647,
43, 44, 45, 46, 47,
48, 49, 50, -2147483647, -2147483647,
43, 44, 45, 46, 47,
48, 49, 50, -2147483647, -2147483647,
53, 54, 55, 56, 57,
58, 59, 60, -2147483647, -2147483647,
53, 54, 55, 56, 57,
58, 59, 60, -2147483647, -2147483647,
63, 64, 65, 66, 67,
68, 69, 70, -2147483647, -2147483647,
63, 64, 65, 66, 67,
68, 69, 70, -2147483647, -2147483647,
73, 74, 75, 76, 77,
78, 79, 80, -2147483647, -2147483647,
73, 74, 75, 76, 77,
78, 79, 80, -2147483647, -2147483647,
83, 84, 85, 86, 87,
88, 89, 90, -2147483647, -2147483647,
83, 84, 85, 86, 87,
88, 89, 90, -2147483647, -2147483647,
93, 94, 95, 96, 97,
98, 99, 100, -2147483647, -2147483647,
93, 94, 95, 96, 97,
98, 99, 100, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647}));
}
TEST(ReferenceTest, RandomJaxReference72) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{1, 1},
{0, -1, 2, 0},
0,
{1, 2},
{2, 2},
{2, 2},
std::plus<>());
EXPECT_THAT(res.shape, ElementsAre(5, 5));
EXPECT_THAT(res.data,
ElementsAreArray({1, 4, 8, 12, 16, 21, 44, 48, 52,
56, 41, 84, 88, 92, 96, 61, 124, 128,
132, 136, 81, 164, 168, 172, 176}));
}
TEST(ReferenceTest, RandomJaxReference73) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{1, 1},
{0, 0, 0, 0},
-2147483647,
{1, 2},
{1, 2},
{1, 1},
[](auto a, auto b) { return a >= b ? a : b; });
EXPECT_THAT(res.shape, ElementsAre(10, 8));
EXPECT_THAT(
res.data,
ElementsAreArray({3, 4, 5, 6, 7, 8, 9, 10, 13, 14, 15, 16, 17, 18,
19, 20, 23, 24, 25, 26, 27, 28, 29, 30, 33, 34, 35, 36,
37, 38, 39, 40, 43, 44, 45, 46, 47, 48, 49, 50, 53, 54,
55, 56, 57, 58, 59, 60, 63, 64, 65, 66, 67, 68, 69, 70,
73, 74, 75, 76, 77, 78, 79, 80, 83, 84, 85, 86, 87, 88,
89, 90, 93, 94, 95, 96, 97, 98, 99, 100}));
}
TEST(ReferenceTest, RandomJaxReference74) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{1, 1},
{0, -2, -2, -1},
0,
{2, 2},
{1, 2},
{1, 1},
std::plus<>());
EXPECT_THAT(res.shape, ElementsAre(7, 5));
EXPECT_THAT(res.data,
ElementsAreArray({36, 40, 44, 48, 52, 76, 80, 84, 88,
92, 116, 120, 124, 128, 132, 156, 160, 164,
168, 172, 196, 200, 204, 208, 212, 236, 240,
244, 248, 252, 276, 280, 284, 288, 292}));
}
TEST(ReferenceTest, RandomJaxReference75) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{2, 1},
{0, 1, -2, 1},
0,
{2, 1},
{2, 1},
{2, 2},
std::plus<>());
EXPECT_THAT(res.shape, ElementsAre(9, 5));
EXPECT_THAT(res.data,
ElementsAreArray({16, 20, 24, 28, 0, 36, 40, 44, 48,
0, 56, 60, 64, 68, 0, 76, 80, 84,
88, 0, 96, 100, 104, 108, 0, 116, 120,
124, 128, 0, 136, 140, 144, 148, 0, 156,
160, 164, 168, 0, 176, 180, 184, 188, 0}));
}
TEST(ReferenceTest, RandomJaxReference76) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{1, 2},
{2, -1, -1, 0},
0,
{1, 1},
{1, 2},
{2, 1},
std::plus<>());
EXPECT_THAT(res.shape, ElementsAre(6, 18));
EXPECT_THAT(
res.data,
ElementsAreArray({0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 2, 0, 3, 0, 4, 0, 5, 0, 6, 0, 7, 0, 8,
0, 9, 0, 10, 0, 22, 0, 23, 0, 24, 0, 25, 0, 26, 0, 27,
0, 28, 0, 29, 0, 30, 0, 42, 0, 43, 0, 44, 0, 45, 0, 46,
0, 47, 0, 48, 0, 49, 0, 50, 0, 62, 0, 63, 0, 64, 0, 65,
0, 66, 0, 67, 0, 68, 0, 69, 0, 70, 0, 82, 0, 83, 0, 84,
0, 85, 0, 86, 0, 87, 0, 88, 0, 89, 0, 90}));
}
TEST(ReferenceTest, RandomJaxReference77) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{2, 1},
{-1, 2, -1, -2},
-2147483647,
{1, 2},
{2, 2},
{2, 1},
[](auto a, auto b) { return a >= b ? a : b; });
EXPECT_THAT(res.shape, ElementsAre(10, 5));
EXPECT_THAT(
res.data,
ElementsAreArray(
{-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647}));
}
TEST(ReferenceTest, RandomJaxReference78) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{2, 1},
{-2, 1, 2, -1},
0,
{1, 1},
{2, 1},
{1, 2},
std::plus<>());
EXPECT_THAT(res.shape, ElementsAre(18, 6));
EXPECT_THAT(
res.data,
ElementsAreArray({0, 11, 13, 15, 17, 19, 0, 0, 0, 0, 0, 0, 0, 21,
23, 25, 27, 29, 0, 0, 0, 0, 0, 0, 0, 31, 33, 35,
37, 39, 0, 0, 0, 0, 0, 0, 0, 41, 43, 45, 47, 49,
0, 0, 0, 0, 0, 0, 0, 51, 53, 55, 57, 59, 0, 0,
0, 0, 0, 0, 0, 61, 63, 65, 67, 69, 0, 0, 0, 0,
0, 0, 0, 71, 73, 75, 77, 79, 0, 0, 0, 0, 0, 0,
0, 81, 83, 85, 87, 89, 0, 0, 0, 0, 0, 0, 0, 91,
93, 95, 97, 99, 0, 0, 0, 0, 0, 0}));
}
TEST(ReferenceTest, RandomJaxReference79) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{1, 2},
{-1, -1, -2, 1},
2147483646,
{1, 1},
{1, 1},
{1, 2},
[](auto a, auto b) { return a <= b ? a : b; });
EXPECT_THAT(res.shape, ElementsAre(8, 9));
EXPECT_THAT(res.data,
ElementsAreArray(
{12, 13, 14, 15, 16, 17, 18, 19, 20, 22, 23, 24, 25, 26, 27,
28, 29, 30, 32, 33, 34, 35, 36, 37, 38, 39, 40, 42, 43, 44,
45, 46, 47, 48, 49, 50, 52, 53, 54, 55, 56, 57, 58, 59, 60,
62, 63, 64, 65, 66, 67, 68, 69, 70, 72, 73, 74, 75, 76, 77,
78, 79, 80, 82, 83, 84, 85, 86, 87, 88, 89, 90}));
}
TEST(ReferenceTest, RandomJaxReference80) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{2, 1},
{0, 2, 1, -1},
1,
{2, 1},
{2, 2},
{2, 2},
std::multiplies<>());
EXPECT_THAT(res.shape, ElementsAre(10, 5));
EXPECT_THAT(
res.data,
ElementsAreArray({1, 24, 56, 96, 144, 1, 264, 336, 416, 504,
1, 704, 816, 936, 1064, 1, 1344, 1496, 1656, 1824,
1, 2184, 2376, 2576, 2784, 1, 3224, 3456, 3696, 3944,
1, 4464, 4736, 5016, 5304, 1, 5904, 6216, 6536, 6864,
1, 7544, 7896, 8256, 8624, 1, 92, 94, 96, 98}));
}
TEST(ReferenceTest, RandomJaxReference81) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{1, 1},
{0, -1, 0, 2},
1,
{1, 1},
{2, 1},
{2, 2},
std::multiplies<>());
EXPECT_THAT(res.shape, ElementsAre(5, 6));
EXPECT_THAT(res.data,
ElementsAreArray({1, 3, 5, 7, 9, 1, 21, 23, 25, 27,
29, 1, 41, 43, 45, 47, 49, 1, 61, 63,
65, 67, 69, 1, 81, 83, 85, 87, 89, 1}));
}
TEST(ReferenceTest, RandomJaxReference82) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{1, 1},
{0, 2, 0, 2},
1,
{2, 2},
{1, 2},
{1, 2},
std::multiplies<>());
EXPECT_THAT(res.shape, ElementsAre(11, 5));
EXPECT_THAT(
res.data,
ElementsAreArray(
{429, 2925, 8925, 20349, 171, 69069, 112125,
172125, 252909, 551, 494109, 664125, 874125, 1129869,
1131, 1803549, 2234925, 2738925, 3323229, 1911, 4765389,
5640525, 6630525, 7744989, 2891, 10387629, 11936925, 13652925,
15547149, 4071, 19918269, 22420125, 25150125, 28121709, 5451,
34845309, 38626125, 42706125, 47100669, 7031, 56896749, 62330925,
68144925, 74356029, 8811, 8463, 8835, 9215, 9603,
99, 1, 1, 1, 1, 1}));
}
TEST(ReferenceTest, RandomJaxReference83) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{1, 2},
{2, -1, -2, -2},
-2147483647,
{2, 1},
{1, 1},
{1, 2},
[](auto a, auto b) { return a >= b ? a : b; });
EXPECT_THAT(res.shape, ElementsAre(10, 8));
EXPECT_THAT(
res.data,
ElementsAreArray(
{-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, 2, 3,
4, 5, 6, 7, 8,
9, 12, 13, 14, 15,
16, 17, 18, 19, 22,
23, 24, 25, 26, 27,
28, 29, 32, 33, 34,
35, 36, 37, 38, 39,
42, 43, 44, 45, 46,
47, 48, 49, 52, 53,
54, 55, 56, 57, 58,
59, 62, 63, 64, 65,
66, 67, 68, 69, 72,
73, 74, 75, 76, 77,
78, 79, 82, 83, 84,
85, 86, 87, 88, 89}));
}
TEST(ReferenceTest, RandomJaxReference84) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{2, 2},
{2, -2, -2, 2},
2147483646,
{1, 1},
{2, 2},
{1, 2},
[](auto a, auto b) { return a <= b ? a : b; });
EXPECT_THAT(res.shape, ElementsAre(19, 10));
EXPECT_THAT(
res.data,
ElementsAreArray(
{2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2, 3, 4, 5, 6,
7, 8, 9, 10, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
12, 13, 14, 15, 16,
17, 18, 19, 20, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
22, 23, 24, 25, 26,
27, 28, 29, 30, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
32, 33, 34, 35, 36,
37, 38, 39, 40, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
42, 43, 44, 45, 46,
47, 48, 49, 50, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
52, 53, 54, 55, 56,
57, 58, 59, 60, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
62, 63, 64, 65, 66,
67, 68, 69, 70, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
72, 73, 74, 75, 76,
77, 78, 79, 80, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
82, 83, 84, 85, 86,
87, 88, 89, 90, 2147483646}));
}
TEST(ReferenceTest, RandomJaxReference85) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{2, 1},
{1, 2, -2, -2},
-2147483647,
{1, 2},
{2, 2},
{2, 2},
[](auto a, auto b) { return a >= b ? a : b; });
EXPECT_THAT(res.shape, ElementsAre(11, 2));
EXPECT_THAT(res.data, ElementsAreArray(
{-2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647}));
}
TEST(ReferenceTest, RandomJaxReference86) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{2, 1},
{-2, -1, 2, -2},
-2147483647,
{1, 2},
{2, 1},
{2, 2},
[](auto a, auto b) { return a >= b ? a : b; });
EXPECT_THAT(res.shape, ElementsAre(8, 5));
EXPECT_THAT(res.data,
ElementsAreArray(
{-2147483647, 12, 14, 16, 18, -2147483647, 22, 24, 26, 28,
-2147483647, 32, 34, 36, 38, -2147483647, 42, 44, 46, 48,
-2147483647, 52, 54, 56, 58, -2147483647, 62, 64, 66, 68,
-2147483647, 72, 74, 76, 78, -2147483647, 82, 84, 86, 88}));
}
TEST(ReferenceTest, RandomJaxReference87) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{1, 2},
{-2, 0, 2, -1},
-2147483647,
{1, 2},
{1, 1},
{1, 2},
[](auto a, auto b) { return a >= b ? a : b; });
EXPECT_THAT(res.shape, ElementsAre(8, 10));
EXPECT_THAT(res.data, ElementsAreArray(
{-2147483647, 21, 22, 23, 24, 25, 26, 27, 28, 29,
-2147483647, 31, 32, 33, 34, 35, 36, 37, 38, 39,
-2147483647, 41, 42, 43, 44, 45, 46, 47, 48, 49,
-2147483647, 51, 52, 53, 54, 55, 56, 57, 58, 59,
-2147483647, 61, 62, 63, 64, 65, 66, 67, 68, 69,
-2147483647, 71, 72, 73, 74, 75, 76, 77, 78, 79,
-2147483647, 81, 82, 83, 84, 85, 86, 87, 88, 89,
-2147483647, 91, 92, 93, 94, 95, 96, 97, 98, 99}));
}
TEST(ReferenceTest, RandomJaxReference88) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{1, 1},
{-2, 1, 2, 0},
-2147483647,
{2, 2},
{2, 1},
{2, 1},
[](auto a, auto b) { return a >= b ? a : b; });
EXPECT_THAT(res.shape, ElementsAre(4, 11));
EXPECT_THAT(
res.data,
ElementsAreArray({-2147483647, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50,
-2147483647, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70,
-2147483647, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90,
-2147483647, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90}));
}
TEST(ReferenceTest, RandomJaxReference89) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{2, 1},
{1, -2, 2, 2},
0,
{1, 1},
{2, 1},
{2, 1},
std::plus<>());
EXPECT_THAT(res.shape, ElementsAre(9, 14));
EXPECT_THAT(
res.data,
ElementsAreArray({0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}));
}
TEST(ReferenceTest, RandomJaxReference90) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{1, 2},
{-2, 0, 1, 1},
0,
{1, 1},
{1, 1},
{2, 2},
std::plus<>());
EXPECT_THAT(res.shape, ElementsAre(4, 11));
EXPECT_THAT(res.data,
ElementsAreArray({0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}));
}
TEST(ReferenceTest, RandomJaxReference91) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{1, 1},
{-2, -2, 1, 2},
1,
{2, 2},
{1, 2},
{1, 2},
std::multiplies<>());
EXPECT_THAT(res.shape, ElementsAre(5, 6));
EXPECT_THAT(
res.data,
ElementsAreArray({704, 574464, 763776, 995904, 1276800, 1200,
1344, 2010624, 2477376, 3020544, 3648000, 2000,
2184, 5189184, 6120576, 7171584, 8352000, 3000,
3224, 11142144, 12773376, 14577024, 16564800, 4200,
4464, 21141504, 23755776, 26604864, 29702400, 5600}));
}
TEST(ReferenceTest, RandomJaxReference92) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{2, 2},
{0, 0, 0, 2},
2147483646,
{2, 2},
{1, 2},
{2, 2},
[](auto a, auto b) { return a <= b ? a : b; });
EXPECT_THAT(res.shape, ElementsAre(9, 10));
EXPECT_THAT(res.data, ElementsAreArray(
{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,
14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26,
27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52,
53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65,
66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78,
79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90}));
}
TEST(ReferenceTest, RandomJaxReference93) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{1, 2},
{0, -1, 0, -2},
2147483646,
{1, 1},
{1, 2},
{1, 1},
[](auto a, auto b) { return a <= b ? a : b; });
EXPECT_THAT(res.shape, ElementsAre(9, 17));
EXPECT_THAT(res.data,
ElementsAreArray(
{1, 2147483646, 2, 2147483646, 3,
2147483646, 4, 2147483646, 5, 2147483646,
6, 2147483646, 7, 2147483646, 8,
2147483646, 9, 11, 2147483646, 12,
2147483646, 13, 2147483646, 14, 2147483646,
15, 2147483646, 16, 2147483646, 17,
2147483646, 18, 2147483646, 19, 21,
2147483646, 22, 2147483646, 23, 2147483646,
24, 2147483646, 25, 2147483646, 26,
2147483646, 27, 2147483646, 28, 2147483646,
29, 31, 2147483646, 32, 2147483646,
33, 2147483646, 34, 2147483646, 35,
2147483646, 36, 2147483646, 37, 2147483646,
38, 2147483646, 39, 41, 2147483646,
42, 2147483646, 43, 2147483646, 44,
2147483646, 45, 2147483646, 46, 2147483646,
47, 2147483646, 48, 2147483646, 49,
51, 2147483646, 52, 2147483646, 53,
2147483646, 54, 2147483646, 55, 2147483646,
56, 2147483646, 57, 2147483646, 58,
2147483646, 59, 61, 2147483646, 62,
2147483646, 63, 2147483646, 64, 2147483646,
65, 2147483646, 66, 2147483646, 67,
2147483646, 68, 2147483646, 69, 71,
2147483646, 72, 2147483646, 73, 2147483646,
74, 2147483646, 75, 2147483646, 76,
2147483646, 77, 2147483646, 78, 2147483646,
79, 81, 2147483646, 82, 2147483646,
83, 2147483646, 84, 2147483646, 85,
2147483646, 86, 2147483646, 87, 2147483646,
88, 2147483646, 89}));
}
TEST(ReferenceTest, RandomJaxReference94) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{1, 1},
{-2, 0, -1, -2},
-2147483647,
{1, 2},
{2, 1},
{1, 2},
[](auto a, auto b) { return a >= b ? a : b; });
EXPECT_THAT(res.shape, ElementsAre(8, 3));
EXPECT_THAT(res.data, ElementsAreArray({23, 25, 27, 33, 35, 37, 43, 45,
47, 53, 55, 57, 63, 65, 67, 73,
75, 77, 83, 85, 87, 93, 95, 97}));
}
TEST(ReferenceTest, RandomJaxReference95) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{1, 2},
{0, 0, 2, 2},
1,
{1, 1},
{1, 1},
{1, 1},
std::multiplies<>());
EXPECT_THAT(res.shape, ElementsAre(10, 23));
EXPECT_THAT(
res.data,
ElementsAreArray(
{1, 1, 1, 1, 2, 1, 3, 1, 4, 1, 5, 1, 6, 1, 7, 1, 8,
1, 9, 1, 10, 1, 1, 1, 1, 11, 1, 12, 1, 13, 1, 14, 1, 15,
1, 16, 1, 17, 1, 18, 1, 19, 1, 20, 1, 1, 1, 1, 21, 1, 22,
1, 23, 1, 24, 1, 25, 1, 26, 1, 27, 1, 28, 1, 29, 1, 30, 1,
1, 1, 1, 31, 1, 32, 1, 33, 1, 34, 1, 35, 1, 36, 1, 37, 1,
38, 1, 39, 1, 40, 1, 1, 1, 1, 41, 1, 42, 1, 43, 1, 44, 1,
45, 1, 46, 1, 47, 1, 48, 1, 49, 1, 50, 1, 1, 1, 1, 51, 1,
52, 1, 53, 1, 54, 1, 55, 1, 56, 1, 57, 1, 58, 1, 59, 1, 60,
1, 1, 1, 1, 61, 1, 62, 1, 63, 1, 64, 1, 65, 1, 66, 1, 67,
1, 68, 1, 69, 1, 70, 1, 1, 1, 1, 71, 1, 72, 1, 73, 1, 74,
1, 75, 1, 76, 1, 77, 1, 78, 1, 79, 1, 80, 1, 1, 1, 1, 81,
1, 82, 1, 83, 1, 84, 1, 85, 1, 86, 1, 87, 1, 88, 1, 89, 1,
90, 1, 1, 1, 1, 91, 1, 92, 1, 93, 1, 94, 1, 95, 1, 96, 1,
97, 1, 98, 1, 99, 1, 100, 1, 1}));
}
TEST(ReferenceTest, RandomJaxReference96) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{1, 1},
{2, -1, -1, 2},
0,
{2, 2},
{1, 1},
{1, 1},
std::plus<>());
EXPECT_THAT(res.shape, ElementsAre(10, 10));
EXPECT_THAT(
res.data,
ElementsAreArray(
{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, 7, 9,
11, 13, 15, 17, 19, 10, 0, 30, 34, 38, 42, 46, 50,
54, 58, 30, 0, 70, 74, 78, 82, 86, 90, 94, 98, 50,
0, 110, 114, 118, 122, 126, 130, 134, 138, 70, 0, 150, 154,
158, 162, 166, 170, 174, 178, 90, 0, 190, 194, 198, 202, 206,
210, 214, 218, 110, 0, 230, 234, 238, 242, 246, 250, 254, 258,
130, 0, 270, 274, 278, 282, 286, 290, 294, 298, 150, 0, 310,
314, 318, 322, 326, 330, 334, 338, 170, 0}));
}
TEST(ReferenceTest, RandomJaxReference97) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{1, 1},
{2, 2, -1, 1},
0,
{2, 2},
{2, 1},
{1, 2},
std::plus<>());
EXPECT_THAT(res.shape, ElementsAre(12, 5));
EXPECT_THAT(
res.data,
ElementsAreArray({5, 9, 13, 17, 10, 25, 29, 33, 37, 20,
50, 58, 66, 74, 40, 90, 98, 106, 114, 60,
130, 138, 146, 154, 80, 170, 178, 186, 194, 100,
210, 218, 226, 234, 120, 250, 258, 266, 274, 140,
290, 298, 306, 314, 160, 330, 338, 346, 354, 180,
165, 169, 173, 177, 90, 185, 189, 193, 197, 100}));
}
TEST(ReferenceTest, RandomJaxReference98) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{2, 2},
{2, -2, -1, 0},
2147483646,
{2, 2},
{1, 1},
{1, 1},
[](auto a, auto b) { return a <= b ? a : b; });
EXPECT_THAT(res.shape, ElementsAre(18, 17));
EXPECT_THAT(res.data,
ElementsAreArray(
{2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2, 2, 3,
3, 4, 4, 5, 5,
6, 6, 7, 7, 8,
8, 9, 9, 10, 2,
2, 3, 3, 4, 4,
5, 5, 6, 6, 7,
7, 8, 8, 9, 9,
10, 12, 12, 13, 13,
14, 14, 15, 15, 16,
16, 17, 17, 18, 18,
19, 19, 20, 12, 12,
13, 13, 14, 14, 15,
15, 16, 16, 17, 17,
18, 18, 19, 19, 20,
22, 22, 23, 23, 24,
24, 25, 25, 26, 26,
27, 27, 28, 28, 29,
29, 30, 22, 22, 23,
23, 24, 24, 25, 25,
26, 26, 27, 27, 28,
28, 29, 29, 30, 32,
32, 33, 33, 34, 34,
35, 35, 36, 36, 37,
37, 38, 38, 39, 39,
40, 32, 32, 33, 33,
34, 34, 35, 35, 36,
36, 37, 37, 38, 38,
39, 39, 40, 42, 42,
43, 43, 44, 44, 45,
45, 46, 46, 47, 47,
48, 48, 49, 49, 50,
42, 42, 43, 43, 44,
44, 45, 45, 46, 46,
47, 47, 48, 48, 49,
49, 50, 52, 52, 53,
53, 54, 54, 55, 55,
56, 56, 57, 57, 58,
58, 59, 59, 60, 52,
52, 53, 53, 54, 54,
55, 55, 56, 56, 57,
57, 58, 58, 59, 59,
60, 62, 62, 63, 63,
64, 64, 65, 65, 66,
66, 67, 67, 68, 68,
69, 69, 70, 62, 62,
63, 63, 64, 64, 65,
65, 66, 66, 67, 67,
68, 68, 69, 69, 70,
72, 72, 73, 73, 74,
74, 75, 75, 76, 76,
77, 77, 78, 78, 79,
79, 80, 72, 72, 73,
73, 74, 74, 75, 75,
76, 76, 77, 77, 78,
78, 79, 79, 80, 82,
82, 83, 83, 84, 84,
85, 85, 86, 86, 87,
87, 88, 88, 89, 89,
90}));
}
TEST(ReferenceTest, RandomJaxReference99) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{1, 2},
{-1, -1, -2, 1},
1,
{1, 1},
{2, 1},
{2, 2},
std::multiplies<>());
EXPECT_THAT(res.shape, ElementsAre(4, 9));
EXPECT_THAT(res.data, ElementsAreArray({12, 13, 14, 15, 16, 17, 18, 19, 20,
32, 33, 34, 35, 36, 37, 38, 39, 40,
52, 53, 54, 55, 56, 57, 58, 59, 60,
72, 73, 74, 75, 76, 77, 78, 79, 80}));
}
TEST(ReferenceTest, RandomJaxReference100) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{2, 2},
{0, 1, 1, 1},
2147483646,
{1, 2},
{1, 1},
{2, 1},
[](auto a, auto b) { return a <= b ? a : b; });
EXPECT_THAT(res.shape, ElementsAre(10, 20));
EXPECT_THAT(
res.data,
ElementsAreArray(
{1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9,
9, 10, 10, 11, 11, 12, 12, 13, 13, 14, 14, 15, 15, 16, 16, 17, 17,
18, 18, 19, 19, 20, 20, 21, 21, 22, 22, 23, 23, 24, 24, 25, 25, 26,
26, 27, 27, 28, 28, 29, 29, 30, 30, 31, 31, 32, 32, 33, 33, 34, 34,
35, 35, 36, 36, 37, 37, 38, 38, 39, 39, 40, 40, 41, 41, 42, 42, 43,
43, 44, 44, 45, 45, 46, 46, 47, 47, 48, 48, 49, 49, 50, 50, 51, 51,
52, 52, 53, 53, 54, 54, 55, 55, 56, 56, 57, 57, 58, 58, 59, 59, 60,
60, 61, 61, 62, 62, 63, 63, 64, 64, 65, 65, 66, 66, 67, 67, 68, 68,
69, 69, 70, 70, 71, 71, 72, 72, 73, 73, 74, 74, 75, 75, 76, 76, 77,
77, 78, 78, 79, 79, 80, 80, 81, 81, 82, 82, 83, 83, 84, 84, 85, 85,
86, 86, 87, 87, 88, 88, 89, 89, 90, 90, 91, 91, 92, 92, 93, 93, 94,
94, 95, 95, 96, 96, 97, 97, 98, 98, 99, 99, 100, 100}));
}
TEST(ReferenceTest, RandomJaxReference101) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{2, 1},
{-2, 2, 2, 0},
1,
{2, 1},
{2, 1},
{1, 1},
std::multiplies<>());
EXPECT_THAT(res.shape, ElementsAre(17, 12));
EXPECT_THAT(
res.data,
ElementsAreArray(
{1, 1, 231, 264, 299, 336, 375, 416, 459, 504, 551, 600,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 651, 704, 759, 816, 875, 936, 999, 1064, 1131, 1200,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1271, 1344, 1419, 1496, 1575, 1656, 1739, 1824, 1911, 2000,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 2091, 2184, 2279, 2376, 2475, 2576, 2679, 2784, 2891, 3000,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 3111, 3224, 3339, 3456, 3575, 3696, 3819, 3944, 4071, 4200,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 4331, 4464, 4599, 4736, 4875, 5016, 5159, 5304, 5451, 5600,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 5751, 5904, 6059, 6216, 6375, 6536, 6699, 6864, 7031, 7200,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 7371, 7544, 7719, 7896, 8075, 8256, 8439, 8624, 8811, 9000,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100}));
}
TEST(ReferenceTest, RandomJaxReference102) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{2, 2},
{1, 1, -2, 1},
-2147483647,
{1, 2},
{2, 2},
{2, 1},
[](auto a, auto b) { return a >= b ? a : b; });
EXPECT_THAT(res.shape, ElementsAre(11, 16));
EXPECT_THAT(
res.data,
ElementsAreArray(
{-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647}));
}
TEST(ReferenceTest, RandomJaxReference103) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{1, 1},
{0, 1, 1, -1},
1,
{1, 2},
{2, 2},
{1, 1},
std::multiplies<>());
EXPECT_THAT(res.shape, ElementsAre(11, 8));
EXPECT_THAT(
res.data,
ElementsAreArray(
{2, 3, 8, 15, 24, 35, 48, 63, 12, 143, 168,
195, 224, 255, 288, 323, 22, 483, 528, 575, 624, 675,
728, 783, 32, 1023, 1088, 1155, 1224, 1295, 1368, 1443, 42,
1763, 1848, 1935, 2024, 2115, 2208, 2303, 52, 2703, 2808, 2915,
3024, 3135, 3248, 3363, 62, 3843, 3968, 4095, 4224, 4355, 4488,
4623, 72, 5183, 5328, 5475, 5624, 5775, 5928, 6083, 82, 6723,
6888, 7055, 7224, 7395, 7568, 7743, 92, 8463, 8648, 8835, 9024,
9215, 9408, 9603, 1, 1, 1, 1, 1, 1, 1, 1}));
}
TEST(ReferenceTest, RandomJaxReference104) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{2, 1},
{2, -1, 1, -1},
0,
{2, 2},
{2, 1},
{1, 1},
std::plus<>());
EXPECT_THAT(res.shape, ElementsAre(18, 9));
EXPECT_THAT(
res.data,
ElementsAreArray(
{1, 3, 5, 7, 9, 11, 13, 15, 17, 0, 0, 0, 0, 0,
0, 0, 0, 0, 12, 26, 30, 34, 38, 42, 46, 50, 54, 0,
0, 0, 0, 0, 0, 0, 0, 0, 32, 66, 70, 74, 78, 82,
86, 90, 94, 0, 0, 0, 0, 0, 0, 0, 0, 0, 52, 106,
110, 114, 118, 122, 126, 130, 134, 0, 0, 0, 0, 0, 0, 0,
0, 0, 72, 146, 150, 154, 158, 162, 166, 170, 174, 0, 0, 0,
0, 0, 0, 0, 0, 0, 92, 186, 190, 194, 198, 202, 206, 210,
214, 0, 0, 0, 0, 0, 0, 0, 0, 0, 112, 226, 230, 234,
238, 242, 246, 250, 254, 0, 0, 0, 0, 0, 0, 0, 0, 0,
132, 266, 270, 274, 278, 282, 286, 290, 294, 0, 0, 0, 0, 0,
0, 0, 0, 0, 152, 306, 310, 314, 318, 322, 326, 330, 334, 0,
0, 0, 0, 0, 0, 0, 0, 0}));
}
TEST(ReferenceTest, RandomJaxReference105) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{2, 1},
{-1, 2, 1, -1},
1,
{2, 1},
{2, 1},
{1, 1},
std::multiplies<>());
EXPECT_THAT(res.shape, ElementsAre(18, 10));
EXPECT_THAT(
res.data,
ElementsAreArray(
{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
231, 264, 299, 336, 375, 416, 459, 504, 551, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 651, 704,
759, 816, 875, 936, 999, 1064, 1131, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1271, 1344, 1419, 1496,
1575, 1656, 1739, 1824, 1911, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 2091, 2184, 2279, 2376, 2475, 2576,
2679, 2784, 2891, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 3111, 3224, 3339, 3456, 3575, 3696, 3819, 3944,
4071, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 4331, 4464, 4599, 4736, 4875, 5016, 5159, 5304, 5451, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 5751,
5904, 6059, 6216, 6375, 6536, 6699, 6864, 7031, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 7371, 7544, 7719,
7896, 8075, 8256, 8439, 8624, 8811, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 91, 92, 93, 94, 95,
96, 97, 98, 99}));
}
TEST(ReferenceTest, RandomJaxReference106) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{1, 2},
{2, 2, -2, 2},
1,
{1, 2},
{2, 1},
{2, 1},
std::multiplies<>());
EXPECT_THAT(res.shape, ElementsAre(7, 18));
EXPECT_THAT(
res.data,
ElementsAreArray(
{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9,
9, 10, 10, 1, 22, 23, 23, 24, 24, 25, 25, 26, 26, 27, 27, 28,
28, 29, 29, 30, 30, 1, 42, 43, 43, 44, 44, 45, 45, 46, 46, 47,
47, 48, 48, 49, 49, 50, 50, 1, 62, 63, 63, 64, 64, 65, 65, 66,
66, 67, 67, 68, 68, 69, 69, 70, 70, 1, 82, 83, 83, 84, 84, 85,
85, 86, 86, 87, 87, 88, 88, 89, 89, 90, 90, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}));
}
TEST(ReferenceTest, RandomJaxReference107) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{2, 2},
{0, 1, 2, 0},
2147483646,
{1, 1},
{1, 2},
{2, 2},
[](auto a, auto b) { return a <= b ? a : b; });
EXPECT_THAT(res.shape, ElementsAre(10, 11));
EXPECT_THAT(
res.data,
ElementsAreArray({2147483646, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
2147483646, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
2147483646, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30,
2147483646, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40,
2147483646, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50,
2147483646, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60,
2147483646, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70,
2147483646, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80,
2147483646, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90,
2147483646, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100}));
}
TEST(ReferenceTest, RandomJaxReference108) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{1, 2},
{2, -1, 2, -1},
-2147483647,
{1, 1},
{2, 1},
{1, 1},
[](auto a, auto b) { return a >= b ? a : b; });
EXPECT_THAT(res.shape, ElementsAre(11, 20));
EXPECT_THAT(
res.data,
ElementsAreArray(
{-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, 1, -2147483647, 2,
-2147483647, 3, -2147483647, 4, -2147483647,
5, -2147483647, 6, -2147483647, 7,
-2147483647, 8, -2147483647, 9, -2147483647,
-2147483647, -2147483647, 11, -2147483647, 12,
-2147483647, 13, -2147483647, 14, -2147483647,
15, -2147483647, 16, -2147483647, 17,
-2147483647, 18, -2147483647, 19, -2147483647,
-2147483647, -2147483647, 21, -2147483647, 22,
-2147483647, 23, -2147483647, 24, -2147483647,
25, -2147483647, 26, -2147483647, 27,
-2147483647, 28, -2147483647, 29, -2147483647,
-2147483647, -2147483647, 31, -2147483647, 32,
-2147483647, 33, -2147483647, 34, -2147483647,
35, -2147483647, 36, -2147483647, 37,
-2147483647, 38, -2147483647, 39, -2147483647,
-2147483647, -2147483647, 41, -2147483647, 42,
-2147483647, 43, -2147483647, 44, -2147483647,
45, -2147483647, 46, -2147483647, 47,
-2147483647, 48, -2147483647, 49, -2147483647,
-2147483647, -2147483647, 51, -2147483647, 52,
-2147483647, 53, -2147483647, 54, -2147483647,
55, -2147483647, 56, -2147483647, 57,
-2147483647, 58, -2147483647, 59, -2147483647,
-2147483647, -2147483647, 61, -2147483647, 62,
-2147483647, 63, -2147483647, 64, -2147483647,
65, -2147483647, 66, -2147483647, 67,
-2147483647, 68, -2147483647, 69, -2147483647,
-2147483647, -2147483647, 71, -2147483647, 72,
-2147483647, 73, -2147483647, 74, -2147483647,
75, -2147483647, 76, -2147483647, 77,
-2147483647, 78, -2147483647, 79, -2147483647,
-2147483647, -2147483647, 81, -2147483647, 82,
-2147483647, 83, -2147483647, 84, -2147483647,
85, -2147483647, 86, -2147483647, 87,
-2147483647, 88, -2147483647, 89, -2147483647}));
}
TEST(ReferenceTest, RandomJaxReference109) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{2, 1},
{0, -2, 0, 0},
2147483646,
{2, 2},
{1, 1},
{2, 2},
[](auto a, auto b) { return a <= b ? a : b; });
EXPECT_THAT(res.shape, ElementsAre(8, 5));
EXPECT_THAT(
res.data,
ElementsAreArray({1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27,
29, 31, 33, 35, 37, 39, 41, 43, 45, 47, 49, 51, 53, 55,
57, 59, 61, 63, 65, 67, 69, 71, 73, 75, 77, 79}));
}
TEST(ReferenceTest, RandomJaxReference110) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{2, 2},
{-1, -1, 2, 0},
1,
{1, 2},
{1, 1},
{1, 1},
std::multiplies<>());
EXPECT_THAT(res.shape, ElementsAre(17, 20));
EXPECT_THAT(
res.data,
ElementsAreArray(
{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 11, 11, 12, 12, 13, 13, 14, 14, 15, 15, 16, 16, 17,
17, 18, 18, 19, 19, 20, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 21, 21, 22, 22, 23, 23, 24,
24, 25, 25, 26, 26, 27, 27, 28, 28, 29, 29, 30, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 31,
31, 32, 32, 33, 33, 34, 34, 35, 35, 36, 36, 37, 37, 38, 38, 39, 39,
40, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 41, 41, 42, 42, 43, 43, 44, 44, 45, 45, 46, 46,
47, 47, 48, 48, 49, 49, 50, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 51, 51, 52, 52, 53, 53,
54, 54, 55, 55, 56, 56, 57, 57, 58, 58, 59, 59, 60, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
61, 61, 62, 62, 63, 63, 64, 64, 65, 65, 66, 66, 67, 67, 68, 68, 69,
69, 70, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 71, 71, 72, 72, 73, 73, 74, 74, 75, 75, 76,
76, 77, 77, 78, 78, 79, 79, 80, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 81, 81, 82, 82, 83,
83, 84, 84, 85, 85, 86, 86, 87, 87, 88, 88, 89, 89, 90, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}));
}
TEST(ReferenceTest, RandomJaxReference111) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{1, 1},
{-1, 0, 2, -1},
2147483646,
{2, 1},
{1, 2},
{1, 1},
[](auto a, auto b) { return a <= b ? a : b; });
EXPECT_THAT(res.shape, ElementsAre(8, 11));
EXPECT_THAT(
res.data,
ElementsAreArray(
{2147483646, 2147483646, 11, 12, 13, 14, 15, 16, 17, 18, 19,
2147483646, 2147483646, 21, 22, 23, 24, 25, 26, 27, 28, 29,
2147483646, 2147483646, 31, 32, 33, 34, 35, 36, 37, 38, 39,
2147483646, 2147483646, 41, 42, 43, 44, 45, 46, 47, 48, 49,
2147483646, 2147483646, 51, 52, 53, 54, 55, 56, 57, 58, 59,
2147483646, 2147483646, 61, 62, 63, 64, 65, 66, 67, 68, 69,
2147483646, 2147483646, 71, 72, 73, 74, 75, 76, 77, 78, 79,
2147483646, 2147483646, 81, 82, 83, 84, 85, 86, 87, 88, 89}));
}
TEST(ReferenceTest, RandomJaxReference112) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{2, 1},
{1, 1, 1, 2},
2147483646,
{2, 1},
{1, 2},
{1, 1},
[](auto a, auto b) { return a <= b ? a : b; });
EXPECT_THAT(res.shape, ElementsAre(20, 13));
EXPECT_THAT(
res.data,
ElementsAreArray(
{2147483646, 1, 2, 3, 4,
5, 6, 7, 8, 9,
10, 2147483646, 2147483646, 2147483646, 1,
2, 3, 4, 5, 6,
7, 8, 9, 10, 2147483646,
2147483646, 2147483646, 11, 12, 13,
14, 15, 16, 17, 18,
19, 20, 2147483646, 2147483646, 2147483646,
11, 12, 13, 14, 15,
16, 17, 18, 19, 20,
2147483646, 2147483646, 2147483646, 21, 22,
23, 24, 25, 26, 27,
28, 29, 30, 2147483646, 2147483646,
2147483646, 21, 22, 23, 24,
25, 26, 27, 28, 29,
30, 2147483646, 2147483646, 2147483646, 31,
32, 33, 34, 35, 36,
37, 38, 39, 40, 2147483646,
2147483646, 2147483646, 31, 32, 33,
34, 35, 36, 37, 38,
39, 40, 2147483646, 2147483646, 2147483646,
41, 42, 43, 44, 45,
46, 47, 48, 49, 50,
2147483646, 2147483646, 2147483646, 41, 42,
43, 44, 45, 46, 47,
48, 49, 50, 2147483646, 2147483646,
2147483646, 51, 52, 53, 54,
55, 56, 57, 58, 59,
60, 2147483646, 2147483646, 2147483646, 51,
52, 53, 54, 55, 56,
57, 58, 59, 60, 2147483646,
2147483646, 2147483646, 61, 62, 63,
64, 65, 66, 67, 68,
69, 70, 2147483646, 2147483646, 2147483646,
61, 62, 63, 64, 65,
66, 67, 68, 69, 70,
2147483646, 2147483646, 2147483646, 71, 72,
73, 74, 75, 76, 77,
78, 79, 80, 2147483646, 2147483646,
2147483646, 71, 72, 73, 74,
75, 76, 77, 78, 79,
80, 2147483646, 2147483646, 2147483646, 81,
82, 83, 84, 85, 86,
87, 88, 89, 90, 2147483646,
2147483646, 2147483646, 81, 82, 83,
84, 85, 86, 87, 88,
89, 90, 2147483646, 2147483646, 2147483646,
91, 92, 93, 94, 95,
96, 97, 98, 99, 100,
2147483646, 2147483646, 2147483646, 91, 92,
93, 94, 95, 96, 97,
98, 99, 100, 2147483646, 2147483646}));
}
TEST(ReferenceTest, RandomJaxReference113) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{1, 1},
{2, -2, 1, 0},
-2147483647,
{1, 2},
{2, 1},
{2, 1},
[](auto a, auto b) { return a >= b ? a : b; });
EXPECT_THAT(res.shape, ElementsAre(5, 10));
EXPECT_THAT(
res.data,
ElementsAreArray(
{-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
1, 2, 3, 4, 5,
6, 7, 8, 9, 10,
21, 22, 23, 24, 25,
26, 27, 28, 29, 30,
41, 42, 43, 44, 45,
46, 47, 48, 49, 50,
61, 62, 63, 64, 65,
66, 67, 68, 69, 70}));
}
TEST(ReferenceTest, RandomJaxReference114) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{2, 1},
{-2, -1, 1, 1},
0,
{1, 2},
{1, 2},
{2, 1},
std::plus<>());
EXPECT_THAT(res.shape, ElementsAre(8, 10));
EXPECT_THAT(
res.data,
ElementsAreArray(
{12, 24, 26, 28, 30, 32, 34, 36, 38, 19, 22, 44, 46, 48,
50, 52, 54, 56, 58, 29, 32, 64, 66, 68, 70, 72, 74, 76,
78, 39, 42, 84, 86, 88, 90, 92, 94, 96, 98, 49, 52, 104,
106, 108, 110, 112, 114, 116, 118, 59, 62, 124, 126, 128, 130, 132,
134, 136, 138, 69, 72, 144, 146, 148, 150, 152, 154, 156, 158, 79,
82, 164, 166, 168, 170, 172, 174, 176, 178, 89}));
}
TEST(ReferenceTest, RandomJaxReference115) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{2, 1},
{-2, -2, -2, 1},
0,
{2, 2},
{2, 1},
{2, 2},
std::plus<>());
EXPECT_THAT(res.shape, ElementsAre(7, 4));
EXPECT_THAT(res.data, ElementsAreArray({74, 82, 90, 98, 114, 122, 130,
138, 154, 162, 170, 178, 194, 202,
210, 218, 234, 242, 250, 258, 274,
282, 290, 298, 314, 322, 330, 338}));
}
TEST(ReferenceTest, RandomJaxReference116) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{2, 2},
{0, -2, 1, 1},
-2147483647,
{2, 1},
{1, 2},
{1, 1},
[](auto a, auto b) { return a >= b ? a : b; });
EXPECT_THAT(res.shape, ElementsAre(16, 21));
EXPECT_THAT(
res.data,
ElementsAreArray(
{-2147483647, 1, -2147483647, 2, -2147483647,
3, -2147483647, 4, -2147483647, 5,
-2147483647, 6, -2147483647, 7, -2147483647,
8, -2147483647, 9, -2147483647, 10,
-2147483647, -2147483647, 11, -2147483647, 12,
-2147483647, 13, -2147483647, 14, -2147483647,
15, -2147483647, 16, -2147483647, 17,
-2147483647, 18, -2147483647, 19, -2147483647,
20, -2147483647, -2147483647, 11, -2147483647,
12, -2147483647, 13, -2147483647, 14,
-2147483647, 15, -2147483647, 16, -2147483647,
17, -2147483647, 18, -2147483647, 19,
-2147483647, 20, -2147483647, -2147483647, 21,
-2147483647, 22, -2147483647, 23, -2147483647,
24, -2147483647, 25, -2147483647, 26,
-2147483647, 27, -2147483647, 28, -2147483647,
29, -2147483647, 30, -2147483647, -2147483647,
21, -2147483647, 22, -2147483647, 23,
-2147483647, 24, -2147483647, 25, -2147483647,
26, -2147483647, 27, -2147483647, 28,
-2147483647, 29, -2147483647, 30, -2147483647,
-2147483647, 31, -2147483647, 32, -2147483647,
33, -2147483647, 34, -2147483647, 35,
-2147483647, 36, -2147483647, 37, -2147483647,
38, -2147483647, 39, -2147483647, 40,
-2147483647, -2147483647, 31, -2147483647, 32,
-2147483647, 33, -2147483647, 34, -2147483647,
35, -2147483647, 36, -2147483647, 37,
-2147483647, 38, -2147483647, 39, -2147483647,
40, -2147483647, -2147483647, 41, -2147483647,
42, -2147483647, 43, -2147483647, 44,
-2147483647, 45, -2147483647, 46, -2147483647,
47, -2147483647, 48, -2147483647, 49,
-2147483647, 50, -2147483647, -2147483647, 41,
-2147483647, 42, -2147483647, 43, -2147483647,
44, -2147483647, 45, -2147483647, 46,
-2147483647, 47, -2147483647, 48, -2147483647,
49, -2147483647, 50, -2147483647, -2147483647,
51, -2147483647, 52, -2147483647, 53,
-2147483647, 54, -2147483647, 55, -2147483647,
56, -2147483647, 57, -2147483647, 58,
-2147483647, 59, -2147483647, 60, -2147483647,
-2147483647, 51, -2147483647, 52, -2147483647,
53, -2147483647, 54, -2147483647, 55,
-2147483647, 56, -2147483647, 57, -2147483647,
58, -2147483647, 59, -2147483647, 60,
-2147483647, -2147483647, 61, -2147483647, 62,
-2147483647, 63, -2147483647, 64, -2147483647,
65, -2147483647, 66, -2147483647, 67,
-2147483647, 68, -2147483647, 69, -2147483647,
70, -2147483647, -2147483647, 61, -2147483647,
62, -2147483647, 63, -2147483647, 64,
-2147483647, 65, -2147483647, 66, -2147483647,
67, -2147483647, 68, -2147483647, 69,
-2147483647, 70, -2147483647, -2147483647, 71,
-2147483647, 72, -2147483647, 73, -2147483647,
74, -2147483647, 75, -2147483647, 76,
-2147483647, 77, -2147483647, 78, -2147483647,
79, -2147483647, 80, -2147483647, -2147483647,
71, -2147483647, 72, -2147483647, 73,
-2147483647, 74, -2147483647, 75, -2147483647,
76, -2147483647, 77, -2147483647, 78,
-2147483647, 79, -2147483647, 80, -2147483647,
-2147483647, 81, -2147483647, 82, -2147483647,
83, -2147483647, 84, -2147483647, 85,
-2147483647, 86, -2147483647, 87, -2147483647,
88, -2147483647, 89, -2147483647, 90,
-2147483647}));
}
TEST(ReferenceTest, RandomJaxReference117) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{2, 1},
{-2, -2, -1, 0},
1,
{1, 2},
{2, 1},
{2, 1},
std::multiplies<>());
EXPECT_THAT(res.shape, ElementsAre(8, 8));
EXPECT_THAT(
res.data,
ElementsAreArray(
{156, 182, 210, 240, 272, 306, 342, 380, 506, 552, 600,
650, 702, 756, 812, 870, 1056, 1122, 1190, 1260, 1332, 1406,
1482, 1560, 1806, 1892, 1980, 2070, 2162, 2256, 2352, 2450, 2756,
2862, 2970, 3080, 3192, 3306, 3422, 3540, 3906, 4032, 4160, 4290,
4422, 4556, 4692, 4830, 5256, 5402, 5550, 5700, 5852, 6006, 6162,
6320, 6806, 6972, 7140, 7310, 7482, 7656, 7832, 8010}));
}
TEST(ReferenceTest, RandomJaxReference118) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{2, 2},
{-2, -1, -2, 1},
0,
{1, 2},
{2, 2},
{2, 2},
std::plus<>());
EXPECT_THAT(res.shape, ElementsAre(8, 8));
EXPECT_THAT(
res.data,
ElementsAreArray({25, 27, 29, 31, 33, 35, 37, 39, 45, 47, 49,
51, 53, 55, 57, 59, 65, 67, 69, 71, 73, 75,
77, 79, 85, 87, 89, 91, 93, 95, 97, 99, 105,
107, 109, 111, 113, 115, 117, 119, 125, 127, 129, 131,
133, 135, 137, 139, 145, 147, 149, 151, 153, 155, 157,
159, 165, 167, 169, 171, 173, 175, 177, 179}));
}
TEST(ReferenceTest, RandomJaxReference119) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{1, 2},
{-2, 0, 1, 2},
1,
{2, 1},
{2, 2},
{1, 1},
std::multiplies<>());
EXPECT_THAT(res.shape, ElementsAre(6, 22));
EXPECT_THAT(
res.data,
ElementsAreArray({1, 861, 1, 924, 1, 989, 1, 1056, 1, 1125, 1, 1196,
1, 1269, 1, 1344, 1, 1421, 1, 1500, 1, 1, 1, 1581,
1, 1664, 1, 1749, 1, 1836, 1, 1925, 1, 2016, 1, 2109,
1, 2204, 1, 2301, 1, 2400, 1, 1, 1, 2501, 1, 2604,
1, 2709, 1, 2816, 1, 2925, 1, 3036, 1, 3149, 1, 3264,
1, 3381, 1, 3500, 1, 1, 1, 3621, 1, 3744, 1, 3869,
1, 3996, 1, 4125, 1, 4256, 1, 4389, 1, 4524, 1, 4661,
1, 4800, 1, 1, 1, 4941, 1, 5084, 1, 5229, 1, 5376,
1, 5525, 1, 5676, 1, 5829, 1, 5984, 1, 6141, 1, 6300,
1, 1, 1, 6461, 1, 6624, 1, 6789, 1, 6956, 1, 7125,
1, 7296, 1, 7469, 1, 7644, 1, 7821, 1, 8000, 1, 1}));
}
TEST(ReferenceTest, RandomJaxReference120) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{2, 2},
{-2, 1, 2, 0},
2147483646,
{2, 1},
{1, 1},
{2, 1},
[](auto a, auto b) { return a <= b ? a : b; });
EXPECT_THAT(res.shape, ElementsAre(9, 21));
EXPECT_THAT(res.data,
ElementsAreArray(
{2147483646, 2147483646, 11, 2147483646, 12,
2147483646, 13, 2147483646, 14, 2147483646,
15, 2147483646, 16, 2147483646, 17,
2147483646, 18, 2147483646, 19, 2147483646,
20, 2147483646, 2147483646, 21, 2147483646,
22, 2147483646, 23, 2147483646, 24,
2147483646, 25, 2147483646, 26, 2147483646,
27, 2147483646, 28, 2147483646, 29,
2147483646, 30, 2147483646, 2147483646, 31,
2147483646, 32, 2147483646, 33, 2147483646,
34, 2147483646, 35, 2147483646, 36,
2147483646, 37, 2147483646, 38, 2147483646,
39, 2147483646, 40, 2147483646, 2147483646,
41, 2147483646, 42, 2147483646, 43,
2147483646, 44, 2147483646, 45, 2147483646,
46, 2147483646, 47, 2147483646, 48,
2147483646, 49, 2147483646, 50, 2147483646,
2147483646, 51, 2147483646, 52, 2147483646,
53, 2147483646, 54, 2147483646, 55,
2147483646, 56, 2147483646, 57, 2147483646,
58, 2147483646, 59, 2147483646, 60,
2147483646, 2147483646, 61, 2147483646, 62,
2147483646, 63, 2147483646, 64, 2147483646,
65, 2147483646, 66, 2147483646, 67,
2147483646, 68, 2147483646, 69, 2147483646,
70, 2147483646, 2147483646, 71, 2147483646,
72, 2147483646, 73, 2147483646, 74,
2147483646, 75, 2147483646, 76, 2147483646,
77, 2147483646, 78, 2147483646, 79,
2147483646, 80, 2147483646, 2147483646, 81,
2147483646, 82, 2147483646, 83, 2147483646,
84, 2147483646, 85, 2147483646, 86,
2147483646, 87, 2147483646, 88, 2147483646,
89, 2147483646, 90, 2147483646, 2147483646,
91, 2147483646, 92, 2147483646, 93,
2147483646, 94, 2147483646, 95, 2147483646,
96, 2147483646, 97, 2147483646, 98,
2147483646, 99, 2147483646, 100}));
}
TEST(ReferenceTest, RandomJaxReference121) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{2, 2},
{0, 2, -1, 1},
-2147483647,
{2, 2},
{1, 2},
{1, 2},
[](auto a, auto b) { return a >= b ? a : b; });
EXPECT_THAT(res.shape, ElementsAre(20, 9));
EXPECT_THAT(
res.data,
ElementsAreArray(
{-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647}));
}
TEST(ReferenceTest, RandomJaxReference122) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{1, 2},
{0, 2, -1, 1},
-2147483647,
{2, 1},
{2, 1},
{2, 2},
[](auto a, auto b) { return a >= b ? a : b; });
EXPECT_THAT(res.shape, ElementsAre(5, 10));
EXPECT_THAT(
res.data,
ElementsAreArray(
{-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647}));
}
TEST(ReferenceTest, RandomJaxReference123) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{1, 1},
{-2, -2, 0, 2},
0,
{1, 2},
{1, 1},
{1, 2},
std::plus<>());
EXPECT_THAT(res.shape, ElementsAre(6, 6));
EXPECT_THAT(res.data,
ElementsAreArray({43, 47, 51, 55, 59, 0, 63, 67, 71,
75, 79, 0, 83, 87, 91, 95, 99, 0,
103, 107, 111, 115, 119, 0, 123, 127, 131,
135, 139, 0, 143, 147, 151, 155, 159, 0}));
}
TEST(ReferenceTest, RandomJaxReference124) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{1, 1},
{0, 2, -2, 0},
1,
{2, 1},
{2, 1},
{1, 2},
std::multiplies<>());
EXPECT_THAT(res.shape, ElementsAre(10, 4));
EXPECT_THAT(res.data,
ElementsAreArray({69, 125, 189, 261, 429, 525, 629, 741,
989, 1125, 1269, 1421, 1749, 1925, 2109, 2301,
2709, 2925, 3149, 3381, 3869, 4125, 4389, 4661,
5229, 5525, 5829, 6141, 6789, 7125, 7469, 7821,
83, 85, 87, 89, 93, 95, 97, 99}));
}
TEST(ReferenceTest, RandomJaxReference125) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{2, 2},
{-1, -1, 2, 1},
0,
{1, 2},
{2, 1},
{2, 1},
std::plus<>());
EXPECT_THAT(res.shape, ElementsAre(9, 21));
EXPECT_THAT(
res.data,
ElementsAreArray({0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}));
}
TEST(ReferenceTest, RandomJaxReference126) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{2, 1},
{0, 1, 0, 0},
-2147483647,
{1, 1},
{1, 2},
{1, 2},
[](auto a, auto b) { return a >= b ? a : b; });
EXPECT_THAT(res.shape, ElementsAre(20, 5));
EXPECT_THAT(
res.data,
ElementsAreArray(
{1, 3, 5, 7, 9,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
11, 13, 15, 17, 19,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
21, 23, 25, 27, 29,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
31, 33, 35, 37, 39,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
41, 43, 45, 47, 49,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
51, 53, 55, 57, 59,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
61, 63, 65, 67, 69,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
71, 73, 75, 77, 79,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
81, 83, 85, 87, 89,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
91, 93, 95, 97, 99,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647}));
}
TEST(ReferenceTest, RandomJaxReference127) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{2, 1},
{1, -2, 0, -2},
0,
{2, 1},
{2, 1},
{1, 2},
std::plus<>());
EXPECT_THAT(res.shape, ElementsAre(16, 4));
EXPECT_THAT(
res.data,
ElementsAreArray(
{0, 0, 0, 0, 12, 16, 20, 24, 0, 0, 0, 0, 32, 36, 40, 44,
0, 0, 0, 0, 52, 56, 60, 64, 0, 0, 0, 0, 72, 76, 80, 84,
0, 0, 0, 0, 92, 96, 100, 104, 0, 0, 0, 0, 112, 116, 120, 124,
0, 0, 0, 0, 132, 136, 140, 144, 0, 0, 0, 0, 152, 156, 160, 164}));
}
TEST(ReferenceTest, RandomJaxReference128) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{1, 1},
{-1, -2, 0, -2},
2147483646,
{1, 2},
{2, 2},
{1, 2},
[](auto a, auto b) { return a <= b ? a : b; });
EXPECT_THAT(res.shape, ElementsAre(7, 3));
EXPECT_THAT(res.data,
ElementsAreArray({11, 13, 15, 21, 23, 25, 31, 33, 35, 41, 43,
45, 51, 53, 55, 61, 63, 65, 71, 73, 75}));
}
TEST(ReferenceTest, RandomJaxReference129) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{1, 2},
{1, 2, -1, 2},
1,
{2, 2},
{1, 2},
{1, 2},
std::multiplies<>());
EXPECT_THAT(res.shape, ElementsAre(12, 9));
EXPECT_THAT(
res.data,
ElementsAreArray({1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}));
}
TEST(ReferenceTest, RandomJaxReference130) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{2, 1},
{-1, 1, 1, 1},
1,
{1, 1},
{2, 2},
{1, 2},
std::multiplies<>());
EXPECT_THAT(res.shape, ElementsAre(19, 6));
EXPECT_THAT(
res.data,
ElementsAreArray(
{1, 1, 1, 1, 1, 1, 1, 12, 14, 16, 18, 20, 1, 1, 1, 1, 1,
1, 1, 22, 24, 26, 28, 30, 1, 1, 1, 1, 1, 1, 1, 32, 34, 36,
38, 40, 1, 1, 1, 1, 1, 1, 1, 42, 44, 46, 48, 50, 1, 1, 1,
1, 1, 1, 1, 52, 54, 56, 58, 60, 1, 1, 1, 1, 1, 1, 1, 62,
64, 66, 68, 70, 1, 1, 1, 1, 1, 1, 1, 72, 74, 76, 78, 80, 1,
1, 1, 1, 1, 1, 1, 82, 84, 86, 88, 90, 1, 1, 1, 1, 1, 1,
1, 92, 94, 96, 98, 100, 1, 1, 1, 1, 1, 1}));
}
TEST(ReferenceTest, RandomJaxReference131) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{2, 2},
{1, -1, -2, -1},
-2147483647,
{2, 1},
{1, 2},
{2, 1},
[](auto a, auto b) { return a >= b ? a : b; });
EXPECT_THAT(res.shape, ElementsAre(9, 16));
EXPECT_THAT(
res.data,
ElementsAreArray({2, -2147483647, 3, -2147483647, 4, -2147483647,
5, -2147483647, 6, -2147483647, 7, -2147483647,
8, -2147483647, 9, -2147483647, 12, -2147483647,
13, -2147483647, 14, -2147483647, 15, -2147483647,
16, -2147483647, 17, -2147483647, 18, -2147483647,
19, -2147483647, 22, -2147483647, 23, -2147483647,
24, -2147483647, 25, -2147483647, 26, -2147483647,
27, -2147483647, 28, -2147483647, 29, -2147483647,
32, -2147483647, 33, -2147483647, 34, -2147483647,
35, -2147483647, 36, -2147483647, 37, -2147483647,
38, -2147483647, 39, -2147483647, 42, -2147483647,
43, -2147483647, 44, -2147483647, 45, -2147483647,
46, -2147483647, 47, -2147483647, 48, -2147483647,
49, -2147483647, 52, -2147483647, 53, -2147483647,
54, -2147483647, 55, -2147483647, 56, -2147483647,
57, -2147483647, 58, -2147483647, 59, -2147483647,
62, -2147483647, 63, -2147483647, 64, -2147483647,
65, -2147483647, 66, -2147483647, 67, -2147483647,
68, -2147483647, 69, -2147483647, 72, -2147483647,
73, -2147483647, 74, -2147483647, 75, -2147483647,
76, -2147483647, 77, -2147483647, 78, -2147483647,
79, -2147483647, 82, -2147483647, 83, -2147483647,
84, -2147483647, 85, -2147483647, 86, -2147483647,
87, -2147483647, 88, -2147483647, 89, -2147483647}));
}
TEST(ReferenceTest, RandomJaxReference132) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{2, 1},
{0, 2, 2, -1},
-2147483647,
{2, 2},
{2, 1},
{2, 1},
[](auto a, auto b) { return a >= b ? a : b; });
EXPECT_THAT(res.shape, ElementsAre(10, 10));
EXPECT_THAT(res.data, ElementsAreArray(
{-2147483647, 11, 12, 13, 14, 15, 16, 17, 18, 19,
-2147483647, 21, 22, 23, 24, 25, 26, 27, 28, 29,
-2147483647, 31, 32, 33, 34, 35, 36, 37, 38, 39,
-2147483647, 41, 42, 43, 44, 45, 46, 47, 48, 49,
-2147483647, 51, 52, 53, 54, 55, 56, 57, 58, 59,
-2147483647, 61, 62, 63, 64, 65, 66, 67, 68, 69,
-2147483647, 71, 72, 73, 74, 75, 76, 77, 78, 79,
-2147483647, 81, 82, 83, 84, 85, 86, 87, 88, 89,
-2147483647, 91, 92, 93, 94, 95, 96, 97, 98, 99,
-2147483647, 91, 92, 93, 94, 95, 96, 97, 98, 99}));
}
TEST(ReferenceTest, RandomJaxReference133) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{2, 1},
{0, 2, 1, -1},
2147483646,
{2, 2},
{2, 2},
{2, 2},
[](auto a, auto b) { return a <= b ? a : b; });
EXPECT_THAT(res.shape, ElementsAre(10, 4));
EXPECT_THAT(
res.data,
ElementsAreArray({2, 2, 4, 6, 12, 12, 14, 16, 22, 22, 24, 26, 32, 32,
34, 36, 42, 42, 44, 46, 52, 52, 54, 56, 62, 62, 64, 66,
72, 72, 74, 76, 82, 82, 84, 86, 92, 92, 94, 96}));
}
TEST(ReferenceTest, RandomJaxReference134) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{1, 2},
{-2, 2, 2, 1},
-2147483647,
{2, 1},
{1, 1},
{2, 1},
[](auto a, auto b) { return a >= b ? a : b; });
EXPECT_THAT(res.shape, ElementsAre(5, 22));
EXPECT_THAT(
res.data,
ElementsAreArray(
{-2147483647, -2147483647, 31, -2147483647, 32,
-2147483647, 33, -2147483647, 34, -2147483647,
35, -2147483647, 36, -2147483647, 37,
-2147483647, 38, -2147483647, 39, -2147483647,
40, -2147483647, -2147483647, -2147483647, 51,
-2147483647, 52, -2147483647, 53, -2147483647,
54, -2147483647, 55, -2147483647, 56,
-2147483647, 57, -2147483647, 58, -2147483647,
59, -2147483647, 60, -2147483647, -2147483647,
-2147483647, 71, -2147483647, 72, -2147483647,
73, -2147483647, 74, -2147483647, 75,
-2147483647, 76, -2147483647, 77, -2147483647,
78, -2147483647, 79, -2147483647, 80,
-2147483647, -2147483647, -2147483647, 91, -2147483647,
92, -2147483647, 93, -2147483647, 94,
-2147483647, 95, -2147483647, 96, -2147483647,
97, -2147483647, 98, -2147483647, 99,
-2147483647, 100, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647}));
}
TEST(ReferenceTest, RandomJaxReference135) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{2, 1},
{1, 0, 0, 2},
2147483646,
{1, 1},
{2, 1},
{2, 1},
[](auto a, auto b) { return a <= b ? a : b; });
EXPECT_THAT(res.shape, ElementsAre(10, 12));
EXPECT_THAT(
res.data,
ElementsAreArray(
{2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646}));
}
TEST(ReferenceTest, RandomJaxReference136) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{1, 2},
{0, 1, 0, 0},
2147483646,
{1, 2},
{2, 1},
{1, 2},
[](auto a, auto b) { return a <= b ? a : b; });
EXPECT_THAT(res.shape, ElementsAre(11, 9));
EXPECT_THAT(res.data,
ElementsAreArray(
{1, 2, 3, 4, 5,
6, 7, 8, 9, 11,
12, 13, 14, 15, 16,
17, 18, 19, 21, 22,
23, 24, 25, 26, 27,
28, 29, 31, 32, 33,
34, 35, 36, 37, 38,
39, 41, 42, 43, 44,
45, 46, 47, 48, 49,
51, 52, 53, 54, 55,
56, 57, 58, 59, 61,
62, 63, 64, 65, 66,
67, 68, 69, 71, 72,
73, 74, 75, 76, 77,
78, 79, 81, 82, 83,
84, 85, 86, 87, 88,
89, 91, 92, 93, 94,
95, 96, 97, 98, 99,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646}));
}
TEST(ReferenceTest, RandomJaxReference137) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{1, 1},
{0, -1, 2, -1},
2147483646,
{2, 2},
{2, 2},
{2, 2},
[](auto a, auto b) { return a <= b ? a : b; });
EXPECT_THAT(res.shape, ElementsAre(4, 5));
EXPECT_THAT(res.data,
ElementsAreArray({1, 1, 3, 5, 7, 21, 21, 23, 25, 27,
41, 41, 43, 45, 47, 61, 61, 63, 65, 67}));
}
TEST(ReferenceTest, RandomJaxReference138) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{2, 2},
{0, -1, 1, 2},
-2147483647,
{1, 1},
{2, 2},
{1, 1},
[](auto a, auto b) { return a >= b ? a : b; });
EXPECT_THAT(res.shape, ElementsAre(18, 22));
EXPECT_THAT(
res.data,
ElementsAreArray(
{-2147483647, 1, -2147483647, 2, -2147483647,
3, -2147483647, 4, -2147483647, 5,
-2147483647, 6, -2147483647, 7, -2147483647,
8, -2147483647, 9, -2147483647, 10,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
11, -2147483647, 12, -2147483647, 13,
-2147483647, 14, -2147483647, 15, -2147483647,
16, -2147483647, 17, -2147483647, 18,
-2147483647, 19, -2147483647, 20, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, 21,
-2147483647, 22, -2147483647, 23, -2147483647,
24, -2147483647, 25, -2147483647, 26,
-2147483647, 27, -2147483647, 28, -2147483647,
29, -2147483647, 30, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, 31, -2147483647,
32, -2147483647, 33, -2147483647, 34,
-2147483647, 35, -2147483647, 36, -2147483647,
37, -2147483647, 38, -2147483647, 39,
-2147483647, 40, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, 41, -2147483647, 42,
-2147483647, 43, -2147483647, 44, -2147483647,
45, -2147483647, 46, -2147483647, 47,
-2147483647, 48, -2147483647, 49, -2147483647,
50, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, 51, -2147483647, 52, -2147483647,
53, -2147483647, 54, -2147483647, 55,
-2147483647, 56, -2147483647, 57, -2147483647,
58, -2147483647, 59, -2147483647, 60,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
61, -2147483647, 62, -2147483647, 63,
-2147483647, 64, -2147483647, 65, -2147483647,
66, -2147483647, 67, -2147483647, 68,
-2147483647, 69, -2147483647, 70, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, 71,
-2147483647, 72, -2147483647, 73, -2147483647,
74, -2147483647, 75, -2147483647, 76,
-2147483647, 77, -2147483647, 78, -2147483647,
79, -2147483647, 80, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, 81, -2147483647,
82, -2147483647, 83, -2147483647, 84,
-2147483647, 85, -2147483647, 86, -2147483647,
87, -2147483647, 88, -2147483647, 89,
-2147483647, 90, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647}));
}
TEST(ReferenceTest, RandomJaxReference139) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{1, 2},
{1, 2, 0, 2},
1,
{2, 2},
{2, 2},
{2, 2},
std::multiplies<>());
EXPECT_THAT(res.shape, ElementsAre(6, 10));
EXPECT_THAT(
res.data,
ElementsAreArray(
{132, 156, 182, 210, 240, 272, 306,
342, 380, 20, 130944, 164736, 204204, 249900,
302400, 362304, 430236, 506844, 592800, 800, 2630784,
2910336, 3211164, 3534300, 3880800, 4251744, 4648236, 5071404,
5522400, 2400, 13557024, 14485536, 15460524, 16483500, 17556000,
18679584, 19855836, 21086364, 22372800, 4800, 42797664, 44970336,
47224284, 49561500, 51984000, 54493824, 57093036, 59783724, 62568000,
8000, 8372, 8556, 8742, 8930, 9120, 9312,
9506, 9702, 9900, 100}));
}
TEST(ReferenceTest, RandomJaxReference140) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{2, 2},
{-2, 0, -1, -2},
1,
{2, 1},
{2, 1},
{1, 2},
std::multiplies<>());
EXPECT_THAT(res.shape, ElementsAre(15, 8));
EXPECT_THAT(
res.data,
ElementsAreArray({1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}));
}
TEST(ReferenceTest, RandomJaxReference141) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{1, 1},
{-2, -2, 1, 1},
-2147483647,
{1, 1},
{2, 1},
{2, 2},
[](auto a, auto b) { return a >= b ? a : b; });
EXPECT_THAT(res.shape, ElementsAre(3, 6));
EXPECT_THAT(res.data, ElementsAreArray({-2147483647, 22, 24, 26, 28, 30,
-2147483647, 42, 44, 46, 48, 50,
-2147483647, 62, 64, 66, 68, 70}));
}
TEST(ReferenceTest, RandomJaxReference142) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{2, 1},
{1, 0, 0, -1},
1,
{2, 1},
{2, 2},
{1, 1},
std::multiplies<>());
EXPECT_THAT(res.shape, ElementsAre(18, 9));
EXPECT_THAT(
res.data,
ElementsAreArray(
{1, 1, 1, 1, 1, 1, 1, 1, 1, 11, 24,
39, 56, 75, 96, 119, 144, 171, 1, 1, 1, 1,
1, 1, 1, 1, 1, 231, 264, 299, 336, 375, 416,
459, 504, 551, 1, 1, 1, 1, 1, 1, 1, 1,
1, 651, 704, 759, 816, 875, 936, 999, 1064, 1131, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1271, 1344, 1419,
1496, 1575, 1656, 1739, 1824, 1911, 1, 1, 1, 1, 1,
1, 1, 1, 1, 2091, 2184, 2279, 2376, 2475, 2576, 2679,
2784, 2891, 1, 1, 1, 1, 1, 1, 1, 1, 1,
3111, 3224, 3339, 3456, 3575, 3696, 3819, 3944, 4071, 1, 1,
1, 1, 1, 1, 1, 1, 1, 4331, 4464, 4599, 4736,
4875, 5016, 5159, 5304, 5451, 1, 1, 1, 1, 1, 1,
1, 1, 1, 5751, 5904, 6059, 6216, 6375, 6536, 6699, 6864,
7031, 1, 1, 1, 1, 1, 1, 1, 1, 1, 7371,
7544, 7719, 7896, 8075, 8256, 8439, 8624, 8811}));
}
TEST(ReferenceTest, RandomJaxReference143) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{1, 2},
{2, -1, -2, -1},
-2147483647,
{2, 1},
{2, 2},
{2, 2},
[](auto a, auto b) { return a >= b ? a : b; });
EXPECT_THAT(res.shape, ElementsAre(5, 8));
EXPECT_THAT(
res.data,
ElementsAreArray({2, 3, 4, 5, 6, 7, 8, 9, 22, 23, 24, 25, 26, 27,
28, 29, 42, 43, 44, 45, 46, 47, 48, 49, 62, 63, 64, 65,
66, 67, 68, 69, 82, 83, 84, 85, 86, 87, 88, 89}));
}
TEST(ReferenceTest, RandomJaxReference144) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{1, 2},
{0, 1, 2, 2},
2147483646,
{1, 1},
{1, 1},
{2, 1},
[](auto a, auto b) { return a <= b ? a : b; });
EXPECT_THAT(res.shape, ElementsAre(6, 23));
EXPECT_THAT(res.data,
ElementsAreArray(
{2147483646, 2147483646, 1, 2147483646, 2,
2147483646, 3, 2147483646, 4, 2147483646,
5, 2147483646, 6, 2147483646, 7,
2147483646, 8, 2147483646, 9, 2147483646,
10, 2147483646, 2147483646, 2147483646, 2147483646,
21, 2147483646, 22, 2147483646, 23,
2147483646, 24, 2147483646, 25, 2147483646,
26, 2147483646, 27, 2147483646, 28,
2147483646, 29, 2147483646, 30, 2147483646,
2147483646, 2147483646, 2147483646, 41, 2147483646,
42, 2147483646, 43, 2147483646, 44,
2147483646, 45, 2147483646, 46, 2147483646,
47, 2147483646, 48, 2147483646, 49,
2147483646, 50, 2147483646, 2147483646, 2147483646,
2147483646, 61, 2147483646, 62, 2147483646,
63, 2147483646, 64, 2147483646, 65,
2147483646, 66, 2147483646, 67, 2147483646,
68, 2147483646, 69, 2147483646, 70,
2147483646, 2147483646, 2147483646, 2147483646, 81,
2147483646, 82, 2147483646, 83, 2147483646,
84, 2147483646, 85, 2147483646, 86,
2147483646, 87, 2147483646, 88, 2147483646,
89, 2147483646, 90, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646}));
}
TEST(ReferenceTest, RandomJaxReference145) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{2, 1},
{2, -2, 2, -2},
1,
{1, 2},
{2, 1},
{2, 2},
std::multiplies<>());
EXPECT_THAT(res.shape, ElementsAre(10, 5));
EXPECT_THAT(
res.data,
ElementsAreArray({1, 1, 1, 1, 1, 1, 2, 12, 30, 56,
1, 132, 182, 240, 306, 1, 462, 552, 650, 756,
1, 992, 1122, 1260, 1406, 1, 1722, 1892, 2070, 2256,
1, 2652, 2862, 3080, 3306, 1, 3782, 4032, 4290, 4556,
1, 5112, 5402, 5700, 6006, 1, 6642, 6972, 7310, 7656}));
}
TEST(ReferenceTest, RandomJaxReference146) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{2, 1},
{1, -2, 1, 0},
2147483646,
{2, 1},
{1, 2},
{1, 2},
[](auto a, auto b) { return a <= b ? a : b; });
EXPECT_THAT(res.shape, ElementsAre(17, 6));
EXPECT_THAT(
res.data,
ElementsAreArray(
{2147483646, 2, 4, 6, 8, 10, 2147483646, 2, 4, 6, 8, 10,
2147483646, 12, 14, 16, 18, 20, 2147483646, 12, 14, 16, 18, 20,
2147483646, 22, 24, 26, 28, 30, 2147483646, 22, 24, 26, 28, 30,
2147483646, 32, 34, 36, 38, 40, 2147483646, 32, 34, 36, 38, 40,
2147483646, 42, 44, 46, 48, 50, 2147483646, 42, 44, 46, 48, 50,
2147483646, 52, 54, 56, 58, 60, 2147483646, 52, 54, 56, 58, 60,
2147483646, 62, 64, 66, 68, 70, 2147483646, 62, 64, 66, 68, 70,
2147483646, 72, 74, 76, 78, 80, 2147483646, 72, 74, 76, 78, 80,
2147483646, 82, 84, 86, 88, 90}));
}
TEST(ReferenceTest, RandomJaxReference147) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{2, 1},
{-2, 0, 2, 0},
0,
{2, 2},
{1, 2},
{2, 2},
std::plus<>());
EXPECT_THAT(res.shape, ElementsAre(8, 5));
EXPECT_THAT(res.data, ElementsAreArray(
{11, 24, 28, 32, 36, 21, 44, 48, 52, 56,
31, 64, 68, 72, 76, 41, 84, 88, 92, 96,
51, 104, 108, 112, 116, 61, 124, 128, 132, 136,
71, 144, 148, 152, 156, 81, 164, 168, 172, 176}));
}
TEST(ReferenceTest, RandomJaxReference148) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{2, 2},
{1, -2, 2, 1},
1,
{2, 1},
{1, 1},
{1, 1},
std::multiplies<>());
EXPECT_THAT(res.shape, ElementsAre(17, 22));
EXPECT_THAT(
res.data,
ElementsAreArray(
{1, 1, 1, 1, 2, 1, 3, 1, 4, 1, 5, 1, 6, 1, 7, 1, 8, 1, 9, 1,
10, 1, 1, 1, 1, 1, 2, 1, 3, 1, 4, 1, 5, 1, 6, 1, 7, 1, 8, 1,
9, 1, 10, 1, 1, 1, 11, 1, 12, 1, 13, 1, 14, 1, 15, 1, 16, 1, 17, 1,
18, 1, 19, 1, 20, 1, 1, 1, 11, 1, 12, 1, 13, 1, 14, 1, 15, 1, 16, 1,
17, 1, 18, 1, 19, 1, 20, 1, 1, 1, 21, 1, 22, 1, 23, 1, 24, 1, 25, 1,
26, 1, 27, 1, 28, 1, 29, 1, 30, 1, 1, 1, 21, 1, 22, 1, 23, 1, 24, 1,
25, 1, 26, 1, 27, 1, 28, 1, 29, 1, 30, 1, 1, 1, 31, 1, 32, 1, 33, 1,
34, 1, 35, 1, 36, 1, 37, 1, 38, 1, 39, 1, 40, 1, 1, 1, 31, 1, 32, 1,
33, 1, 34, 1, 35, 1, 36, 1, 37, 1, 38, 1, 39, 1, 40, 1, 1, 1, 41, 1,
42, 1, 43, 1, 44, 1, 45, 1, 46, 1, 47, 1, 48, 1, 49, 1, 50, 1, 1, 1,
41, 1, 42, 1, 43, 1, 44, 1, 45, 1, 46, 1, 47, 1, 48, 1, 49, 1, 50, 1,
1, 1, 51, 1, 52, 1, 53, 1, 54, 1, 55, 1, 56, 1, 57, 1, 58, 1, 59, 1,
60, 1, 1, 1, 51, 1, 52, 1, 53, 1, 54, 1, 55, 1, 56, 1, 57, 1, 58, 1,
59, 1, 60, 1, 1, 1, 61, 1, 62, 1, 63, 1, 64, 1, 65, 1, 66, 1, 67, 1,
68, 1, 69, 1, 70, 1, 1, 1, 61, 1, 62, 1, 63, 1, 64, 1, 65, 1, 66, 1,
67, 1, 68, 1, 69, 1, 70, 1, 1, 1, 71, 1, 72, 1, 73, 1, 74, 1, 75, 1,
76, 1, 77, 1, 78, 1, 79, 1, 80, 1, 1, 1, 71, 1, 72, 1, 73, 1, 74, 1,
75, 1, 76, 1, 77, 1, 78, 1, 79, 1, 80, 1, 1, 1, 81, 1, 82, 1, 83, 1,
84, 1, 85, 1, 86, 1, 87, 1, 88, 1, 89, 1, 90, 1}));
}
TEST(ReferenceTest, RandomJaxReference149) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{1, 1},
{-1, -2, -2, 2},
-2147483647,
{2, 1},
{1, 1},
{1, 2},
[](auto a, auto b) { return a >= b ? a : b; });
EXPECT_THAT(res.shape, ElementsAre(6, 5));
EXPECT_THAT(res.data,
ElementsAreArray(
{23, 25, 27, 29, -2147483647, 33, 35, 37, 39, -2147483647,
43, 45, 47, 49, -2147483647, 53, 55, 57, 59, -2147483647,
63, 65, 67, 69, -2147483647, 73, 75, 77, 79, -2147483647}));
}
TEST(ReferenceTest, RandomJaxReference150) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{1, 2},
{2, -1, -2, 0},
0,
{1, 1},
{2, 2},
{2, 1},
std::plus<>());
EXPECT_THAT(res.shape, ElementsAre(6, 17));
EXPECT_THAT(
res.data,
ElementsAreArray({0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 2, 0, 3, 0, 4, 0, 5, 0, 6, 0, 7, 0, 8,
0, 9, 0, 10, 22, 0, 23, 0, 24, 0, 25, 0, 26, 0, 27,
0, 28, 0, 29, 0, 30, 42, 0, 43, 0, 44, 0, 45, 0, 46,
0, 47, 0, 48, 0, 49, 0, 50, 62, 0, 63, 0, 64, 0, 65,
0, 66, 0, 67, 0, 68, 0, 69, 0, 70, 82, 0, 83, 0, 84,
0, 85, 0, 86, 0, 87, 0, 88, 0, 89, 0, 90}));
}
TEST(ReferenceTest, RandomJaxReference151) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{2, 2},
{2, 1, 2, -1},
1,
{2, 2},
{2, 1},
{2, 1},
std::multiplies<>());
EXPECT_THAT(res.shape, ElementsAre(10, 19));
EXPECT_THAT(
res.data,
ElementsAreArray(
{1, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5,
6, 6, 7, 7, 8, 8, 9, 9, 1, 11, 11,
24, 24, 39, 39, 56, 56, 75, 75, 96, 96, 119,
119, 144, 144, 171, 171, 1, 231, 231, 264, 264, 299,
299, 336, 336, 375, 375, 416, 416, 459, 459, 504, 504,
551, 551, 1, 651, 651, 704, 704, 759, 759, 816, 816,
875, 875, 936, 936, 999, 999, 1064, 1064, 1131, 1131, 1,
1271, 1271, 1344, 1344, 1419, 1419, 1496, 1496, 1575, 1575, 1656,
1656, 1739, 1739, 1824, 1824, 1911, 1911, 1, 2091, 2091, 2184,
2184, 2279, 2279, 2376, 2376, 2475, 2475, 2576, 2576, 2679, 2679,
2784, 2784, 2891, 2891, 1, 3111, 3111, 3224, 3224, 3339, 3339,
3456, 3456, 3575, 3575, 3696, 3696, 3819, 3819, 3944, 3944, 4071,
4071, 1, 4331, 4331, 4464, 4464, 4599, 4599, 4736, 4736, 4875,
4875, 5016, 5016, 5159, 5159, 5304, 5304, 5451, 5451, 1, 5751,
5751, 5904, 5904, 6059, 6059, 6216, 6216, 6375, 6375, 6536, 6536,
6699, 6699, 6864, 6864, 7031, 7031, 1, 7371, 7371, 7544, 7544,
7719, 7719, 7896, 7896, 8075, 8075, 8256, 8256, 8439, 8439, 8624,
8624, 8811, 8811}));
}
TEST(ReferenceTest, RandomJaxReference152) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{2, 1},
{-1, 2, -2, 1},
1,
{2, 2},
{2, 2},
{2, 1},
std::multiplies<>());
EXPECT_THAT(res.shape, ElementsAre(9, 7));
EXPECT_THAT(res.data,
ElementsAreArray({1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}));
}
TEST(ReferenceTest, RandomJaxReference153) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{2, 1},
{-2, 2, -1, 2},
1,
{2, 2},
{2, 2},
{2, 2},
std::multiplies<>());
EXPECT_THAT(res.shape, ElementsAre(9, 5));
EXPECT_THAT(
res.data,
ElementsAreArray(
{88704, 139776, 209664, 302400, 600, 574464, 763776,
995904, 1276800, 1200, 2010624, 2477376, 3020544, 3648000,
2000, 5189184, 6120576, 7171584, 8352000, 3000, 11142144,
12773376, 14577024, 16564800, 4200, 21141504, 23755776, 26604864,
29702400, 5600, 36699264, 40627776, 44863104, 49420800, 7200,
59567424, 65189376, 71199744, 77616000, 9000, 8648, 9024,
9408, 9800, 100}));
}
TEST(ReferenceTest, RandomJaxReference154) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{1, 2},
{2, 2, -1, 0},
-2147483647,
{1, 1},
{2, 2},
{2, 1},
[](auto a, auto b) { return a >= b ? a : b; });
EXPECT_THAT(res.shape, ElementsAre(7, 18));
EXPECT_THAT(
res.data,
ElementsAreArray(
{-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, 2,
-2147483647, 3, -2147483647, 4, -2147483647,
5, -2147483647, 6, -2147483647, 7,
-2147483647, 8, -2147483647, 9, -2147483647,
10, -2147483647, 22, -2147483647, 23,
-2147483647, 24, -2147483647, 25, -2147483647,
26, -2147483647, 27, -2147483647, 28,
-2147483647, 29, -2147483647, 30, -2147483647,
42, -2147483647, 43, -2147483647, 44,
-2147483647, 45, -2147483647, 46, -2147483647,
47, -2147483647, 48, -2147483647, 49,
-2147483647, 50, -2147483647, 62, -2147483647,
63, -2147483647, 64, -2147483647, 65,
-2147483647, 66, -2147483647, 67, -2147483647,
68, -2147483647, 69, -2147483647, 70,
-2147483647, 82, -2147483647, 83, -2147483647,
84, -2147483647, 85, -2147483647, 86,
-2147483647, 87, -2147483647, 88, -2147483647,
89, -2147483647, 90, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647}));
}
TEST(ReferenceTest, RandomJaxReference155) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{2, 1},
{0, 2, 2, 1},
0,
{1, 2},
{1, 1},
{2, 2},
std::plus<>());
EXPECT_THAT(res.shape, ElementsAre(11, 6));
EXPECT_THAT(
res.data,
ElementsAreArray({0, 3, 7, 11, 15, 19, 0, 23, 27, 31, 35,
39, 0, 43, 47, 51, 55, 59, 0, 63, 67, 71,
75, 79, 0, 83, 87, 91, 95, 99, 0, 103, 107,
111, 115, 119, 0, 123, 127, 131, 135, 139, 0, 143,
147, 151, 155, 159, 0, 163, 167, 171, 175, 179, 0,
183, 187, 191, 195, 199, 0, 0, 0, 0, 0, 0}));
}
TEST(ReferenceTest, RandomJaxReference156) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{1, 1},
{2, -1, -1, -1},
-2147483647,
{1, 2},
{1, 2},
{1, 2},
[](auto a, auto b) { return a >= b ? a : b; });
EXPECT_THAT(res.shape, ElementsAre(11, 3));
EXPECT_THAT(
res.data,
ElementsAreArray(
{-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, 4, 6, 8, 14,
16, 18, 24, 26, 28,
34, 36, 38, 44, 46,
48, 54, 56, 58, 64,
66, 68, 74, 76, 78,
84, 86, 88}));
}
TEST(ReferenceTest, RandomJaxReference157) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{2, 1},
{-1, -1, -2, -1},
-2147483647,
{2, 1},
{1, 2},
{1, 1},
[](auto a, auto b) { return a >= b ? a : b; });
EXPECT_THAT(res.shape, ElementsAre(16, 7));
EXPECT_THAT(
res.data,
ElementsAreArray(
{13, 14, 15, 16, 17, 18, 19, 13, 14, 15, 16, 17, 18, 19, 23, 24,
25, 26, 27, 28, 29, 23, 24, 25, 26, 27, 28, 29, 33, 34, 35, 36,
37, 38, 39, 33, 34, 35, 36, 37, 38, 39, 43, 44, 45, 46, 47, 48,
49, 43, 44, 45, 46, 47, 48, 49, 53, 54, 55, 56, 57, 58, 59, 53,
54, 55, 56, 57, 58, 59, 63, 64, 65, 66, 67, 68, 69, 63, 64, 65,
66, 67, 68, 69, 73, 74, 75, 76, 77, 78, 79, 73, 74, 75, 76, 77,
78, 79, 83, 84, 85, 86, 87, 88, 89, 83, 84, 85, 86, 87, 88, 89}));
}
TEST(ReferenceTest, RandomJaxReference158) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{2, 1},
{2, -2, 2, 0},
0,
{2, 1},
{1, 1},
{2, 2},
std::plus<>());
EXPECT_THAT(res.shape, ElementsAre(9, 6));
EXPECT_THAT(
res.data,
ElementsAreArray({0, 0, 0, 0, 0, 0, 0, 1, 3, 5, 7, 9, 0, 11,
13, 15, 17, 19, 0, 21, 23, 25, 27, 29, 0, 31, 33, 35,
37, 39, 0, 41, 43, 45, 47, 49, 0, 51, 53, 55, 57, 59,
0, 61, 63, 65, 67, 69, 0, 71, 73, 75, 77, 79}));
}
TEST(ReferenceTest, RandomJaxReference159) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{1, 1},
{1, 2, -1, 1},
2147483646,
{1, 2},
{2, 1},
{1, 1},
[](auto a, auto b) { return a <= b ? a : b; });
EXPECT_THAT(res.shape, ElementsAre(13, 9));
EXPECT_THAT(res.data,
ElementsAreArray(
{2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2,
3, 4, 5, 6, 7,
8, 9, 10, 12, 13,
14, 15, 16, 17, 18,
19, 20, 22, 23, 24,
25, 26, 27, 28, 29,
30, 32, 33, 34, 35,
36, 37, 38, 39, 40,
42, 43, 44, 45, 46,
47, 48, 49, 50, 52,
53, 54, 55, 56, 57,
58, 59, 60, 62, 63,
64, 65, 66, 67, 68,
69, 70, 72, 73, 74,
75, 76, 77, 78, 79,
80, 82, 83, 84, 85,
86, 87, 88, 89, 90,
92, 93, 94, 95, 96,
97, 98, 99, 100, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646}));
}
TEST(ReferenceTest, RandomJaxReference160) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{1, 1},
{-1, 0, -2, 1},
0,
{2, 2},
{1, 1},
{2, 2},
std::plus<>());
EXPECT_THAT(res.shape, ElementsAre(4, 4));
EXPECT_THAT(res.data,
ElementsAreArray({74, 82, 90, 98, 154, 162, 170, 178, 234, 242,
250, 258, 314, 322, 330, 338}));
}
TEST(ReferenceTest, RandomJaxReference161) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{2, 1},
{0, 2, -1, 1},
0,
{1, 2},
{1, 1},
{2, 2},
std::plus<>());
EXPECT_THAT(res.shape, ElementsAre(11, 5));
EXPECT_THAT(
res.data,
ElementsAreArray({5, 9, 13, 17, 10, 25, 29, 33, 37, 20, 45,
49, 53, 57, 30, 65, 69, 73, 77, 40, 85, 89,
93, 97, 50, 105, 109, 113, 117, 60, 125, 129, 133,
137, 70, 145, 149, 153, 157, 80, 165, 169, 173, 177,
90, 185, 189, 193, 197, 100, 0, 0, 0, 0, 0}));
}
TEST(ReferenceTest, RandomJaxReference162) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{1, 2},
{2, -1, -1, 0},
0,
{2, 2},
{1, 1},
{1, 1},
std::plus<>());
EXPECT_THAT(res.shape, ElementsAre(10, 17));
EXPECT_THAT(
res.data,
ElementsAreArray(
{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7,
7, 8, 8, 9, 9, 10, 14, 14, 16, 16, 18, 18, 20, 20,
22, 22, 24, 24, 26, 26, 28, 28, 30, 34, 34, 36, 36, 38,
38, 40, 40, 42, 42, 44, 44, 46, 46, 48, 48, 50, 54, 54,
56, 56, 58, 58, 60, 60, 62, 62, 64, 64, 66, 66, 68, 68,
70, 74, 74, 76, 76, 78, 78, 80, 80, 82, 82, 84, 84, 86,
86, 88, 88, 90, 94, 94, 96, 96, 98, 98, 100, 100, 102, 102,
104, 104, 106, 106, 108, 108, 110, 114, 114, 116, 116, 118, 118, 120,
120, 122, 122, 124, 124, 126, 126, 128, 128, 130, 134, 134, 136, 136,
138, 138, 140, 140, 142, 142, 144, 144, 146, 146, 148, 148, 150, 154,
154, 156, 156, 158, 158, 160, 160, 162, 162, 164, 164, 166, 166, 168,
168, 170}));
}
TEST(ReferenceTest, RandomJaxReference163) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{1, 1},
{0, 0, 0, 2},
0,
{1, 1},
{1, 1},
{1, 1},
std::plus<>());
EXPECT_THAT(res.shape, ElementsAre(10, 12));
EXPECT_THAT(
res.data,
ElementsAreArray({1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 0, 0, 11, 12,
13, 14, 15, 16, 17, 18, 19, 20, 0, 0, 21, 22, 23, 24,
25, 26, 27, 28, 29, 30, 0, 0, 31, 32, 33, 34, 35, 36,
37, 38, 39, 40, 0, 0, 41, 42, 43, 44, 45, 46, 47, 48,
49, 50, 0, 0, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60,
0, 0, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 0, 0,
71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 0, 0, 81, 82,
83, 84, 85, 86, 87, 88, 89, 90, 0, 0, 91, 92, 93, 94,
95, 96, 97, 98, 99, 100, 0, 0}));
}
TEST(ReferenceTest, RandomJaxReference164) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{1, 1},
{-1, 0, 2, 1},
-2147483647,
{1, 1},
{1, 2},
{1, 2},
[](auto a, auto b) { return a >= b ? a : b; });
EXPECT_THAT(res.shape, ElementsAre(9, 7));
EXPECT_THAT(res.data,
ElementsAreArray({-2147483647, 11, 13, 15, 17, 19, -2147483647,
-2147483647, 21, 23, 25, 27, 29, -2147483647,
-2147483647, 31, 33, 35, 37, 39, -2147483647,
-2147483647, 41, 43, 45, 47, 49, -2147483647,
-2147483647, 51, 53, 55, 57, 59, -2147483647,
-2147483647, 61, 63, 65, 67, 69, -2147483647,
-2147483647, 71, 73, 75, 77, 79, -2147483647,
-2147483647, 81, 83, 85, 87, 89, -2147483647,
-2147483647, 91, 93, 95, 97, 99, -2147483647}));
}
TEST(ReferenceTest, RandomJaxReference165) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{1, 2},
{2, -2, 2, -1},
1,
{1, 2},
{1, 2},
{2, 1},
std::multiplies<>());
EXPECT_THAT(res.shape, ElementsAre(5, 18));
EXPECT_THAT(
res.data,
ElementsAreArray({1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 6, 1,
12, 1, 20, 1, 30, 1, 42, 1, 56, 1, 72, 1,
21, 1, 462, 1, 506, 1, 552, 1, 600, 1, 650, 1,
702, 1, 756, 1, 812, 1, 41, 1, 1722, 1, 1806, 1,
1892, 1, 1980, 1, 2070, 1, 2162, 1, 2256, 1, 2352, 1,
61, 1, 3782, 1, 3906, 1, 4032, 1, 4160, 1, 4290, 1,
4422, 1, 4556, 1, 4692, 1}));
}
TEST(ReferenceTest, RandomJaxReference166) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{1, 1},
{1, -1, 0, -2},
-2147483647,
{2, 2},
{2, 2},
{1, 2},
[](auto a, auto b) { return a >= b ? a : b; });
EXPECT_THAT(res.shape, ElementsAre(8, 3));
EXPECT_THAT(res.data, ElementsAreArray({13, 15, 17, 23, 25, 27, 33, 35,
37, 43, 45, 47, 53, 55, 57, 63,
65, 67, 73, 75, 77, 83, 85, 87}));
}
TEST(ReferenceTest, RandomJaxReference167) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{2, 1},
{-2, 2, 0, 1},
0,
{2, 2},
{2, 1},
{2, 2},
std::plus<>());
EXPECT_THAT(res.shape, ElementsAre(9, 5));
EXPECT_THAT(res.data,
ElementsAreArray({66, 74, 82, 90, 98, 106, 114, 122, 130,
138, 146, 154, 162, 170, 178, 186, 194, 202,
210, 218, 226, 234, 242, 250, 258, 266, 274,
282, 290, 298, 306, 314, 322, 330, 338, 346,
354, 362, 370, 378, 183, 187, 191, 195, 199}));
}
TEST(ReferenceTest, RandomJaxReference168) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{1, 2},
{0, -1, -1, -2},
-2147483647,
{2, 2},
{2, 2},
{2, 2},
[](auto a, auto b) { return a >= b ? a : b; });
EXPECT_THAT(res.shape, ElementsAre(4, 7));
EXPECT_THAT(
res.data,
ElementsAreArray({-2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647}));
}
TEST(ReferenceTest, RandomJaxReference169) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{1, 2},
{1, -2, 0, 1},
1,
{1, 2},
{2, 2},
{1, 2},
std::multiplies<>());
EXPECT_THAT(res.shape, ElementsAre(9, 9));
EXPECT_THAT(
res.data,
ElementsAreArray({1, 1, 1, 1, 1, 1, 1, 1, 1,
2, 6, 12, 20, 30, 42, 56, 72, 90,
132, 156, 182, 210, 240, 272, 306, 342, 380,
462, 506, 552, 600, 650, 702, 756, 812, 870,
992, 1056, 1122, 1190, 1260, 1332, 1406, 1482, 1560,
1722, 1806, 1892, 1980, 2070, 2162, 2256, 2352, 2450,
2652, 2756, 2862, 2970, 3080, 3192, 3306, 3422, 3540,
3782, 3906, 4032, 4160, 4290, 4422, 4556, 4692, 4830,
5112, 5256, 5402, 5550, 5700, 5852, 6006, 6162, 6320}));
}
TEST(ReferenceTest, RandomJaxReference170) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{1, 2},
{2, -1, 0, -1},
1,
{1, 1},
{2, 1},
{2, 1},
std::multiplies<>());
EXPECT_THAT(res.shape, ElementsAre(6, 18));
EXPECT_THAT(
res.data,
ElementsAreArray({1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 2, 1, 3, 1, 4, 1, 5, 1, 6, 1, 7, 1,
8, 1, 9, 1, 21, 1, 22, 1, 23, 1, 24, 1, 25, 1, 26, 1,
27, 1, 28, 1, 29, 1, 41, 1, 42, 1, 43, 1, 44, 1, 45, 1,
46, 1, 47, 1, 48, 1, 49, 1, 61, 1, 62, 1, 63, 1, 64, 1,
65, 1, 66, 1, 67, 1, 68, 1, 69, 1, 81, 1, 82, 1, 83, 1,
84, 1, 85, 1, 86, 1, 87, 1, 88, 1, 89, 1}));
}
TEST(ReferenceTest, RandomJaxReference171) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{1, 2},
{0, -2, 2, 0},
1,
{2, 2},
{1, 1},
{1, 2},
std::multiplies<>());
EXPECT_THAT(res.shape, ElementsAre(7, 10));
EXPECT_THAT(res.data,
ElementsAreArray(
{1, 11, 24, 39, 56, 75, 96, 119, 144, 171,
1, 231, 264, 299, 336, 375, 416, 459, 504, 551,
1, 651, 704, 759, 816, 875, 936, 999, 1064, 1131,
1, 1271, 1344, 1419, 1496, 1575, 1656, 1739, 1824, 1911,
1, 2091, 2184, 2279, 2376, 2475, 2576, 2679, 2784, 2891,
1, 3111, 3224, 3339, 3456, 3575, 3696, 3819, 3944, 4071,
1, 4331, 4464, 4599, 4736, 4875, 5016, 5159, 5304, 5451}));
}
TEST(ReferenceTest, RandomJaxReference172) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{2, 2},
{-1, 1, 2, 2},
0,
{1, 1},
{2, 1},
{2, 2},
std::plus<>());
EXPECT_THAT(res.shape, ElementsAre(10, 12));
EXPECT_THAT(
res.data,
ElementsAreArray({0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}));
}
TEST(ReferenceTest, RandomJaxReference173) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{2, 2},
{1, 1, 1, 0},
-2147483647,
{1, 2},
{1, 1},
{1, 2},
[](auto a, auto b) { return a >= b ? a : b; });
EXPECT_THAT(res.shape, ElementsAre(21, 10));
EXPECT_THAT(
res.data,
ElementsAreArray(
{-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
1, 2, 3, 4, 5,
6, 7, 8, 9, 10,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
11, 12, 13, 14, 15,
16, 17, 18, 19, 20,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
21, 22, 23, 24, 25,
26, 27, 28, 29, 30,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
31, 32, 33, 34, 35,
36, 37, 38, 39, 40,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
41, 42, 43, 44, 45,
46, 47, 48, 49, 50,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
51, 52, 53, 54, 55,
56, 57, 58, 59, 60,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
61, 62, 63, 64, 65,
66, 67, 68, 69, 70,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
71, 72, 73, 74, 75,
76, 77, 78, 79, 80,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
81, 82, 83, 84, 85,
86, 87, 88, 89, 90,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
91, 92, 93, 94, 95,
96, 97, 98, 99, 100,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647}));
}
TEST(ReferenceTest, RandomJaxReference174) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{2, 2},
{0, -1, -2, -1},
2147483646,
{1, 2},
{1, 2},
{1, 2},
[](auto a, auto b) { return a <= b ? a : b; });
EXPECT_THAT(res.shape, ElementsAre(18, 7));
EXPECT_THAT(res.data,
ElementsAreArray(
{2, 3, 4, 5, 6,
7, 8, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 12,
13, 14, 15, 16, 17,
18, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 22, 23,
24, 25, 26, 27, 28,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 32, 33, 34,
35, 36, 37, 38, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 42, 43, 44, 45,
46, 47, 48, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
52, 53, 54, 55, 56,
57, 58, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 62,
63, 64, 65, 66, 67,
68, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 72, 73,
74, 75, 76, 77, 78,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 82, 83, 84,
85, 86, 87, 88, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646}));
}
TEST(ReferenceTest, RandomJaxReference175) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{1, 2},
{-2, 2, 0, 0},
1,
{2, 2},
{1, 2},
{1, 2},
std::multiplies<>());
EXPECT_THAT(res.shape, ElementsAre(9, 9));
EXPECT_THAT(
res.data,
ElementsAreArray(
{458304, 534336, 619344, 714000, 819000, 935064, 1062936,
1203384, 1357200, 1708224, 1907136, 2122824, 2356200, 2608200,
2879784, 3171936, 3485664, 3822000, 4566744, 4977336, 5414904,
5880600, 6375600, 6901104, 7458336, 8048544, 8673000, 10029864,
10764936, 11539584, 12355200, 13213200, 14115024, 15062136, 16056024,
17098200, 19333584, 20529936, 21780864, 23088000, 24453000, 25877544,
27363336, 28912104, 30525600, 33953904, 35772336, 37662744, 39627000,
41667000, 43784664, 45981936, 48260784, 50623200, 55606824, 58232136,
60949224, 63760200, 66667200, 69672384, 72777936, 75986064, 79299000,
8372, 8556, 8742, 8930, 9120, 9312, 9506,
9702, 9900, 1, 1, 1, 1, 1,
1, 1, 1, 1}));
}
TEST(ReferenceTest, RandomJaxReference176) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{2, 1},
{1, -2, -1, 2},
2147483646,
{2, 2},
{2, 1},
{2, 1},
[](auto a, auto b) { return a <= b ? a : b; });
EXPECT_THAT(res.shape, ElementsAre(8, 10));
EXPECT_THAT(
res.data,
ElementsAreArray(
{2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646}));
}
TEST(ReferenceTest, RandomJaxReference177) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{2, 1},
{0, 1, 0, 2},
0,
{1, 2},
{2, 2},
{2, 2},
std::plus<>());
EXPECT_THAT(res.shape, ElementsAre(10, 5));
EXPECT_THAT(res.data, ElementsAreArray(
{4, 8, 12, 16, 9, 24, 28, 32, 36, 19,
44, 48, 52, 56, 29, 64, 68, 72, 76, 39,
84, 88, 92, 96, 49, 104, 108, 112, 116, 59,
124, 128, 132, 136, 69, 144, 148, 152, 156, 79,
164, 168, 172, 176, 89, 184, 188, 192, 196, 99}));
}
TEST(ReferenceTest, RandomJaxReference178) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{1, 1},
{2, 1, 2, 1},
2147483646,
{1, 2},
{2, 2},
{2, 1},
[](auto a, auto b) { return a <= b ? a : b; });
EXPECT_THAT(res.shape, ElementsAre(7, 11));
EXPECT_THAT(res.data,
ElementsAreArray(
{2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 1, 2, 1, 2,
3, 4, 5, 6, 7,
8, 9, 21, 22, 21,
22, 23, 24, 25, 26,
27, 28, 29, 41, 42,
41, 42, 43, 44, 45,
46, 47, 48, 49, 61,
62, 61, 62, 63, 64,
65, 66, 67, 68, 69,
81, 82, 81, 82, 83,
84, 85, 86, 87, 88,
89, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646}));
}
TEST(ReferenceTest, RandomJaxReference179) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{1, 2},
{-2, -2, 2, 0},
1,
{1, 1},
{1, 2},
{2, 2},
std::multiplies<>());
EXPECT_THAT(res.shape, ElementsAre(3, 11));
EXPECT_THAT(res.data,
ElementsAreArray({1, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30,
1, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50,
1, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70}));
}
TEST(ReferenceTest, RandomJaxReference180) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{1, 1},
{1, -2, 1, 0},
-2147483647,
{1, 1},
{1, 1},
{1, 2},
[](auto a, auto b) { return a >= b ? a : b; });
EXPECT_THAT(res.shape, ElementsAre(9, 6));
EXPECT_THAT(
res.data,
ElementsAreArray(
{-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, 2, 4, 6,
8, 10, -2147483647, 12, 14,
16, 18, 20, -2147483647, 22,
24, 26, 28, 30, -2147483647,
32, 34, 36, 38, 40,
-2147483647, 42, 44, 46, 48,
50, -2147483647, 52, 54, 56,
58, 60, -2147483647, 62, 64,
66, 68, 70, -2147483647, 72,
74, 76, 78, 80}));
}
TEST(ReferenceTest, RandomJaxReference181) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{1, 2},
{-2, -1, -1, -2},
2147483646,
{1, 1},
{1, 1},
{1, 2},
[](auto a, auto b) { return a <= b ? a : b; });
EXPECT_THAT(res.shape, ElementsAre(7, 8));
EXPECT_THAT(res.data,
ElementsAreArray(
{2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646}));
}
TEST(ReferenceTest, RandomJaxReference182) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{1, 2},
{-1, -1, 2, -1},
0,
{2, 1},
{2, 1},
{2, 1},
std::plus<>());
EXPECT_THAT(res.shape, ElementsAre(3, 20));
EXPECT_THAT(res.data, ElementsAreArray(
{0, 0, 42, 0, 44, 0, 46, 0, 48, 0, 50, 0,
52, 0, 54, 0, 56, 0, 58, 0, 0, 0, 82, 0,
84, 0, 86, 0, 88, 0, 90, 0, 92, 0, 94, 0,
96, 0, 98, 0, 0, 0, 122, 0, 124, 0, 126, 0,
128, 0, 130, 0, 132, 0, 134, 0, 136, 0, 138, 0}));
}
TEST(ReferenceTest, RandomJaxReference183) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{2, 2},
{1, -1, -2, -1},
1,
{2, 2},
{2, 1},
{1, 2},
std::multiplies<>());
EXPECT_THAT(res.shape, ElementsAre(17, 8));
EXPECT_THAT(
res.data,
ElementsAreArray(
{1, 1, 1, 1, 1, 1, 1, 1, 24, 39, 56,
75, 96, 119, 144, 171, 1, 1, 1, 1, 1, 1,
1, 1, 264, 299, 336, 375, 416, 459, 504, 551, 1,
1, 1, 1, 1, 1, 1, 1, 704, 759, 816, 875,
936, 999, 1064, 1131, 1, 1, 1, 1, 1, 1, 1,
1, 1344, 1419, 1496, 1575, 1656, 1739, 1824, 1911, 1, 1,
1, 1, 1, 1, 1, 1, 2184, 2279, 2376, 2475, 2576,
2679, 2784, 2891, 1, 1, 1, 1, 1, 1, 1, 1,
3224, 3339, 3456, 3575, 3696, 3819, 3944, 4071, 1, 1, 1,
1, 1, 1, 1, 1, 4464, 4599, 4736, 4875, 5016, 5159,
5304, 5451, 1, 1, 1, 1, 1, 1, 1, 1, 5904,
6059, 6216, 6375, 6536, 6699, 6864, 7031, 1, 1, 1, 1,
1, 1, 1, 1}));
}
TEST(ReferenceTest, RandomJaxReference184) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{2, 1},
{-1, 1, 2, -1},
2147483646,
{2, 2},
{2, 2},
{2, 2},
[](auto a, auto b) { return a <= b ? a : b; });
EXPECT_THAT(res.shape, ElementsAre(9, 5));
EXPECT_THAT(
res.data,
ElementsAreArray(
{2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646}));
}
TEST(ReferenceTest, RandomJaxReference185) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{1, 2},
{2, -1, -2, 0},
0,
{1, 1},
{2, 2},
{1, 2},
std::plus<>());
EXPECT_THAT(res.shape, ElementsAre(11, 9));
EXPECT_THAT(
res.data,
ElementsAreArray(
{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 2, 3, 4, 5, 6, 7, 8, 9, 10, 12, 13, 14, 15, 16, 17, 18,
19, 20, 22, 23, 24, 25, 26, 27, 28, 29, 30, 32, 33, 34, 35, 36, 37,
38, 39, 40, 42, 43, 44, 45, 46, 47, 48, 49, 50, 52, 53, 54, 55, 56,
57, 58, 59, 60, 62, 63, 64, 65, 66, 67, 68, 69, 70, 72, 73, 74, 75,
76, 77, 78, 79, 80, 82, 83, 84, 85, 86, 87, 88, 89, 90}));
}
TEST(ReferenceTest, RandomJaxReference186) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{1, 1},
{0, 0, 0, -2},
-2147483647,
{2, 2},
{1, 1},
{1, 1},
[](auto a, auto b) { return a >= b ? a : b; });
EXPECT_THAT(res.shape, ElementsAre(9, 7));
EXPECT_THAT(res.data, ElementsAreArray(
{12, 13, 14, 15, 16, 17, 18, 22, 23, 24, 25, 26, 27,
28, 32, 33, 34, 35, 36, 37, 38, 42, 43, 44, 45, 46,
47, 48, 52, 53, 54, 55, 56, 57, 58, 62, 63, 64, 65,
66, 67, 68, 72, 73, 74, 75, 76, 77, 78, 82, 83, 84,
85, 86, 87, 88, 92, 93, 94, 95, 96, 97, 98}));
}
TEST(ReferenceTest, RandomJaxReference187) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{1, 1},
{0, 0, 0, -2},
2147483646,
{2, 1},
{2, 1},
{2, 2},
[](auto a, auto b) { return a <= b ? a : b; });
EXPECT_THAT(res.shape, ElementsAre(4, 4));
EXPECT_THAT(res.data, ElementsAreArray({1, 3, 5, 7, 21, 23, 25, 27, 41, 43,
45, 47, 61, 63, 65, 67}));
}
TEST(ReferenceTest, RandomJaxReference188) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{2, 2},
{2, 2, -1, -1},
-2147483647,
{2, 1},
{2, 1},
{2, 1},
[](auto a, auto b) { return a >= b ? a : b; });
EXPECT_THAT(res.shape, ElementsAre(11, 17));
EXPECT_THAT(
res.data,
ElementsAreArray(
{-2147483647, 2, -2147483647, 3, -2147483647,
4, -2147483647, 5, -2147483647, 6,
-2147483647, 7, -2147483647, 8, -2147483647,
9, -2147483647, -2147483647, 12, -2147483647,
13, -2147483647, 14, -2147483647, 15,
-2147483647, 16, -2147483647, 17, -2147483647,
18, -2147483647, 19, -2147483647, -2147483647,
22, -2147483647, 23, -2147483647, 24,
-2147483647, 25, -2147483647, 26, -2147483647,
27, -2147483647, 28, -2147483647, 29,
-2147483647, -2147483647, 32, -2147483647, 33,
-2147483647, 34, -2147483647, 35, -2147483647,
36, -2147483647, 37, -2147483647, 38,
-2147483647, 39, -2147483647, -2147483647, 42,
-2147483647, 43, -2147483647, 44, -2147483647,
45, -2147483647, 46, -2147483647, 47,
-2147483647, 48, -2147483647, 49, -2147483647,
-2147483647, 52, -2147483647, 53, -2147483647,
54, -2147483647, 55, -2147483647, 56,
-2147483647, 57, -2147483647, 58, -2147483647,
59, -2147483647, -2147483647, 62, -2147483647,
63, -2147483647, 64, -2147483647, 65,
-2147483647, 66, -2147483647, 67, -2147483647,
68, -2147483647, 69, -2147483647, -2147483647,
72, -2147483647, 73, -2147483647, 74,
-2147483647, 75, -2147483647, 76, -2147483647,
77, -2147483647, 78, -2147483647, 79,
-2147483647, -2147483647, 82, -2147483647, 83,
-2147483647, 84, -2147483647, 85, -2147483647,
86, -2147483647, 87, -2147483647, 88,
-2147483647, 89, -2147483647, -2147483647, 92,
-2147483647, 93, -2147483647, 94, -2147483647,
95, -2147483647, 96, -2147483647, 97,
-2147483647, 98, -2147483647, 99, -2147483647,
-2147483647, 92, -2147483647, 93, -2147483647,
94, -2147483647, 95, -2147483647, 96,
-2147483647, 97, -2147483647, 98, -2147483647,
99, -2147483647}));
}
TEST(ReferenceTest, RandomJaxReference189) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{1, 1},
{1, 0, -2, 2},
0,
{2, 2},
{1, 1},
{2, 2},
std::plus<>());
EXPECT_THAT(res.shape, ElementsAre(5, 5));
EXPECT_THAT(res.data,
ElementsAreArray({7, 11, 15, 19, 0, 74, 82, 90, 98,
0, 154, 162, 170, 178, 0, 234, 242, 250,
258, 0, 314, 322, 330, 338, 0}));
}
TEST(ReferenceTest, RandomJaxReference190) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{2, 1},
{1, -1, 2, 0},
2147483646,
{2, 1},
{2, 1},
{2, 2},
[](auto a, auto b) { return a <= b ? a : b; });
EXPECT_THAT(res.shape, ElementsAre(9, 6));
EXPECT_THAT(res.data,
ElementsAreArray(
{2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646}));
}
TEST(ReferenceTest, RandomJaxReference191) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{1, 1},
{1, 0, 2, -2},
0,
{1, 2},
{2, 1},
{1, 2},
std::plus<>());
EXPECT_THAT(res.shape, ElementsAre(11, 5));
EXPECT_THAT(
res.data,
ElementsAreArray(
{0, 0, 0, 0, 0, 0, 3, 7, 11, 15, 0, 23, 27, 31,
35, 0, 43, 47, 51, 55, 0, 63, 67, 71, 75, 0, 83, 87,
91, 95, 0, 103, 107, 111, 115, 0, 123, 127, 131, 135, 0, 143,
147, 151, 155, 0, 163, 167, 171, 175, 0, 183, 187, 191, 195}));
}
TEST(ReferenceTest, RandomJaxReference192) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{1, 1},
{-1, 2, 0, -1},
2147483646,
{1, 2},
{2, 1},
{1, 2},
[](auto a, auto b) { return a <= b ? a : b; });
EXPECT_THAT(res.shape, ElementsAre(11, 4));
EXPECT_THAT(res.data,
ElementsAreArray(
{11, 13, 15, 17, 21,
23, 25, 27, 31, 33,
35, 37, 41, 43, 45,
47, 51, 53, 55, 57,
61, 63, 65, 67, 71,
73, 75, 77, 81, 83,
85, 87, 91, 93, 95,
97, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646}));
}
TEST(ReferenceTest, RandomJaxReference193) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{2, 1},
{2, 2, 1, 0},
1,
{2, 1},
{2, 2},
{1, 2},
std::multiplies<>());
EXPECT_THAT(res.shape, ElementsAre(21, 6));
EXPECT_THAT(res.data, ElementsAreArray(
{1, 2, 4, 6, 8, 10, 1, 1, 1, 1, 1, 1,
1, 24, 56, 96, 144, 200, 1, 1, 1, 1, 1, 1,
1, 264, 336, 416, 504, 600, 1, 1, 1, 1, 1, 1,
1, 704, 816, 936, 1064, 1200, 1, 1, 1, 1, 1, 1,
1, 1344, 1496, 1656, 1824, 2000, 1, 1, 1, 1, 1, 1,
1, 2184, 2376, 2576, 2784, 3000, 1, 1, 1, 1, 1, 1,
1, 3224, 3456, 3696, 3944, 4200, 1, 1, 1, 1, 1, 1,
1, 4464, 4736, 5016, 5304, 5600, 1, 1, 1, 1, 1, 1,
1, 5904, 6216, 6536, 6864, 7200, 1, 1, 1, 1, 1, 1,
1, 7544, 7896, 8256, 8624, 9000, 1, 1, 1, 1, 1, 1,
1, 92, 94, 96, 98, 100}));
}
TEST(ReferenceTest, RandomJaxReference194) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{2, 2},
{-2, -1, -2, -1},
-2147483647,
{2, 2},
{2, 1},
{2, 2},
[](auto a, auto b) { return a >= b ? a : b; });
EXPECT_THAT(res.shape, ElementsAre(7, 8));
EXPECT_THAT(res.data,
ElementsAreArray({22, 23, 24, 25, 26, 27, 28, 29, 32, 33, 34, 35,
36, 37, 38, 39, 42, 43, 44, 45, 46, 47, 48, 49,
52, 53, 54, 55, 56, 57, 58, 59, 62, 63, 64, 65,
66, 67, 68, 69, 72, 73, 74, 75, 76, 77, 78, 79,
82, 83, 84, 85, 86, 87, 88, 89}));
}
TEST(ReferenceTest, RandomJaxReference195) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{1, 2},
{-2, 1, -2, 2},
-2147483647,
{1, 2},
{2, 2},
{2, 2},
[](auto a, auto b) { return a >= b ? a : b; });
EXPECT_THAT(res.shape, ElementsAre(5, 9));
EXPECT_THAT(
res.data,
ElementsAreArray(
{23, 24, 25, 26, 27,
28, 29, 30, 30, 43,
44, 45, 46, 47, 48,
49, 50, 50, 63, 64,
65, 66, 67, 68, 69,
70, 70, 83, 84, 85,
86, 87, 88, 89, 90,
90, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647}));
}
TEST(ReferenceTest, RandomJaxReference196) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{1, 1},
{1, 1, 1, -1},
1,
{1, 2},
{2, 1},
{2, 2},
std::multiplies<>());
EXPECT_THAT(res.shape, ElementsAre(6, 5));
EXPECT_THAT(res.data,
ElementsAreArray({1, 1, 1, 1, 1, 11, 156, 210,
272, 342, 31, 1056, 1190, 1332, 1482, 51,
2756, 2970, 3192, 3422, 71, 5256, 5550, 5852,
6162, 91, 8556, 8930, 9312, 9702}));
}
TEST(ReferenceTest, RandomJaxReference197) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{2, 1},
{-2, -2, -2, -2},
-2147483647,
{2, 1},
{2, 2},
{2, 2},
[](auto a, auto b) { return a >= b ? a : b; });
EXPECT_THAT(res.shape, ElementsAre(7, 3));
EXPECT_THAT(res.data,
ElementsAreArray({23, 25, 27, 33, 35, 37, 43, 45, 47, 53, 55,
57, 63, 65, 67, 73, 75, 77, 83, 85, 87}));
}
TEST(ReferenceTest, RandomJaxReference198) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{1, 2},
{1, 1, -2, 0},
2147483646,
{1, 1},
{2, 1},
{1, 2},
[](auto a, auto b) { return a <= b ? a : b; });
EXPECT_THAT(res.shape, ElementsAre(12, 9));
EXPECT_THAT(res.data,
ElementsAreArray(
{2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2,
3, 4, 5, 6, 7,
8, 9, 10, 12, 13,
14, 15, 16, 17, 18,
19, 20, 22, 23, 24,
25, 26, 27, 28, 29,
30, 32, 33, 34, 35,
36, 37, 38, 39, 40,
42, 43, 44, 45, 46,
47, 48, 49, 50, 52,
53, 54, 55, 56, 57,
58, 59, 60, 62, 63,
64, 65, 66, 67, 68,
69, 70, 72, 73, 74,
75, 76, 77, 78, 79,
80, 82, 83, 84, 85,
86, 87, 88, 89, 90,
92, 93, 94, 95, 96,
97, 98, 99, 100, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646}));
}
TEST(ReferenceTest, RandomJaxReference199) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{2, 2},
{-1, 1, -1, -2},
-2147483647,
{2, 1},
{2, 1},
{1, 2},
[](auto a, auto b) { return a >= b ? a : b; });
EXPECT_THAT(res.shape, ElementsAre(17, 8));
EXPECT_THAT(
res.data,
ElementsAreArray(
{-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647}));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/stablehlo_reduce_window_test_util.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/stablehlo_reduce_window_test_util_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
cc5b1af9-28e1-4a89-9dd4-8d8871cdac9a | cpp | tensorflow/tensorflow | hlo_proto_util | third_party/xla/xla/service/hlo_proto_util.cc | third_party/xla/xla/service/hlo_proto_util_test.cc | #include "xla/service/hlo_proto_util.h"
#include <memory>
#include <string>
#include <vector>
#include "xla/service/hlo_verifier.h"
#include "xla/util.h"
namespace xla {
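// Note (added for readability): the MakeHloProto builders below move the
// module / buffer-assignment protos into the result via Swap() rather than
// copying them.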
HloProto MakeHloProto(const HloModule& module,
const BufferAssignment& assignment) {
BufferAssignmentProto proto_assignment = assignment.ToProto();
HloProto proto = MakeHloProto(module);
proto.mutable_buffer_assignment()->Swap(&proto_assignment);
return proto;
}
HloProto MakeHloProto(const HloModule& module) {
HloModuleProto proto_module = module.ToProto();
HloProto proto;
proto.mutable_hlo_module()->Swap(&proto_module);
return proto;
}
absl::StatusOr<std::unique_ptr<HloModule>> CreateModuleFromProto(
const HloModuleProto& proto, const HloModuleConfig& module_config,
bool is_module_post_optimizations) {
VLOG(4) << proto.ShortDebugString();
TF_ASSIGN_OR_RETURN(std::unique_ptr<HloModule> module,
HloModule::CreateFromProto(proto, module_config));
TF_RETURN_IF_ERROR(
      HloVerifier(/*layout_sensitive=*/false,
                  /*allow_mixed_precision=*/is_module_post_optimizations)
.Run(module.get())
.status());
return module;
}
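// Note (added for readability): the accessors below return ShapeProto
// pointers that alias `hlo_proto`, so the caller must keep the proto alive
// while the returned shapes are in use.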
absl::StatusOr<std::vector<const ShapeProto*>> EntryComputationParameterShapes(
const HloProto& hlo_proto) {
if (!hlo_proto.has_hlo_module()) {
return NotFound("HloProto missing HloModuleProto.");
}
if (!hlo_proto.hlo_module().has_host_program_shape()) {
return NotFound("HloProto missing program shape.");
}
std::vector<const ShapeProto*> parameter_shapes;
const auto& program_shape = hlo_proto.hlo_module().host_program_shape();
for (const ShapeProto& shape : program_shape.parameters()) {
parameter_shapes.push_back(&shape);
}
return parameter_shapes;
}
absl::StatusOr<const ShapeProto*> EntryComputationOutputShape(
const HloProto& hlo_proto) {
if (!hlo_proto.has_hlo_module()) {
return NotFound("HloProto missing HloModuleProto.");
}
if (!hlo_proto.hlo_module().has_host_program_shape()) {
return NotFound("HloProto missing program shape.");
}
if (!hlo_proto.hlo_module().host_program_shape().has_result()) {
return NotFound("HloProto missing result in its program shape");
}
return &hlo_proto.hlo_module().host_program_shape().result();
}
} | #include "xla/service/hlo_proto_util.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/hlo.pb.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/test.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/types.h"
namespace xla {
namespace {
class HloProtoUtilTest : public ::testing::Test {};
TEST_F(HloProtoUtilTest, ParamsAndOutputShapeMissingModule) {
HloProto hlo_proto;
auto status = EntryComputationParameterShapes(hlo_proto).status();
ASSERT_FALSE(status.ok());
ASSERT_THAT(status.message(), ::testing::HasSubstr("missing HloModuleProto"));
}
TEST_F(HloProtoUtilTest, MissingProgramShape) {
HloProto hlo_proto;
HloModuleProto* module = hlo_proto.mutable_hlo_module();
module->set_name("entry");
auto status = EntryComputationParameterShapes(hlo_proto).status();
ASSERT_FALSE(status.ok());
ASSERT_THAT(status.message(), ::testing::HasSubstr("missing program shape"));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/hlo_proto_util.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/hlo_proto_util_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
99856370-42e8-403a-a6aa-3269bbcf2b79 | cpp | tensorflow/tensorflow | tpu | tensorflow/core/grappler/utils/tpu.cc | tensorflow/core/grappler/utils/tpu_test.cc | #include "tensorflow/core/grappler/utils/tpu.h"
#include "tensorflow/core/framework/function.pb.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
namespace tensorflow {
namespace grappler {
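// A graph targets the legacy TPU bridge if it, or any function in its library,
// contains a TPUCompile or TPUPartitionedCall node.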
bool IsLegacyTPUBridgeGraphDef(const GraphDef& def) {
for (const auto& node : def.node()) {
if (node.op() == "TPUCompile" || node.op() == "TPUPartitionedCall") {
return true;
}
}
if (!def.has_library()) return false;
for (const auto& function_def : def.library().function()) {
for (const auto& node : function_def.node_def()) {
if (node.op() == "TPUCompile" || node.op() == "TPUPartitionedCall") {
return true;
}
}
}
return false;
}
}
} | #include "tensorflow/core/grappler/utils/tpu.h"
#include "tensorflow/core/framework/function.pb.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace grappler {
class TpuTest : public ::testing::Test {};
TEST_F(TpuTest, NotTpuGraph) {
{
GraphDef tpu_graph;
tpu_graph.add_node()->set_op("Add");
FunctionDefLibrary* library = tpu_graph.mutable_library();
FunctionDef* function_def = library->add_function();
function_def->add_node_def()->set_op("Mul");
EXPECT_FALSE(IsLegacyTPUBridgeGraphDef(tpu_graph));
}
}
TEST_F(TpuTest, TpuMainGraph) {
{
GraphDef tpu_graph;
tpu_graph.add_node()->set_op("TPUPartitionedCall");
EXPECT_TRUE(IsLegacyTPUBridgeGraphDef(tpu_graph));
}
}
TEST_F(TpuTest, TpuLibraryGraph) {
{
GraphDef tpu_graph;
tpu_graph.add_node()->set_op("BatchFunction");
FunctionDefLibrary* library = tpu_graph.mutable_library();
FunctionDef* function_def = library->add_function();
function_def->add_node_def()->set_op("TPUPartitionedCall");
EXPECT_TRUE(IsLegacyTPUBridgeGraphDef(tpu_graph));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/utils/tpu.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/utils/tpu_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
cdb712dc-200b-4711-ab75-2e714a826f2e | cpp | google/arolla | matchers | arolla/jagged_shape/testing/matchers.h | arolla/jagged_shape/testing/matchers_test.cc | #ifndef AROLLA_JAGGED_SHAPE_TESTING_MATCHERS_H_
#define AROLLA_JAGGED_SHAPE_TESTING_MATCHERS_H_
#include <ostream>
#include <utility>
#include "gtest/gtest.h"
#include "arolla/jagged_shape/jagged_shape.h"
#include "arolla/util/repr.h"
namespace arolla::testing {
namespace matchers_impl {
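// gMock matcher that reports whether a JaggedShape is equivalent to the
// expected shape (via JaggedShape::IsEquivalentTo).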
template <typename Edge>
class JaggedShapeEquivalentToMatcher {
public:
using is_gtest_matcher = void;
explicit JaggedShapeEquivalentToMatcher(JaggedShape<Edge> expected_shape)
: expected_shape_(std::move(expected_shape)) {}
bool MatchAndExplain(const JaggedShape<Edge>& shape,
::testing::MatchResultListener* listener) const {
bool is_equivalent = shape.IsEquivalentTo(expected_shape_);
*listener << Repr(shape)
<< (is_equivalent ? " which is equivalent"
: " which is not equivalent");
return is_equivalent;
}
void DescribeTo(::std::ostream* os) const {
*os << "is equivalent to " << Repr(expected_shape_);
}
void DescribeNegationTo(::std::ostream* os) const {
*os << "is not equivalent to " << Repr(expected_shape_);
}
private:
JaggedShape<Edge> expected_shape_;
};
}
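// Returns a matcher that checks equivalence with `expected_shape`.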
template <typename Edge>
auto IsEquivalentTo(const JaggedShape<Edge>& expected_shape) {
return matchers_impl::JaggedShapeEquivalentToMatcher<Edge>(expected_shape);
}
}
#endif | #include "arolla/jagged_shape/testing/matchers.h"
#include <cstdint>
#include <string>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "arolla/dense_array/dense_array.h"
#include "arolla/dense_array/edge.h"
#include "arolla/jagged_shape/dense_array/jagged_shape.h"
namespace arolla {
namespace {
using ::arolla::testing::IsEquivalentTo;
using ::testing::Eq;
using ::testing::Not;
using ::testing::StringMatchResultListener;
template <typename MatcherType, typename Value>
std::string Explain(const MatcherType& m, const Value& x) {
StringMatchResultListener listener;
ExplainMatchResult(m, x, &listener);
return listener.str();
}
TEST(QTypeTest, JaggedShapeIsEquivalentTo) {
ASSERT_OK_AND_ASSIGN(auto edge1, DenseArrayEdge::FromSplitPoints(
CreateDenseArray<int64_t>({0, 2})));
ASSERT_OK_AND_ASSIGN(auto edge2, DenseArrayEdge::FromSplitPoints(
CreateDenseArray<int64_t>({0, 1, 3})));
ASSERT_OK_AND_ASSIGN(auto shape1,
JaggedDenseArrayShape::FromEdges({edge1, edge2}));
ASSERT_OK_AND_ASSIGN(auto edge3, DenseArrayEdge::FromSplitPoints(
CreateDenseArray<int64_t>({0, 1, 4})));
ASSERT_OK_AND_ASSIGN(auto shape2,
JaggedDenseArrayShape::FromEdges({edge1, edge3}));
EXPECT_THAT(shape1, IsEquivalentTo(shape1));
EXPECT_THAT(shape1, Not(IsEquivalentTo(shape2)));
auto m = IsEquivalentTo(shape1);
EXPECT_THAT(::testing::DescribeMatcher<JaggedDenseArrayShape>(m),
Eq("is equivalent to JaggedShape(2, [1, 2])"));
EXPECT_THAT(
::testing::DescribeMatcher<JaggedDenseArrayShape>(m, true),
Eq("is not equivalent to JaggedShape(2, [1, 2])"));
EXPECT_THAT(Explain(m, shape1),
Eq("JaggedShape(2, [1, 2]) which is equivalent"));
EXPECT_THAT(Explain(m, shape2),
Eq("JaggedShape(2, [1, 3]) which is not equivalent"));
}
}
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/jagged_shape/testing/matchers.h | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/jagged_shape/testing/matchers_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
ef04366f-e460-4ab7-b5bb-64cc8e582f89 | cpp | tensorflow/tensorflow | resource_util | tensorflow/compiler/tf2xla/resource_util.cc | tensorflow/compiler/tf2xla/resource_util_test.cc | #include "tensorflow/compiler/tf2xla/resource_util.h"
#include <optional>
#include <string>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/container/inlined_vector.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "tensorflow/compiler/tf2xla/resource_operation_table.h"
#include "xla/status_macros.h"
#include "tensorflow/core/graph/algorithm.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/gtl/cleanup.h"
#include "tensorflow/core/protobuf/error_codes.pb.h"
namespace tensorflow {
namespace {
using tsl::StatusOr;
const char kIdentityNOp[] = "IdentityN";
const char kIfOp[] = "If";
const char kWhileOp[] = "While";
const char kArgOp[] = "_Arg";
const char kRetvalOp[] = "_Retval";
const int kMaxCallDepth = 100;
Status AnalyzeResourceUsage(
const Graph* graph, const std::optional<std::string>& function_name,
const int call_depth, const absl::flat_hash_set<int>& resource_arg_indices,
FunctionLibraryRuntime* lib_runtime,
absl::flat_hash_map<ResourceUsageAnalysis::NodeInfo,
absl::flat_hash_set<ResourceUsageAnalysis::NodeInfo>>*
source_to_path);
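// Control flow v1 nodes are not supported by this analysis.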
bool IsControlFlowV1Node(const Node* n) {
return (n->IsEnter() || n->IsExit() || n->IsSwitch() || n->IsMerge() ||
n->IsNextIteration());
}
absl::StatusOr<absl::InlinedVector<const Edge*, 1>> OutputEdgesByIndex(
const Node& n, int idx) {
absl::InlinedVector<const Edge*, 1> res;
if (idx >= n.num_outputs()) {
return errors::InvalidArgument("Invalid out_edge index: ", idx, ", Node ",
n.name(), " only has ", n.num_outputs(),
" outputs.");
}
for (const Edge* o : n.out_edges()) {
if (o->src_output() == idx) res.emplace_back(o);
}
return res;
}
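// Returns true if the node creates a Stack or TensorArray resource.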
bool IsStackOrTensorArraySource(const Node& n) {
const XlaResourceOpInfo* op_info = GetResourceOpInfoForOp(n.type_string());
if (!op_info) return false;
if (op_info->resource_kind() != XlaResourceKind::kStack &&
op_info->resource_kind() != XlaResourceKind::kTensorArray)
return false;
return n.num_outputs() > 0 && n.output_type(0) == DataType::DT_RESOURCE;
}
void PropagateFromStackOrTensorArraySourceOp(
const Node& n, const std::optional<std::string>& function_name,
absl::flat_hash_map<const Edge*, ResourceUsageAnalysis::NodeInfo>*
user_to_source) {
ResourceUsageAnalysis::NodeInfo src_node_info(function_name, n.name(),
n.type_string());
for (const Edge* o : n.out_edges()) {
if (o->IsControlEdge()) continue;
if (o->dst()->input_type(o->dst_input()) != DataType::DT_RESOURCE) {
continue;
}
(*user_to_source)[o] = src_node_info;
}
}
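// Records the _Arg node as the source for every resource edge it feeds, when
// the argument index is one of the tracked resource arguments.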
Status PropagateFromArgOp(
const Node& n, const std::optional<std::string>& function_name,
const absl::flat_hash_set<int>& resource_arg_indices,
absl::flat_hash_map<const Edge*, ResourceUsageAnalysis::NodeInfo>*
user_to_source) {
TF_RET_CHECK(n.type_string() == kArgOp);
int index;
TF_RETURN_IF_ERROR(GetNodeAttr(n.attrs(), "index", &index));
if (!resource_arg_indices.contains(index)) return absl::OkStatus();
TF_RET_CHECK(function_name.has_value())
<< "ResourceUsageAnalysis does not support analyzing _Arg nodes "
"carrying Stack/TensorArray resource in given graph unless they "
"are in function calls.";
const ResourceUsageAnalysis::NodeInfo src_node_info(function_name, n.name(),
n.type_string());
for (const Edge* o : n.out_edges()) {
if (o->IsControlEdge()) continue;
if (o->dst()->input_type(o->dst_input()) != DataType::DT_RESOURCE) {
continue;
}
(*user_to_source)[o] = src_node_info;
}
return absl::OkStatus();
}
Status UpdateResourceUsageFromFunctionBodyAnalysis(
const Node& call_node,
const std::optional<absl::string_view>& caller_function_name,
const FunctionBody& fbody,
const absl::flat_hash_map<
ResourceUsageAnalysis::NodeInfo,
absl::flat_hash_set<ResourceUsageAnalysis::NodeInfo>>&
called_function_source_to_path,
absl::flat_hash_map<const Edge*, ResourceUsageAnalysis::NodeInfo>*
user_to_source,
absl::flat_hash_map<ResourceUsageAnalysis::NodeInfo,
absl::flat_hash_set<ResourceUsageAnalysis::NodeInfo>>*
caller_source_to_path) {
std::unordered_map<std::string, Node*> node_name_index =
fbody.graph->BuildNodeNameIndex();
for (const auto& it : called_function_source_to_path) {
ResourceUsageAnalysis::NodeInfo src_node_info = it.first;
if (src_node_info.op_ == kArgOp) {
const Node* arg_src = node_name_index[src_node_info.node_name_];
int index;
TF_RETURN_IF_ERROR(GetNodeAttr(arg_src->attrs(), "index", &index));
const Edge* e;
TF_RETURN_IF_ERROR(call_node.input_edge(index, &e));
src_node_info = (*user_to_source)[e];
}
for (const auto& dst_node_info : it.second) {
if (dst_node_info.op_ == kRetvalOp) {
const Node* ret_user = node_name_index[dst_node_info.node_name_];
int index;
TF_RETURN_IF_ERROR(GetNodeAttr(ret_user->attrs(), "index", &index));
absl::InlinedVector<const Edge*, 1> outs;
TF_ASSIGN_OR_RETURN(outs, OutputEdgesByIndex(call_node, index));
for (const Edge* o : outs) (*user_to_source)[o] = src_node_info;
} else {
(*caller_source_to_path)[src_node_info].emplace(dst_node_info);
}
}
}
return absl::OkStatus();
}
Status PropagateThroughCallOp(
const Node& n, const std::optional<std::string>& function_name,
const int call_depth, FunctionLibraryRuntime* lib_runtime,
absl::flat_hash_map<const Edge*, ResourceUsageAnalysis::NodeInfo>*
user_to_source,
absl::flat_hash_map<ResourceUsageAnalysis::NodeInfo,
absl::flat_hash_set<ResourceUsageAnalysis::NodeInfo>>*
source_to_path) {
if (call_depth > kMaxCallDepth) {
return errors::InvalidArgument(
"Function call stack in given graph is too deep, last function ",
"name is: ", function_name.value());
}
absl::flat_hash_set<int> resource_arg_indices;
for (const Edge* e : n.in_edges()) {
if (user_to_source->contains(e)) {
resource_arg_indices.emplace(e->dst_input());
}
}
FunctionLibraryRuntime::Handle handle;
TF_RETURN_IF_ERROR(InstantiateFunctionCall(n.def(), lib_runtime, &handle));
auto release_handle_on_return = gtl::MakeCleanup(
[&] { TF_CHECK_OK(lib_runtime->ReleaseHandle(handle)); });
const FunctionBody* fbody = lib_runtime->GetFunctionBody(handle);
absl::flat_hash_map<ResourceUsageAnalysis::NodeInfo,
absl::flat_hash_set<ResourceUsageAnalysis::NodeInfo>>
called_function_source_to_path;
TF_RETURN_IF_ERROR(AnalyzeResourceUsage(
fbody->graph, n.type_string(), call_depth + 1, resource_arg_indices,
lib_runtime, &called_function_source_to_path));
TF_RETURN_IF_ERROR(UpdateResourceUsageFromFunctionBodyAnalysis(
n, function_name, *fbody, called_function_source_to_path, user_to_source,
source_to_path));
return absl::OkStatus();
}
Status PropagateThroughIdentityOp(
const Node& n,
absl::flat_hash_map<const Edge*, ResourceUsageAnalysis::NodeInfo>*
user_to_source) {
TF_RET_CHECK(n.IsIdentity() || n.type_string() == kIdentityNOp);
if (n.IsIdentity()) {
for (const Edge* o : n.out_edges()) {
if (o->IsControlEdge()) continue;
const Edge* in;
TF_RETURN_IF_ERROR(n.input_edge(0, &in));
if (!user_to_source->contains(in)) continue;
user_to_source->emplace(std::make_pair(o, (*user_to_source)[in]));
}
} else {
for (const Edge* o : n.out_edges()) {
if (o->IsControlEdge()) continue;
const Edge* in;
TF_RETURN_IF_ERROR(n.input_edge(o->src_output(), &in));
if (!user_to_source->contains(in)) continue;
user_to_source->emplace(std::make_pair(o, (*user_to_source)[in]));
}
}
return absl::OkStatus();
}
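// Walks the graph in reverse post-order, tracking Stack/TensorArray resources
// from their sources through identities and function calls to their users.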
Status AnalyzeResourceUsage(
const Graph* graph, const std::optional<std::string>& function_name,
const int call_depth, const absl::flat_hash_set<int>& resource_arg_indices,
FunctionLibraryRuntime* lib_runtime,
absl::flat_hash_map<ResourceUsageAnalysis::NodeInfo,
absl::flat_hash_set<ResourceUsageAnalysis::NodeInfo>>*
source_to_path) {
source_to_path->clear();
std::vector<Node*> reverse_post_order;
GetReversePostOrder(*graph, &reverse_post_order, NodeComparatorName{});
absl::flat_hash_map<const Edge*, ResourceUsageAnalysis::NodeInfo>
user_to_source;
for (const Node* n : reverse_post_order) {
if (IsControlFlowV1Node(n)) {
return errors::InvalidArgument(
"AnalyzeResourceUsage does not support control flow v1 node: ",
n->DebugString());
}
if (n->type_string() == kIfOp || n->type_string() == kWhileOp) {
return errors::InvalidArgument(
"AnalyzeResourceUsage does not yet support control flow v2 "
"node: ",
n->DebugString());
}
if (IsStackOrTensorArraySource(*n)) {
PropagateFromStackOrTensorArraySourceOp(*n, function_name,
&user_to_source);
continue;
}
if (n->IsArg()) {
TF_RETURN_IF_ERROR(PropagateFromArgOp(
*n, function_name, resource_arg_indices, &user_to_source));
continue;
}
if (IsFunctionCall(*lib_runtime->GetFunctionLibraryDefinition(), *n)) {
TF_RETURN_IF_ERROR(PropagateThroughCallOp(*n, function_name, call_depth,
lib_runtime, &user_to_source,
source_to_path));
continue;
}
if (n->IsIdentity() || n->type_string() == kIdentityNOp) {
TF_RETURN_IF_ERROR(PropagateThroughIdentityOp(*n, &user_to_source));
}
}
for (const auto& it : user_to_source) {
(*source_to_path)[it.second].emplace(function_name, it.first->dst()->name(),
it.first->dst()->type_string());
}
return absl::OkStatus();
}
}
Status ResourceUsageAnalysis::Analyze(
const Graph* graph, FunctionLibraryRuntime* lib_runtime,
absl::flat_hash_map<NodeInfo, absl::flat_hash_set<NodeInfo>>*
source_to_path) {
return AnalyzeResourceUsage(
graph, {}, 0,
absl::flat_hash_set<int>(), lib_runtime,
source_to_path);
}
} | #include "tensorflow/compiler/tf2xla/resource_util.h"
#include <memory>
#include <string>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/memory/memory.h"
#include "absl/strings/str_split.h"
#include "absl/strings/string_view.h"
#include "tensorflow/cc/framework/scope.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/graph/graph_def_builder.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/public/version.h"
namespace tensorflow {
namespace {
ResourceUsageAnalysis::NodeInfo node_info_from_string(absl::string_view s) {
std::vector<std::string> tokens = absl::StrSplit(s, ':');
EXPECT_EQ(tokens.size(), 3);
ResourceUsageAnalysis::NodeInfo node_info;
if (tokens[0].empty()) {
node_info.function_name_ = std::nullopt;
} else {
node_info.function_name_ = std::move(tokens[0]);
}
node_info.node_name_ = std::move(tokens[1]);
node_info.op_ = std::move(tokens[2]);
return node_info;
}
void AnalyzeAndVerify(
const GraphDef& graphdef, FunctionLibraryDefinition* flib_def,
const absl::flat_hash_map<std::string, absl::flat_hash_set<std::string>>&
expected) {
auto graph = std::make_unique<Graph>(flib_def);
TF_EXPECT_OK(
ConvertGraphDefToGraph(GraphConstructorOptions(), graphdef, graph.get()));
auto pflr = std::make_unique<ProcessFunctionLibraryRuntime>(
nullptr, Env::Default(), nullptr, TF_GRAPH_DEF_VERSION,
flib_def, OptimizerOptions());
FunctionLibraryRuntime* lib_runtime =
pflr->GetFLR(ProcessFunctionLibraryRuntime::kDefaultFLRDevice);
absl::flat_hash_map<ResourceUsageAnalysis::NodeInfo,
absl::flat_hash_set<ResourceUsageAnalysis::NodeInfo>>
source_to_path;
TF_EXPECT_OK(ResourceUsageAnalysis::Analyze(graph.get(), lib_runtime,
&source_to_path));
absl::flat_hash_map<ResourceUsageAnalysis::NodeInfo,
absl::flat_hash_set<ResourceUsageAnalysis::NodeInfo>>
expected_source_to_path;
for (auto it : expected) {
auto src_node_info = node_info_from_string(it.first);
for (const std::string& user : it.second) {
expected_source_to_path[src_node_info].emplace(
node_info_from_string(user));
}
}
EXPECT_EQ(source_to_path, expected_source_to_path);
}
}
TEST(ResourceOpAnalyzerTest, SingleResourceSingleUserNoPassThrough) {
FunctionLibraryDefinition flib_def(OpRegistry::Global(),
FunctionDefLibrary());
GraphDefBuilder builder(GraphDefBuilder::kFailImmediately, &flib_def);
auto opts = builder.opts();
auto op_reg = opts.op_registry();
{
NodeBuilder stack_size_placeholder_builder("stack_size", "Placeholder",
op_reg);
stack_size_placeholder_builder.Attr("dtype", DT_INT32);
Node* stack_size_placeholder =
opts.FinalizeBuilder(&stack_size_placeholder_builder);
NodeBuilder stack_op_builder("stack_op", "StackV2", op_reg);
stack_op_builder.Input(stack_size_placeholder).Attr("elem_type", DT_FLOAT);
Node* stack_op = opts.FinalizeBuilder(&stack_op_builder);
NodeBuilder stack_close_builder("stack_close", "StackCloseV2", op_reg);
stack_close_builder.Input(stack_op);
opts.FinalizeBuilder(&stack_close_builder);
}
GraphDef graphdef;
TF_EXPECT_OK(builder.ToGraphDef(&graphdef));
absl::flat_hash_map<std::string, absl::flat_hash_set<std::string>> expected;
expected[":stack_op:StackV2"] =
absl::flat_hash_set<std::string>({":stack_close:StackCloseV2"});
AnalyzeAndVerify(graphdef, &flib_def, expected);
}
TEST(ResourceOpAnalyzerTest, SingleResourceSingleUserWithPassThrough) {
FunctionLibraryDefinition flib_def(OpRegistry::Global(),
FunctionDefLibrary());
GraphDefBuilder builder(GraphDefBuilder::kFailImmediately, &flib_def);
auto opts = builder.opts();
auto op_reg = opts.op_registry();
{
NodeBuilder stack_size_placeholder_builder("stack_size", "Placeholder",
op_reg);
stack_size_placeholder_builder.Attr("dtype", DT_INT32);
Node* stack_size_placeholder =
opts.FinalizeBuilder(&stack_size_placeholder_builder);
NodeBuilder stack_op_builder("stack_op", "StackV2", op_reg);
stack_op_builder.Input(stack_size_placeholder).Attr("elem_type", DT_FLOAT);
Node* stack_op = opts.FinalizeBuilder(&stack_op_builder);
NodeBuilder resource_identity_builder("resource_identity", "Identity",
op_reg);
resource_identity_builder.Input(stack_op);
Node* resource_identity = opts.FinalizeBuilder(&resource_identity_builder);
NodeBuilder stack_close_builder("stack_close", "StackCloseV2", op_reg);
stack_close_builder.Input(resource_identity);
opts.FinalizeBuilder(&stack_close_builder);
}
GraphDef graphdef;
TF_EXPECT_OK(builder.ToGraphDef(&graphdef));
absl::flat_hash_map<std::string, absl::flat_hash_set<std::string>> expected;
expected[":stack_op:StackV2"] = absl::flat_hash_set<std::string>(
{":resource_identity:Identity", ":stack_close:StackCloseV2"});
AnalyzeAndVerify(graphdef, &flib_def, expected);
}
TEST(ResourceOpAnalyzerTest, SingleResourceMultipleUserNoPassThrough) {
FunctionLibraryDefinition flib_def(OpRegistry::Global(),
FunctionDefLibrary());
GraphDefBuilder builder(GraphDefBuilder::kFailImmediately, &flib_def);
auto opts = builder.opts();
auto op_reg = opts.op_registry();
{
NodeBuilder stack_size_placeholder_builder("stack_size", "Placeholder",
op_reg);
stack_size_placeholder_builder.Attr("dtype", DT_INT32);
Node* stack_size_placeholder =
opts.FinalizeBuilder(&stack_size_placeholder_builder);
NodeBuilder stack_op_builder("stack_op", "StackV2", op_reg);
stack_op_builder.Input(stack_size_placeholder).Attr("elem_type", DT_FLOAT);
Node* stack_op = opts.FinalizeBuilder(&stack_op_builder);
NodeBuilder stack_close0_builder("stack_close0", "StackCloseV2", op_reg);
stack_close0_builder.Input(stack_op);
opts.FinalizeBuilder(&stack_close0_builder);
NodeBuilder stack_close1_builder("stack_close1", "StackCloseV2", op_reg);
stack_close1_builder.Input(stack_op);
opts.FinalizeBuilder(&stack_close1_builder);
}
GraphDef graphdef;
TF_EXPECT_OK(builder.ToGraphDef(&graphdef));
absl::flat_hash_map<std::string, absl::flat_hash_set<std::string>> expected;
expected[":stack_op:StackV2"] = absl::flat_hash_set<std::string>(
{":stack_close0:StackCloseV2", ":stack_close1:StackCloseV2"});
AnalyzeAndVerify(graphdef, &flib_def, expected);
}
TEST(ResourceOpAnalyzerTest, SingleResourceMultipleUserWithPassThrough) {
FunctionLibraryDefinition flib_def(OpRegistry::Global(),
FunctionDefLibrary());
GraphDefBuilder builder(GraphDefBuilder::kFailImmediately, &flib_def);
auto opts = builder.opts();
auto op_reg = opts.op_registry();
{
NodeBuilder stack_size_placeholder_builder("stack_size", "Placeholder",
op_reg);
stack_size_placeholder_builder.Attr("dtype", DT_INT32);
Node* stack_size_placeholder =
opts.FinalizeBuilder(&stack_size_placeholder_builder);
NodeBuilder stack_op_builder("stack_op", "StackV2", op_reg);
stack_op_builder.Input(stack_size_placeholder).Attr("elem_type", DT_FLOAT);
Node* stack_op = opts.FinalizeBuilder(&stack_op_builder);
NodeBuilder resource_identity_builder("resource_identity", "Identity",
op_reg);
resource_identity_builder.Input(stack_op);
Node* resource_identity = opts.FinalizeBuilder(&resource_identity_builder);
NodeBuilder stack_close0_builder("stack_close0", "StackCloseV2", op_reg);
stack_close0_builder.Input(resource_identity);
opts.FinalizeBuilder(&stack_close0_builder);
NodeBuilder stack_close1_builder("stack_close1", "StackCloseV2", op_reg);
stack_close1_builder.Input(resource_identity);
opts.FinalizeBuilder(&stack_close1_builder);
}
GraphDef graphdef;
TF_EXPECT_OK(builder.ToGraphDef(&graphdef));
absl::flat_hash_map<std::string, absl::flat_hash_set<std::string>> expected;
expected[":stack_op:StackV2"] = absl::flat_hash_set<std::string>(
{":resource_identity:Identity", ":stack_close0:StackCloseV2",
":stack_close1:StackCloseV2"});
AnalyzeAndVerify(graphdef, &flib_def, expected);
}
TEST(ResourceOpAnalyzerTest, MultipleResourceMultipleUserNoPassThrough) {
FunctionLibraryDefinition flib_def(OpRegistry::Global(),
FunctionDefLibrary());
GraphDefBuilder builder(GraphDefBuilder::kFailImmediately, &flib_def);
auto opts = builder.opts();
auto op_reg = opts.op_registry();
{
NodeBuilder stack_size_placeholder_builder("stack_size", "Placeholder",
op_reg);
stack_size_placeholder_builder.Attr("dtype", DT_INT32);
Node* stack_size_placeholder =
opts.FinalizeBuilder(&stack_size_placeholder_builder);
NodeBuilder stack_op0_builder("stack_op0", "StackV2", op_reg);
stack_op0_builder.Input(stack_size_placeholder).Attr("elem_type", DT_FLOAT);
Node* stack_op0 = opts.FinalizeBuilder(&stack_op0_builder);
NodeBuilder stack_close0_builder("stack_close0", "StackCloseV2", op_reg);
stack_close0_builder.Input(stack_op0);
opts.FinalizeBuilder(&stack_close0_builder);
NodeBuilder stack_close1_builder("stack_close1", "StackCloseV2", op_reg);
stack_close1_builder.Input(stack_op0);
opts.FinalizeBuilder(&stack_close1_builder);
NodeBuilder stack_op1_builder("stack_op1", "StackV2", op_reg);
stack_op1_builder.Input(stack_size_placeholder).Attr("elem_type", DT_FLOAT);
Node* stack_op1 = opts.FinalizeBuilder(&stack_op1_builder);
NodeBuilder stack_close2_builder("stack_close2", "StackCloseV2", op_reg);
stack_close2_builder.Input(stack_op1);
opts.FinalizeBuilder(&stack_close2_builder);
NodeBuilder stack_close3_builder("stack_close3", "StackCloseV2", op_reg);
stack_close3_builder.Input(stack_op1);
opts.FinalizeBuilder(&stack_close3_builder);
}
GraphDef graphdef;
TF_EXPECT_OK(builder.ToGraphDef(&graphdef));
absl::flat_hash_map<std::string, absl::flat_hash_set<std::string>> expected;
expected[":stack_op0:StackV2"] = absl::flat_hash_set<std::string>(
{":stack_close0:StackCloseV2", ":stack_close1:StackCloseV2"});
expected[":stack_op1:StackV2"] = absl::flat_hash_set<std::string>(
{":stack_close2:StackCloseV2", ":stack_close3:StackCloseV2"});
AnalyzeAndVerify(graphdef, &flib_def, expected);
}
TEST(ResourceOpAnalyzerTest, MultipleResourceMultipleUserWithPassThrough) {
FunctionLibraryDefinition flib_def(OpRegistry::Global(),
FunctionDefLibrary());
GraphDefBuilder builder(GraphDefBuilder::kFailImmediately, &flib_def);
auto opts = builder.opts();
auto op_reg = opts.op_registry();
{
NodeBuilder stack_size_placeholder_builder("stack_size", "Placeholder",
op_reg);
stack_size_placeholder_builder.Attr("dtype", DT_INT32);
Node* stack_size_placeholder =
opts.FinalizeBuilder(&stack_size_placeholder_builder);
NodeBuilder stack_op0_builder("stack_op0", "StackV2", op_reg);
stack_op0_builder.Input(stack_size_placeholder).Attr("elem_type", DT_FLOAT);
Node* stack_op0 = opts.FinalizeBuilder(&stack_op0_builder);
NodeBuilder stack_op1_builder("stack_op1", "StackV2", op_reg);
stack_op1_builder.Input(stack_size_placeholder).Attr("elem_type", DT_FLOAT);
Node* stack_op1 = opts.FinalizeBuilder(&stack_op1_builder);
NodeBuilder identity_n_builder("identity_n", "IdentityN", op_reg);
identity_n_builder.Input({stack_op0, stack_size_placeholder, stack_op1});
NodeBuilder stack_close0_builder("stack_close0", "StackCloseV2", op_reg);
stack_close0_builder.Input(stack_op0);
opts.FinalizeBuilder(&stack_close0_builder);
NodeBuilder stack_close1_builder("stack_close1", "StackCloseV2", op_reg);
stack_close1_builder.Input(stack_op0);
opts.FinalizeBuilder(&stack_close1_builder);
NodeBuilder stack_close2_builder("stack_close2", "StackCloseV2", op_reg);
stack_close2_builder.Input(stack_op1);
opts.FinalizeBuilder(&stack_close2_builder);
NodeBuilder stack_close3_builder("stack_close3", "StackCloseV2", op_reg);
stack_close3_builder.Input(stack_op1);
opts.FinalizeBuilder(&stack_close3_builder);
}
GraphDef graphdef;
TF_EXPECT_OK(builder.ToGraphDef(&graphdef));
absl::flat_hash_map<std::string, absl::flat_hash_set<std::string>> expected;
expected[":stack_op0:StackV2"] = absl::flat_hash_set<std::string>(
{":stack_close0:StackCloseV2", ":stack_close1:StackCloseV2"});
expected[":stack_op1:StackV2"] = absl::flat_hash_set<std::string>(
{":stack_close2:StackCloseV2", ":stack_close3:StackCloseV2"});
AnalyzeAndVerify(graphdef, &flib_def, expected);
}
TEST(ResourceOpAnalyzerTest, ResourcePassThroughFunction) {
auto library = std::make_unique<FunctionDefLibrary>();
*library->add_function() = FunctionDefHelper::Define(
"pass_through_function",
{"in: resource"},
{"out: resource"},
{},
{{{"out"}, "Identity", {"in"}, {{"T", DataType::DT_RESOURCE}}}});
FunctionLibraryDefinition flib_def(OpRegistry::Global(), *library);
GraphDefBuilder builder(GraphDefBuilder::kFailImmediately, &flib_def);
auto opts = builder.opts();
auto op_reg = opts.op_registry();
{
NodeBuilder stack_size_placeholder_builder("stack_size", "Placeholder",
op_reg);
stack_size_placeholder_builder.Attr("dtype", DT_INT32);
Node* stack_size_placeholder =
opts.FinalizeBuilder(&stack_size_placeholder_builder);
NodeBuilder stack_op_builder("stack_op", "StackV2", op_reg);
stack_op_builder.Input(stack_size_placeholder).Attr("elem_type", DT_FLOAT);
Node* stack_op = opts.FinalizeBuilder(&stack_op_builder);
NodeBuilder pass_through_fn_builder("pass_through_fn",
"pass_through_function", op_reg);
pass_through_fn_builder.Input(stack_op);
Node* pass_through_fn = opts.FinalizeBuilder(&pass_through_fn_builder);
NodeBuilder stack_close_builder("stack_close", "StackCloseV2", op_reg);
stack_close_builder.Input(pass_through_fn);
opts.FinalizeBuilder(&stack_close_builder);
}
GraphDef graphdef;
TF_EXPECT_OK(builder.ToGraphDef(&graphdef));
absl::flat_hash_map<std::string, absl::flat_hash_set<std::string>> expected;
expected[":stack_op:StackV2"] = absl::flat_hash_set<std::string>(
{":stack_close:StackCloseV2", ":pass_through_fn:pass_through_function",
"pass_through_function:out:Identity"});
AnalyzeAndVerify(graphdef, &flib_def, expected);
}
TEST(ResourceOpAnalyzerTest, ResourceUserInFunction) {
auto library = std::make_unique<FunctionDefLibrary>();
*library->add_function() = FunctionDefHelper::Define(
"resource_user_function",
{"in: resource"},
{},
{},
{{{"stack_close"},
"StackCloseV2",
{"in"},
{{"T", DataType::DT_RESOURCE}}}});
FunctionLibraryDefinition flib_def(OpRegistry::Global(), *library);
GraphDefBuilder builder(GraphDefBuilder::kFailImmediately, &flib_def);
auto opts = builder.opts();
auto op_reg = opts.op_registry();
{
NodeBuilder stack_size_placeholder_builder("stack_size", "Placeholder",
op_reg);
stack_size_placeholder_builder.Attr("dtype", DT_INT32);
Node* stack_size_placeholder =
opts.FinalizeBuilder(&stack_size_placeholder_builder);
NodeBuilder stack_op_builder("stack_op", "StackV2", op_reg);
stack_op_builder.Input(stack_size_placeholder).Attr("elem_type", DT_FLOAT);
Node* stack_op = opts.FinalizeBuilder(&stack_op_builder);
NodeBuilder resource_user_fn_builder("resource_user_function",
"resource_user_function", op_reg);
resource_user_fn_builder.Input(stack_op);
opts.FinalizeBuilder(&resource_user_fn_builder);
}
GraphDef graphdef;
TF_EXPECT_OK(builder.ToGraphDef(&graphdef));
absl::flat_hash_map<std::string, absl::flat_hash_set<std::string>> expected;
expected[":stack_op:StackV2"] = absl::flat_hash_set<std::string>(
{":resource_user_function:resource_user_function",
"resource_user_function:stack_close:StackCloseV2"});
AnalyzeAndVerify(graphdef, &flib_def, expected);
}
TEST(ResourceOpAnalyzerTest, ResourceSourceInFunction) {
auto library = std::make_unique<FunctionDefLibrary>();
*library->add_function() = FunctionDefHelper::Define(
"resource_source_function",
{"in: int32"},
{"out: resource"},
{},
{{{"out"}, "StackV2", {"in"}, {{"elem_type", DataType::DT_FLOAT}}}});
FunctionLibraryDefinition flib_def(OpRegistry::Global(), *library);
GraphDefBuilder builder(GraphDefBuilder::kFailImmediately, &flib_def);
auto opts = builder.opts();
auto op_reg = opts.op_registry();
{
NodeBuilder stack_size_placeholder_builder("stack_size", "Placeholder",
op_reg);
stack_size_placeholder_builder.Attr("dtype", DT_INT32);
Node* stack_size_placeholder =
opts.FinalizeBuilder(&stack_size_placeholder_builder);
NodeBuilder resource_source_fn_builder("resource_source_function",
"resource_source_function", op_reg);
resource_source_fn_builder.Input(stack_size_placeholder);
Node* resource_source_function =
opts.FinalizeBuilder(&resource_source_fn_builder);
NodeBuilder stack_close_builder("stack_close", "StackCloseV2", op_reg);
stack_close_builder.Input(resource_source_function);
opts.FinalizeBuilder(&stack_close_builder);
}
GraphDef graphdef;
TF_EXPECT_OK(builder.ToGraphDef(&graphdef));
absl::flat_hash_map<std::string, absl::flat_hash_set<std::string>> expected;
expected["resource_source_function:out:StackV2"] =
absl::flat_hash_set<std::string>({":stack_close:StackCloseV2"});
AnalyzeAndVerify(graphdef, &flib_def, expected);
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2xla/resource_util.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2xla/resource_util_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
0e7b4015-16a6-4cca-abaa-bdd39f477c8e | cpp | google/libaddressinput | region_data_constants | cpp/src/region_data_constants.cc | cpp/test/region_data_constants_test.cc | #include "region_data_constants.h"
#include <libaddressinput/address_field.h>
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <map>
#include <string>
#include <vector>
#include "address_field_util.h"
#include "format_element.h"
#include "lookup_key.h"
#include "util/size.h"
namespace i18n {
namespace addressinput {
namespace {
struct RegionData {
const char* const region_code;
const char* const data;
};
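// Per-region address metadata keyed by CLDR region code: address format,
// required fields, postal code examples, and languages.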
const RegionData kRegionData[] = {
{"AC", "{"
R"("fmt":"%N%n%O%n%A%n%C%n%Z",)"
R"("zipex":"ASCN 1ZZ",)"
R"("languages":"en")"
"}"},
{"AD", "{"
R"("fmt":"%N%n%O%n%A%n%Z %C",)"
R"("zipex":"AD100,AD501,AD700",)"
R"("posturl":"http:
R"("languages":"ca")"
"}"},
{"AE", "{"
R"("fmt":"%N%n%O%n%A%n%S",)"
R"("lfmt":"%N%n%O%n%A%n%S",)"
R"("require":"AS",)"
R"("state_name_type":"emirate",)"
R"("languages":"ar")"
"}"},
{"AF", "{"
R"("fmt":"%N%n%O%n%A%n%C%n%Z",)"
R"("zipex":"1001,2601,3801",)"
R"("languages":"fa~ps~uz-Arab~tk")"
"}"},
{"AG", "{"
R"("require":"A",)"
R"("languages":"en")"
"}"},
{"AI", "{"
R"("fmt":"%N%n%O%n%A%n%C%n%Z",)"
R"("zipex":"2640",)"
R"("languages":"en")"
"}"},
{"AL", "{"
R"("fmt":"%N%n%O%n%A%n%Z%n%C",)"
R"("zipex":"1001,1017,3501",)"
R"("languages":"sq")"
"}"},
{"AM", "{"
R"("fmt":"%N%n%O%n%A%n%Z%n%C%n%S",)"
R"("lfmt":"%N%n%O%n%A%n%Z%n%C%n%S",)"
R"("zipex":"375010,0002,0010",)"
R"("languages":"hy")"
"}"},
{"AO", "{"
R"("languages":"pt")"
"}"},
{"AQ", "{"
"}"},
{"AR", "{"
R"("fmt":"%N%n%O%n%A%n%Z %C%n%S",)"
R"("zipex":"C1070AAM,C1000WAM,B1000TBU,X5187XAB",)"
R"("posturl":"http:
R"("languages":"es")"
"}"},
{"AS", "{"
R"("fmt":"%N%n%O%n%A%n%C %S %Z",)"
R"("require":"ACSZ",)"
R"("zip_name_type":"zip",)"
R"("state_name_type":"state",)"
R"("zipex":"96799",)"
R"("posturl":"http:
R"("languages":"sm~en")"
"}"},
{"AT", "{"
R"("fmt":"%O%n%N%n%A%n%Z %C",)"
R"("require":"ACZ",)"
R"("zipex":"1010,3741",)"
R"("posturl":"http:
R"("languages":"de~hr~sl~hu")"
"}"},
{"AU", "{"
R"("fmt":"%O%n%N%n%A%n%C %S %Z",)"
R"("require":"ACSZ",)"
R"("state_name_type":"state",)"
R"("locality_name_type":"suburb",)"
R"("zipex":"2060,3171,6430,4000,4006,3001",)"
R"("posturl":"http:
R"("languages":"en")"
"}"},
{"AW", "{"
R"("languages":"nl~pap")"
"}"},
{"AX", "{"
R"("fmt":"%O%n%N%n%A%nAX-%Z %C%nÅLAND",)"
R"("require":"ACZ",)"
R"("zipex":"22150,22550,22240,22710,22270,22730,22430",)"
R"("posturl":"https:
R"("languages":"sv")"
"}"},
{"AZ", "{"
R"("fmt":"%N%n%O%n%A%nAZ %Z %C",)"
R"("zipex":"1000",)"
R"("languages":"az~az-Cyrl")"
"}"},
{"BA", "{"
R"("fmt":"%N%n%O%n%A%n%Z %C",)"
R"("zipex":"71000",)"
R"("languages":"bs~bs-Cyrl~hr~sr~sr-Latn")"
"}"},
{"BB", "{"
R"("fmt":"%N%n%O%n%A%n%C, %S %Z",)"
R"("state_name_type":"parish",)"
R"("zipex":"BB23026,BB22025",)"
R"("languages":"en")"
"}"},
{"BD", "{"
R"("fmt":"%N%n%O%n%A%n%C - %Z",)"
R"("zipex":"1340,1000",)"
R"("posturl":"https:
R"("languages":"bn")"
"}"},
{"BE", "{"
R"("fmt":"%O%n%N%n%A%n%Z %C",)"
R"("require":"ACZ",)"
R"("zipex":"4000,1000",)"
R"("posturl":"https:
R"("languages":"nl~fr~de")"
"}"},
{"BF", "{"
R"("fmt":"%N%n%O%n%A%n%C %X",)"
R"("languages":"fr")"
"}"},
{"BG", "{"
R"("fmt":"%N%n%O%n%A%n%Z %C",)"
R"("zipex":"1000,1700",)"
R"("posturl":"http:
R"("languages":"bg")"
"}"},
{"BH", "{"
R"("fmt":"%N%n%O%n%A%n%C %Z",)"
R"("zipex":"317",)"
R"("languages":"ar")"
"}"},
{"BI", "{"
R"("languages":"rn~fr~en")"
"}"},
{"BJ", "{"
R"("languages":"fr")"
"}"},
{"BL", "{"
R"("fmt":"%O%n%N%n%A%n%Z %C %X",)"
R"("require":"ACZ",)"
R"("zipex":"97100",)"
R"("posturl":"https:
R"("languages":"fr")"
"}"},
{"BM", "{"
R"("fmt":"%N%n%O%n%A%n%C %Z",)"
R"("zipex":"FL 07,HM GX,HM 12",)"
R"("posturl":"http:
R"("languages":"en")"
"}"},
{"BN", "{"
R"("fmt":"%N%n%O%n%A%n%C %Z",)"
R"("zipex":"BT2328,KA1131,BA1511",)"
R"("posturl":"http:
R"("languages":"ms~ms-Arab")"
"}"},
{"BO", "{"
R"("languages":"es~qu~ay")"
"}"},
{"BQ", "{"
R"("languages":"nl")"
"}"},
{"BR", "{"
R"("fmt":"%O%n%N%n%A%n%D%n%C-%S%n%Z",)"
R"("require":"ASCZ",)"
R"("state_name_type":"state",)"
R"("sublocality_name_type":"neighborhood",)"
R"("zipex":"40301-110,70002-900",)"
R"("posturl":"http:
R"("languages":"pt")"
"}"},
{"BS", "{"
R"("fmt":"%N%n%O%n%A%n%C, %S",)"
R"("state_name_type":"island",)"
R"("languages":"en")"
"}"},
{"BT", "{"
R"("fmt":"%N%n%O%n%A%n%C %Z",)"
R"("zipex":"11001,31101,35003",)"
R"("posturl":"https:
R"("languages":"dz")"
"}"},
{"BV", "{"
"}"},
{"BW", "{"
R"("languages":"en~tn")"
"}"},
{"BY", "{"
R"("fmt":"%O%n%N%n%A%n%Z, %C%n%S",)"
R"("zipex":"223016,225860,220050",)"
R"("posturl":"http:
R"("languages":"be~ru")"
"}"},
{"BZ", "{"
R"("languages":"en")"
"}"},
{"CA", "{"
R"("fmt":"%N%n%O%n%A%n%C %S %Z",)"
R"("require":"ACSZ",)"
R"("zipex":"H3Z 2Y7,V8X 3X4,T0L 1K0,T0H 1A0,K1A 0B1",)"
R"("posturl":"https:
R"("languages":"en~fr")"
"}"},
{"CC", "{"
R"("fmt":"%O%n%N%n%A%n%C %S %Z",)"
R"("zipex":"6799",)"
R"("languages":"en")"
"}"},
{"CD", "{"
R"("languages":"sw~lua~fr~ln~kg")"
"}"},
{"CF", "{"
R"("languages":"fr~sg")"
"}"},
{"CG", "{"
R"("languages":"fr")"
"}"},
{"CH", "{"
R"("fmt":"%O%n%N%n%A%nCH-%Z %C",)"
R"("require":"ACZ",)"
R"("zipex":"2544,1211,1556,3030",)"
R"("posturl":"http:
R"("languages":"de~gsw~fr~it~rm")"
"}"},
{"CI", "{"
R"("fmt":"%N%n%O%n%X %A %C %X",)"
R"("languages":"fr")"
"}"},
{"CK", "{"
R"("languages":"en")"
"}"},
{"CL", "{"
R"("fmt":"%N%n%O%n%A%n%Z %C%n%S",)"
R"("zipex":"8340457,8720019,1230000,8329100",)"
R"("languages":"es")"
"}"},
{"CM", "{"
R"("languages":"fr~en")"
"}"},
{"CN", "{"
R"("fmt":"%Z%n%S%C%D%n%A%n%O%n%N",)"
R"("lfmt":"%N%n%O%n%A%n%D%n%C%n%S, %Z",)"
R"("require":"ACSZ",)"
R"("sublocality_name_type":"district",)"
R"("zipex":"266033,317204,100096,100808",)"
R"("posturl":"http:
R"("languages":"zh")"
"}"},
{"CO", "{"
R"("fmt":"%N%n%O%n%A%n%D%n%C, %S, %Z",)"
R"("require":"AS",)"
R"("state_name_type":"department",)"
R"("zipex":"111221,130001,760011",)"
R"("posturl":"http:
R"("languages":"es")"
"}"},
{"CR", "{"
R"("fmt":"%N%n%O%n%A%n%S, %C%n%Z",)"
R"("require":"ACS",)"
R"("zipex":"1000,2010,1001",)"
R"("posturl":"https:
R"("languages":"es")"
"}"},
{"CU", "{"
R"("fmt":"%N%n%O%n%A%n%C %S%n%Z",)"
R"("zipex":"10700",)"
R"("languages":"es")"
"}"},
{"CV", "{"
R"("fmt":"%N%n%O%n%A%n%Z %C%n%S",)"
R"("state_name_type":"island",)"
R"("zipex":"7600",)"
R"("languages":"pt")"
"}"},
{"CW", "{"
R"("languages":"pap~nl")"
"}"},
{"CX", "{"
R"("fmt":"%O%n%N%n%A%n%C %S %Z",)"
R"("zipex":"6798",)"
R"("languages":"en")"
"}"},
{"CY", "{"
R"("fmt":"%N%n%O%n%A%n%Z %C",)"
R"("zipex":"2008,3304,1900",)"
R"("languages":"el~tr")"
"}"},
{"CZ", "{"
R"("fmt":"%N%n%O%n%A%n%Z %C",)"
R"("require":"ACZ",)"
R"("zipex":"100 00,251 66,530 87,110 00,225 99",)"
R"("posturl":"http:
R"("languages":"cs")"
"}"},
{"DE", "{"
R"("fmt":"%N%n%O%n%A%n%Z %C",)"
R"("require":"ACZ",)"
R"("zipex":"26133,53225",)"
R"("posturl":"http:
R"("languages":"de~frr")"
"}"},
{"DJ", "{"
R"("languages":"ar~fr")"
"}"},
{"DK", "{"
R"("fmt":"%N%n%O%n%A%n%Z %C",)"
R"("require":"ACZ",)"
R"("zipex":"8660,1566",)"
R"("posturl":"http:
R"("languages":"da~de~kl")"
"}"},
{"DM", "{"
R"("languages":"en")"
"}"},
{"DO", "{"
R"("fmt":"%N%n%O%n%A%n%Z %C",)"
R"("zipex":"11903,10101",)"
R"("posturl":"http:
R"("languages":"es")"
"}"},
{"DZ", "{"
R"("fmt":"%N%n%O%n%A%n%Z %C",)"
R"("zipex":"40304,16027",)"
R"("languages":"ar~fr")"
"}"},
{"EC", "{"
R"("fmt":"%N%n%O%n%A%n%Z%n%C",)"
R"("zipex":"090105,092301",)"
R"("posturl":"http:
R"("languages":"es~qu")"
"}"},
{"EE", "{"
R"("fmt":"%N%n%O%n%A%n%Z %C %S",)"
R"("require":"ACZ",)"
R"("zipex":"69501,11212",)"
R"("posturl":"https:
R"("languages":"et")"
"}"},
{"EG", "{"
R"("fmt":"%N%n%O%n%A%n%C%n%S%n%Z",)"
R"("lfmt":"%N%n%O%n%A%n%C%n%S%n%Z",)"
R"("zipex":"4460232,5734356",)"
R"("languages":"ar")"
"}"},
{"EH", "{"
R"("fmt":"%N%n%O%n%A%n%Z %C",)"
R"("zipex":"70000,72000",)"
R"("languages":"ar")"
"}"},
{"ER", "{"
R"("languages":"ti~en~ar")"
"}"},
{"ES", "{"
R"("fmt":"%N%n%O%n%A%n%Z %C %S",)"
R"("require":"ACSZ",)"
R"("zipex":"28039,28300,28070",)"
R"("posturl":"http:
R"("languages":"es~ca~gl~eu")"
"}"},
{"ET", "{"
R"("fmt":"%N%n%O%n%A%n%Z %C",)"
R"("zipex":"1000",)"
R"("languages":"am")"
"}"},
{"FI", "{"
R"("fmt":"%O%n%N%n%A%nFI-%Z %C",)"
R"("require":"ACZ",)"
R"("zipex":"00550,00011",)"
R"("posturl":"https:
R"("languages":"fi~sv~sms")"
"}"},
{"FJ", "{"
R"("languages":"en~hif~fj")"
"}"},
{"FK", "{"
R"("fmt":"%N%n%O%n%A%n%C%n%Z",)"
R"("require":"ACZ",)"
R"("zipex":"FIQQ 1ZZ",)"
R"("languages":"en")"
"}"},
{"FM", "{"
R"("fmt":"%N%n%O%n%A%n%C %S %Z",)"
R"("require":"ACSZ",)"
R"("zip_name_type":"zip",)"
R"("state_name_type":"state",)"
R"("zipex":"96941,96944",)"
R"("posturl":"http:
R"("languages":"en")"
"}"},
{"FO", "{"
R"("fmt":"%N%n%O%n%A%nFO%Z %C",)"
R"("zipex":"100",)"
R"("posturl":"https:
R"("languages":"fo")"
"}"},
{"FR", "{"
R"("fmt":"%O%n%N%n%A%n%Z %C",)"
R"("require":"ACZ",)"
R"("zipex":"33380,34092,33506",)"
R"("posturl":"https:
R"("languages":"fr")"
"}"},
{"GA", "{"
R"("languages":"fr")"
"}"},
{"GB", "{"
R"("fmt":"%N%n%O%n%A%n%C%n%Z",)"
R"("require":"ACZ",)"
R"("locality_name_type":"post_town",)"
R"("zipex":"EC1Y 8SY,GIR 0AA,M2 5BQ,M34 4AB,CR0 2YR,DN16 9AA,W1A 4ZZ,EC1A 1HQ,OX14 4PG,BS18 8HF,NR25 7HG,RH6 0NP,BH23 6AA,B6 5BA,SO23 9AP,PO1 3AX,BFPO 61",)"
R"("posturl":"http:
R"("languages":"en~cy~ga~gd")"
"}"},
{"GD", "{"
R"("languages":"en")"
"}"},
{"GE", "{"
R"("fmt":"%N%n%O%n%A%n%Z %C",)"
R"("zipex":"0101",)"
R"("posturl":"http:
R"("languages":"ka~ab~os")"
"}"},
{"GF", "{"
R"("fmt":"%O%n%N%n%A%n%Z %C %X",)"
R"("require":"ACZ",)"
R"("zipex":"97300",)"
R"("posturl":"https:
R"("languages":"fr")"
"}"},
{"GG", "{"
R"("fmt":"%N%n%O%n%A%n%C%nGUERNSEY%n%Z",)"
R"("require":"ACZ",)"
R"("zipex":"GY1 1AA,GY2 2BT",)"
R"("posturl":"http:
R"("languages":"en")"
"}"},
{"GH", "{"
R"("languages":"ak~en~ee~gaa")"
"}"},
{"GI", "{"
R"("fmt":"%N%n%O%n%A%nGIBRALTAR%n%Z",)"
R"("require":"A",)"
R"("zipex":"GX11 1AA",)"
R"("languages":"en")"
"}"},
{"GL", "{"
R"("fmt":"%N%n%O%n%A%n%Z %C",)"
R"("require":"ACZ",)"
R"("zipex":"3900,3950,3911",)"
R"("languages":"kl")"
"}"},
{"GM", "{"
R"("languages":"en")"
"}"},
{"GN", "{"
R"("fmt":"%N%n%O%n%Z %A %C",)"
R"("zipex":"001,200,100",)"
R"("languages":"fr")"
"}"},
{"GP", "{"
R"("fmt":"%O%n%N%n%A%n%Z %C %X",)"
R"("require":"ACZ",)"
R"("zipex":"97100",)"
R"("posturl":"https:
R"("languages":"fr")"
"}"},
{"GQ", "{"
R"("languages":"es~fr~pt")"
"}"},
{"GR", "{"
R"("fmt":"%N%n%O%n%A%n%Z %C",)"
R"("require":"ACZ",)"
R"("zipex":"151 24,151 10,101 88",)"
R"("posturl":"https:
R"("languages":"el")"
"}"},
{"GS", "{"
R"("fmt":"%N%n%O%n%A%n%n%C%n%Z",)"
R"("require":"ACZ",)"
R"("zipex":"SIQQ 1ZZ")"
"}"},
{"GT", "{"
R"("fmt":"%N%n%O%n%A%n%Z- %C",)"
R"("zipex":"09001,01501",)"
R"("languages":"es~quc")"
"}"},
{"GU", "{"
R"("fmt":"%N%n%O%n%A%n%C %Z",)"
R"("require":"ACZ",)"
R"("zip_name_type":"zip",)"
R"("zipex":"96910,96931",)"
R"("posturl":"http:
R"("languages":"en~ch")"
"}"},
{"GW", "{"
R"("fmt":"%N%n%O%n%A%n%Z %C",)"
R"("zipex":"1000,1011",)"
R"("languages":"pt")"
"}"},
{"GY", "{"
R"("languages":"en")"
"}"},
{"HK", "{"
R"("fmt":"%S%n%C%n%A%n%O%n%N",)"
R"("lfmt":"%N%n%O%n%A%n%C%n%S",)"
R"("require":"AS",)"
R"("state_name_type":"area",)"
R"("locality_name_type":"district",)"
R"("languages":"zh-Hant~en")"
"}"},
{"HM", "{"
R"("fmt":"%O%n%N%n%A%n%C %S %Z",)"
R"("zipex":"7050")"
"}"},
{"HN", "{"
R"("fmt":"%N%n%O%n%A%n%C, %S%n%Z",)"
R"("require":"ACS",)"
R"("state_name_type":"department",)"
R"("zipex":"31301",)"
R"("languages":"es")"
"}"},
{"HR", "{"
R"("fmt":"%N%n%O%n%A%nHR-%Z %C",)"
R"("zipex":"10000,21001,10002",)"
R"("posturl":"http:
R"("languages":"hr~it~vec")"
"}"},
{"HT", "{"
R"("fmt":"%N%n%O%n%A%nHT%Z %C",)"
R"("zipex":"6120,5310,6110,8510",)"
R"("languages":"ht~fr")"
"}"},
{"HU", "{"
R"("fmt":"%N%n%O%n%C%n%A%n%Z",)"
R"("require":"ACZ",)"
R"("zipex":"1037,2380,1540",)"
R"("posturl":"http:
R"("languages":"hu")"
"}"},
{"ID", "{"
R"("fmt":"%N%n%O%n%A%n%C%n%S %Z",)"
R"("require":"AS",)"
R"("zipex":"40115",)"
R"("languages":"id")"
"}"},
{"IE", "{"
R"("fmt":"%N%n%O%n%A%n%D%n%C%n%S%n%Z",)"
R"("zip_name_type":"eircode",)"
R"("state_name_type":"county",)"
R"("sublocality_name_type":"townland",)"
R"("zipex":"A65 F4E2",)"
R"("posturl":"https:
R"("languages":"en")"
"}"},
{"IL", "{"
R"("fmt":"%N%n%O%n%A%n%C %Z",)"
R"("zipex":"9614303",)"
R"("posturl":"http:
R"("languages":"iw~ar")"
"}"},
{"IM", "{"
R"("fmt":"%N%n%O%n%A%n%C%n%Z",)"
R"("require":"ACZ",)"
R"("zipex":"IM2 1AA,IM99 1PS",)"
R"("posturl":"https:
R"("languages":"en~gv")"
"}"},
{"IN", "{"
R"("fmt":"%N%n%O%n%A%n%T%n%F%n%L%n%C %Z%n%S",)"
R"("require":"ACSZ",)"
R"("zip_name_type":"pin",)"
R"("state_name_type":"state",)"
R"("zipex":"110034,110001",)"
R"("posturl":"https:
R"("languages":"en~hi")"
"}"},
{"IO", "{"
R"("fmt":"%N%n%O%n%A%n%C%n%Z",)"
R"("require":"ACZ",)"
R"("zipex":"BBND 1ZZ",)"
R"("languages":"en")"
"}"},
{"IQ", "{"
R"("fmt":"%O%n%N%n%A%n%C, %S%n%Z",)"
R"("require":"ACS",)"
R"("zipex":"31001",)"
R"("languages":"ar")"
"}"},
{"IR", "{"
R"("fmt":"%O%n%N%n%S%n%C, %D%n%A%n%Z",)"
R"("sublocality_name_type":"neighborhood",)"
R"("zipex":"11936-12345",)"
R"("languages":"fa")"
"}"},
{"IS", "{"
R"("fmt":"%N%n%O%n%A%n%Z %C",)"
R"("zipex":"320,121,220,110",)"
R"("posturl":"https:
R"("languages":"is")"
"}"},
{"IT", "{"
R"("fmt":"%N%n%O%n%A%n%Z %C %S",)"
R"("require":"ACSZ",)"
R"("zipex":"00144,47037,39049",)"
R"("posturl":"http:
R"("languages":"it")"
"}"},
{"JE", "{"
R"("fmt":"%N%n%O%n%A%n%C%nJERSEY%n%Z",)"
R"("require":"ACZ",)"
R"("zipex":"JE1 1AA,JE2 2BT",)"
R"("posturl":"http:
R"("languages":"en")"
"}"},
{"JM", "{"
R"("fmt":"%N%n%O%n%A%n%C%n%S %X",)"
R"("require":"ACS",)"
R"("state_name_type":"parish",)"
R"("languages":"en")"
"}"},
{"JO", "{"
R"("fmt":"%N%n%O%n%A%n%C %Z",)"
R"("zipex":"11937,11190",)"
R"("languages":"ar")"
"}"},
{"JP", "{"
R"("fmt":"〒%Z%n%S%n%A%n%O%n%N",)"
R"("lfmt":"%N%n%O%n%A, %S%n%Z",)"
R"("require":"ASZ",)"
R"("state_name_type":"prefecture",)"
R"("zipex":"154-0023,350-1106,951-8073,112-0001,208-0032,231-0012",)"
R"("posturl":"http:
R"("languages":"ja")"
"}"},
{"KE", "{"
R"("fmt":"%N%n%O%n%A%n%C%n%Z",)"
R"("zipex":"20100,00100",)"
R"("languages":"sw~en")"
"}"},
{"KG", "{"
R"("fmt":"%N%n%O%n%A%n%Z %C",)"
R"("zipex":"720001",)"
R"("languages":"ky~ru")"
"}"},
{"KH", "{"
R"("fmt":"%N%n%O%n%A%n%C %Z",)"
R"("zipex":"120101,120108",)"
R"("posturl":"https:
R"("languages":"km")"
"}"},
{"KI", "{"
R"("fmt":"%N%n%O%n%A%n%S%n%C",)"
R"("state_name_type":"island",)"
R"("languages":"en~gil")"
"}"},
{"KM", "{"
R"("languages":"ar~fr~zdj~wni")"
"}"},
{"KN", "{"
R"("fmt":"%N%n%O%n%A%n%C, %S",)"
R"("require":"ACS",)"
R"("state_name_type":"island",)"
R"("languages":"en")"
"}"},
{"KP", "{"
R"("fmt":"%Z%n%S%n%C%n%A%n%O%n%N",)"
R"("lfmt":"%N%n%O%n%A%n%C%n%S, %Z",)"
R"("languages":"ko")"
"}"},
{"KR", "{"
R"("fmt":"%S %C%D%n%A%n%O%n%N%n%Z",)"
R"("lfmt":"%N%n%O%n%A%n%D%n%C%n%S%n%Z",)"
R"("require":"ACSZ",)"
R"("state_name_type":"do_si",)"
R"("sublocality_name_type":"district",)"
R"("zipex":"03051",)"
R"("posturl":"http:
R"("languages":"ko")"
"}"},
{"KW", "{"
R"("fmt":"%N%n%O%n%A%n%Z %C",)"
R"("zipex":"54541,54551,54404,13009",)"
R"("languages":"ar")"
"}"},
{"KY", "{"
R"("fmt":"%N%n%O%n%A%n%S %Z",)"
R"("require":"AS",)"
R"("state_name_type":"island",)"
R"("zipex":"KY1-1100,KY1-1702,KY2-2101",)"
R"("posturl":"http:
R"("languages":"en")"
"}"},
{"KZ", "{"
R"("fmt":"%Z%n%S%n%C%n%A%n%O%n%N",)"
R"("zipex":"040900,050012",)"
R"("languages":"kk~ru")"
"}"},
{"LA", "{"
R"("fmt":"%N%n%O%n%A%n%Z %C",)"
R"("zipex":"01160,01000",)"
R"("languages":"lo")"
"}"},
{"LB", "{"
R"("fmt":"%N%n%O%n%A%n%C %Z",)"
R"("zipex":"2038 3054,1107 2810,1000",)"
R"("languages":"ar")"
"}"},
{"LC", "{"
R"("languages":"en")"
"}"},
{"LI", "{"
R"("fmt":"%O%n%N%n%A%nFL-%Z %C",)"
R"("require":"ACZ",)"
R"("zipex":"9496,9491,9490,9485",)"
R"("posturl":"http:
R"("languages":"de~gsw")"
"}"},
{"LK", "{"
R"("fmt":"%N%n%O%n%A%n%C%n%Z",)"
R"("zipex":"20000,00100",)"
R"("posturl":"http:
R"("languages":"si~ta")"
"}"},
{"LR", "{"
R"("fmt":"%N%n%O%n%A%n%Z %C",)"
R"("zipex":"1000",)"
R"("languages":"en")"
"}"},
{"LS", "{"
R"("fmt":"%N%n%O%n%A%n%C %Z",)"
R"("zipex":"100",)"
R"("languages":"st~en")"
"}"},
{"LT", "{"
R"("fmt":"%O%n%N%n%A%nLT-%Z %C %S",)"
R"("require":"ACZ",)"
R"("zipex":"04340,03500",)"
R"("posturl":"http:
R"("languages":"lt")"
"}"},
{"LU", "{"
R"("fmt":"%O%n%N%n%A%nL-%Z %C",)"
R"("require":"ACZ",)"
R"("zipex":"4750,2998",)"
R"("posturl":"https:
R"("languages":"fr~lb~de")"
"}"},
{"LV", "{"
R"("fmt":"%N%n%O%n%A%n%S%n%C, %Z",)"
R"("require":"ACZ",)"
R"("zipex":"LV-1073,LV-1000",)"
R"("posturl":"https:
R"("languages":"lv")"
"}"},
{"LY", "{"
R"("languages":"ar")"
"}"},
{"MA", "{"
R"("fmt":"%N%n%O%n%A%n%Z %C",)"
R"("zipex":"53000,10000,20050,16052",)"
R"("languages":"ar~fr~tzm")"
"}"},
{"MC", "{"
R"("fmt":"%N%n%O%n%A%nMC-%Z %C %X",)"
R"("zipex":"98000,98020,98011,98001",)"
R"("languages":"fr")"
"}"},
{"MD", "{"
R"("fmt":"%N%n%O%n%A%nMD-%Z %C",)"
R"("zipex":"2012,2019",)"
R"("languages":"ro")"
"}"},
{"ME", "{"
R"("fmt":"%N%n%O%n%A%n%Z %C",)"
R"("zipex":"81257,81258,81217,84314,85366",)"
R"("languages":"sr-Latn")"
"}"},
{"MF", "{"
R"("fmt":"%O%n%N%n%A%n%Z %C %X",)"
R"("require":"ACZ",)"
R"("zipex":"97100",)"
R"("posturl":"https:
R"("languages":"fr")"
"}"},
{"MG", "{"
R"("fmt":"%N%n%O%n%A%n%Z %C",)"
R"("zipex":"501,101",)"
R"("languages":"mg~fr~en")"
"}"},
{"MH", "{"
R"("fmt":"%N%n%O%n%A%n%C %S %Z",)"
R"("require":"ACSZ",)"
R"("zip_name_type":"zip",)"
R"("state_name_type":"state",)"
R"("zipex":"96960,96970",)"
R"("posturl":"http:
R"("languages":"en~mh")"
"}"},
{"MK", "{"
R"("fmt":"%N%n%O%n%A%n%Z %C",)"
R"("zipex":"1314,1321,1443,1062",)"
R"("languages":"mk~sq")"
"}"},
{"ML", "{"
R"("languages":"fr")"
"}"},
{"MM", "{"
R"("fmt":"%N%n%O%n%A%n%C, %Z",)"
R"("zipex":"11181",)"
R"("languages":"my")"
"}"},
{"MN", "{"
R"("fmt":"%N%n%O%n%A%n%C%n%S %Z",)"
R"("zipex":"65030,65270",)"
R"("posturl":"http:
R"("languages":"mn")"
"}"},
{"MO", "{"
R"("fmt":"%A%n%O%n%N",)"
R"("lfmt":"%N%n%O%n%A",)"
R"("require":"A",)"
R"("languages":"zh-Hant~pt")"
"}"},
{"MP", "{"
R"("fmt":"%N%n%O%n%A%n%C %S %Z",)"
R"("require":"ACSZ",)"
R"("zip_name_type":"zip",)"
R"("state_name_type":"state",)"
R"("zipex":"96950,96951,96952",)"
R"("posturl":"http:
R"("languages":"en")"
"}"},
{"MQ", "{"
R"("fmt":"%O%n%N%n%A%n%Z %C %X",)"
R"("require":"ACZ",)"
R"("zipex":"97220",)"
R"("posturl":"https:
R"("languages":"fr")"
"}"},
{"MR", "{"
R"("languages":"ar")"
"}"},
{"MS", "{"
R"("languages":"en")"
"}"},
{"MT", "{"
R"("fmt":"%N%n%O%n%A%n%C %Z",)"
R"("zipex":"NXR 01,ZTN 05,GPO 01,BZN 1130,SPB 6031,VCT 1753",)"
R"("posturl":"https:
R"("languages":"mt~en")"
"}"},
{"MU", "{"
R"("fmt":"%N%n%O%n%A%n%Z%n%C",)"
R"("zipex":"42602",)"
R"("languages":"en~fr")"
"}"},
{"MV", "{"
R"("fmt":"%N%n%O%n%A%n%C %Z",)"
R"("zipex":"20026",)"
R"("posturl":"http:
R"("languages":"dv")"
"}"},
{"MW", "{"
R"("fmt":"%N%n%O%n%A%n%C %X",)"
R"("languages":"en~ny")"
"}"},
{"MX", "{"
R"("fmt":"%N%n%O%n%A%n%D%n%Z %C, %S",)"
R"("require":"ACSZ",)"
R"("state_name_type":"state",)"
R"("sublocality_name_type":"neighborhood",)"
R"("zipex":"02860,77520,06082",)"
R"("posturl":"https:
R"("languages":"es")"
"}"},
{"MY", "{"
R"("fmt":"%N%n%O%n%A%n%D%n%Z %C%n%S",)"
R"("require":"ACZ",)"
R"("state_name_type":"state",)"
R"("sublocality_name_type":"village_township",)"
R"("zipex":"43000,50754,88990,50670",)"
R"("posturl":"http:
R"("languages":"ms")"
"}"},
{"MZ", "{"
R"("fmt":"%N%n%O%n%A%n%Z %C%S",)"
R"("zipex":"1102,1119,3212",)"
R"("languages":"pt")"
"}"},
{"NA", "{"
R"("fmt":"%N%n%O%n%A%n%C%n%Z",)"
R"("zipex":"10001,10017",)"
R"("languages":"en")"
"}"},
{"NC", "{"
R"("fmt":"%O%n%N%n%A%n%Z %C %X",)"
R"("require":"ACZ",)"
R"("zipex":"98814,98800,98810",)"
R"("posturl":"https:
R"("languages":"fr")"
"}"},
{"NE", "{"
R"("fmt":"%N%n%O%n%A%n%Z %C",)"
R"("zipex":"8001",)"
R"("languages":"fr")"
"}"},
{"NF", "{"
R"("fmt":"%O%n%N%n%A%n%C %S %Z",)"
R"("zipex":"2899",)"
R"("languages":"en")"
"}"},
{"NG", "{"
R"("fmt":"%N%n%O%n%A%n%D%n%C %Z%n%S",)"
R"("state_name_type":"state",)"
R"("zipex":"930283,300001,931104",)"
R"("posturl":"http:
R"("languages":"en")"
"}"},
{"NI", "{"
R"("fmt":"%N%n%O%n%A%n%Z%n%C, %S",)"
R"("state_name_type":"department",)"
R"("zipex":"52000",)"
R"("posturl":"http:
R"("languages":"es")"
"}"},
{"NL", "{"
R"("fmt":"%O%n%N%n%A%n%Z %C",)"
R"("require":"ACZ",)"
R"("zipex":"1234 AB,2490 AA",)"
R"("posturl":"http:
R"("languages":"nl~fy")"
"}"},
{"NO", "{"
R"("fmt":"%N%n%O%n%A%n%Z %C",)"
R"("require":"ACZ",)"
R"("locality_name_type":"post_town",)"
R"("zipex":"0025,0107,6631",)"
R"("posturl":"http:
R"("languages":"no~nn~se")"
"}"},
{"NP", "{"
R"("fmt":"%N%n%O%n%A%n%C %Z",)"
R"("zipex":"44601",)"
R"("posturl":"http:
R"("languages":"ne")"
"}"},
{"NR", "{"
R"("fmt":"%N%n%O%n%A%n%S",)"
R"("require":"AS",)"
R"("state_name_type":"district",)"
R"("languages":"en")"
"}"},
{"NU", "{"
R"("languages":"en~niu")"
"}"},
{"NZ", "{"
R"("fmt":"%N%n%O%n%A%n%D%n%C %Z",)"
R"("require":"ACZ",)"
R"("zipex":"6001,6015,6332,8252,1030",)"
R"("posturl":"https:
R"("languages":"en~mi")"
"}"},
{"OM", "{"
R"("fmt":"%N%n%O%n%A%n%Z%n%C",)"
R"("zipex":"133,112,111",)"
R"("languages":"ar")"
"}"},
{"PA", "{"
R"("fmt":"%N%n%O%n%A%n%C%n%S",)"
R"("languages":"es")"
"}"},
{"PE", "{"
R"("fmt":"%N%n%O%n%A%n%C %Z%n%S",)"
R"("locality_name_type":"district",)"
R"("zipex":"LIMA 23,LIMA 42,CALLAO 2,02001",)"
R"("posturl":"http:
R"("languages":"es")"
"}"},
{"PF", "{"
R"("fmt":"%N%n%O%n%A%n%Z %C %S",)"
R"("require":"ACSZ",)"
R"("state_name_type":"island",)"
R"("zipex":"98709",)"
R"("languages":"fr~ty")"
"}"},
{"PG", "{"
R"("fmt":"%N%n%O%n%A%n%C %Z %S",)"
R"("require":"ACS",)"
R"("zipex":"111",)"
R"("languages":"tpi~en~ho")"
"}"},
{"PH", "{"
R"("fmt":"%N%n%O%n%A%n%D, %C%n%Z %S",)"
R"("zipex":"1008,1050,1135,1207,2000,1000",)"
R"("posturl":"http:
R"("languages":"en")"
"}"},
{"PK", "{"
R"("fmt":"%N%n%O%n%A%n%D%n%C-%Z",)"
R"("zipex":"44000",)"
R"("posturl":"http:
R"("languages":"ur~en")"
"}"},
{"PL", "{"
R"("fmt":"%N%n%O%n%A%n%Z %C",)"
R"("require":"ACZ",)"
R"("zipex":"00-950,05-470,48-300,32-015,00-940",)"
R"("posturl":"http:
R"("languages":"pl~de~csb~lt")"
"}"},
{"PM", "{"
R"("fmt":"%O%n%N%n%A%n%Z %C %X",)"
R"("require":"ACZ",)"
R"("zipex":"97500",)"
R"("languages":"fr")"
"}"},
{"PN", "{"
R"("fmt":"%N%n%O%n%A%n%C%n%Z",)"
R"("require":"ACZ",)"
R"("zipex":"PCRN 1ZZ",)"
R"("languages":"en")"
"}"},
{"PR", "{"
R"("fmt":"%N%n%O%n%A%n%C PR %Z",)"
R"("require":"ACZ",)"
R"("zip_name_type":"zip",)"
R"("zipex":"00930",)"
R"("posturl":"http:
R"("languages":"es~en")"
"}"},
{"PS", "{"
R"("languages":"ar")"
"}"},
{"PT", "{"
R"("fmt":"%N%n%O%n%A%n%Z %C",)"
R"("require":"ACZ",)"
R"("zipex":"2725-079,1250-096,1201-950,2860-571,1208-148",)"
R"("posturl":"http:
R"("languages":"pt")"
"}"},
{"PW", "{"
R"("fmt":"%N%n%O%n%A%n%C %S %Z",)"
R"("require":"ACSZ",)"
R"("zip_name_type":"zip",)"
R"("state_name_type":"state",)"
R"("zipex":"96940",)"
R"("posturl":"http:
R"("languages":"pau~en")"
"}"},
{"PY", "{"
R"("fmt":"%N%n%O%n%A%n%Z %C",)"
R"("zipex":"1536,1538,1209",)"
R"("languages":"gn~es")"
"}"},
{"QA", "{"
R"("languages":"ar")"
"}"},
{"RE", "{"
R"("fmt":"%O%n%N%n%A%n%Z %C %X",)"
R"("require":"ACZ",)"
R"("zipex":"97400",)"
R"("posturl":"https:
R"("languages":"fr")"
"}"},
{"RO", "{"
R"("fmt":"%N%n%O%n%A%n%Z %S %C",)"
R"("require":"ACZ",)"
R"("zipex":"060274,061357,200716",)"
R"("posturl":"http:
R"("languages":"ro")"
"}"},
{"RS", "{"
R"("fmt":"%N%n%O%n%A%n%Z %C",)"
R"("zipex":"106314",)"
R"("posturl":"http:
R"("languages":"sr~sr-Latn~hu~ro~hr~sk~uk")"
"}"},
{"RU", "{"
R"("fmt":"%N%n%O%n%A%n%C%n%S%n%Z",)"
R"("lfmt":"%N%n%O%n%A%n%C%n%S%n%Z",)"
R"("require":"ACSZ",)"
R"("state_name_type":"oblast",)"
R"("zipex":"247112,103375,188300",)"
R"("posturl":"https:
R"("languages":"ru")"
"}"},
{"RW", "{"
R"("languages":"rw~en~fr")"
"}"},
{"SA", "{"
R"("fmt":"%N%n%O%n%A%n%C %Z",)"
R"("zipex":"11564,11187,11142",)"
R"("languages":"ar")"
"}"},
{"SB", "{"
R"("languages":"en")"
"}"},
{"SC", "{"
R"("fmt":"%N%n%O%n%A%n%C%n%S",)"
R"("state_name_type":"island",)"
R"("languages":"fr~en")"
"}"},
{"SD", "{"
R"("fmt":"%N%n%O%n%A%n%C%n%Z",)"
R"("locality_name_type":"district",)"
R"("zipex":"11042,11113",)"
R"("languages":"ar~en")"
"}"},
{"SE", "{"
R"("fmt":"%O%n%N%n%A%nSE-%Z %C",)"
R"("require":"ACZ",)"
R"("locality_name_type":"post_town",)"
R"("zipex":"11455,12345,10500",)"
R"("posturl":"https:
R"("languages":"sv~fi")"
"}"},
{"SG", "{"
R"("fmt":"%N%n%O%n%A%nSINGAPORE %Z",)"
R"("require":"AZ",)"
R"("zipex":"546080,308125,408600",)"
R"("posturl":"https:
R"("languages":"en~zh~ms~ta")"
"}"},
{"SH", "{"
R"("fmt":"%N%n%O%n%A%n%C%n%Z",)"
R"("require":"ACZ",)"
R"("zipex":"STHL 1ZZ",)"
R"("languages":"en")"
"}"},
{"SI", "{"
R"("fmt":"%N%n%O%n%A%nSI-%Z %C",)"
R"("zipex":"4000,1001,2500",)"
R"("languages":"sl~vec")"
"}"},
{"SJ", "{"
R"("fmt":"%N%n%O%n%A%n%Z %C",)"
R"("require":"ACZ",)"
R"("locality_name_type":"post_town",)"
R"("zipex":"9170",)"
R"("posturl":"http:
R"("languages":"no")"
"}"},
{"SK", "{"
R"("fmt":"%N%n%O%n%A%n%Z %C",)"
R"("require":"ACZ",)"
R"("zipex":"010 01,023 14,972 48,921 01,975 99",)"
R"("posturl":"http:
R"("languages":"sk")"
"}"},
{"SL", "{"
R"("languages":"en")"
"}"},
{"SM", "{"
R"("fmt":"%N%n%O%n%A%n%Z %C",)"
R"("require":"AZ",)"
R"("zipex":"47890,47891,47895,47899",)"
R"("posturl":"http:
R"("languages":"it")"
"}"},
{"SN", "{"
R"("fmt":"%N%n%O%n%A%n%Z %C",)"
R"("zipex":"12500,46024,16556,10000",)"
R"("languages":"wo~fr~ff~srr~dyo~sav~mfv~bjt~snf~knf~bsc~mey~tnr")"
"}"},
{"SO", "{"
R"("fmt":"%N%n%O%n%A%n%C, %S %Z",)"
R"("require":"ACS",)"
R"("zipex":"JH 09010,AD 11010",)"
R"("languages":"so")"
"}"},
{"SR", "{"
R"("fmt":"%N%n%O%n%A%n%C%n%S",)"
R"("languages":"nl")"
"}"},
{"SS", "{"
R"("languages":"en")"
"}"},
{"ST", "{"
R"("languages":"pt")"
"}"},
{"SV", "{"
R"("fmt":"%N%n%O%n%A%n%Z-%C%n%S",)"
R"("require":"ACS",)"
R"("zipex":"1101",)"
R"("languages":"es")"
"}"},
{"SX", "{"
R"("languages":"en~nl")"
"}"},
{"SY", "{"
R"("locality_name_type":"district",)"
R"("languages":"ar~fr")"
"}"},
{"SZ", "{"
R"("fmt":"%N%n%O%n%A%n%C%n%Z",)"
R"("zipex":"H100",)"
R"("posturl":"https:
R"("languages":"en~ss")"
"}"},
{"TA", "{"
R"("fmt":"%N%n%O%n%A%n%C%n%Z",)"
R"("zipex":"TDCU 1ZZ",)"
R"("languages":"en")"
"}"},
{"TC", "{"
R"("fmt":"%N%n%O%n%A%n%C%n%Z",)"
R"("require":"ACZ",)"
R"("zipex":"TKCA 1ZZ",)"
R"("languages":"en")"
"}"},
{"TD", "{"
R"("languages":"fr~ar")"
"}"},
{"TF", "{"
R"("languages":"fr")"
"}"},
{"TG", "{"
R"("languages":"fr")"
"}"},
{"TH", "{"
R"("fmt":"%N%n%O%n%A%n%D %C%n%S %Z",)"
R"("lfmt":"%N%n%O%n%A%n%D, %C%n%S %Z",)"
R"("zipex":"10150,10210",)"
R"("languages":"th")"
"}"},
{"TJ", "{"
R"("fmt":"%N%n%O%n%A%n%Z %C",)"
R"("zipex":"735450,734025",)"
R"("languages":"tg")"
"}"},
{"TK", "{"
R"("languages":"en~tkl")"
"}"},
{"TL", "{"
R"("languages":"pt~tet")"
"}"},
{"TM", "{"
R"("fmt":"%N%n%O%n%A%n%Z %C",)"
R"("zipex":"744000",)"
R"("languages":"tk")"
"}"},
{"TN", "{"
R"("fmt":"%N%n%O%n%A%n%Z %C",)"
R"("zipex":"1002,8129,3100,1030",)"
R"("posturl":"http:
R"("languages":"ar~fr")"
"}"},
{"TO", "{"
R"("languages":"to~en")"
"}"},
{"TR", "{"
R"("fmt":"%N%n%O%n%A%n%Z %C/%S",)"
R"("require":"ACZ",)"
R"("locality_name_type":"district",)"
R"("zipex":"01960,06101",)"
R"("posturl":"http:
R"("languages":"tr")"
"}"},
{"TT", "{"
R"("fmt":"%N%n%O%n%A%n%C%n%Z",)"
R"("zipex":"500234",)"
R"("languages":"en")"
"}"},
{"TV", "{"
R"("fmt":"%N%n%O%n%A%n%C%n%S",)"
R"("state_name_type":"island",)"
R"("languages":"tyv")"
"}"},
{"TW", "{"
R"("fmt":"%Z%n%S%C%n%A%n%O%n%N",)"
R"("lfmt":"%N%n%O%n%A%n%C, %S %Z",)"
R"("require":"ACSZ",)"
R"("state_name_type":"county",)"
R"("locality_name_type":"district",)"
R"("zipex":"104,106,10603,40867",)"
R"("posturl":"http:
R"("languages":"zh-Hant")"
"}"},
{"TZ", "{"
R"("fmt":"%N%n%O%n%A%n%Z %C",)"
R"("zipex":"6090,34413",)"
R"("languages":"sw~en")"
"}"},
{"UA", "{"
R"("fmt":"%N%n%O%n%A%n%C%n%S%n%Z",)"
R"("lfmt":"%N%n%O%n%A%n%C%n%S%n%Z",)"
R"("require":"ACZ",)"
R"("state_name_type":"oblast",)"
R"("zipex":"15432,01055,01001",)"
R"("posturl":"http:
R"("languages":"uk")"
"}"},
{"UG", "{"
R"("languages":"sw~en")"
"}"},
{"UM", "{"
R"("fmt":"%N%n%O%n%A%n%C %S %Z",)"
R"("require":"ACS",)"
R"("zip_name_type":"zip",)"
R"("state_name_type":"state",)"
R"("zipex":"96898",)"
R"("posturl":"http:
R"("languages":"en")"
"}"},
{"US", "{"
R"("fmt":"%N%n%O%n%A%n%C, %S %Z",)"
R"("require":"ACSZ",)"
R"("zip_name_type":"zip",)"
R"("state_name_type":"state",)"
R"("zipex":"95014,22162-1010",)"
R"("posturl":"https:
R"("languages":"en")"
"}"},
{"UY", "{"
R"("fmt":"%N%n%O%n%A%n%Z %C %S",)"
R"("zipex":"11600",)"
R"("posturl":"http:
R"("languages":"es")"
"}"},
{"UZ", "{"
R"("fmt":"%N%n%O%n%A%n%Z %C%n%S",)"
R"("zipex":"702100,700000",)"
R"("posturl":"https:
R"("languages":"uz~ru")"
"}"},
{"VA", "{"
R"("fmt":"%N%n%O%n%A%n%Z %C",)"
R"("zipex":"00120",)"
R"("languages":"it")"
"}"},
{"VC", "{"
R"("fmt":"%N%n%O%n%A%n%C %Z",)"
R"("zipex":"VC0100,VC0110,VC0400",)"
R"("posturl":"http:
R"("languages":"en")"
"}"},
{"VE", "{"
R"("fmt":"%N%n%O%n%A%n%C %Z, %S",)"
R"("require":"ACS",)"
R"("state_name_type":"state",)"
R"("zipex":"1010,3001,8011,1020",)"
R"("posturl":"http:
R"("languages":"es")"
"}"},
{"VG", "{"
R"("fmt":"%N%n%O%n%A%n%C%n%Z",)"
R"("require":"A",)"
R"("zipex":"VG1110,VG1150,VG1160",)"
R"("languages":"en")"
"}"},
{"VI", "{"
R"("fmt":"%N%n%O%n%A%n%C %S %Z",)"
R"("require":"ACSZ",)"
R"("zip_name_type":"zip",)"
R"("state_name_type":"state",)"
R"("zipex":"00802-1222,00850-9802",)"
R"("posturl":"http:
R"("languages":"en")"
"}"},
{"VN", "{"
R"("fmt":"%N%n%O%n%A%n%C%n%S %Z",)"
R"("lfmt":"%N%n%O%n%A%n%C%n%S %Z",)"
R"("zipex":"70010,55999",)"
R"("posturl":"http:
R"("languages":"vi")"
"}"},
{"VU", "{"
R"("languages":"bi~en~fr")"
"}"},
{"WF", "{"
R"("fmt":"%O%n%N%n%A%n%Z %C %X",)"
R"("require":"ACZ",)"
R"("zipex":"98600",)"
R"("languages":"fr")"
"}"},
{"WS", "{"
R"("languages":"sm~en")"
"}"},
{"XK", "{"
R"("fmt":"%N%n%O%n%A%n%Z %C",)"
R"("zipex":"10000",)"
R"("languages":"sq~sr~sr-Latn")"
"}"},
{"YE", "{"
R"("languages":"ar")"
"}"},
{"YT", "{"
R"("fmt":"%O%n%N%n%A%n%Z %C %X",)"
R"("require":"ACZ",)"
R"("zipex":"97600",)"
R"("languages":"fr")"
"}"},
{"ZA", "{"
R"("fmt":"%N%n%O%n%A%n%D%n%C%n%Z",)"
R"("require":"ACZ",)"
R"("zipex":"0083,1451,0001",)"
R"("posturl":"https:
R"("languages":"en~zu~xh~af~nso~tn~st~ts~ss~ve~nr")"
"}"},
{"ZM", "{"
R"("fmt":"%N%n%O%n%A%n%Z %C",)"
R"("zipex":"50100,50101",)"
R"("languages":"en")"
"}"},
{"ZW", "{"
R"("languages":"sn~en~nd")"
"}"},
};
}
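// Returns the fallback rule used when a region has no data of its own: a
// minimal format with generic names for the postal code, state, locality and
// sublocality fields.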
const std::string& RegionDataConstants::GetDefaultRegionData() {
static const std::string kDefaultRegionData(
"{"
R"("fmt":"%N%n%O%n%A%n%C",)"
R"("require":"AC",)"
R"("zip_name_type":"postal",)"
R"("state_name_type":"province",)"
R"("locality_name_type":"city",)"
R"("sublocality_name_type":"suburb")"
"}");
return kDefaultRegionData;
}
namespace {
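// Binary-searches kRegionData (which is sorted by region code) for
// |region_code| and stores the matching index in |position_out|. Returns
// false if the region code is not present.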
bool FindPositionOfRegionCode(const std::string& region_code,
size_t* position_out) {
assert(position_out != nullptr);
size_t left = 0;
size_t right = size(kRegionData);
while (left < right) {
size_t mid = left + (right - left) / 2;
int comparison = region_code.compare(kRegionData[mid].region_code);
if (comparison == 0) {
*position_out = mid;
return true;
} else if (comparison > 0) {
left = mid + 1;
} else {
right = mid;
}
}
return false;
}
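// Builds the list of all supported region codes, in the same (sorted) order
// as kRegionData.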
std::vector<std::string> InitRegionCodes() {
std::vector<std::string> region_codes(size(kRegionData));
std::transform(std::begin(kRegionData),
std::end(kRegionData),
region_codes.begin(),
[](const RegionData& region_data) {
return region_data.region_code;
});
return region_codes;
}
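// For each region, counts how many consecutive levels of LookupKey::kHierarchy
// below the country level appear in that region's format rule. This bounds how
// deep a lookup key for the region can go.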
const std::map<std::string, size_t> InitMaxLookupKeyDepth() {
std::map<std::string, size_t> max_depth;
for (const auto& region_data : kRegionData) {
std::vector<FormatElement> fields;
ParseFormatRule(region_data.data, &fields);
size_t depth = 1;
for (; depth < size(LookupKey::kHierarchy); ++depth) {
AddressField field = LookupKey::kHierarchy[depth];
if (std::find(fields.begin(), fields.end(), FormatElement(field)) ==
fields.end()) {
break;
}
}
max_depth.emplace(region_data.region_code, depth - 1);
}
return max_depth;
}
}
bool RegionDataConstants::IsSupported(const std::string& region_code) {
size_t unused;
return FindPositionOfRegionCode(region_code, &unused);
}
const std::vector<std::string>& RegionDataConstants::GetRegionCodes() {
static const std::vector<std::string> kRegionCodes(InitRegionCodes());
return kRegionCodes;
}
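// Returns the raw rule data for |region_code|, or an empty string if the
// region is not supported.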
std::string RegionDataConstants::GetRegionData(
const std::string& region_code) {
static const std::string kEmptyString;
size_t position;
bool found = FindPositionOfRegionCode(region_code, &position);
return found ? kRegionData[position].data : kEmptyString;
}
size_t RegionDataConstants::GetMaxLookupKeyDepth(
const std::string& region_code) {
static const std::map<std::string, size_t> kMaxDepth(InitMaxLookupKeyDepth());
auto it = kMaxDepth.find(region_code);
return it != kMaxDepth.end() ? it->second : 0;
}
}
} | #include "region_data_constants.h"
#include <algorithm>
#include <string>
#include <gtest/gtest.h>
namespace {
using i18n::addressinput::RegionDataConstants;
class RegionCodeTest : public testing::TestWithParam<std::string> {
public:
RegionCodeTest(const RegionCodeTest&) = delete;
RegionCodeTest& operator=(const RegionCodeTest&) = delete;
protected:
RegionCodeTest() = default;
};
TEST_P(RegionCodeTest, RegionCodeHasTwoCharacters) {
EXPECT_EQ(2, GetParam().length());
}
INSTANTIATE_TEST_SUITE_P(
AllRegionCodes, RegionCodeTest,
testing::ValuesIn(RegionDataConstants::GetRegionCodes()));
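// Verifies that a region data blob is non-empty and wrapped in curly braces.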
testing::AssertionResult HasCurlyBraces(const std::string& data) {
if (data.empty()) {
return testing::AssertionFailure() << "data is empty";
}
if (data[0] != '{') {
return testing::AssertionFailure() << data << " does not start with '{'";
}
if (data[data.length() - 1] != '}') {
return testing::AssertionFailure() << data << " does not end with '}'";
}
return testing::AssertionSuccess();
}
TEST(DefaultRegionDataTest, DefaultRegionHasCurlyBraces) {
EXPECT_TRUE(HasCurlyBraces(RegionDataConstants::GetDefaultRegionData()));
}
class RegionDataTest : public testing::TestWithParam<std::string> {
public:
RegionDataTest(const RegionDataTest&) = delete;
RegionDataTest& operator=(const RegionDataTest&) = delete;
protected:
RegionDataTest() = default;
std::string GetData() const {
return RegionDataConstants::GetRegionData(GetParam());
}
};
TEST_P(RegionDataTest, RegionDataHasCurlyBraces) {
EXPECT_TRUE(HasCurlyBraces(GetData()));
}
INSTANTIATE_TEST_SUITE_P(
AllRegionData, RegionDataTest,
testing::ValuesIn(RegionDataConstants::GetRegionCodes()));
TEST(RegionDataConstantsTest, GetMaxLookupKeyDepth) {
EXPECT_EQ(0, RegionDataConstants::GetMaxLookupKeyDepth("NZ"));
EXPECT_EQ(1, RegionDataConstants::GetMaxLookupKeyDepth("KY"));
EXPECT_EQ(2, RegionDataConstants::GetMaxLookupKeyDepth("US"));
EXPECT_EQ(3, RegionDataConstants::GetMaxLookupKeyDepth("CN"));
}
TEST(RegionDataConstantsTest, RegionCodesSorted) {
EXPECT_TRUE(std::is_sorted(RegionDataConstants::GetRegionCodes().begin(),
RegionDataConstants::GetRegionCodes().end()));
}
} | https://github.com/google/libaddressinput/blob/2610f7b1043d6784ada41392fc9392d1ea09ea07/cpp/src/region_data_constants.cc | https://github.com/google/libaddressinput/blob/2610f7b1043d6784ada41392fc9392d1ea09ea07/cpp/test/region_data_constants_test.cc | 2610f7b1043d6784ada41392fc9392d1ea09ea07 |
0ab1eb70-b950-474e-92b9-0937ea336b35 | cpp | tensorflow/tensorflow | timestamp_utils | third_party/xla/xla/tsl/profiler/utils/timestamp_utils.cc | third_party/xla/xla/tsl/profiler/utils/timestamp_utils_test.cc | #include "xla/tsl/profiler/utils/timestamp_utils.h"
#include <cstdint>
#include "absl/log/log.h"
#include "xla/tsl/profiler/utils/xplane_builder.h"
#include "xla/tsl/profiler/utils/xplane_schema.h"
#include "xla/tsl/profiler/utils/xplane_utils.h"
#include "tsl/profiler/protobuf/xplane.pb.h"
namespace tsl {
namespace profiler {
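// Records the profiling session start/stop wall times (in nanoseconds) as
// stats on the task environment plane of |space|. Logs a warning and does
// nothing if either timestamp is zero.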
void SetSessionTimestamps(uint64_t start_walltime_ns, uint64_t stop_walltime_ns,
tensorflow::profiler::XSpace& space) {
if (start_walltime_ns != 0 && stop_walltime_ns != 0) {
tsl::profiler::XPlaneBuilder plane(
tsl::profiler::FindOrAddMutablePlaneWithName(
&space, tsl::profiler::kTaskEnvPlaneName));
plane.AddStatValue(*plane.GetOrCreateStatMetadata(
GetTaskEnvStatTypeStr(kEnvProfileStartTime)),
start_walltime_ns);
plane.AddStatValue(*plane.GetOrCreateStatMetadata(
GetTaskEnvStatTypeStr(kEnvProfileStopTime)),
stop_walltime_ns);
} else {
LOG(WARNING) << "Not Setting Session Timestamps, (start_walltime_ns, "
"stop_walltime_ns) : "
<< start_walltime_ns << ", " << stop_walltime_ns;
}
}
}
} | #include "xla/tsl/profiler/utils/timestamp_utils.h"
#include "xla/tsl/profiler/utils/xplane_schema.h"
#include "xla/tsl/profiler/utils/xplane_utils.h"
#include "xla/tsl/profiler/utils/xplane_visitor.h"
#include "tsl/platform/test.h"
namespace tsl {
namespace profiler {
using ::testing::Eq;
TEST(TimestampUtilsTest, StartAndStopTimestampAreAdded) {
XSpace xspace;
SetSessionTimestamps(1000, 2000, xspace);
const XPlane* xplane = FindPlaneWithName(xspace, kTaskEnvPlaneName);
XPlaneVisitor visitor(xplane, {}, {FindTaskEnvStatType});
auto start_time = visitor.GetStat(TaskEnvStatType::kEnvProfileStartTime);
auto stop_time = visitor.GetStat(TaskEnvStatType::kEnvProfileStopTime);
EXPECT_THAT(start_time->IntOrUintValue(), Eq(1000));
EXPECT_THAT(stop_time->IntOrUintValue(), Eq(2000));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/profiler/utils/timestamp_utils.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/profiler/utils/timestamp_utils_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
0f6a6d64-faaf-48bf-abf2-9998a1cf5baa | cpp | tensorflow/tensorflow | function_utils | tensorflow/core/grappler/optimizers/data/function_utils.cc | tensorflow/core/grappler/optimizers/data/function_utils_test.cc | #include "tensorflow/core/grappler/optimizers/data/function_utils.h"
#include "tensorflow/core/framework/device_base.h"
#include "tensorflow/core/framework/op_def.pb.h"
#include "tensorflow/core/grappler/optimizers/data/graph_utils.h"
#include "tensorflow/core/lib/strings/scanner.h"
namespace tensorflow {
namespace grappler {
namespace function_utils {
FunctionDefTensorDesc::FunctionDefTensorDesc(const string& node_name,
const string& output, int position)
: node_name(node_name), node_output(output), position(position) {
full_str = strings::StrCat(node_name, ":", node_output, ":", position);
}
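// Parses a tensor description of the form "node_name:output_name:position",
// where the output name and position are optional,
// e.g. "Cast:y:0" -> node_name="Cast", node_output="y", position=0.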
FunctionDefTensorDesc::FunctionDefTensorDesc(const string& input) {
full_str = input;
StringPiece capture;
StringPiece remaining;
if (strings::Scanner(input)
.One(strings::Scanner::LETTER_DIGIT_DOT_UNDERSCORE)
.Any(strings::Scanner::LETTER_DIGIT_DASH_DOT_SLASH_UNDERSCORE)
.GetResult(&remaining, &capture)) {
node_name = string(capture.data(), capture.size());
}
if (strings::Scanner(remaining)
.OneLiteral(":")
.RestartCapture()
.One(strings::Scanner::LETTER)
.Any(strings::Scanner::LETTER_DIGIT_UNDERSCORE)
.GetResult(&remaining, &capture)) {
node_output = string(capture.data(), capture.size());
}
if (strings::Scanner(remaining)
.OneLiteral(":")
.RestartCapture()
.Many(strings::Scanner::DIGIT)
.GetResult(nullptr, &capture)) {
CHECK(strings::safe_strto32(capture, &position));
}
}
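// Rewrites every node input and function return value that refers to |from|
// so that it refers to |to| instead.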
void ReplaceReferences(const string& from, const string& to,
FunctionDef* func) {
for (NodeDef& n : *func->mutable_node_def()) {
std::replace(n.mutable_input()->begin(), n.mutable_input()->end(), from,
to);
}
for (auto& p : *func->mutable_ret()) {
if (p.second == from) {
p.second = to;
}
}
}
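// Adds an output argument to |fdef| named after |prefix| (uniquified if the
// name is already taken) and maps it to |output_tensor_name|.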
void AddFunctionOutputWithUniqueName(StringPiece prefix,
StringPiece output_tensor_name,
FunctionDef* fdef, DataType dtype) {
string name = string(prefix);
int id = fdef->signature().output_arg_size();
while (ContainsFunctionOutputWithName(name, *fdef)) {
name = strings::StrCat(prefix, "/_", id);
++id;
}
auto* output = fdef->mutable_signature()->mutable_output_arg()->Add();
output->set_name(name);
output->set_type(dtype);
(*fdef->mutable_ret())[name] = string(output_tensor_name);
}
OpDef_ArgDef* AddFunctionInput(const string& name, FunctionDef* fdef,
DataType dtype) {
auto* input_arg = fdef->mutable_signature()->mutable_input_arg()->Add();
input_arg->set_type(dtype);
input_arg->set_name(name);
return input_arg;
}
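// Adds a node to the function body. If |name| is empty, a unique name is
// derived from |op|.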
NodeDef* AddNode(StringPiece name, StringPiece op,
const std::vector<string>& inputs,
const std::vector<std::pair<string, AttrValue>>& attributes,
FunctionDef* fd) {
NodeDef* node = fd->add_node_def();
if (!name.empty()) {
node->set_name(string(name));
} else {
SetUniqueFunctionNodeName(op, fd, node);
}
node->set_op(string(op));
for (const string& input : inputs) {
node->add_input(input);
}
for (const auto& attr : attributes) {
(*node->mutable_attr())[attr.first] = attr.second;
}
return node;
}
bool ContainsFunctionNodeWithName(StringPiece name,
const FunctionDef& function) {
return FindFunctionNodeWithName(name, function) != -1;
}
bool ContainsFunctionNodeWithOp(StringPiece op, const FunctionDef& function) {
return FindFunctionNodeWithOp(op, function) != -1;
}
bool ContainsFunctionOutputWithName(StringPiece name,
const FunctionDef& function) {
return FindFunctionOutputWithName(name, function) != -1;
}
int FindFunctionInputWithName(StringPiece name, const FunctionDef& function) {
return graph_utils::GetFirstElementIndexWithPredicate(
[&name](const OpDef_ArgDef& arg) { return arg.name() == name; },
function.signature().input_arg());
}
int FindFunctionOutputWithName(StringPiece name, const FunctionDef& function) {
return graph_utils::GetFirstElementIndexWithPredicate(
[&name](const OpDef_ArgDef& arg) { return arg.name() == name; },
function.signature().output_arg());
}
int FindFunctionNodeWithName(StringPiece name, const FunctionDef& function) {
return graph_utils::GetFirstElementIndexWithPredicate(
[&name](const NodeDef& node) { return node.name() == name; },
function.node_def());
}
int FindFunctionNodeWithOp(StringPiece op, const FunctionDef& function) {
return graph_utils::GetFirstElementIndexWithPredicate(
[&op](const NodeDef& node) { return node.op() == op; },
function.node_def());
}
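// Sets |node|'s name to a string starting with |prefix| that does not clash
// with any existing node in |function|.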
void SetUniqueFunctionNodeName(StringPiece prefix, FunctionDef* function,
NodeDef* node) {
string name = string(prefix);
int id = function->node_def_size();
while (ContainsFunctionNodeWithName(name, *function)) {
name = strings::StrCat(prefix, "/_", id);
++id;
}
node->set_name(std::move(name));
}
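// Returns true if the function is marked stateful and contains at least one
// stateful node. Assert nodes are treated as stateless when |skip_assert| is
// set.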
bool IsFunctionStateful(const FunctionLibraryDefinition& library,
const FunctionDef& function_def, bool skip_assert) {
if (!function_def.signature().is_stateful()) return false;
for (const NodeDef& node_def : function_def.node_def()) {
if (IsNodeStateful(library, node_def, skip_assert)) return true;
}
return false;
}
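// Returns true if |node| is stateful. If/While nodes are considered stateless
// when all of their branch (resp. cond/body) functions are stateless, and ops
// that cannot be looked up are conservatively treated as stateful.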
bool IsNodeStateful(const FunctionLibraryDefinition& library,
const NodeDef& node, bool skip_assert) {
const OpDef* op_def;
Status s = OpRegistry::Global()->LookUpOpDef(node.op(), &op_def);
if (!s.ok()) return true;
if (!op_def->is_stateful()) return false;
if (skip_assert && op_def->name() == "Assert") {
return false;
}
if (op_def->name() == "If") {
const FunctionDef* then_func =
library.Find(node.attr().at("then_branch").func().name());
const FunctionDef* else_func =
library.Find(node.attr().at("else_branch").func().name());
if ((then_func != nullptr &&
!IsFunctionStateful(library, *then_func, skip_assert)) &&
(else_func != nullptr &&
!IsFunctionStateful(library, *else_func, skip_assert))) {
return false;
}
}
if (op_def->name() == "While") {
const FunctionDef* cond_func =
library.Find(node.attr().at("cond").func().name());
const FunctionDef* body_func =
library.Find(node.attr().at("body").func().name());
if ((cond_func != nullptr &&
!IsFunctionStateful(library, *cond_func, skip_assert)) &&
(body_func != nullptr &&
!IsFunctionStateful(library, *body_func, skip_assert))) {
return false;
}
}
return true;
}
}
}
} | #include "tensorflow/core/grappler/optimizers/data/function_utils.h"
#include "tensorflow/core/framework/function_testlib.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/grappler/optimizers/data/graph_utils.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/tools/graph_transforms/transform_utils.h"
namespace tensorflow {
namespace grappler {
namespace function_utils {
namespace {
TEST(FunctionDefTensorDesc, Parsing) {
FunctionDefTensorDesc f("Cast:y:0");
EXPECT_EQ(f.full_str, "Cast:y:0");
EXPECT_EQ(f.node_name, "Cast");
EXPECT_EQ(f.node_output, "y");
EXPECT_EQ(f.position, 0);
FunctionDefTensorDesc f2("Arg0");
EXPECT_EQ(f2.full_str, "Arg0");
EXPECT_EQ(f2.node_name, "Arg0");
EXPECT_EQ(f2.node_output, "");
EXPECT_EQ(f2.position, -1);
}
TEST(ReplaceReferencesTest, ReplaceReferencesTest) {
FunctionDef outer = FunctionDefHelper::Create(
"outer", {"arg0: int32"}, {"out: int32", "out2: int64"}, {}, {},
{{"out", "MapDefun:output:0"}, {"out2", "Cast:y:0"}});
NodeDef* derive_node =
AddNode("X", "Some_Op", {"MapDefun:output:0"}, {}, &outer);
ReplaceReferences("MapDefun:output:0", "arg0", &outer);
EXPECT_EQ(outer.ret().at("out"), "arg0");
EXPECT_EQ(derive_node->input(0), "arg0");
}
TEST(FunctionUtilsTest, AddFunctionOutputWithUniqueName) {
FunctionDef function = test::function::XTimesTwo();
AddFunctionOutputWithUniqueName("y", "two", &function, DT_INT64);
EXPECT_TRUE(ContainsFunctionOutputWithName("y/_1", function));
EXPECT_EQ(function.ret().at("y/_1"), "two");
}
TEST(FunctionUtilsTest, AddFunctionInput) {
FunctionDef fdef;
auto arg0 = AddFunctionInput("arg0", &fdef, DT_INT32);
auto arg1 = AddFunctionInput("arg1", &fdef, DT_BOOL);
EXPECT_EQ(fdef.signature().input_arg().data()[0], arg0);
EXPECT_EQ(arg0->name(), "arg0");
EXPECT_EQ(arg0->type(), DT_INT32);
EXPECT_EQ(fdef.signature().input_arg().data()[1], arg1);
EXPECT_EQ(arg1->name(), "arg1");
EXPECT_EQ(arg1->type(), DT_BOOL);
}
TEST(FunctionUtilsTest, ContainsFunctionNodeWithName) {
FunctionDef function = test::function::XTimesTwo();
EXPECT_FALSE(ContainsFunctionNodeWithName(
"weird_name_that_should_not_be_there", function));
EXPECT_TRUE(ContainsFunctionNodeWithName("two", function));
}
TEST(FunctionUtilsTest, ContainsFunctionNodeWithOp) {
FunctionDef function = test::function::XTimesTwo();
EXPECT_FALSE(ContainsFunctionNodeWithOp("weird_op_that_should_not_be_there",
function));
EXPECT_TRUE(ContainsFunctionNodeWithOp("Mul", function));
}
TEST(FunctionUtilsTest, ContainsFunctionOutputWithName) {
FunctionDef function = test::function::XTimesTwo();
EXPECT_TRUE(ContainsFunctionOutputWithName("y", function));
EXPECT_FALSE(ContainsFunctionOutputWithName("Add:z:0", function));
}
TEST(FunctionUtilsTest, FindFunctionNodeWithName) {
FunctionDef function = test::function::XTimesTwo();
EXPECT_EQ(
FindFunctionNodeWithName("weird_name_that_should_not_be_there", function),
-1);
EXPECT_NE(FindFunctionNodeWithName("two", function), -1);
}
TEST(FunctionUtilsTest, FindFunctionNodeWithOp) {
FunctionDef function = test::function::XTimesTwo();
EXPECT_EQ(
FindFunctionNodeWithOp("weird_op_that_should_not_be_there", function),
-1);
EXPECT_NE(FindFunctionNodeWithOp("Mul", function), -1);
}
TEST(FunctionUtilsTest, FindFunctionInputWithName) {
FunctionDef function = test::function::XTimesTwo();
EXPECT_EQ(FindFunctionInputWithName("x", function), 0);
EXPECT_EQ(FindFunctionInputWithName("not_a_name", function), -1);
}
TEST(FunctionUtilsTest, FindFunctionOutputWithName) {
FunctionDef function = test::function::XTimesTwo();
EXPECT_EQ(FindFunctionOutputWithName("y", function), 0);
EXPECT_EQ(FindFunctionOutputWithName("Add:z:0", function), -1);
}
TEST(FunctionUtilsTest, SetUniqueFunctionNodeName) {
FunctionDef function = test::function::XTimesTwo();
NodeDef node;
SetUniqueFunctionNodeName("abc", &function, &node);
for (const NodeDef& function_node : function.node_def()) {
EXPECT_NE(node.name(), function_node.name());
}
auto* new_node = function.add_node_def();
*new_node = node;
NodeDef other;
SetUniqueFunctionNodeName("abc", &function, &other);
EXPECT_NE(other.name(), new_node->name());
}
TEST(FunctionUtilsTest, AddNodeToFunctionDef) {
FunctionDef func;
const char* op_name = "xxx";
AddNode(op_name, op_name, {}, {}, &func);
const NodeDef& node1 = func.node_def(FindFunctionNodeWithName("xxx", func));
EXPECT_EQ(node1.op(), op_name);
EXPECT_EQ(node1.input_size(), 0);
EXPECT_EQ(node1.attr_size(), 0);
const std::vector<string> inputs({"input1", "input2"});
AddNode("", op_name, inputs, {}, &func);
const NodeDef& node2 =
func.node_def(FindFunctionNodeWithName("xxx/_2", func));
EXPECT_EQ(node2.op(), op_name);
EXPECT_EQ(node2.attr_size(), 0);
EXPECT_EQ(node2.input_size(), inputs.size());
for (size_t i = 0; i < inputs.size(); ++i) {
EXPECT_EQ(node2.input(i), inputs[i]);
}
AttrValue a1, a2;
a1.set_type(DT_INT32);
a2.set_type(DT_INT64);
const std::vector<std::pair<string, AttrValue>> attrs(
{{"attr1", a1}, {"attr2", a2}});
AddNode("", op_name, {}, attrs, &func);
const NodeDef& node3 =
func.node_def(FindFunctionNodeWithName("xxx/_3", func));
EXPECT_EQ(node3.op(), op_name);
EXPECT_EQ(node3.input_size(), 0);
EXPECT_EQ(node3.attr_size(), attrs.size());
for (size_t i = 0; i < attrs.size(); ++i) {
EXPECT_EQ(attrs[i].second.type(), node3.attr().at(attrs[i].first).type());
}
}
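// Text-format GraphDef whose library contains an If op: the true branch is
// stateless and the false branch contains an Assert. Used below to exercise
// IsFunctionStateful/IsNodeStateful.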
constexpr char kCondGraphProto[] = R"proto(
node {
name: "StatefulPartitionedCall"
op: "StatefulPartitionedCall"
attr {
key: "Tin"
value { list {} }
}
attr {
key: "Tout"
value { list { type: DT_BOOL } }
}
attr {
key: "_gradient_op_type"
value { s: "PartitionedCall-20" }
}
attr {
key: "config"
value { s: "" }
}
attr {
key: "config_proto"
value { s: "" }
}
attr {
key: "executor_type"
value { s: "" }
}
attr {
key: "f"
value { func { name: "__inference_test_function_19" } }
}
}
library {
function {
signature {
name: "cond_true_3"
input_arg { name: "identity_const" type: DT_BOOL }
output_arg { name: "identity_1" type: DT_BOOL }
}
node_def { name: "NoOp" op: "NoOp" }
node_def {
name: "Identity"
op: "Identity"
input: "identity_const"
input: "^NoOp"
attr {
key: "T"
value { type: DT_BOOL }
}
}
node_def {
name: "Identity_1"
op: "Identity"
input: "Identity:output:0"
attr {
key: "T"
value { type: DT_BOOL }
}
}
ret { key: "identity_1" value: "Identity_1:output:0" }
}
function {
signature {
name: "cond_false_4"
input_arg { name: "identity_const" type: DT_BOOL }
output_arg { name: "identity_1" type: DT_BOOL }
is_stateful: true
}
node_def {
name: "Assert/Const"
op: "Const"
attr {
key: "dtype"
value { type: DT_STRING }
}
attr {
key: "value"
value {
tensor {
dtype: DT_STRING
tensor_shape {}
string_val: "Wrong branch!!!"
}
}
}
}
node_def {
name: "Assert/Assert/condition"
op: "Const"
attr {
key: "dtype"
value { type: DT_BOOL }
}
attr {
key: "value"
value {
tensor {
dtype: DT_BOOL
tensor_shape {}
bool_val: false
}
}
}
}
node_def {
name: "Assert/Assert/data_0"
op: "Const"
attr {
key: "dtype"
value { type: DT_STRING }
}
attr {
key: "value"
value {
tensor {
dtype: DT_STRING
tensor_shape {}
string_val: "Wrong branch!!!"
}
}
}
}
node_def {
name: "Assert/Assert"
op: "Assert"
input: "Assert/Assert/condition:output:0"
input: "Assert/Assert/data_0:output:0"
attr {
key: "T"
value { list { type: DT_STRING } }
}
attr {
key: "summarize"
value { i: 3 }
}
}
node_def {
name: "Identity"
op: "Identity"
input: "identity_const"
input: "^Assert/Assert"
attr {
key: "T"
value { type: DT_BOOL }
}
}
node_def {
name: "Identity_1"
op: "Identity"
input: "Identity:output:0"
input: "^Assert/Assert"
attr {
key: "T"
value { type: DT_BOOL }
}
}
ret { key: "identity_1" value: "Identity_1:output:0" }
}
function {
signature {
name: "__inference_test_function_19"
output_arg { name: "identity" type: DT_BOOL }
is_stateful: true
}
node_def {
name: "Const"
op: "Const"
attr {
key: "dtype"
value { type: DT_BOOL }
}
attr {
key: "value"
value {
tensor {
dtype: DT_BOOL
tensor_shape {}
bool_val: true
}
}
}
}
node_def {
name: "cond"
op: "If"
input: "Const:output:0"
input: "Const:output:0"
attr {
key: "Tcond"
value { type: DT_BOOL }
}
attr {
key: "Tin"
value { list { type: DT_BOOL } }
}
attr {
key: "Tout"
value { list { type: DT_BOOL } }
}
attr {
key: "_lower_using_switch_merge"
value { b: true }
}
attr {
key: "else_branch"
value { func { name: "cond_false_4" } }
}
attr {
key: "output_shapes"
value { list { shape {} } }
}
attr {
key: "then_branch"
value { func { name: "cond_true_3" } }
}
}
node_def {
name: "cond/Identity"
op: "Identity"
input: "cond:output:0"
attr {
key: "T"
value { type: DT_BOOL }
}
}
node_def {
name: "Identity"
op: "Identity"
input: "cond/Identity:output:0"
input: "^cond"
attr {
key: "T"
value { type: DT_BOOL }
}
}
ret { key: "identity" value: "Identity:output:0" }
}
}
versions { producer: 27 min_consumer: 12 })proto";
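// Text-format GraphDef whose library contains a While loop with stateless
// cond and body functions.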
constexpr char kWhileGraphProto[] = R"proto(
node {
name: "StatefulPartitionedCall"
op: "StatefulPartitionedCall"
attr {
key: "Tin"
value { list {} }
}
attr {
key: "Tout"
value { list { type: DT_INT32 } }
}
attr {
key: "_gradient_op_type"
value { s: "PartitionedCall-35" }
}
attr {
key: "config"
value { s: "" }
}
attr {
key: "config_proto"
value { s: "" }
}
attr {
key: "executor_type"
value { s: "" }
}
attr {
key: "f"
value { func { name: "__inference_test_function_34" } }
}
}
library {
function {
signature {
name: "while_body_5"
input_arg { name: "while_loop_counter" type: DT_INT32 }
input_arg { name: "const" type: DT_INT32 }
input_arg { name: "maximum_iterations" type: DT_INT32 }
output_arg { name: "identity" type: DT_INT32 }
output_arg { name: "identity_1" type: DT_INT32 }
output_arg { name: "identity_2" type: DT_INT32 }
}
node_def {
name: "add/y"
op: "Const"
attr {
key: "dtype"
value { type: DT_INT32 }
}
attr {
key: "value"
value {
tensor {
dtype: DT_INT32
tensor_shape {}
int_val: 1
}
}
}
}
node_def {
name: "add"
op: "Add"
input: "const"
input: "add/y:output:0"
attr {
key: "T"
value { type: DT_INT32 }
}
}
node_def {
name: "add_1/y"
op: "Const"
attr {
key: "dtype"
value { type: DT_INT32 }
}
attr {
key: "value"
value {
tensor {
dtype: DT_INT32
tensor_shape {}
int_val: 1
}
}
}
}
node_def {
name: "add_1"
op: "Add"
input: "while_loop_counter"
input: "add_1/y:output:0"
attr {
key: "T"
value { type: DT_INT32 }
}
}
node_def {
name: "Identity"
op: "Identity"
input: "add_1:z:0"
attr {
key: "T"
value { type: DT_INT32 }
}
}
node_def {
name: "Identity_1"
op: "Identity"
input: "add:z:0"
attr {
key: "T"
value { type: DT_INT32 }
}
}
node_def {
name: "Identity_2"
op: "Identity"
input: "maximum_iterations"
attr {
key: "T"
value { type: DT_INT32 }
}
}
ret { key: "identity" value: "Identity:output:0" }
ret { key: "identity_1" value: "Identity_1:output:0" }
ret { key: "identity_2" value: "Identity_2:output:0" }
}
function {
signature {
name: "__inference_test_function_34"
output_arg { name: "identity" type: DT_INT32 }
is_stateful: true
}
node_def {
name: "maximum_iterations"
op: "Const"
attr {
key: "dtype"
value { type: DT_INT32 }
}
attr {
key: "value"
value {
tensor {
dtype: DT_INT32
tensor_shape {}
int_val: 1
}
}
}
}
node_def {
name: "Const"
op: "Const"
attr {
key: "dtype"
value { type: DT_INT32 }
}
attr {
key: "value"
value {
tensor {
dtype: DT_INT32
tensor_shape {}
int_val: 0
}
}
}
}
node_def {
name: "while/loop_counter"
op: "Const"
attr {
key: "dtype"
value { type: DT_INT32 }
}
attr {
key: "value"
value {
tensor {
dtype: DT_INT32
tensor_shape {}
int_val: 0
}
}
}
}
node_def {
name: "while"
op: "While"
input: "while/loop_counter:output:0"
input: "Const:output:0"
input: "maximum_iterations:output:0"
attr {
key: "T"
value { list { type: DT_INT32 type: DT_INT32 type: DT_INT32 } }
}
attr {
key: "_lower_using_switch_merge"
value { b: true }
}
attr {
key: "body"
value { func { name: "while_body_5" } }
}
attr {
key: "cond"
value { func { name: "while_cond_4" } }
}
attr {
key: "output_shapes"
value {
list {
shape {}
shape {}
shape {}
}
}
}
}
node_def {
name: "while/Identity"
op: "Identity"
input: "while:output:0"
attr {
key: "T"
value { type: DT_INT32 }
}
}
node_def {
name: "while/Identity_1"
op: "Identity"
input: "while:output:1"
attr {
key: "T"
value { type: DT_INT32 }
}
}
node_def {
name: "while/Identity_2"
op: "Identity"
input: "while:output:2"
attr {
key: "T"
value { type: DT_INT32 }
}
}
node_def {
name: "Identity"
op: "Identity"
input: "while/Identity_1:output:0"
input: "^while"
attr {
key: "T"
value { type: DT_INT32 }
}
}
ret { key: "identity" value: "Identity:output:0" }
}
function {
signature {
name: "while_cond_4"
input_arg { name: "while_loop_counter" type: DT_INT32 }
input_arg { name: "const" type: DT_INT32 }
input_arg { name: "less_maximum_iterations" type: DT_INT32 }
output_arg { name: "identity" type: DT_BOOL }
}
node_def {
name: "Less"
op: "Less"
input: "while_loop_counter"
input: "less_maximum_iterations"
attr {
key: "T"
value { type: DT_INT32 }
}
}
node_def {
name: "Less_1/y"
op: "Const"
attr {
key: "dtype"
value { type: DT_INT32 }
}
attr {
key: "value"
value {
tensor {
dtype: DT_INT32
tensor_shape {}
int_val: 3
}
}
}
}
node_def {
name: "Less_1"
op: "Less"
input: "const"
input: "Less_1/y:output:0"
attr {
key: "T"
value { type: DT_INT32 }
}
}
node_def {
name: "LogicalAnd"
op: "LogicalAnd"
input: "Less:z:0"
input: "Less_1:z:0"
}
node_def {
name: "Identity"
op: "Identity"
input: "LogicalAnd:z:0"
attr {
key: "T"
value { type: DT_BOOL }
}
}
ret { key: "identity" value: "Identity:output:0" }
}
}
versions { producer: 27 min_consumer: 12 })proto";
TEST(FunctionUtilsTest, IsFunctionStateful) {
GraphDef graph_def;
MutableGraphView graph(&graph_def);
NodeDef* nodeA = graph_utils::AddNode("", "A", {}, {}, &graph);
FunctionDef* function = graph_def.mutable_library()->add_function();
*function = test::function::XTimesTwo();
FunctionLibraryDefinition lib_def(OpRegistry::Global(),
*graph_def.mutable_library());
EXPECT_FALSE(IsFunctionStateful(lib_def, *function));
EXPECT_TRUE(IsNodeStateful(lib_def, *nodeA));
GraphDef graph_def_cond;
protobuf::TextFormat::ParseFromString(kCondGraphProto, &graph_def_cond);
FunctionLibraryDefinition cond_lib(OpRegistry::Global(),
graph_def_cond.library());
const FunctionDef* no_op_fnc = cond_lib.Find("cond_true_3");
EXPECT_FALSE(IsFunctionStateful(cond_lib, *no_op_fnc));
EXPECT_FALSE(IsFunctionStateful(cond_lib, *no_op_fnc, true));
const FunctionDef* assert_func = cond_lib.Find("cond_false_4");
EXPECT_TRUE(IsFunctionStateful(cond_lib, *assert_func));
EXPECT_FALSE(IsFunctionStateful(cond_lib, *assert_func, true));
EXPECT_TRUE(ContainsFunctionNodeWithOp("Const", *assert_func));
EXPECT_TRUE(ContainsFunctionNodeWithOp("Assert", *assert_func));
for (auto node : assert_func->node_def()) {
if (node.op() == "Const") {
EXPECT_FALSE(IsNodeStateful(lib_def, node));
}
if (node.op() == "Assert") {
EXPECT_TRUE(IsNodeStateful(lib_def, node));
EXPECT_FALSE(IsNodeStateful(lib_def, node, true));
}
}
const FunctionDef* cond_func = cond_lib.Find("__inference_test_function_19");
EXPECT_TRUE(IsFunctionStateful(cond_lib, *cond_func));
EXPECT_FALSE(IsFunctionStateful(cond_lib, *cond_func, true));
GraphDef graph_def_while;
protobuf::TextFormat::ParseFromString(kWhileGraphProto, &graph_def_while);
FunctionLibraryDefinition while_lib(OpRegistry::Global(),
graph_def_while.library());
const FunctionDef* while_function =
while_lib.Find("__inference_test_function_34");
EXPECT_FALSE(IsFunctionStateful(while_lib, *while_function));
EXPECT_FALSE(IsFunctionStateful(while_lib, *while_function, true));
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/optimizers/data/function_utils.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/optimizers/data/function_utils_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
204d4e8c-692c-433f-a27f-07a914eac2a7 | cpp | tensorflow/tensorflow | update_op_cost_in_tfrt_mlir | tensorflow/compiler/mlir/tfrt/transforms/update_op_cost_in_tfrt_mlir.cc | tensorflow/compiler/mlir/tfrt/tests/analysis/update_op_cost_in_tfrt_mlir_test.cc | #include "tensorflow/compiler/mlir/tfrt/transforms/update_op_cost_in_tfrt_mlir.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/Operation.h"
#include "tensorflow/compiler/mlir/tfrt/analysis/cost_analysis.h"
#include "tensorflow/core/tfrt/fallback/cost_recorder.h"
namespace tensorflow {
namespace tfrt_compiler {
constexpr char kCostAttrName[] = "_tfrt_cost";
constexpr char kOpKeyAttrName[] = "op_key";
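// Walks the module and, for every op that carries both the _tfrt_cost and
// op_key attributes and has no registered cost function, replaces the cost
// attribute with the measured cost recorded for that op_key.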
void UpdateOpCostInTfrtMlir(mlir::ModuleOp op,
const tfrt_stub::CostRecorder& cost_recorder) {
mlir::Builder builder(op);
op.walk([&](mlir::Operation* op) {
if (HasCostFunctionRegistered(op->getName().getStringRef())) return;
const auto cost_attr = op->getAttrOfType<mlir::IntegerAttr>(kCostAttrName);
if (!cost_attr) return;
const auto op_key_attr =
op->getAttrOfType<mlir::IntegerAttr>(kOpKeyAttrName);
if (!op_key_attr) return;
const int64_t op_key = op_key_attr.getInt();
op->setAttr(kCostAttrName, builder.getI64IntegerAttr(
cost_recorder.GetCost(op_key)));
});
}
}
} | #include "tensorflow/compiler/mlir/tfrt/transforms/update_op_cost_in_tfrt_mlir.h"
#include <cstdint>
#include <cstdlib>
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/container/flat_hash_map.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/DialectRegistry.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/Operation.h"
#include "mlir/Parser/Parser.h"
#include "tensorflow/compiler/mlir/tfrt/ir/tfrt_fallback_async.h"
#include "tensorflow/compiler/mlir/tfrt/ir/tfrt_fallback_sync.h"
#include "tensorflow/core/platform/resource_loader.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/tfrt/fallback/cost_recorder.h"
#include "tfrt/init_tfrt_dialects.h"
namespace tensorflow {
namespace {
constexpr char kCostAttrName[] = "_tfrt_cost";
constexpr char kOpKeyAttrName[] = "op_key";
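// Collects the op_key -> cost mapping currently stored as attributes in the
// module.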
absl::flat_hash_map<int64_t, uint64_t> GetOpCostMap(mlir::ModuleOp op) {
absl::flat_hash_map<int64_t, uint64_t> op_cost_map;
op.walk([&](mlir::Operation* op) {
const auto cost_attr = op->getAttrOfType<mlir::IntegerAttr>(kCostAttrName);
if (!cost_attr) return;
const auto op_key_attr =
op->getAttrOfType<mlir::IntegerAttr>(kOpKeyAttrName);
if (!op_key_attr) return;
op_cost_map[op_key_attr.getInt()] = cost_attr.getInt();
});
return op_cost_map;
}
TEST(CostUpdateTest, Basic) {
std::string saved_model_mlir_path = tensorflow::GetDataDependencyFilepath(
"tensorflow/compiler/mlir/tfrt/tests/analysis/testdata/test.mlir");
mlir::DialectRegistry registry;
tfrt::RegisterTFRTDialects(registry);
registry.insert<tfrt::fallback_async::FallbackAsyncDialect>();
registry.insert<tfrt::fallback_sync::FallbackSyncDialect>();
mlir::MLIRContext context(registry);
auto module =
mlir::parseSourceFile<mlir::ModuleOp>(saved_model_mlir_path, &context);
ASSERT_TRUE(module);
auto expected_op_cost_map = GetOpCostMap(module.get());
EXPECT_EQ(expected_op_cost_map.size(), 1);
unsigned int seed = 23579;
for (auto& [op_key, cost] : expected_op_cost_map) {
cost = rand_r(&seed) % 1000;
}
tensorflow::tfrt_stub::CostRecorder cost_recorder;
for (const auto& [op_key, cost] : expected_op_cost_map) {
cost_recorder.RecordCost(op_key, cost);
}
tfrt_compiler::UpdateOpCostInTfrtMlir(module.get(), cost_recorder);
const auto got_op_cost_map = GetOpCostMap(module.get());
EXPECT_THAT(got_op_cost_map, ::testing::ContainerEq(expected_op_cost_map));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tfrt/transforms/update_op_cost_in_tfrt_mlir.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tfrt/tests/analysis/update_op_cost_in_tfrt_mlir_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
f4b95792-084d-430f-879c-146a3b0e9a8b | cpp | tensorflow/tensorflow | convert_tf_quant_types | tensorflow/compiler/mlir/quantization/stablehlo/passes/bridge/convert_tf_quant_types.cc | tensorflow/compiler/mlir/quantization/stablehlo/passes/bridge/convert_tf_quant_types_test.cc | #include <memory>
#include <string>
#include <utility>
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/Casting.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/BuiltinTypeInterfaces.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/Matchers.h"
#include "mlir/IR/Operation.h"
#include "mlir/IR/OperationSupport.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/IR/TypeUtilities.h"
#include "mlir/IR/Value.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Support/LogicalResult.h"
#include "mlir/Transforms/DialectConversion.h"
#include "tensorflow/compiler/mlir/quantization/stablehlo/utils/tf_type_utils.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_attributes.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"
#include "tensorflow/core/lib/monitoring/counter.h"
namespace mlir::quant::stablehlo {
namespace {
using quant::tensorflow::GetDenseAttrFromTensorProtoAttr;
using quant::tensorflow::GetIntTypeFromTFQint;
using quant::tensorflow::IsTFQintType;
using quant::tensorflow::IsTFUniformQuantizedOp;
#define GEN_PASS_DEF_CONVERTTFQUANTTYPES
#include "tensorflow/compiler/mlir/quantization/stablehlo/passes/bridge/passes.h.inc"
auto *mlir_tf_quant_op_count = ::tensorflow::monitoring::Counter<1>::New(
"/tensorflow/core/tf2xla/tf_quant_op_count" ,
"Counts the number of ops that has qint types" ,
"op_name" );
bool IsIllegalType(Type type) {
return IsTFQintType(getElementTypeOrSelf(type));
}
Type ToLegalType(Type type) {
if (IsTFQintType(type)) return GetIntTypeFromTFQint(type);
if (auto shaped = mlir::dyn_cast<ShapedType>(type)) {
Type elem = shaped.getElementType();
if (IsTFQintType(elem)) return shaped.clone(ToLegalType(elem));
}
return type;
}
bool IsQintToIntCast(Operation *op) {
auto cast_op = llvm::dyn_cast<TF::CastOp>(op);
return cast_op && IsIllegalType(cast_op.getX().getType()) &&
ToLegalType(cast_op.getX().getType()) == cast_op.getY().getType();
}
bool IsIntToQintCast(Operation *op) {
auto cast_op = llvm::dyn_cast<TF::CastOp>(op);
return cast_op && IsIllegalType(cast_op.getY().getType()) &&
ToLegalType(cast_op.getY().getType()) == cast_op.getX().getType();
}
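// Returns true if the qint-typed value is only consumed by qint->int casts;
// values that are not qint-typed are trivially fine.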
bool IsQintValueQintToIntCast(Value v) {
if (!IsIllegalType(v.getType())) {
return true;
}
if (v.getUsers().empty()) {
return false;
}
return llvm::all_of(v.getUsers(), [&](OpOperand operand) {
return IsQintToIntCast(operand.getOwner());
});
}
bool IsQintValueDefinedByIntToQintCast(Value v) {
if (!IsIllegalType(v.getType())) {
return true;
}
if (!v.getDefiningOp() || !llvm::isa<TF::CastOp>(v.getDefiningOp())) {
return false;
}
return IsIntToQintCast(v.getDefiningOp());
}
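// A TF uniform quantized op keeps its qint signature; it is legal once every
// qint result only feeds qint->int casts and every qint operand is produced
// by an int->qint cast.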
bool IsTFUniformQuantizedOpLegal(Operation *op) {
return op && llvm::all_of(op->getResults(), IsQintValueQintToIntCast) &&
llvm::all_of(op->getOperands(), IsQintValueDefinedByIntToQintCast);
}
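// A cast involving a qint type is only legal when it sits directly next to a
// TF uniform quantized op, i.e. it is one of the boundary casts created
// around such ops.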
bool IsCastOpLegal(TF::CastOp cast_op) {
if (IsIllegalType(cast_op.getSrcT()) && IsIllegalType(cast_op.getDstT())) {
return false;
}
if (IsIllegalType(cast_op.getSrcT()) &&
!(cast_op.getX().getDefiningOp() &&
IsTFUniformQuantizedOp(cast_op.getX().getDefiningOp()))) {
return false;
}
if (IsIllegalType(cast_op.getDstT()) &&
!IsTFUniformQuantizedOp(*cast_op.getY().getUsers().begin())) {
return false;
}
return true;
}
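// Type converter that maps qint types (and shaped types with qint elements)
// to their integer equivalents and leaves everything else unchanged.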
class TFQuantTypeConverter : public TypeConverter {
public:
TFQuantTypeConverter() {
addConversion([](Type type) -> Type {
return IsIllegalType(type) ? ToLegalType(type) : type;
});
}
};
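// Conversion target: uniform quantized ops, casts and constants get the
// special legality checks above; any other op is legal once the type
// converter accepts all of its types.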
class TFQuantTypeConversionTarget : public ConversionTarget {
public:
explicit TFQuantTypeConversionTarget(MLIRContext &ctx,
TFQuantTypeConverter &converter)
: ConversionTarget(ctx), converter_(converter) {
markUnknownOpDynamicallyLegal([this](Operation *op) {
if (IsTFUniformQuantizedOp(op)) {
return IsTFUniformQuantizedOpLegal(op);
} else if (auto cast_op = llvm::dyn_cast<TF::CastOp>(op)) {
return IsCastOpLegal(cast_op);
} else if (auto const_op = llvm::dyn_cast<TF::ConstOp>(op)) {
return !IsIllegalType(const_op.getOutput().getType());
}
if (auto func = dyn_cast<func::FuncOp>(op)) {
if (!converter_.isSignatureLegal(func.getFunctionType())) return false;
}
return converter_.isLegal(op);
});
}
private:
TFQuantTypeConverter &converter_;
};
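// Generic pattern: recreates any op that is neither a uniform quantized op
// nor a constant with legalized result and region types, and bumps the qint
// op counter.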
class TFQuantTypePattern : public ConversionPattern {
public:
TFQuantTypePattern(MLIRContext *ctx, TypeConverter &converter)
: ConversionPattern(converter, MatchAnyOpTypeTag(), 1, ctx) {}
LogicalResult matchAndRewrite(
Operation *op, ArrayRef<Value> operands,
ConversionPatternRewriter &rewriter) const override {
if (IsTFUniformQuantizedOp(op) || llvm::isa<TF::ConstOp>(op)) {
return failure();
}
llvm::SmallVector<Type, 4> new_results;
if (failed(getTypeConverter()->convertTypes(op->getResultTypes(),
new_results)))
return failure();
OperationState state(op->getLoc(), op->getName().getStringRef(), operands,
new_results, op->getAttrs(), op->getSuccessors());
for (Region ®ion : op->getRegions()) {
auto new_region = std::make_unique<Region>(op);
rewriter.inlineRegionBefore(region, *new_region, new_region->begin());
if (failed(rewriter.convertRegionTypes(new_region.get(),
*getTypeConverter()))) {
return failure();
}
state.addRegion(std::move(new_region));
}
rewriter.replaceOp(op, rewriter.create(state)->getResults());
mlir_tf_quant_op_count->GetCell(std::string(op->getName().getStringRef()))
->IncrementBy(1);
return success();
}
};
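// Pattern for TF uniform quantized ops: inserts int->qint casts around their
// operands and qint->int casts around their results, so the op itself keeps
// its original qint signature while the surrounding IR uses integer types.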
class TFUniformQuantizedOpsPattern : public ConversionPattern {
public:
explicit TFUniformQuantizedOpsPattern(MLIRContext *ctx)
: ConversionPattern(MatchAnyOpTypeTag(), 1, ctx) {}
LogicalResult matchAndRewrite(
Operation *op, ArrayRef<Value> operands,
ConversionPatternRewriter &rewriter) const override {
if (!IsTFUniformQuantizedOp(op)) {
return failure();
}
llvm::SmallVector<Value, 4> new_operands;
for (int i = 0; i < operands.size(); ++i) {
Type orig_op_type = op->getOperandTypes()[i];
if (IsIllegalType(orig_op_type) &&
!IsQintValueDefinedByIntToQintCast(op->getOperand(i))) {
new_operands.push_back(rewriter.create<TF::CastOp>(
op->getLoc(), orig_op_type, operands[i]));
} else {
new_operands.push_back(operands[i]);
}
}
OperationState state(op->getLoc(), op->getName().getStringRef(),
new_operands, op->getResultTypes(), op->getAttrs(),
op->getSuccessors());
Operation *new_op = rewriter.create(state);
llvm::SmallVector<Value, 4> new_results = new_op->getResults();
for (int i = 0; i < new_results.size(); ++i) {
Value &result = new_results[i];
if (IsIllegalType(result.getType()) &&
!IsQintValueQintToIntCast(op->getResult(i))) {
result = rewriter.create<TF::CastOp>(
op->getLoc(), ToLegalType(result.getType()), result);
}
op->getResult(i).replaceUsesWithIf(
new_op->getResult(i), [](OpOperand &operand) {
return IsQintToIntCast(operand.getOwner());
});
}
rewriter.replaceOp(op, new_results);
return success();
}
};
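// Rewrites qint constants as integer constants by decoding the TensorProto
// attribute into a dense attribute of the legalized type.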
class TFConstOpQuantToIntPattern : public OpConversionPattern<TF::ConstOp> {
public:
using OpConversionPattern::OpConversionPattern;
LogicalResult matchAndRewrite(
TF::ConstOp op, TF::ConstOpAdaptor adaptor,
ConversionPatternRewriter &rewriter) const override {
if (!IsIllegalType(op.getOutput().getType())) return failure();
TF::TensorProtoAttr tensor_proto_attr;
if (!matchPattern(op.getOperation(), m_Constant(&tensor_proto_attr))) {
return rewriter.notifyMatchFailure(op, "operand must be constant.");
}
auto dense_attr_or = GetDenseAttrFromTensorProtoAttr(
tensor_proto_attr.getValue(),
mlir::dyn_cast<TensorType>(ToLegalType(op.getOutput().getType())));
if (failed(dense_attr_or)) {
op->emitError("failed to get DenseElementAttr.");
return failure();
}
rewriter.replaceOpWithNewOp<TF::ConstOp>(
op, ToLegalType(op.getOutput().getType()), *dense_attr_or);
return success();
}
};
struct ConvertTFQuantTypes
: public impl::ConvertTFQuantTypesBase<ConvertTFQuantTypes> {
void runOnOperation() override;
};
void ConvertTFQuantTypes::runOnOperation() {
TFQuantTypeConverter converter;
RewritePatternSet patterns(&getContext());
patterns.add<TFQuantTypePattern>(&getContext(), converter);
patterns.add<TFConstOpQuantToIntPattern, TFUniformQuantizedOpsPattern>(
&getContext());
populateFunctionOpInterfaceTypeConversionPattern<func::FuncOp>(patterns,
converter);
TFQuantTypeConversionTarget target(getContext(), converter);
if (failed(applyFullConversion(getOperation(), target, std::move(patterns))))
return signalPassFailure();
}
}
std::unique_ptr<OperationPass<func::FuncOp>> CreateConvertTFQuantTypesPass() {
return std::make_unique<ConvertTFQuantTypes>();
}
} | #include <cstdint>
#include <memory>
#include <string>
#include <gtest/gtest.h>
#include "absl/strings/string_view.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/Pass/PassManager.h"
#include "mlir/Support/LogicalResult.h"
#include "tensorflow/compiler/mlir/quantization/stablehlo/passes/bridge/passes.h"
#include "tensorflow/compiler/mlir/register_common_dialects.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/serialize_mlir_module_utils.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/monitoring/cell_reader.h"
#include "tsl/platform/statusor.h"
namespace mlir::quant::stablehlo {
namespace {
using ::mlir::DialectRegistry;
using ::mlir::MLIRContext;
using ::mlir::ModuleOp;
using ::mlir::OwningOpRef;
using ::tensorflow::monitoring::testing::CellReader;
using ::testing::Test;
static constexpr char kMetricsName[] =
"/tensorflow/core/tf2xla/tf_quant_op_count";
class LegalizeTfTypesTest : public Test {
protected:
void CreateModule(const char* module_string) {
DialectRegistry mlir_registry;
RegisterCommonToolingDialects(mlir_registry);
context_.appendDialectRegistry(mlir_registry);
TF_ASSERT_OK(
tensorflow::DeserializeMlirModule(module_string, &context_, &module_));
pm_ = std::make_unique<mlir::PassManager>(&context_);
pm_->addNestedPass<mlir::func::FuncOp>(
quant::stablehlo::CreateConvertTFQuantTypesPass());
}
mlir::LogicalResult Run() { return pm_->run(module_.get()); }
private:
MLIRContext context_;
OwningOpRef<ModuleOp> module_;
std::unique_ptr<mlir::PassManager> pm_;
};
TEST_F(LegalizeTfTypesTest, RecordsStreamzQuantOps) {
static constexpr char kMlirModuleStr[] = R"(
module attributes {tf.versions = {bad_consumers = [], min_consumer = 0 : i32, producer = 268 : i32}} {
func.func @main(%arg0: tensor<3x3x!tf_type.qint8>, %arg1: tensor<3x3x!tf_type.qint8>) -> tensor<6x3x!tf_type.qint8> {
%axis = "tf.Const"() { value = dense<0> : tensor<i64> } : () -> tensor<i64>
%1 = "tf.ConcatV2"(%arg0, %arg1, %axis) : (tensor<3x3x!tf_type.qint8>, tensor<3x3x!tf_type.qint8>, tensor<i64>) -> tensor<6x3x!tf_type.qint8>
func.return %1 : tensor<6x3x!tf_type.qint8>
}
})";
CreateModule(kMlirModuleStr);
CellReader<int64_t> reader(kMetricsName);
auto result = Run();
EXPECT_TRUE(result.succeeded());
EXPECT_EQ(reader.Delta("tf.ConcatV2"), 1);
EXPECT_EQ(reader.Delta("func.return"), 1);
EXPECT_EQ(reader.Delta("func.func"), 0);
}
TEST_F(LegalizeTfTypesTest, RecordsStreamzNoQuantOps) {
static constexpr char kMlirModuleStr[] = R"(
module attributes {tf.versions = {bad_consumers = [], min_consumer = 0 : i32, producer = 268 : i32}} {
func.func @main(%arg0: tensor<3x3xf32>, %arg1: tensor<3x3xf32>) -> tensor<6x3xf32> {
%axis = "tf.Const"() { value = dense<0> : tensor<i64> } : () -> tensor<i64>
%1 = "tf.ConcatV2"(%arg0, %arg1, %axis) : (tensor<3x3xf32>, tensor<3x3xf32>, tensor<i64>) -> tensor<6x3xf32>
func.return %1 : tensor<6x3xf32>
}
})";
CreateModule(kMlirModuleStr);
CellReader<int64_t> reader(kMetricsName);
auto result = Run();
EXPECT_TRUE(result.succeeded());
EXPECT_EQ(reader.Delta("tf.ConcatV2"), 0);
EXPECT_EQ(reader.Delta("func.return"), 0);
EXPECT_EQ(reader.Delta("func.func"), 0);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/quantization/stablehlo/passes/bridge/convert_tf_quant_types.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/quantization/stablehlo/passes/bridge/convert_tf_quant_types_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
5968201d-7ca3-4c7c-856a-00f65586c49f | cpp | tensorflow/tensorflow | shard_dataset_op | tensorflow/core/kernels/data/shard_dataset_op.cc | tensorflow/core/kernels/data/shard_dataset_op_test.cc | #include "tensorflow/core/kernels/data/shard_dataset_op.h"
#include <cstdlib>
#include <functional>
#include <memory>
#include <optional>
#include <utility>
#include <vector>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "tensorflow/core/data/dataset_utils.h"
#include "tensorflow/core/data/global_shuffle_utils.h"
#include "tensorflow/core/data/name_utils.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/partial_tensor_shape.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/gtl/cleanup.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/stringprintf.h"
#include "tensorflow/core/util/batch_util.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/thread_annotations.h"
namespace tensorflow {
namespace data {
constexpr const char* const ShardDatasetOp::kDatasetType;
constexpr const char* const ShardDatasetOp::kInputDataset;
constexpr const char* const ShardDatasetOp::kNumShards;
constexpr const char* const ShardDatasetOp::kIndex;
constexpr const char* const ShardDatasetOp::kRequireNonEmpty;
constexpr const char* const ShardDatasetOp::kOutputTypes;
constexpr const char* const ShardDatasetOp::kOutputShapes;
constexpr char kInputImplEmpty[] = "input_impl_empty";
constexpr char kNextIndex[] = "next_index";
constexpr char kFileShardErrorMessage[] =
"If you are using datasets with distribution strategy, consider setting "
"the auto sharding policy to either DATA or OFF using the "
"`experimental_distribute.auto_shard_policy` option of `tf.data.Options()`."
" Or, split your input files into a larger number of small files such that "
"number of files is greater than number of shards/workers.";
class ShardDatasetOp::Dataset : public DatasetBase {
public:
Dataset(OpKernelContext* ctx, int64_t num_shards, int64_t index,
bool require_non_empty, const DatasetBase* input)
: DatasetBase(DatasetContext(ctx)),
num_shards_(num_shards),
index_(index),
input_(input),
require_non_empty_(require_non_empty),
traceme_metadata_(
{{"index", strings::Printf("%lld", static_cast<long long>(index))},
{"num_shards",
strings::Printf("%lld", static_cast<long long>(num_shards))}}) {
input_->Ref();
random_indexing_compatible_ = absl::OkStatus();
if (input_ != nullptr) {
random_indexing_compatible_ = input_->RandomIndexingCompatible();
}
}
~Dataset() override { input_->Unref(); }
std::unique_ptr<IteratorBase> MakeIteratorInternal(
const string& prefix) const override {
return std::make_unique<Iterator>(Iterator::Params{
this, name_utils::IteratorPrefix(kDatasetType, prefix)});
}
const DataTypeVector& output_dtypes() const override {
return input_->output_dtypes();
}
const std::vector<PartialTensorShape>& output_shapes() const override {
return input_->output_shapes();
}
string DebugString() const override {
name_utils::DatasetDebugStringParams params;
params.set_args(num_shards_, index_);
return name_utils::DatasetDebugString(kDatasetType, params);
}
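// Each shard gets n / num_shards_ elements, plus one more when this shard's
// index is below n % num_shards_.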
int64_t CardinalityInternal(CardinalityOptions options) const override {
int64_t n = input_->Cardinality(options);
if (n == kInfiniteCardinality || n == kUnknownCardinality) {
return n;
}
return n / num_shards_ + (index_ < n % num_shards_ ? 1 : 0);
}
Status InputDatasets(std::vector<const DatasetBase*>* inputs) const override {
inputs->push_back(input_);
return absl::OkStatus();
}
Status CheckExternalState() const override {
return input_->CheckExternalState();
}
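// Random access: element |index| of this shard is element
// index_ + num_shards_ * index of the input dataset.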
Status Get(OpKernelContext* ctx, int64 index,
std::vector<Tensor>* out_tensors) const override {
TF_RETURN_IF_ERROR(CheckRandomAccessCompatible(index));
return input_->Get(ctx, index_ + (num_shards_ * index), out_tensors);
}
absl::Status RandomIndexingCompatible() const override {
return random_indexing_compatible_;
}
protected:
Status AsGraphDefInternal(SerializationContext* ctx,
DatasetGraphDefBuilder* b,
Node** output) const override {
Node* input_graph_node = nullptr;
TF_RETURN_IF_ERROR(b->AddInputDataset(ctx, input_, &input_graph_node));
Node* num_shards = nullptr;
TF_RETURN_IF_ERROR(b->AddScalar(num_shards_, &num_shards));
Node* index = nullptr;
TF_RETURN_IF_ERROR(b->AddScalar(index_, &index));
AttrValue require_non_empty_attr;
b->BuildAttrValue(require_non_empty_, &require_non_empty_attr);
TF_RETURN_IF_ERROR(
b->AddDataset(this, {input_graph_node, num_shards, index},
{{kRequireNonEmpty, require_non_empty_attr}}, output));
return absl::OkStatus();
}
private:
class Iterator : public DatasetIterator<Dataset> {
public:
explicit Iterator(const Params& params)
: DatasetIterator<Dataset>(params), next_index_(0), element_count_(0) {}
bool SymbolicCheckpointCompatible() const override { return true; }
Status Initialize(IteratorContext* ctx) override {
if (dataset()->num_shards_ == kShardHint) {
return errors::FailedPrecondition(
"`tf.data.Dataset.shard(SHARD_HINT, ...)` can only be used in "
"`tf.distribute.Strategy.experimental_distribute_dataset()` with "
"`tf.data.experimental.AutoShardPolicy.HINT` policy, or tf.data "
"service with "
"`tf.data.experimental.service.ShardingPolicy.HINT` processing "
"mode.");
}
return dataset()->input_->MakeIterator(ctx, this, prefix(), &input_impl_);
}
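// Skips input elements until the next one that belongs to this shard, then
// returns it, e.g. with num_shards=3 and index=1 the input elements
// 1, 4, 7, ... are produced. When require_non_empty_ is set, the first call
// also verifies that the input has enough elements for all num_shards_
// shards.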
Status GetNextInternal(IteratorContext* ctx,
std::vector<Tensor>* out_tensors,
bool* end_of_sequence) override {
mutex_lock l(mu_);
*end_of_sequence = false;
if (!input_impl_) {
*end_of_sequence = true;
return absl::OkStatus();
}
if (ctx->index_mapper() != nullptr) {
return Get(ctx, out_tensors, end_of_sequence);
}
int num_to_skip =
(dataset()->index_ - next_index_) % dataset()->num_shards_;
if (num_to_skip < 0) {
num_to_skip += dataset()->num_shards_;
}
int num_skipped;
TF_RETURN_IF_ERROR(
input_impl_->Skip(ctx, num_to_skip, end_of_sequence, &num_skipped));
next_index_ += num_skipped;
if (*end_of_sequence) {
input_impl_.reset();
return absl::OkStatus();
}
std::vector<Tensor> result;
TF_RETURN_IF_ERROR(input_impl_->GetNext(ctx, &result, end_of_sequence));
if (*end_of_sequence) {
input_impl_.reset();
return absl::OkStatus();
}
next_index_++;
if (dataset()->require_non_empty_ &&
next_index_ < dataset()->num_shards_) {
int num_skipped;
Status s = input_impl_->Skip(ctx, dataset()->num_shards_ - next_index_,
end_of_sequence, &num_skipped);
if (*end_of_sequence || errors::IsOutOfRange(s)) {
return absl::InvalidArgumentError(absl::StrCat(
"Could not apply FILE based sharding: the dataset only has ",
next_index_, " file(s), which is not enough for the required ",
dataset()->num_shards_, " shards/workers. ",
kFileShardErrorMessage));
} else if (!s.ok()) {
return s;
}
next_index_ = dataset()->num_shards_;
}
*out_tensors = std::move(result);
return absl::OkStatus();
}
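// Path taken when an index mapper is installed (e.g. global shuffling): fetches from the
// input through GetIndexMapper below, and reports an error if a non-empty shard was
// required but the very first fetch already hits end-of-sequence.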
Status Get(IteratorContext* ctx, std::vector<Tensor>* out_tensors,
bool* end_of_sequence) TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
IteratorContextWithIndexMapper ctx_with_index_mapper(ctx, this);
auto merge_checkpoint = gtl::MakeCleanup([&ctx_with_index_mapper] {
ctx_with_index_mapper.MergeCheckpoint();
});
TF_RETURN_IF_ERROR(input_impl_->GetNext(ctx_with_index_mapper.Get(),
out_tensors, end_of_sequence));
if (*end_of_sequence && dataset()->require_non_empty_ &&
element_count_ == 0) {
return absl::InvalidArgumentError(absl::StrCat(
"Could not apply FILE based sharding: The dataset does not have "
"enough file(s) for the required ",
dataset()->num_shards_, " shards/workers. ",
kFileShardErrorMessage));
}
++element_count_;
return absl::OkStatus();
}
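// Maps this shard's element position p to input position
// parent_index_mapper(p) * num_shards_ + index_.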
IndexMapperFn GetIndexMapper(
IndexMapperFn parent_index_mapper) const override {
int64_t num_shards = dataset()->num_shards_;
int64_t shard_index = dataset()->index_;
return [parent_index_mapper, num_shards,
shard_index](size_t element_position) -> absl::StatusOr<size_t> {
TF_ASSIGN_OR_RETURN(size_t output_index,
parent_index_mapper(element_position));
return output_index * num_shards + shard_index;
};
}
protected:
std::shared_ptr<model::Node> CreateNode(
IteratorContext* ctx, model::Node::Args args) const override {
return model::MakeKnownRatioNode(
std::move(args), 1.0 / static_cast<double>(dataset()->num_shards_));
}
Status SaveInternal(SerializationContext* ctx,
IteratorStateWriter* writer) override {
mutex_lock l(mu_);
TF_RETURN_IF_ERROR(writer->WriteScalar(
prefix(), kInputImplEmpty, static_cast<int64_t>(!input_impl_)));
if (input_impl_) {
TF_RETURN_IF_ERROR(SaveInput(ctx, writer, input_impl_));
TF_RETURN_IF_ERROR(
writer->WriteScalar(prefix(), kNextIndex, next_index_));
}
return absl::OkStatus();
}
Status RestoreInternal(IteratorContext* ctx,
IteratorStateReader* reader) override {
mutex_lock l(mu_);
if (ctx->restored_element_count().has_value()) {
element_count_ = *ctx->restored_element_count();
return RestoreInput(ctx, reader, input_impl_);
}
int64_t input_empty;
TF_RETURN_IF_ERROR(
reader->ReadScalar(prefix(), kInputImplEmpty, &input_empty));
if (!static_cast<bool>(input_empty)) {
TF_RETURN_IF_ERROR(RestoreInput(ctx, reader, input_impl_));
TF_RETURN_IF_ERROR(
reader->ReadScalar(prefix(), kNextIndex, &next_index_));
} else {
input_impl_.reset();
}
return absl::OkStatus();
}
TraceMeMetadata GetTraceMeMetadata() const override {
return dataset()->traceme_metadata_;
}
private:
mutex mu_;
std::unique_ptr<IteratorBase> input_impl_ TF_GUARDED_BY(mu_);
int64_t next_index_ TF_GUARDED_BY(mu_);
size_t element_count_ TF_GUARDED_BY(mu_);
};
const int64_t num_shards_;
const int64_t index_;
const DatasetBase* const input_;
const bool require_non_empty_;
const TraceMeMetadata traceme_metadata_;
absl::Status random_indexing_compatible_;
};
ShardDatasetOp::ShardDatasetOp(OpKernelConstruction* ctx)
: UnaryDatasetOpKernel(ctx) {
OP_REQUIRES_OK(ctx, ctx->GetAttr(kRequireNonEmpty, &require_non_empty_));
}
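// Reads the num_shards and index scalar inputs, validates them (num_shards > 0 or
// kShardHint; 0 <= index < num_shards), and constructs the dataset.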
void ShardDatasetOp::MakeDataset(OpKernelContext* ctx, DatasetBase* input,
DatasetBase** output) {
int64_t index = 0;
int64_t num_shards = 0;
OP_REQUIRES_OK(ctx,
ParseScalarArgument<int64_t>(ctx, kNumShards, &num_shards));
OP_REQUIRES(
ctx, num_shards > 0 || num_shards == kShardHint,
errors::InvalidArgument("Number of shards must be greater than zero "
"(currently num_shards = ",
num_shards, ")."));
OP_REQUIRES_OK(ctx, ParseScalarArgument<int64_t>(ctx, kIndex, &index));
OP_REQUIRES(
ctx, (index >= 0 && index < num_shards) || num_shards == kShardHint,
errors::InvalidArgument("Index must be between 0 and ", num_shards - 1,
" (currently index = ", index, ")."));
*output = new Dataset(ctx, num_shards, index, require_non_empty_, input);
}
namespace {
REGISTER_KERNEL_BUILDER(Name("ShardDataset").Device(DEVICE_CPU),
ShardDatasetOp);
}
}
} | #include "tensorflow/core/kernels/data/shard_dataset_op.h"
#include "tensorflow/core/data/dataset_test_base.h"
namespace tensorflow {
namespace data {
namespace {
constexpr char kNodeName[] = "shard_dataset";
class ShardDatasetParams : public DatasetParams {
public:
template <typename T>
ShardDatasetParams(T input_dataset_params, int64_t num_shards, int64_t index,
bool require_non_empty, DataTypeVector output_dtypes,
std::vector<PartialTensorShape> output_shapes,
string node_name)
: DatasetParams(std::move(output_dtypes), std::move(output_shapes),
std::move(node_name)),
num_shards_(num_shards),
index_(index),
require_non_empty_(require_non_empty) {
input_dataset_params_.push_back(std::make_unique<T>(input_dataset_params));
iterator_prefix_ =
name_utils::IteratorPrefix(input_dataset_params.dataset_type(),
input_dataset_params.iterator_prefix());
}
std::vector<Tensor> GetInputTensors() const override {
return CreateTensors<int64_t>(TensorShape({}), {{num_shards_}, {index_}});
}
Status GetInputNames(std::vector<string>* input_names) const override {
input_names->clear();
input_names->emplace_back(ShardDatasetOp::kInputDataset);
input_names->emplace_back(ShardDatasetOp::kNumShards);
input_names->emplace_back(ShardDatasetOp::kIndex);
return absl::OkStatus();
}
Status GetAttributes(AttributeVector* attr_vector) const override {
attr_vector->clear();
attr_vector->emplace_back("require_non_empty", require_non_empty_);
attr_vector->emplace_back("output_types", output_dtypes_);
attr_vector->emplace_back("output_shapes", output_shapes_);
attr_vector->emplace_back("metadata", "");
return absl::OkStatus();
}
string dataset_type() const override { return ShardDatasetOp::kDatasetType; }
private:
int64_t num_shards_;
int64_t index_;
bool require_non_empty_;
};
class ShardDatasetOpTest : public DatasetOpsTestBase {};
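// range(0, 10) split into 5 shards, taking shard 2: expected elements are {2, 7}.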
ShardDatasetParams ShardDatasetParams1() {
return ShardDatasetParams(RangeDatasetParams(0, 10, 1),
5,
2,
true,
{DT_INT64},
{PartialTensorShape({})},
kNodeName);
}
ShardDatasetParams ShardDatasetParams2() {
return ShardDatasetParams(RangeDatasetParams(0, 10, 1),
5,
0,
true,
{DT_INT64},
{PartialTensorShape({})},
kNodeName);
}
ShardDatasetParams ShardDatasetParams3() {
return ShardDatasetParams(RangeDatasetParams(0, 1, 1),
5,
2,
true,
{DT_INT64},
{PartialTensorShape({})},
kNodeName);
}
ShardDatasetParams ShardDatasetParams4() {
return ShardDatasetParams(RangeDatasetParams(0, 10, 1),
7,
5,
true,
{DT_INT64},
{PartialTensorShape({})},
kNodeName);
}
ShardDatasetParams ShardDatasetParams5() {
return ShardDatasetParams(RangeDatasetParams(0, 10, 1),
5,
4,
true,
{DT_INT64},
{PartialTensorShape({})},
kNodeName);
}
ShardDatasetParams ShardDatasetParams6() {
return ShardDatasetParams(RangeDatasetParams(0, 10, 1),
4,
3,
true,
{DT_INT64},
{PartialTensorShape({})},
kNodeName);
}
ShardDatasetParams ShardDatasetParams7() {
return ShardDatasetParams(RangeDatasetParams(0, 10, 1),
20,
5,
false,
{DT_INT64},
{PartialTensorShape({})},
kNodeName);
}
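// 20 shards over a 10-element input with require_non_empty=true: iteration is expected
// to fail with an invalid-argument error.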
ShardDatasetParams InvalidShardDatasetParamsWithNoElemForEachShard() {
return ShardDatasetParams(RangeDatasetParams(0, 10, 1),
20,
5,
true,
{DT_INT64},
{PartialTensorShape({})},
kNodeName);
}
ShardDatasetParams InvalidShardDatasetParams1() {
return ShardDatasetParams(RangeDatasetParams(0, 10, 1),
5,
7,
false,
{DT_INT64},
{PartialTensorShape({})},
kNodeName);
}
ShardDatasetParams InvalidShardDatasetParams2() {
return ShardDatasetParams(RangeDatasetParams(0, 10, 1),
5,
-3,
true,
{DT_INT64},
{PartialTensorShape({})},
kNodeName);
}
ShardDatasetParams InvalidShardDatasetParams3() {
return ShardDatasetParams(RangeDatasetParams(0, 10, 1),
-3,
1,
true,
{DT_INT64},
{PartialTensorShape({})},
kNodeName);
}
ShardDatasetParams InvalidShardDatasetParams4() {
return ShardDatasetParams(RangeDatasetParams(0, 10, 1),
0,
1,
true,
{DT_INT64},
{PartialTensorShape({})},
kNodeName);
}
std::vector<GetNextTestCase<ShardDatasetParams>> GetNextTestCases() {
return {
{ShardDatasetParams1(),
CreateTensors<int64_t>(TensorShape{}, {{2}, {7}})},
{ShardDatasetParams2(),
CreateTensors<int64_t>(TensorShape{}, {{0}, {5}})},
{ShardDatasetParams3(),
{}},
{ShardDatasetParams4(),
CreateTensors<int64_t>(TensorShape{}, {{5}})},
{ShardDatasetParams5(),
CreateTensors<int64_t>(TensorShape{}, {{4}, {9}})},
{ShardDatasetParams6(),
CreateTensors<int64_t>(TensorShape{}, {{3}, {7}})},
{ShardDatasetParams7(),
CreateTensors<int64_t>(TensorShape{}, {{5}})}};
}
ITERATOR_GET_NEXT_TEST_P(ShardDatasetOpTest, ShardDatasetParams,
GetNextTestCases())
TEST_F(ShardDatasetOpTest, DatasetNodeName) {
auto dataset_params = ShardDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetNodeName(dataset_params.node_name()));
}
TEST_F(ShardDatasetOpTest, DatasetTypeString) {
auto dataset_params = ShardDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(
CheckDatasetTypeString(name_utils::OpName(ShardDatasetOp::kDatasetType)));
}
TEST_F(ShardDatasetOpTest, DatasetOutputDtypes) {
auto dataset_params = ShardDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetOutputDtypes({DT_INT64}));
}
TEST_F(ShardDatasetOpTest, DatasetOutputShapes) {
auto dataset_params = ShardDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetOutputShapes({PartialTensorShape({})}));
}
std::vector<CardinalityTestCase<ShardDatasetParams>> CardinalityTestCases() {
return {{ShardDatasetParams1(),
2},
{ShardDatasetParams2(),
2},
{ShardDatasetParams3(),
0},
{ShardDatasetParams4(),
1},
{ShardDatasetParams5(),
2},
{ShardDatasetParams6(),
2},
{ShardDatasetParams7(),
1}};
}
DATASET_CARDINALITY_TEST_P(ShardDatasetOpTest, ShardDatasetParams,
CardinalityTestCases())
TEST_F(ShardDatasetOpTest, IteratorOutputDtypes) {
auto dataset_params = ShardDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckIteratorOutputDtypes({DT_INT64}));
}
TEST_F(ShardDatasetOpTest, IteratorOutputShapes) {
auto dataset_params = ShardDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckIteratorOutputShapes({PartialTensorShape({})}));
}
TEST_F(ShardDatasetOpTest, IteratorPrefix) {
auto dataset_params = ShardDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckIteratorPrefix(name_utils::IteratorPrefix(
ShardDatasetOp::kDatasetType, dataset_params.iterator_prefix())));
}
std::vector<IteratorSaveAndRestoreTestCase<ShardDatasetParams>>
IteratorSaveAndRestoreTestCases() {
return {
{ShardDatasetParams1(),
{0, 1, 5},
CreateTensors<int64_t>(TensorShape{}, {{2}, {7}})},
{ShardDatasetParams2(),
{0, 1, 5},
CreateTensors<int64_t>(TensorShape{}, {{0}, {5}})},
{ShardDatasetParams3(),
{0, 1},
{}},
{ShardDatasetParams4(),
{0, 5},
CreateTensors<int64_t>(TensorShape{}, {{5}})},
{ShardDatasetParams5(),
{0, 1, 5},
CreateTensors<int64_t>(TensorShape{}, {{4}, {9}})},
{ShardDatasetParams6(),
{0, 1, 5},
CreateTensors<int64_t>(TensorShape{}, {{3}, {7}})},
{ShardDatasetParams7(),
{0, 5},
CreateTensors<int64_t>(TensorShape{}, {{5}})}};
}
ITERATOR_SAVE_AND_RESTORE_TEST_P(ShardDatasetOpTest, ShardDatasetParams,
IteratorSaveAndRestoreTestCases())
TEST_F(ShardDatasetOpTest, NoElemForEachShard) {
auto dataset_params = InvalidShardDatasetParamsWithNoElemForEachShard();
TF_ASSERT_OK(Initialize(dataset_params));
bool end_of_sequence = false;
std::vector<Tensor> out_tensors;
EXPECT_EQ(
iterator_->GetNext(iterator_ctx_.get(), &out_tensors, &end_of_sequence)
.code(),
absl::StatusCode::kInvalidArgument);
}
TEST_F(ShardDatasetOpTest, InvalidArguments) {
std::vector<ShardDatasetParams> invalid_dataset_params = {
InvalidShardDatasetParams1(), InvalidShardDatasetParams2(),
InvalidShardDatasetParams3(), InvalidShardDatasetParams4()};
for (const auto& dataset_params : invalid_dataset_params) {
EXPECT_EQ(Initialize(dataset_params).code(),
absl::StatusCode::kInvalidArgument);
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/data/shard_dataset_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/data/shard_dataset_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
931a8bc3-ba94-4243-bc39-9a972cf91eb2 | cpp | tensorflow/tensorflow | dynamic_dimension_inference | third_party/xla/xla/service/dynamic_dimension_inference.cc | third_party/xla/xla/service/dynamic_dimension_inference_test.cc | #include "xla/service/dynamic_dimension_inference.h"
#include <cstdint>
#include <functional>
#include <memory>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/container/inlined_vector.h"
#include "absl/functional/function_ref.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/match.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_join.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/comparison_util.h"
#include "xla/hlo/ir/dfs_hlo_visitor_with_default.h"
#include "xla/hlo/ir/dynamic_parameter_binding.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/literal.h"
#include "xla/literal_util.h"
#include "xla/service/call_inliner.h"
#include "xla/service/dynamic_window_utils.h"
#include "xla/service/hlo_creation_utils.h"
#include "xla/service/hlo_dataflow_analysis.h"
#include "xla/service/hlo_value.h"
#include "xla/service/tuple_util.h"
#include "xla/service/while_util.h"
#include "xla/shape.h"
#include "xla/shape_tree.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/util.h"
#include "xla/window_util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/status.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
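// Returns a computation equivalent to `narrow_comp` whose single tuple parameter has
// `wide_shape`: a wrapper extracts the prefix matching the original parameter shape,
// calls the original computation, and the call is inlined. If the shapes already match,
// the original computation is returned with an empty inlining map.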
absl::StatusOr<std::pair<HloComputation*, CallInliner::InlinedInstructionMap>>
WidenComputation(HloComputation* narrow_comp, const Shape& wide_shape) {
TF_RET_CHECK(wide_shape.IsTuple());
const Shape& narrow_shape = narrow_comp->parameter_instruction(0)->shape();
if (Shape::Equal()(wide_shape, narrow_shape)) {
return std::make_pair(narrow_comp, CallInliner::InlinedInstructionMap());
}
HloComputation* wide_comp = [&]() {
HloComputation::Builder builder(absl::StrCat("wide.", narrow_comp->name()));
builder.AddInstruction(HloInstruction::CreateParameter(
0, wide_shape,
absl::StrCat("wide.", narrow_comp->parameter_instruction(0)->name())));
return narrow_comp->parent()->AddEmbeddedComputation(builder.Build());
}();
HloInstruction* wide_parameter = wide_comp->parameter_instruction(0);
HloInstruction* truncated_parameter = TupleUtil::ExtractPrefix(
wide_parameter, narrow_shape.tuple_shapes_size(),
absl::StrCat("renarrowed.",
narrow_comp->parameter_instruction(0)->name()));
HloInstruction* call_narrow_comp = wide_comp->AddInstruction(
HloInstruction::CreateCall(narrow_comp->root_instruction()->shape(),
{truncated_parameter}, narrow_comp));
wide_comp->set_root_instruction(call_narrow_comp,
true);
TF_ASSIGN_OR_RETURN(auto inline_map, CallInliner::Inline(call_narrow_comp));
return std::make_pair(wide_comp, std::move(inline_map));
}
}
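// Visitor that walks one computation and records, for each (instruction, shape index,
// dimension), the HLO instruction holding that dimension's runtime size, inserting helper
// instructions (size arithmetic, PadToStatic, shape checks) along the way.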
class DynamicDimensionInferenceVisitor : public DfsHloRewriteVisitor {
public:
explicit DynamicDimensionInferenceVisitor(
const DynamicParameterBinding& param_bindings,
HloDataflowAnalysis& dataflow_analysis, DynamicDimensionInference* parent,
DynamicDimensionInference::CustomCallInferenceHandler custom_call_handler,
DynamicDimensionInference::ShapeCheckMode shape_check_mode,
DynamicDimensionInference::AssertionGenerator assertion_generator)
: param_bindings_(param_bindings),
dataflow_analysis_(dataflow_analysis),
parent_(parent),
custom_call_handler_(std::move(custom_call_handler)),
shape_check_mode_(shape_check_mode),
assertion_generator_(assertion_generator) {}
absl::Status DefaultAction(HloInstruction* hlo) override;
static absl::StatusOr<bool> Run(
HloComputation* computation, HloDataflowAnalysis& dataflow_analysis,
const DynamicParameterBinding& param_bindings,
DynamicDimensionInference* parent,
DynamicDimensionInference::CustomCallInferenceHandler
custom_call_handler = nullptr,
DynamicDimensionInference::ShapeCheckMode shape_check_mode =
DynamicDimensionInference::ShapeCheckMode::kIgnore,
const DynamicDimensionInference::AssertionGenerator& assertion_generator =
nullptr) {
if (!HloInstruction::IsThreadIncluded(computation->execution_thread(),
parent->execution_threads_)) {
return false;
}
DynamicDimensionInferenceVisitor visitor(
param_bindings, dataflow_analysis, parent,
std::move(custom_call_handler), shape_check_mode, assertion_generator);
TF_RETURN_IF_ERROR(computation->Accept(&visitor));
if (visitor.shape_assertion_ != nullptr) {
CHECK(assertion_generator);
assertion_generator(visitor.shape_assertion_);
}
return visitor.changed();
}
absl::Status HandleParameter(HloInstruction* hlo) override;
absl::Status HandleInfeed(HloInstruction* hlo) override;
absl::Status HandleConstant(HloInstruction* hlo) override;
absl::Status HandleReduce(HloInstruction* hlo) override;
absl::Status HandleDot(HloInstruction* hlo) override;
absl::Status HandleTuple(HloInstruction* hlo) override;
absl::Status HandleTranspose(HloInstruction* hlo) override;
absl::Status HandleDynamicReshape(HloInstruction* hlo) override;
absl::Status HandleReshape(HloInstruction* hlo) override;
absl::Status HandleSort(HloInstruction* hlo) override;
absl::Status HandlePad(HloInstruction* hlo) override;
absl::Status HandleCustomCall(HloInstruction* hlo) override;
absl::Status HandleBroadcast(HloInstruction* hlo) override;
absl::Status HandleGetDimensionSize(HloInstruction* hlo) override;
absl::Status HandleSetDimensionSize(HloInstruction* hlo) override;
absl::Status HandleSelect(HloInstruction* hlo) override;
absl::Status HandleConvolution(HloInstruction* hlo) override;
absl::Status HandleConcatenate(HloInstruction* hlo) override;
absl::Status HandleReduceWindow(HloInstruction* hlo) override;
absl::Status HandleReverse(HloInstruction* hlo) override;
absl::Status HandleSelectAndScatter(HloInstruction* hlo) override;
absl::Status HandleGetTupleElement(HloInstruction* hlo) override;
absl::Status HandleElementwiseUnary(HloInstruction* hlo) override;
absl::Status HandleElementwiseNary(HloInstruction* hlo);
absl::Status HandleElementwiseBinary(HloInstruction* hlo) override;
absl::Status HandleClamp(HloInstruction* hlo) override;
absl::Status HandleConditional(HloInstruction* hlo) override;
absl::Status HandleWhile(HloInstruction* hlo) override;
absl::Status HandleSlice(HloInstruction* hlo) override;
absl::Status HandleDynamicSlice(HloInstruction* hlo) override;
absl::Status HandleDynamicUpdateSlice(HloInstruction* hlo) override;
absl::Status HandleGather(HloInstruction* hlo) override;
absl::Status HandleScatter(HloInstruction* hlo) override;
absl::Status HandleMap(HloInstruction* hlo) override;
absl::Status HandleDomain(HloInstruction* hlo) override;
absl::Status HandleAsyncStart(HloInstruction* hlo) override;
absl::Status HandleAsyncDone(HloInstruction* hlo) override;
private:
using OperandDynamicDimensionFn = absl::FunctionRef<absl::Status(
HloInstruction* operand, ShapeIndex index, int64_t dimension,
int64_t operand_index, HloInstruction* dynamic_size)>;
using DynamicDimensionFn = std::function<absl::Status(
ShapeIndex index, int64_t dimension, HloInstruction* dynamic_size)>;
void SetDynamicSize(HloInstruction* inst, const ShapeIndex& index,
int64_t dim, HloInstruction* size,
bool clear_dynamic_dimension = true);
void SetDynamicSizes(HloInstruction* inst, const ShapeIndex& index,
absl::Span<HloInstruction* const> sizes);
absl::Status HandleDynamicConvolutionForward(HloInstruction* hlo,
int64_t operand_index,
int64_t dimension,
HloInstruction* dynamic_size);
absl::Status HandleDynamicConvolutionKernelGrad(HloInstruction* hlo,
int64_t operand_index,
int64_t dimension);
absl::Status HandleDynamicConvolutionInputGrad(HloInstruction* hlo,
int64_t operand_index,
int64_t dimension);
absl::Status HandleDynamicWindowSamePadding(HloInstruction* hlo,
HloInstruction* dynamic_size,
int64_t operand_index,
int64_t dimension);
absl::Status ForEachOperandDynamicDimension(HloInstruction* inst,
OperandDynamicDimensionFn);
absl::Status ForEachDynamicDimensionInOperand(HloInstruction* inst,
int64_t operand_index,
OperandDynamicDimensionFn);
absl::Status ForEachDynamicDimension(HloInstruction* inst,
const DynamicDimensionFn& fn);
bool CanInfer(HloInstruction* hlo) { return parent_->CanInfer(hlo); }
absl::StatusOr<bool> RequiresPadToStatic(HloInstruction* instr,
ShapeIndex shape_index);
absl::Status InsertPadToStaticOnInstruction(HloInstruction* inst);
absl::Status InsertShapeCheck(HloInstruction* dim1, HloInstruction* dim2,
bool support_implicit_broadcast);
absl::Status PassThroughDynamicDimension(HloInstruction*);
const DynamicParameterBinding& param_bindings_;
HloDataflowAnalysis& dataflow_analysis_;
DynamicDimensionInference* parent_;
DynamicDimensionInference::CustomCallInferenceHandler custom_call_handler_;
DynamicDimensionInference::ShapeCheckMode shape_check_mode_;
HloInstruction* shape_assertion_ = nullptr;
DynamicDimensionInference::AssertionGenerator assertion_generator_;
};
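// Registers `size` as the dynamic size of dimension `dim` at `index` of `inst`; by
// default the dimension is then marked static on the instruction's own shape.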
void DynamicDimensionInferenceVisitor::SetDynamicSize(
HloInstruction* inst, const ShapeIndex& index, int64_t dim,
HloInstruction* size, bool clear_dynamic_dimension) {
parent_->SetDynamicSize(inst, index, dim, size);
if (clear_dynamic_dimension) {
ShapeUtil::GetMutableSubshape(inst->mutable_shape(), index)
->set_dynamic_dimension(dim, false);
}
MarkAsChanged();
}
void DynamicDimensionInferenceVisitor::SetDynamicSizes(
HloInstruction* inst, const ShapeIndex& index,
absl::Span<HloInstruction* const> sizes) {
const Shape& subshape = ShapeUtil::GetSubshape(inst->shape(), index);
CHECK(subshape.IsArray() && subshape.rank() == sizes.size());
for (int64_t dimension = 0; dimension < subshape.rank(); ++dimension) {
if (sizes[dimension] != nullptr) {
SetDynamicSize(inst, index, dimension, sizes[dimension]);
}
}
}
absl::Status DynamicDimensionInferenceVisitor::DefaultAction(
HloInstruction* hlo) {
return ForEachOperandDynamicDimension(
hlo, [&](HloInstruction* operand, ShapeIndex index, int64_t dimension,
int64_t operand_index, HloInstruction* dynamic_size) {
return UnimplementedStrCat(
"Asked to propagate a dynamic dimension from hlo ", operand->name(),
"@", index.ToString(), "@", dimension, " to hlo ", hlo->ToString(),
", which is not implemented.");
});
}
absl::Status DynamicDimensionInferenceVisitor::HandleGetTupleElement(
HloInstruction* hlo) {
if (!CanInfer(hlo)) {
return absl::OkStatus();
}
return ForEachOperandDynamicDimension(
hlo,
[&](HloInstruction* operand, ShapeIndex index, int64_t dimension,
int64_t operand_index, HloInstruction* dynamic_size) -> absl::Status {
if (hlo->tuple_index() != index[0]) {
return absl::OkStatus();
}
ShapeIndex new_index(ShapeIndexView(index).subspan(1));
SetDynamicSize(hlo, new_index, dimension, dynamic_size);
return absl::OkStatus();
});
}
absl::Status DynamicDimensionInferenceVisitor::HandleTuple(
HloInstruction* hlo) {
if (!CanInfer(hlo)) {
return absl::OkStatus();
}
TF_RETURN_IF_ERROR(ForEachOperandDynamicDimension(
hlo, [&](HloInstruction*, ShapeIndex index, int64_t dimension,
int64_t operand_index, HloInstruction* dynamic_size) {
index.push_front(operand_index);
SetDynamicSize(hlo, index, dimension, dynamic_size);
return absl::OkStatus();
}));
return absl::OkStatus();
}
absl::Status DynamicDimensionInferenceVisitor::HandleBroadcast(
HloInstruction* hlo) {
if (!CanInfer(hlo)) {
return absl::OkStatus();
}
return ForEachOperandDynamicDimension(
hlo, [&](HloInstruction* operand, ShapeIndex index, int64_t dimension,
int64_t operand_index, HloInstruction* dynamic_size) {
int64_t broadcast_dim = hlo->dimensions(dimension);
SetDynamicSize(hlo, {}, broadcast_dim, dynamic_size);
return absl::OkStatus();
});
}
absl::Status DynamicDimensionInferenceVisitor::HandleConstant(
HloInstruction* hlo) {
if (!hlo->shape().is_dynamic()) {
return absl::OkStatus();
}
auto* constant = Cast<HloConstantInstruction>(hlo);
ShapeTree<bool> do_pad(constant->shape(), false);
Shape padded_shape = constant->shape();
bool pad_any = false;
TF_RETURN_IF_ERROR(ShapeUtil::ForEachMutableSubshapeWithStatus(
&padded_shape,
[&](Shape* subshape, const ShapeIndex& index) -> absl::Status {
if (!subshape->IsArray()) {
return absl::OkStatus();
}
TF_ASSIGN_OR_RETURN(bool requires_pad, RequiresPadToStatic(hlo, index));
if (requires_pad) {
pad_any = *do_pad.mutable_element(index) = true;
*subshape = ShapeUtil::MakeStaticShape(*subshape);
}
return absl::OkStatus();
}));
if (!pad_any) {
return absl::OkStatus();
}
Literal padded_literal(padded_shape);
do_pad.ForEachElement([&](const ShapeIndex& index, bool requires_pad) {
const Shape& subshape = ShapeUtil::GetSubshape(padded_shape, index);
if (!subshape.IsArray()) {
return absl::OkStatus();
}
TF_RETURN_IF_ERROR(padded_literal.CopyFrom(constant->literal(), index,
index,
true));
if (!requires_pad) {
for (int64_t dimension = 0; dimension < subshape.rank(); ++dimension) {
if (subshape.is_dynamic_dimension(dimension)) {
padded_literal.SetDynamicSize(
dimension, index,
constant->literal().GetDynamicSize(dimension, index));
}
}
}
return absl::OkStatus();
});
auto* padded_constant = hlo->AddInstruction(
HloInstruction::CreateConstant(std::move(padded_literal)));
TF_RETURN_IF_ERROR(constant->ReplaceAllUsesWith(padded_constant));
SetVisited(*padded_constant);
TF_RETURN_IF_ERROR(do_pad.ForEachElementWithStatus(
[&](const ShapeIndex& index, bool requires_pad) -> absl::Status {
if (!requires_pad) {
return absl::OkStatus();
}
const Shape& subshape =
ShapeUtil::GetSubshape(constant->shape(), index);
TF_RET_CHECK(subshape.IsArray());
for (int64_t dimension = 0; dimension < subshape.rank(); ++dimension) {
if (!subshape.is_dynamic_dimension(dimension)) {
continue;
}
HloInstruction* dynamic_size = hlo->AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32_t>(
constant->literal().GetDynamicSize(dimension, index))));
SetVisited(*dynamic_size);
SetDynamicSize(padded_constant, index, dimension, dynamic_size);
}
return absl::OkStatus();
}));
MarkAsChanged();
return absl::OkStatus();
}
absl::Status DynamicDimensionInferenceVisitor::HandleCustomCall(
HloInstruction* hlo) {
if (hlo->custom_call_target() == "PadToStatic") {
for (int64_t i = 0; i < hlo->operand(0)->shape().rank(); ++i) {
if (hlo->operand(0)->shape().is_dynamic_dimension(i)) {
HloInstruction* dynamic_size =
hlo->parent()->AddInstruction(HloInstruction::CreateGetTupleElement(
ShapeUtil::MakeScalarShape(S32), hlo, i + 1));
ShapeIndex data_output = {0};
SetDynamicSize(hlo, data_output, i, dynamic_size);
}
}
return absl::OkStatus();
}
if (!CanInfer(hlo)) {
return absl::OkStatus();
}
if (custom_call_handler_) {
TF_RETURN_IF_ERROR(custom_call_handler_(hlo, parent_));
} else {
TF_RETURN_IF_ERROR(ForEachOperandDynamicDimension(
hlo,
[&](HloInstruction* operand, ShapeIndex index, int64_t dimension,
int64_t operand_index,
HloInstruction* dynamic_size) -> absl::Status {
if (hlo->custom_call_target() == "SliceToDynamic" ||
hlo->custom_call_target() == "Sharding" ||
(absl::StartsWith(hlo->custom_call_target(), "Resize") &&
(dimension == 0 || dimension == 3))) {
SetDynamicSize(hlo, {}, dimension, dynamic_size);
return absl::OkStatus();
}
if (hlo->custom_call_target() == "DynamicReduceWindowSamePadding") {
if (hlo->operand_count() > 2) {
return Unimplemented(
"DynamicReduceWindowSamePadding doesn't support variadic "
"reduce window %s",
hlo->ToString());
}
return HandleDynamicWindowSamePadding(hlo, dynamic_size,
operand_index, dimension);
}
if (hlo->custom_call_target() ==
"DynamicSelectAndScatterSamePadding") {
if (operand_index == 1) {
return absl::OkStatus();
}
SetDynamicSize(hlo, {}, dimension, dynamic_size);
return absl::OkStatus();
}
if (hlo->custom_call_target() == "DynamicConvolutionInputGrad") {
return HandleDynamicConvolutionInputGrad(hlo, operand_index,
dimension);
}
if (hlo->custom_call_target() == "DynamicConvolutionKernelGrad") {
return HandleDynamicConvolutionKernelGrad(hlo, operand_index,
dimension);
}
if (hlo->custom_call_target() == "DynamicConvolutionForward") {
return HandleDynamicConvolutionForward(hlo, operand_index,
dimension, dynamic_size);
}
return Unimplemented(
"CustomCall \"%s\" is not supported to have a dynamic dimension",
hlo->custom_call_target());
}));
}
return InsertPadToStaticOnInstruction(hlo);
}
absl::Status DynamicDimensionInferenceVisitor::HandleSort(HloInstruction* hlo) {
if (!CanInfer(hlo)) {
return absl::OkStatus();
}
return ForEachOperandDynamicDimension(
hlo,
[&](HloInstruction* operand, ShapeIndex index, int64_t dynamic_dimension,
int64_t operand_index, HloInstruction* dynamic_size) {
HloSortInstruction* sort = Cast<HloSortInstruction>(hlo);
if (sort->values_count() == 0) {
SetDynamicSize(hlo, {}, dynamic_dimension, dynamic_size);
} else {
SetDynamicSize(hlo, {operand_index}, dynamic_dimension, dynamic_size);
}
return absl::OkStatus();
});
}
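// The padded dynamic size is the interior-padded extent
// (max(d - 1, 0) * interior_padding + d) plus the low and high edge padding.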
absl::Status DynamicDimensionInferenceVisitor::HandlePad(HloInstruction* hlo) {
if (!CanInfer(hlo)) {
return absl::OkStatus();
}
return ForEachOperandDynamicDimension(
hlo,
[&](HloInstruction* operand, ShapeIndex index, int64_t dimension,
int64_t operand_index, HloInstruction* dynamic_size) -> absl::Status {
if (operand_index != 0) {
return Unimplemented(
"Dynamic dimension on padding value is not supported");
}
const PaddingConfig_PaddingConfigDimension& padding_config =
hlo->padding_config().dimensions(dimension);
HloInstruction* dynamic_size_adjusted = dynamic_size;
if (padding_config.interior_padding() != 0) {
HloInstruction* one =
hlo->parent()->AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR0<int32_t>(1)));
HloInstruction* zero =
hlo->parent()->AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR0<int32_t>(0)));
HloInstruction* interior_padding = hlo->parent()->AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32_t>(
padding_config.interior_padding())));
dynamic_size_adjusted =
hlo->parent()->AddInstruction(HloInstruction::CreateBinary(
dynamic_size_adjusted->shape(), HloOpcode::kSubtract,
dynamic_size_adjusted, one));
dynamic_size_adjusted =
hlo->parent()->AddInstruction(HloInstruction::CreateBinary(
dynamic_size_adjusted->shape(), HloOpcode::kMaximum,
dynamic_size_adjusted, zero));
dynamic_size_adjusted =
hlo->parent()->AddInstruction(HloInstruction::CreateBinary(
dynamic_size_adjusted->shape(), HloOpcode::kMultiply,
dynamic_size_adjusted, interior_padding));
dynamic_size_adjusted =
hlo->parent()->AddInstruction(HloInstruction::CreateBinary(
dynamic_size_adjusted->shape(), HloOpcode::kAdd,
dynamic_size_adjusted, dynamic_size));
}
HloInstruction* adjustment = hlo->parent()->AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32_t>(
padding_config.edge_padding_low() +
padding_config.edge_padding_high())));
dynamic_size_adjusted =
hlo->parent()->AddInstruction(HloInstruction::CreateBinary(
dynamic_size_adjusted->shape(), HloOpcode::kAdd,
dynamic_size_adjusted, adjustment));
SetDynamicSize(hlo, {}, dimension, dynamic_size_adjusted);
return absl::OkStatus();
});
}
absl::Status DynamicDimensionInferenceVisitor::HandleReduce(
HloInstruction* hlo) {
if (!CanInfer(hlo)) {
return absl::OkStatus();
}
auto* reduce = Cast<HloReduceInstruction>(hlo);
int64_t rank = -1;
TF_RETURN_IF_ERROR(ShapeUtil::ForEachSubshapeWithStatus(
reduce->shape(),
[&](const Shape& subshape, const ShapeIndex& index) -> absl::Status {
if (!subshape.IsArray()) {
return absl::OkStatus();
}
if (rank < 0) {
rank = subshape.rank();
} else {
TF_RET_CHECK(rank == subshape.rank());
}
return absl::OkStatus();
}));
TF_RET_CHECK(rank >= 0);
absl::InlinedVector<HloInstruction*, 4> dynamic_sizes(rank, nullptr);
TF_RETURN_IF_ERROR(ForEachOperandDynamicDimension(
hlo, [&](HloInstruction* operand, ShapeIndex index, int64_t dimension,
int64_t operand_index, HloInstruction* dynamic_size) {
int64_t operand_count = reduce->operand_count();
CHECK_EQ(operand_count % 2, 0);
if (operand_index >= reduce->input_count()) {
return absl::OkStatus();
}
if (absl::c_count(reduce->dimensions(), dimension) != 0) {
return absl::OkStatus();
}
int64_t dimensions_not_reduced_count = 0;
for (int64_t i = 0; i < operand->shape().rank(); ++i) {
if (dimension == i) {
dynamic_sizes[dimensions_not_reduced_count] = dynamic_size;
return absl::OkStatus();
}
if (!absl::c_linear_search(reduce->dimensions(), i)) {
dimensions_not_reduced_count++;
}
}
return absl::OkStatus();
}));
ShapeUtil::ForEachSubshape(
reduce->shape(), [&](const Shape& subshape, ShapeIndex shape_index) {
if (!subshape.IsArray()) {
return;
}
SetDynamicSizes(reduce, shape_index, dynamic_sizes);
});
return absl::OkStatus();
}
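// Maps each dynamic operand dimension (batch or non-contracting) to its position in the
// dot result and forwards the dynamic size to that output dimension.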
absl::Status DynamicDimensionInferenceVisitor::HandleDot(HloInstruction* hlo) {
if (!CanInfer(hlo)) {
return absl::OkStatus();
}
absl::InlinedVector<HloInstruction*, 4> dynamic_sizes(hlo->shape().rank(),
nullptr);
TF_RETURN_IF_ERROR(ForEachOperandDynamicDimension(
hlo,
[&](HloInstruction* operand, ShapeIndex operand_shape_index,
int64_t operand_dimension, int64_t operand_index,
HloInstruction* dynamic_size) -> absl::Status {
HloInstruction* dot = hlo;
const DotDimensionNumbers& dimension_numbers =
dot->dot_dimension_numbers();
absl::flat_hash_map<int64_t, int64_t> result_dim_mapping;
int64_t current_result_dims = 0;
bool lhs = operand_index == 0;
if (lhs) {
for (int64_t i : dimension_numbers.lhs_batch_dimensions()) {
result_dim_mapping[i] = current_result_dims++;
}
} else {
for (int64_t i : dimension_numbers.rhs_batch_dimensions()) {
result_dim_mapping[i] = current_result_dims++;
}
}
for (int64_t i = 0; i < dot->operand(0)->shape().rank(); i++) {
if (absl::c_linear_search(
dimension_numbers.lhs_contracting_dimensions(), i)) {
continue;
}
if (absl::c_linear_search(dimension_numbers.lhs_batch_dimensions(),
i)) {
continue;
}
if (lhs) {
result_dim_mapping[i] = current_result_dims;
}
current_result_dims++;
}
for (int64_t i = 0; i < dot->operand(1)->shape().rank(); i++) {
if (absl::c_linear_search(
dimension_numbers.rhs_contracting_dimensions(), i)) {
continue;
}
if (absl::c_linear_search(dimension_numbers.rhs_batch_dimensions(),
i)) {
continue;
}
if (!lhs) {
result_dim_mapping[i] = current_result_dims;
}
current_result_dims++;
}
auto iter = result_dim_mapping.find(operand_dimension);
if (iter != result_dim_mapping.end()) {
dynamic_sizes[iter->second] = dynamic_size;
}
return absl::OkStatus();
}));
SetDynamicSizes(hlo, {}, dynamic_sizes);
return absl::OkStatus();
}
absl::Status DynamicDimensionInferenceVisitor::HandleTranspose(
HloInstruction* hlo) {
if (!CanInfer(hlo)) {
return absl::OkStatus();
}
return ForEachOperandDynamicDimension(
hlo,
[&](HloInstruction* operand, ShapeIndex index, int64_t dimension,
int64_t operand_index, HloInstruction* dynamic_size) -> absl::Status {
int64_t permuted_dim = -1;
for (int64_t i = 0; i < hlo->dimensions().size(); ++i) {
if (hlo->dimensions()[i] == dimension) {
TF_RET_CHECK(permuted_dim == -1);
permuted_dim = i;
}
}
SetDynamicSize(hlo, {}, permuted_dim, dynamic_size);
return absl::OkStatus();
});
}
absl::Status DynamicDimensionInferenceVisitor::HandleConvolution(
HloInstruction* hlo) {
if (!CanInfer(hlo)) {
return absl::OkStatus();
}
return ForEachOperandDynamicDimension(
hlo,
[&](HloInstruction* operand, ShapeIndex index, int64_t dimension,
int64_t operand_index, HloInstruction* dynamic_size) -> absl::Status {
HloInstruction* conv = hlo;
const ConvolutionDimensionNumbers& dimension_numbers =
conv->convolution_dimension_numbers();
if (operand_index == 0) {
if (dimension == dimension_numbers.input_batch_dimension()) {
SetDynamicSize(conv, {}, dimension_numbers.output_batch_dimension(),
dynamic_size);
return absl::OkStatus();
}
if (dimension == dimension_numbers.input_feature_dimension()) {
return absl::OkStatus();
}
} else {
if (dimension == dimension_numbers.kernel_input_feature_dimension()) {
return absl::OkStatus();
}
}
return Unimplemented("Dynamic Spatial Convolution is not supported: %s",
conv->ToString());
});
}
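// The concat dimension's dynamic size is the sum of the static extents of operands that
// are static along that dimension plus the dynamic sizes of the rest; dynamic sizes on
// the other dimensions pass through unchanged.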
absl::Status DynamicDimensionInferenceVisitor::HandleConcatenate(
HloInstruction* hlo) {
if (!CanInfer(hlo)) {
return absl::OkStatus();
}
int64_t static_size = 0;
std::vector<HloInstruction*> dynamic_concat_dims;
for (int64_t i = 0; i < hlo->operand_count(); ++i) {
HloInstruction* concat_dim_size = nullptr;
for (int64_t dimension = 0; dimension < hlo->operand(i)->shape().rank();
++dimension) {
if (dimension == hlo->concatenate_dimension()) {
HloInstruction* dynamic_size =
parent_->GetDynamicSize(hlo->mutable_operand(i), {}, dimension);
concat_dim_size = dynamic_size;
}
}
if (concat_dim_size == nullptr) {
static_size +=
hlo->operand(i)->shape().dimensions(hlo->concatenate_dimension());
} else {
dynamic_concat_dims.push_back(concat_dim_size);
}
}
std::vector<HloInstruction*> dynamic_sizes(hlo->shape().rank(), nullptr);
if (!dynamic_concat_dims.empty()) {
HloInstruction* dim_size_total =
hlo->parent()->AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR0<int32_t>(static_size)));
for (HloInstruction* dynamic_dim : dynamic_concat_dims) {
dim_size_total = hlo->parent()->AddInstruction(
HloInstruction::CreateBinary(dim_size_total->shape(), HloOpcode::kAdd,
dim_size_total, dynamic_dim));
}
dynamic_sizes[hlo->concatenate_dimension()] = dim_size_total;
}
TF_RETURN_IF_ERROR(ForEachOperandDynamicDimension(
hlo,
[&](HloInstruction* operand, ShapeIndex index, int64_t dimension,
int64_t operand_index, HloInstruction* dynamic_size) -> absl::Status {
TF_RET_CHECK(index.empty());
int64_t concatenate_dimension = hlo->concatenate_dimension();
if (concatenate_dimension == dimension) {
return absl::OkStatus();
}
dynamic_sizes[dimension] = dynamic_size;
return absl::OkStatus();
}));
SetDynamicSizes(hlo, {}, dynamic_sizes);
return absl::OkStatus();
}
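// get-dimension-size is folded away: it is replaced by the tracked dynamic-size
// instruction when one exists, or by a constant when the queried dimension is static.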
absl::Status DynamicDimensionInferenceVisitor::HandleGetDimensionSize(
HloInstruction* gds) {
int64_t dim = gds->dimension();
TF_RET_CHECK(dim < gds->operand(0)->shape().rank()) << gds->ToString();
HloInstruction* operand = gds->mutable_operand(0);
TF_RET_CHECK(dim < operand->shape().rank());
HloInstruction* replacement = parent_->GetDynamicSize(operand, {}, dim);
HloComputation* computation = gds->parent();
if (replacement == nullptr &&
!gds->operand(0)->shape().is_dynamic_dimension(dim)) {
TF_RET_CHECK(dim < gds->operand(0)->shape().rank());
int32_t size = gds->operand(0)->shape().dimensions(dim);
replacement = computation->AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32_t>(size)),
gds->name());
}
if (replacement != nullptr) {
TF_RETURN_IF_ERROR(gds->ReplaceAllUsesWith(replacement));
parent_->ReplaceAllDynamicDimensionUsesWith(gds, replacement);
MarkAsChanged();
}
return absl::OkStatus();
}
absl::Status DynamicDimensionInferenceVisitor::HandleSetDimensionSize(
HloInstruction* hlo) {
bool dimension_is_static = false;
const HloInstruction* size = hlo->operand(1);
if (size->opcode() == HloOpcode::kConstant) {
TF_RET_CHECK(size->shape().rank() == 0);
if (size->literal().Get<int32_t>({}) ==
hlo->shape().dimensions(hlo->dimension()) &&
!hlo->shape().is_dynamic_dimension(hlo->dimension())) {
dimension_is_static = true;
}
}
if (!dimension_is_static) {
SetDynamicSize(hlo, {}, hlo->dimension(), hlo->mutable_operand(1),
false);
}
TF_RETURN_IF_ERROR(ForEachOperandDynamicDimension(
hlo,
[&](HloInstruction* operand, ShapeIndex index, int64_t dimension,
int64_t operand_index, HloInstruction* dynamic_size) -> absl::Status {
TF_RET_CHECK(operand_index == 0);
if (dimension != hlo->dimension()) {
SetDynamicSize(hlo, index, dimension, dynamic_size,
false);
}
return absl::OkStatus();
}));
return absl::OkStatus();
}
absl::Status DynamicDimensionInferenceVisitor::HandleDynamicConvolutionForward(
HloInstruction* hlo, int64_t operand_index, int64_t dimension,
HloInstruction* dynamic_size) {
if (!CanInfer(hlo)) {
return absl::OkStatus();
}
TF_RET_CHECK(operand_index == 0);
const ConvolutionDimensionNumbers& dimension_numbers =
hlo->convolution_dimension_numbers();
if (dimension == dimension_numbers.input_batch_dimension()) {
SetDynamicSize(hlo, {}, dimension_numbers.output_batch_dimension(),
dynamic_size);
return absl::OkStatus();
}
for (int64_t spatial_dim_index = 0;
spatial_dim_index < dimension_numbers.input_spatial_dimensions_size();
++spatial_dim_index) {
int64_t input_spatial_dim =
dimension_numbers.input_spatial_dimensions(spatial_dim_index);
int64_t output_spatial_dim =
dimension_numbers.output_spatial_dimensions(spatial_dim_index);
if (dimension == input_spatial_dim) {
WindowDimension window_dim = hlo->window().dimensions(spatial_dim_index);
DynamicWindowDims dynamic_window_dims = GetWindowedOutputSize(
dynamic_size, window_dim.size(), window_dim.window_dilation(),
window_dim.stride(), hlo->padding_type());
TF_RET_CHECK(window_dim.base_dilation() == 1);
SetDynamicSize(hlo, {}, output_spatial_dim,
dynamic_window_dims.output_size);
return absl::OkStatus();
}
}
return absl::OkStatus();
}
absl::Status DynamicDimensionInferenceVisitor::HandleDynamicWindowSamePadding(
HloInstruction* hlo, HloInstruction* dynamic_size, int64_t operand_index,
int64_t dimension) {
if (!CanInfer(hlo)) {
return absl::OkStatus();
}
const Window& window = hlo->window();
const WindowDimension& window_dim = window.dimensions(dimension);
if (!window_util::IsTrivialWindowDimension(window_dim)) {
DynamicWindowDims dynamic_window_dims = GetWindowedOutputSize(
dynamic_size, window_dim.size(), window_dim.window_dilation(),
window_dim.stride(), PaddingType::PADDING_SAME);
SetDynamicSize(hlo, {}, dimension, dynamic_window_dims.output_size);
} else {
SetDynamicSize(hlo, {}, dimension, dynamic_size);
}
return absl::OkStatus();
}
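// The dynamic output extent of a convolution input-grad comes from its input_sizes
// operand: slice out the entry for this dimension and reshape it to a scalar.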
absl::Status
DynamicDimensionInferenceVisitor::HandleDynamicConvolutionInputGrad(
HloInstruction* hlo, int64_t operand_index, int64_t dimension) {
if (!CanInfer(hlo)) {
return absl::OkStatus();
}
HloInstruction* input_sizes = hlo->mutable_operand(0);
HloComputation* comp = hlo->parent();
TF_RET_CHECK(input_sizes->shape().rank() == 1) << hlo->ToString();
TF_RET_CHECK(input_sizes->shape().element_type() == S32) << hlo->ToString();
TF_RET_CHECK(input_sizes->shape().dimensions(0) ==
hlo->shape().dimensions_size())
<< hlo->ToString();
HloInstruction* slice = comp->AddInstruction(
HloInstruction::CreateSlice(ShapeUtil::MakeShape(S32, {1}), input_sizes,
{dimension}, {dimension + 1}, {1}));
HloInstruction* reshape = comp->AddInstruction(
HloInstruction::CreateReshape(ShapeUtil::MakeScalarShape(S32), slice));
SetDynamicSize(hlo, {}, dimension, reshape);
return absl::OkStatus();
}
absl::Status
DynamicDimensionInferenceVisitor::HandleDynamicConvolutionKernelGrad(
HloInstruction* hlo, int64_t operand_index, int64_t dimension) {
return absl::OkStatus();
}
absl::Status DynamicDimensionInferenceVisitor::PassThroughDynamicDimension(
HloInstruction* hlo) {
if (!CanInfer(hlo)) {
return absl::OkStatus();
}
ShapeTree<absl::InlinedVector<HloInstruction*, 2>> dynamic_sizes(
hlo->shape());
TF_RETURN_IF_ERROR(ForEachOperandDynamicDimension(
hlo, [&](HloInstruction* operand, ShapeIndex index, int64_t dimension,
int64_t operand_index, HloInstruction* dynamic_size) {
const Shape& subshape = ShapeUtil::GetSubshape(hlo->shape(), index);
auto* element = dynamic_sizes.mutable_element(index);
element->resize(subshape.rank(), nullptr);
(*element)[dimension] = dynamic_size;
return absl::OkStatus();
}));
dynamic_sizes.ForEachElement([&](const ShapeIndex& index, const auto& sizes) {
if (sizes.empty()) {
return;
}
SetDynamicSizes(hlo, index, sizes);
});
return absl::OkStatus();
}
absl::Status DynamicDimensionInferenceVisitor::HandleDomain(
HloInstruction* hlo) {
return PassThroughDynamicDimension(hlo);
}
absl::Status DynamicDimensionInferenceVisitor::HandleAsyncStart(
HloInstruction* hlo) {
if (!HloInstruction::IsThreadIncluded(hlo->async_execution_thread(),
parent_->execution_threads_)) {
return absl::OkStatus();
}
return DefaultAction(hlo);
}
absl::Status DynamicDimensionInferenceVisitor::HandleAsyncDone(
HloInstruction* hlo) {
if (!HloInstruction::IsThreadIncluded(hlo->async_execution_thread(),
parent_->execution_threads_)) {
return InsertPadToStaticOnInstruction(hlo);
}
return DefaultAction(hlo);
}
absl::Status DynamicDimensionInferenceVisitor::HandleElementwiseUnary(
HloInstruction* hlo) {
return PassThroughDynamicDimension(hlo);
}
absl::Status DynamicDimensionInferenceVisitor::HandleSelect(
HloInstruction* hlo) {
return HandleElementwiseNary(hlo);
}
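// Dynamic sizes of elementwise operands must agree up to implicit size-1 broadcasting: a
// runtime shape check is emitted, and the result size is the larger of the two when one
// side is a size-1 broadcast, otherwise the smaller.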
absl::Status DynamicDimensionInferenceVisitor::HandleElementwiseNary(
HloInstruction* hlo) {
if (!CanInfer(hlo)) {
return absl::OkStatus();
}
HloComputation* comp = hlo->parent();
absl::InlinedVector<absl::InlinedVector<HloInstruction*, 2>, 2> operand_sizes(
hlo->shape().rank(),
absl::InlinedVector<HloInstruction*, 2>(hlo->operand_count(), nullptr));
TF_RETURN_IF_ERROR(ForEachOperandDynamicDimension(
hlo,
[&](HloInstruction* operand, ShapeIndex index, int64_t dimension,
int64_t operand_index, HloInstruction* dynamic_size) -> absl::Status {
TF_RET_CHECK(index.empty());
operand_sizes[dimension][operand_index] = dynamic_size;
return absl::OkStatus();
}));
absl::InlinedVector<HloInstruction*, 2> existing_sizes(hlo->shape().rank(),
nullptr);
for (int operand_index = 0; operand_index < hlo->operand_count();
++operand_index) {
for (int64_t dimension = 0; dimension < hlo->shape().rank(); ++dimension) {
HloInstruction* dynamic_size = operand_sizes[dimension][operand_index];
if (dynamic_size == nullptr) {
continue;
}
HloInstruction* existing_size = existing_sizes[dimension];
if (existing_size == nullptr) {
existing_sizes[dimension] = dynamic_size;
} else if (existing_sizes[dimension] != dynamic_size) {
TF_RETURN_IF_ERROR(
InsertShapeCheck(existing_size, dynamic_size,
true));
auto one = comp->AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::One(S32)));
auto operand_needs_broadcast =
comp->AddInstruction(HloInstruction::CreateCompare(
ShapeUtil::MakeShape(PRED, {}), dynamic_size, existing_size,
ComparisonDirection::kLt));
auto is_one = comp->AddInstruction(HloInstruction::CreateCompare(
ShapeUtil::MakeShape(PRED, {}), dynamic_size, one,
ComparisonDirection::kEq));
operand_needs_broadcast =
comp->AddInstruction(HloInstruction::CreateBinary(
ShapeUtil::MakeShape(PRED, {}), HloOpcode::kAnd, is_one,
operand_needs_broadcast));
auto existing_needs_broadcast =
comp->AddInstruction(HloInstruction::CreateCompare(
ShapeUtil::MakeShape(PRED, {}), existing_size, dynamic_size,
ComparisonDirection::kLt));
is_one = comp->AddInstruction(HloInstruction::CreateCompare(
ShapeUtil::MakeShape(PRED, {}), existing_size, one,
ComparisonDirection::kEq));
existing_needs_broadcast =
comp->AddInstruction(HloInstruction::CreateBinary(
ShapeUtil::MakeShape(PRED, {}), HloOpcode::kAnd, is_one,
existing_needs_broadcast));
auto needs_broadcast =
comp->AddInstruction(HloInstruction::CreateBinary(
ShapeUtil::MakeShape(PRED, {}), HloOpcode::kOr,
operand_needs_broadcast, existing_needs_broadcast));
auto max_size = comp->AddInstruction(HloInstruction::CreateBinary(
ShapeUtil::MakeScalarShape(S32), HloOpcode::kMaximum, dynamic_size,
existing_size));
auto min_size = comp->AddInstruction(HloInstruction::CreateBinary(
ShapeUtil::MakeScalarShape(S32), HloOpcode::kMinimum, dynamic_size,
existing_size));
auto select_size = comp->AddInstruction(HloInstruction::CreateTernary(
ShapeUtil::MakeScalarShape(S32), HloOpcode::kSelect,
needs_broadcast, max_size, min_size));
existing_sizes[dimension] = select_size;
}
}
}
SetDynamicSizes(hlo, {}, existing_sizes);
return absl::OkStatus();
}
absl::Status DynamicDimensionInferenceVisitor::HandleElementwiseBinary(
HloInstruction* hlo) {
return HandleElementwiseNary(hlo);
}
absl::Status DynamicDimensionInferenceVisitor::HandleClamp(
HloInstruction* hlo) {
return PassThroughDynamicDimension(hlo);
}
absl::Status DynamicDimensionInferenceVisitor::HandleDynamicReshape(
HloInstruction* hlo) {
if (!CanInfer(hlo)) {
return absl::OkStatus();
}
HloDynamicReshapeInstruction* dynamic_reshape =
Cast<HloDynamicReshapeInstruction>(hlo);
for (int64_t i = 0; i < hlo->shape().rank(); ++i) {
if (hlo->shape().is_dynamic_dimension(i)) {
SetDynamicSize(hlo, {}, i, dynamic_reshape->dim_sizes(i));
}
}
return absl::OkStatus();
}
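// Reshape: dynamic dimensions are propagated through the common-factor groups of the
// input and output shapes. A many-to-many group forces a flatten/unflatten decomposition
// (which needs an inferred dimension); otherwise the dynamic size is scaled by the ratio
// of the corresponding static extents.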
absl::Status DynamicDimensionInferenceVisitor::HandleReshape(
HloInstruction* const hlo) {
if (!CanInfer(hlo)) {
return absl::OkStatus();
}
VLOG(2) << "Handle reshape: " << hlo->ToString() << "\n";
absl::InlinedVector<HloInstruction*, 2> dynamic_sizes(hlo->shape().rank(),
nullptr);
using ReshapeGroup = std::pair<int64_t, int64_t>;
using ReshapeGroupPair = std::pair<ReshapeGroup, ReshapeGroup>;
auto is_reverse_reshape_group_pair =
[&](const HloInstruction* op1, const ReshapeGroupPair& p1,
const HloInstruction* op2, const ReshapeGroupPair& p2) -> bool {
return ShapeUtil::EqualStructure(
ShapeUtil::GetSubshape(
op1->operand(0)->shape(),
ShapeIndex(p1.first.first, p1.first.second)),
ShapeUtil::GetSubshape(
op2->operand(0)->shape(),
ShapeIndex(p2.second.first, p2.second.second))) &&
ShapeUtil::EqualStructure(
ShapeUtil::GetSubshape(
op1->shape(), ShapeIndex(p1.second.first, p1.second.second)),
ShapeUtil::GetSubshape(
op2->operand(0)->shape(),
ShapeIndex(p2.first.first, p2.first.second)));
};
auto find_reshape_group_pair = [](HloInstruction* reshape,
int64_t input_dynamic_dimension) {
VLOG(2) << "Find reshape pair: " << reshape->ToString() << "\n";
auto common_factors =
CommonFactors(reshape->operand(0)->shape().dimensions(),
reshape->shape().dimensions());
ReshapeGroup input_dim = {-1, -1}, output_dim = {-1, -1};
bool found = false;
for (int64_t i = 0; i < common_factors.size() - 1; ++i) {
auto start = common_factors[i];
auto end = common_factors[i + 1];
if (input_dynamic_dimension >= start.first &&
input_dynamic_dimension < end.first) {
input_dim.first = start.first;
input_dim.second = end.first;
output_dim.first = start.second;
output_dim.second = end.second;
VLOG(3) << "Found common_factor group pair: " << input_dim.first << ","
<< input_dim.second << "->" << output_dim.first << ","
<< output_dim.second << "\n";
found = true;
break;
}
}
CHECK(found);
return ReshapeGroupPair(input_dim, output_dim);
};
auto reshape_group_pair_needs_flatten =
[](const ReshapeGroupPair& reshape_pair) {
return reshape_pair.first.second - reshape_pair.first.first > 1 &&
reshape_pair.second.second - reshape_pair.second.first > 1;
};
std::function<bool(HloInstruction*, const ReshapeGroupPair&, int64_t)>
find_reverse_past_reshape = [&](HloInstruction* op,
const ReshapeGroupPair reshape_pair,
int64_t dynamic_dimension_size) {
VLOG(2) << "Find reverse past reshape from " << op->ToString()
<< " for " << dynamic_dimension_size << "\n";
absl::InlinedVector<int64_t, 4> found_dims;
for (int op_dim_index = 0; op_dim_index < op->shape().rank();
++op_dim_index) {
if (op->shape().dimensions(op_dim_index) == dynamic_dimension_size) {
found_dims.push_back(op_dim_index);
}
}
if (found_dims.empty()) {
return false;
}
VLOG(3) << "Found " << found_dims.size() << "\n";
if (op->opcode() == HloOpcode::kReshape) {
for (auto op_dim_index : found_dims) {
auto orig_reshape_pair = find_reshape_group_pair(op, op_dim_index);
if (is_reverse_reshape_group_pair(op, orig_reshape_pair, hlo,
reshape_pair)) {
TF_CHECK_OK(ForEachOperandDynamicDimension(
op,
[&](HloInstruction* operand, ShapeIndex index,
int64_t op_dynamic_dimension, int64_t operand_index,
HloInstruction* operand_dynamic_size) -> absl::Status {
if (op_dynamic_dimension >= orig_reshape_pair.first.first &&
op_dynamic_dimension < orig_reshape_pair.first.second) {
auto dynamic_size =
parent_->GetDynamicSize(op, {}, op_dynamic_dimension);
CHECK_NE(dynamic_size, nullptr);
auto hlo_dimension_index = op_dynamic_dimension -
orig_reshape_pair.first.first +
reshape_pair.second.first;
dynamic_sizes[hlo_dimension_index] = dynamic_size;
}
return absl::OkStatus();
}));
return true;
}
}
}
for (auto operand : op->mutable_operands()) {
if (find_reverse_past_reshape(operand, reshape_pair,
dynamic_dimension_size)) {
return true;
}
VLOG(3) << "Checking " << operand->ToString() << "\n";
}
return false;
};
absl::flat_hash_map<int64_t, ReshapeGroupPair> reshape_group_pairs;
bool need_flatten_unflatten =
hlo->inferred_dimension() != -1 &&
hlo->shape().dimensions(hlo->inferred_dimension()) == 1;
TF_RETURN_IF_ERROR(ForEachOperandDynamicDimension(
hlo,
[&](HloInstruction* operand, ShapeIndex index,
int64_t input_dynamic_dimension, int64_t operand_index,
HloInstruction* operand_dynamic_size) -> absl::Status {
auto reshape_pair =
find_reshape_group_pair(hlo, input_dynamic_dimension);
reshape_group_pairs[input_dynamic_dimension] = reshape_pair;
if (reshape_group_pair_needs_flatten(reshape_pair)) {
need_flatten_unflatten = true;
}
return absl::OkStatus();
}));
if (need_flatten_unflatten) {
if (hlo->inferred_dimension() != -1) {
HloInstruction* operand = hlo->mutable_operand(0);
HloComputation* comp = hlo->parent();
HloInstruction* dynamic_size = comp->AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32_t>(1)));
int64_t static_size = 1;
for (int64_t i = 0; i < operand->shape().rank(); i++) {
HloInstruction* dynamic_dim_size =
parent_->GetDynamicSize(operand, {}, i);
if (dynamic_dim_size == nullptr) {
static_size *= operand->shape().dimensions(i);
} else {
dynamic_size = comp->AddInstruction(HloInstruction::CreateBinary(
dynamic_size->shape(), HloOpcode::kMultiply, dynamic_size,
dynamic_dim_size));
}
}
HloInstruction* static_size_hlo =
comp->AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR0<int32_t>(static_size)));
dynamic_size = comp->AddInstruction(HloInstruction::CreateBinary(
dynamic_size->shape(), HloOpcode::kMultiply, dynamic_size,
static_size_hlo));
int64_t size_without_inferred_dim =
ShapeUtil::ElementsIn(hlo->shape()) /
hlo->shape().dimensions(hlo->inferred_dimension());
HloInstruction* size_without_inferred_dim_hlo =
comp->AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR0<int32_t>(size_without_inferred_dim)));
dynamic_size = comp->AddInstruction(HloInstruction::CreateBinary(
dynamic_size->shape(), HloOpcode::kDivide, dynamic_size,
size_without_inferred_dim_hlo));
dynamic_sizes[hlo->inferred_dimension()] = dynamic_size;
VLOG(3)
<< "Need to decompose a dynamic reshape to flatten-unflatten pair. "
<< comp->parent()->ToString();
SetDynamicSizes(hlo, {}, dynamic_sizes);
return absl::OkStatus();
}
return Internal(
"Need inferred dimension to be set to "
"flatten-unflatten pair. %s",
hlo->ToString());
}
TF_RETURN_IF_ERROR(ForEachOperandDynamicDimension(
hlo,
[&](HloInstruction* operand, ShapeIndex index,
int64_t input_dynamic_dimension, int64_t operand_index,
HloInstruction* operand_dynamic_size) -> absl::Status {
HloInstruction* const reshape = hlo;
if (reshape->shape().rank() == 0) {
VLOG(0) << "Reshaping a dynamic dimension into a scalar, which has "
"undefined behavior when input size is 0. The offending "
"instruction is: "
<< reshape->ToString();
return absl::OkStatus();
}
auto iter = reshape_group_pairs.find(input_dynamic_dimension);
CHECK(iter != reshape_group_pairs.end());
ReshapeGroupPair reshape_group_pair = iter->second;
auto output_dim_start = reshape_group_pair.second.first,
output_dim_end = reshape_group_pair.second.second;
int64_t output_dynamic_dimension = -1;
if (operand->shape().dimensions(input_dynamic_dimension) == 1) {
if (input_dynamic_dimension == 0) {
output_dynamic_dimension = 0;
} else if (input_dynamic_dimension == operand->shape().rank() - 1) {
output_dynamic_dimension = reshape->shape().rank() - 1;
}
if (output_dynamic_dimension == -1) {
return Unimplemented(
"Dynamic degenerated dimension that's not most-minor nor "
"most-major is not supported %s",
reshape->ToString());
}
}
if (output_dynamic_dimension == -1 &&
output_dim_end - output_dim_start == 1) {
output_dynamic_dimension = output_dim_start;
}
if (output_dynamic_dimension == -1 &&
output_dim_end - output_dim_start > 1) {
output_dynamic_dimension = reshape->inferred_dimension();
if (output_dynamic_dimension == -1) {
for (int64_t i = output_dim_start; i < output_dim_end; ++i) {
if (reshape->shape().is_dynamic_dimension(i)) {
output_dynamic_dimension = i;
}
}
}
if (output_dynamic_dimension == -1) {
std::vector<int64_t> output_non_degenerated;
for (int64_t i = output_dim_start; i < output_dim_end; ++i) {
if (reshape->shape().dimensions(i) != 1) {
output_non_degenerated.push_back(i);
}
}
if (output_non_degenerated.size() == 1) {
output_dynamic_dimension = output_non_degenerated[0];
}
}
if (output_dynamic_dimension == -1 &&
find_reverse_past_reshape(
hlo->mutable_operand(0), reshape_group_pair,
hlo->mutable_operand(0)->shape().dimensions(
input_dynamic_dimension))) {
return absl::OkStatus();
}
if (output_dynamic_dimension == -1) {
return InvalidArgument(
"Reshape's input dynamic dimension is decomposed into "
"multiple output dynamic dimensions, but the constraint is "
"ambiguous and XLA can't infer the output dimension %s. ",
hlo->ToString());
}
}
CHECK_NE(output_dynamic_dimension, -1);
const int64_t input_dim_size =
operand->shape().dimensions(input_dynamic_dimension);
const int64_t output_dim_size =
reshape->shape().dimensions(output_dynamic_dimension);
VLOG(2) << "input_dim_size: " << input_dim_size
<< " output_dim_size: " << output_dim_size;
if (input_dim_size == output_dim_size) {
dynamic_sizes[output_dynamic_dimension] = operand_dynamic_size;
}
if (input_dim_size > output_dim_size) {
TF_RET_CHECK(input_dim_size % output_dim_size == 0)
<< reshape->ToString();
const int64_t divisor = input_dim_size / output_dim_size;
HloInstruction* divisor_hlo =
hlo->parent()->AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR0<int32_t>(divisor)));
HloInstruction* new_dynamic_size =
hlo->parent()->AddInstruction(HloInstruction::CreateBinary(
operand_dynamic_size->shape(), HloOpcode::kDivide,
operand_dynamic_size, divisor_hlo));
dynamic_sizes[output_dynamic_dimension] = new_dynamic_size;
}
if (input_dim_size < output_dim_size) {
HloInstruction* output_dynamic_size =
dynamic_sizes[output_dynamic_dimension];
if (output_dynamic_size == nullptr) {
output_dynamic_size =
hlo->parent()->AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR0<int32_t>(output_dim_size)));
}
HloInstruction* divisor_hlo = hlo->parent()->AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32_t>(
operand->shape().dimensions(input_dynamic_dimension))));
HloInstruction* new_dynamic_size =
hlo->parent()->AddInstruction(HloInstruction::CreateBinary(
output_dynamic_size->shape(), HloOpcode::kDivide,
output_dynamic_size, divisor_hlo));
new_dynamic_size =
hlo->parent()->AddInstruction(HloInstruction::CreateBinary(
output_dynamic_size->shape(), HloOpcode::kMultiply,
new_dynamic_size, operand_dynamic_size));
dynamic_sizes[output_dynamic_dimension] = new_dynamic_size;
}
return absl::OkStatus();
}));
SetDynamicSizes(hlo, {}, dynamic_sizes);
return absl::OkStatus();
}
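// Propagates dynamic dimensions through reduce-window. Dynamic dimensions on
// init values are ignored; for a non-trivial window dimension the dynamic
// size is recomputed as the windowed output size (VALID padding). The result
// is recorded on every leaf of the (possibly variadic) output shape.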
absl::Status DynamicDimensionInferenceVisitor::HandleReduceWindow(
HloInstruction* hlo) {
if (!CanInfer(hlo)) {
return absl::OkStatus();
}
ShapeTree<absl::InlinedVector<HloInstruction*, 2>> dynamic_sizes(
hlo->shape());
TF_RETURN_IF_ERROR(ForEachOperandDynamicDimension(
hlo, [&](HloInstruction* operand, ShapeIndex index, int64_t dimension,
int64_t operand_index, HloInstruction* dynamic_size) {
auto* reduce_window = Cast<HloReduceWindowInstruction>(hlo);
const WindowDimension& window_dim =
reduce_window->window().dimensions(dimension);
if (operand_index >= reduce_window->input_count()) {
return absl::OkStatus();
}
if (!window_util::IsTrivialWindowDimension(window_dim)) {
DynamicWindowDims dynamic_window_dims = GetWindowedOutputSize(
dynamic_size, window_dim.size(), window_dim.window_dilation(),
window_dim.stride(), PaddingType::PADDING_VALID);
dynamic_size = dynamic_window_dims.output_size;
}
ShapeUtil::ForEachSubshape(
reduce_window->shape(),
[&](const Shape& subshape, ShapeIndex reduce_window_result_index) {
if (!ShapeUtil::IsLeafIndex(reduce_window->shape(),
reduce_window_result_index)) {
return;
}
auto* leaf_dynamic_sizes =
dynamic_sizes.mutable_element(reduce_window_result_index);
leaf_dynamic_sizes->resize(subshape.rank(), nullptr);
(*leaf_dynamic_sizes)[dimension] = dynamic_size;
});
return absl::OkStatus();
}));
dynamic_sizes.ForEachElement(
[&](const ShapeIndex& shape_index,
          const absl::InlinedVector<HloInstruction*, 2>& sizes) {
if (sizes.empty()) {
return;
}
SetDynamicSizes(hlo, shape_index, sizes);
});
return absl::OkStatus();
}
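// Select-and-scatter output has the operand's shape, so the operand's dynamic
// dimensions pass straight through; dynamic dimensions on the source
// (operand 1) are ignored.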
absl::Status DynamicDimensionInferenceVisitor::HandleSelectAndScatter(
HloInstruction* hlo) {
if (!CanInfer(hlo)) {
return absl::OkStatus();
}
return ForEachOperandDynamicDimension(
hlo, [&](HloInstruction* operand, ShapeIndex index, int64_t dimension,
int64_t operand_index, HloInstruction* dynamic_size) {
if (operand_index == 1) {
return absl::OkStatus();
}
SetDynamicSize(hlo, {}, dimension, dynamic_size);
return absl::OkStatus();
});
}
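// Slice with static bounds: the output's dynamic size is
// ceil((dynamic_size - start) / stride), built from HLO arithmetic below.
// A slice of size 1 yields a static dimension and needs no bookkeeping.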
absl::Status DynamicDimensionInferenceVisitor::HandleSlice(
HloInstruction* hlo) {
if (!CanInfer(hlo)) {
return absl::OkStatus();
}
return ForEachOperandDynamicDimension(
hlo,
      [&](HloInstruction* operand, ShapeIndex /*index*/, int64_t dimension,
          int64_t /*operand_index*/,
          HloInstruction* dynamic_size) -> absl::Status {
int64_t start = hlo->slice_starts(dimension);
int64_t limit = hlo->slice_limits(dimension);
int64_t stride = hlo->slice_strides(dimension);
int64_t size = CeilOfRatio<int64_t>(limit - start, stride);
if (size == 1) {
TF_RET_CHECK(!hlo->shape().is_dynamic_dimension(dimension));
return absl::OkStatus();
}
TF_RET_CHECK(hlo->shape().is_dynamic_dimension(dimension));
if (start != 0) {
dynamic_size = hlo->AddInstruction(HloInstruction::CreateBinary(
dynamic_size->shape(), HloOpcode::kSubtract, dynamic_size,
hlo->AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR0<int32_t>(start)))));
}
if (stride != 1) {
dynamic_size = hlo->AddInstruction(HloInstruction::CreateBinary(
dynamic_size->shape(), HloOpcode::kAdd, dynamic_size,
hlo->AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR0<int32_t>(stride - 1)))));
dynamic_size = hlo->AddInstruction(HloInstruction::CreateBinary(
dynamic_size->shape(), HloOpcode::kDivide, dynamic_size,
hlo->AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR0<int32_t>(stride)))));
}
SetDynamicSize(hlo, {}, dimension, dynamic_size);
return absl::OkStatus();
});
}
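// Dynamic-slice forwards a dynamic dimension only when the slice spans the
// whole input dimension; selecting a partial dynamic dimension is not
// supported.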
absl::Status DynamicDimensionInferenceVisitor::HandleDynamicSlice(
HloInstruction* hlo) {
if (!CanInfer(hlo)) {
return absl::OkStatus();
}
return ForEachOperandDynamicDimension(
hlo,
[&](HloInstruction* operand, ShapeIndex index, int64_t dimension,
int64_t operand_index, HloInstruction* dynamic_size) -> absl::Status {
if (hlo->shape().dimensions(dimension) == 1) {
return absl::OkStatus();
}
if (hlo->shape().dimensions(dimension) !=
hlo->operand(0)->shape().dimensions(dimension)) {
return Unimplemented(
"Dynamic dimension propagation on DynamicSlice where a partial "
"dimension is selected %s",
hlo->ToString());
}
TF_RET_CHECK(operand_index == 0);
TF_RET_CHECK(index.empty());
SetDynamicSize(hlo, {}, dimension, dynamic_size);
return absl::OkStatus();
});
}
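// Dynamic-update-slice keeps the base operand's dynamic dimensions. If the
// update is smaller than the base along a dynamic dimension, the output is
// treated as static in that dimension.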
absl::Status DynamicDimensionInferenceVisitor::HandleDynamicUpdateSlice(
HloInstruction* hlo) {
if (!CanInfer(hlo)) {
return absl::OkStatus();
}
absl::InlinedVector<HloInstruction*, 2> output_dynamic_sizes(
hlo->shape().rank(), nullptr);
TF_RETURN_IF_ERROR(ForEachOperandDynamicDimension(
hlo,
[&](HloInstruction* operand, ShapeIndex index, int64_t dimension,
int64_t operand_index, HloInstruction* dynamic_size) -> absl::Status {
TF_RET_CHECK(index.empty());
if (hlo->shape().dimensions(dimension) !=
hlo->operand(0)->shape().dimensions(dimension)) {
return Unimplemented(
"Dynamic dimension propagation on DynamicUpdateSlice where a "
"partial dimension is selected %s",
hlo->ToString());
}
if (operand_index == 1 &&
hlo->operand(1)->shape().dimensions(dimension) <
hlo->operand(0)->shape().dimensions(dimension)) {
hlo->mutable_shape()->set_dynamic_dimension(dimension, false);
return absl::OkStatus();
}
output_dynamic_sizes[dimension] = dynamic_size;
return absl::OkStatus();
}));
SetDynamicSizes(hlo, {}, output_dynamic_sizes);
return absl::OkStatus();
}
absl::Status DynamicDimensionInferenceVisitor::HandleReverse(
HloInstruction* hlo) {
return PassThroughDynamicDimension(hlo);
}
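// Gather: a dynamic dimension on the data operand is forwarded only when the
// gather slice covers that whole dimension (it then maps to the matching
// offset dimension of the output). A dynamic batch dimension on the indices
// maps to the corresponding non-offset output dimension; other cases are
// unsupported.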
absl::Status DynamicDimensionInferenceVisitor::HandleGather(
HloInstruction* hlo) {
if (!CanInfer(hlo)) {
return absl::OkStatus();
}
absl::InlinedVector<HloInstruction*, 2> output_dynamic_sizes(
hlo->shape().rank(), nullptr);
TF_RETURN_IF_ERROR(ForEachOperandDynamicDimension(
hlo,
      [&](HloInstruction* operand, ShapeIndex /*index*/,
int64_t input_dynamic_dimension, int64_t operand_index,
HloInstruction* dynamic_size) -> absl::Status {
const GatherDimensionNumbers& gather_dims =
hlo->gather_dimension_numbers();
if (operand_index == 0) {
if (hlo->gather_slice_sizes()[input_dynamic_dimension] == 1) {
return absl::OkStatus();
}
if (hlo->gather_slice_sizes()[input_dynamic_dimension] ==
operand->shape().dimensions(input_dynamic_dimension)) {
int64_t operand_dimension = 0;
for (int64_t output_dimension : gather_dims.offset_dims()) {
TF_RET_CHECK(output_dimension < hlo->shape().rank());
while (operand_dimension < operand->shape().rank() &&
absl::c_linear_search(gather_dims.collapsed_slice_dims(),
operand_dimension)) {
++operand_dimension;
}
TF_RET_CHECK(operand_dimension < operand->shape().rank());
if (operand_dimension == input_dynamic_dimension) {
output_dynamic_sizes[output_dimension] = dynamic_size;
return absl::OkStatus();
}
++operand_dimension;
}
return Internal("Invalid instruction: %s", hlo->ToString());
}
return Unimplemented(
"Detects a dynamic dimension on the data input of gather, which "
"is not supported: %s, %lld",
hlo->ToString(), input_dynamic_dimension);
}
int64_t indices_rank = hlo->operand(1)->shape().rank();
if (gather_dims.index_vector_dim() == indices_rank) {
++indices_rank;
}
int64_t output_rank = hlo->shape().rank();
int64_t indices_dim = 0;
for (int64_t output_dim = 0; output_dim < output_rank; ++output_dim) {
if (!absl::c_linear_search(gather_dims.offset_dims(), output_dim)) {
if (indices_dim == gather_dims.index_vector_dim()) {
indices_dim++;
}
if (indices_dim++ == input_dynamic_dimension) {
output_dynamic_sizes[output_dim] = dynamic_size;
return absl::OkStatus();
}
}
}
CHECK(indices_dim == indices_rank);
return Unimplemented(
"Detects a non-batch dynamic dimension of gather, "
"which is not supported: %s",
hlo->ToString());
}));
SetDynamicSizes(hlo, {}, output_dynamic_sizes);
return absl::OkStatus();
}
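// Conditional: dynamic sizes referenced by a branch are threaded in as extra
// tuple operands (widening the branch parameter if needed), inference is
// re-run on each rewritten branch, and the dynamic sizes each branch produces
// are appended to its root so they can be read back off the new conditional's
// result tuple.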
absl::Status DynamicDimensionInferenceVisitor::HandleConditional(
HloInstruction* hlo) {
if (!CanInfer(hlo)) {
return absl::OkStatus();
}
std::vector<HloComputation*> new_branch_computations;
std::vector<HloInstruction*> new_operands;
ShapeTree<absl::flat_hash_map<int64_t, int64_t>> dynamic_output_mapping(
hlo->shape());
bool need_rewrite = false;
for (int64_t branch_index = 0; branch_index < hlo->branch_count();
++branch_index) {
std::vector<HloInstruction*> operands_to_add;
absl::flat_hash_map<HloInstruction*, int64_t>
dynamic_size_to_operand_id_index_map;
const int64_t operand_index = branch_index + 1;
int operand_count =
hlo->operand(operand_index)->shape().tuple_shapes_size();
TF_RETURN_IF_ERROR(ForEachDynamicDimensionInOperand(
hlo, operand_index,
[&](HloInstruction*, ShapeIndex, int64_t, int64_t,
HloInstruction* dynamic_size) -> absl::Status {
TF_RET_CHECK(hlo->operand(operand_index)->shape().IsTuple())
<< "Only tuple typed inputs can have dynamic dimension. Please "
"file a bug against XLA team.";
const HloInstruction* tuple_operand = hlo->operand(operand_index);
for (int64_t i = 0; i < tuple_operand->operand_count(); ++i) {
if (dynamic_size == tuple_operand->operand(i)) {
dynamic_size_to_operand_id_index_map[dynamic_size] = i;
return absl::OkStatus();
}
}
auto iter = dynamic_size_to_operand_id_index_map.find(dynamic_size);
if (iter == dynamic_size_to_operand_id_index_map.end()) {
operands_to_add.push_back(dynamic_size);
dynamic_size_to_operand_id_index_map[dynamic_size] =
operand_count++;
}
return absl::OkStatus();
}));
HloInstruction* original_input = hlo->mutable_operand(operand_index);
HloComputation* branch_computation = hlo->branch_computation(branch_index);
HloComputation* new_computation = branch_computation;
CallInliner::InlinedInstructionMap inline_map;
HloInstruction* new_operand = hlo->mutable_operand(operand_index);
Shape new_param_shape =
branch_computation->parameter_instruction(0)->shape();
if (!operands_to_add.empty()) {
TF_RET_CHECK(original_input->shape().IsTuple());
need_rewrite = true;
new_operand = TupleUtil::AppendSuffix(original_input, operands_to_add);
for (HloInstruction* operand : operands_to_add) {
ShapeUtil::AppendShapeToTuple(operand->shape(), &new_param_shape);
}
TF_ASSIGN_OR_RETURN(
std::tie(new_computation, inline_map),
WidenComputation(branch_computation, new_param_shape));
}
DynamicParameterBinding dynamic_parameter_binding;
TF_RETURN_IF_ERROR(ForEachDynamicDimensionInOperand(
hlo, operand_index,
[&](HloInstruction*, ShapeIndex index, int64_t dimension,
int64_t operand_index, HloInstruction* dynamic_size) {
DynamicParameterBinding::DynamicSizeParameter dynamic_parameter{
0, {dynamic_size_to_operand_id_index_map[dynamic_size]}};
DynamicParameterBinding::DynamicDimension dynamic_dimension{
0, {index}, dimension};
TF_RETURN_IF_ERROR(dynamic_parameter_binding.Bind(dynamic_parameter,
dynamic_dimension));
return absl::OkStatus();
}));
VLOG(2) << "dynamic_parameter_binding for conditional branch"
<< dynamic_parameter_binding;
for (auto [old_inst, new_inst] : inline_map) {
parent_->CopyMapping(
old_inst,
new_inst,
&inline_map);
}
TF_ASSIGN_OR_RETURN(
bool changed,
DynamicDimensionInferenceVisitor::Run(
new_computation, dataflow_analysis_, dynamic_parameter_binding,
parent_, custom_call_handler_, shape_check_mode_,
assertion_generator_));
if (changed) {
MarkAsChanged();
}
new_branch_computations.push_back(new_computation);
new_operands.push_back(new_operand);
}
int tuple_count = hlo->shape().tuple_shapes_size();
ShapeUtil::ForEachSubshape(
hlo->shape(), [&](const Shape& subshape, const ShapeIndex& index) {
if (!subshape.IsArray()) {
return;
}
for (int64_t i = 0; i < subshape.rank(); ++i) {
for (int64_t j = 0; j < new_branch_computations.size(); ++j) {
HloInstruction* dynamic_size = parent_->GetDynamicSize(
new_branch_computations[j]->root_instruction(), index, i);
if (dynamic_size) {
if (dynamic_output_mapping.element(index).contains(i)) {
continue;
}
dynamic_output_mapping.mutable_element(index)->emplace(
i, tuple_count++);
}
}
}
});
for (int64_t branch_index = 0; branch_index < hlo->branch_count();
++branch_index) {
std::vector<HloInstruction*> hlos_to_add_in_root;
ShapeUtil::ForEachSubshape(
hlo->shape(), [&](const Shape& subshape, const ShapeIndex& index) {
if (!subshape.IsArray()) {
return;
}
for (int64_t i = 0; i < subshape.rank(); ++i) {
if (dynamic_output_mapping.element(index).contains(i)) {
HloInstruction* dynamic_size = parent_->GetDynamicSize(
new_branch_computations[branch_index]->root_instruction(),
index, i);
if (dynamic_size) {
hlos_to_add_in_root.push_back(dynamic_size);
} else {
HloInstruction* constant_size =
new_branch_computations[branch_index]->AddInstruction(
HloInstruction::CreateConstant(
LiteralUtil::CreateR0<int32_t>(
subshape.dimensions(i))));
hlos_to_add_in_root.push_back(constant_size);
}
}
}
});
VLOG(2) << "hlos_to_add_in_root:" << hlos_to_add_in_root.size();
if (!hlos_to_add_in_root.empty()) {
need_rewrite = true;
HloInstruction* new_branch_root = TupleUtil::AppendSuffix(
new_branch_computations[branch_index]->root_instruction(),
hlos_to_add_in_root);
new_branch_computations[branch_index]->set_root_instruction(
new_branch_root,
true);
}
}
if (!need_rewrite) {
return absl::OkStatus();
}
HloInstruction* new_conditional =
hlo->parent()->AddInstruction(HloInstruction::CreateConditional(
new_branch_computations[0]->root_instruction()->shape(),
hlo->mutable_operand(0), new_branch_computations, new_operands));
HloInstruction* new_conditional_extracted = TupleUtil::ExtractPrefix(
new_conditional, hlo->shape().tuple_shapes_size());
dynamic_output_mapping.ForEachElement(
[&](const ShapeIndex& index,
const absl::flat_hash_map<int64_t, int64_t>& dim_to_output) {
for (auto iter : dim_to_output) {
int64_t dim = iter.first;
int64_t output_index = iter.second;
HloInstruction* dynamic_size = hlo->parent()->AddInstruction(
HloInstruction::CreateGetTupleElement(
ShapeUtil::MakeScalarShape(S32), new_conditional,
output_index));
SetDynamicSize(new_conditional, index, dim, dynamic_size,
false);
SetDynamicSize(new_conditional_extracted, index, dim, dynamic_size,
false);
}
});
TF_RETURN_IF_ERROR(hlo->ReplaceAllUsesWith(new_conditional_extracted));
TF_RETURN_IF_ERROR(hlo->parent()->RemoveInstruction(hlo));
SetVisited(*new_conditional);
SetVisited(*new_conditional_extracted);
MarkAsChanged();
return absl::OkStatus();
}
absl::Status DynamicDimensionInferenceVisitor::HandleMap(HloInstruction* hlo) {
if (!CanInfer(hlo)) {
return absl::OkStatus();
}
return HandleElementwiseNary(hlo);
}
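// Scatter output has the shape of the base operand, whose dynamic dimensions
// pass through. Dynamic update-window dimensions are only accepted when the
// update matches the corresponding operand dimension and shares its dynamic
// size; otherwise inference gives up.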
absl::Status DynamicDimensionInferenceVisitor::HandleScatter(
HloInstruction* hlo) {
if (!CanInfer(hlo)) {
return absl::OkStatus();
}
return ForEachOperandDynamicDimension(
hlo,
[&](HloInstruction* operand, ShapeIndex dynamic_index, int64_t dimension,
int64_t operand_index,
HloInstruction* operand_dynamic_size) -> absl::Status {
if (operand_index == 0) {
SetDynamicSize(hlo, {}, dimension, operand_dynamic_size);
return absl::OkStatus();
}
const ScatterDimensionNumbers& scatter_dims =
hlo->scatter_dimension_numbers();
if (operand_index == 2 &&
absl::c_linear_search(scatter_dims.update_window_dims(),
dimension)) {
std::vector<int64_t> update_window_dims_in_operand;
for (int64_t i = 0; i < hlo->operand(0)->shape().rank(); ++i) {
if (absl::c_linear_search(scatter_dims.inserted_window_dims(), i)) {
continue;
}
update_window_dims_in_operand.push_back(i);
}
for (int64_t i = 0; i < scatter_dims.update_window_dims_size(); ++i) {
if (scatter_dims.update_window_dims(i) == dimension) {
const Shape& operand_shape = hlo->operand(0)->shape();
const Shape& update_shape = hlo->operand(2)->shape();
int64_t dim_in_operand = update_window_dims_in_operand[i];
if (operand_shape.dimensions(dim_in_operand) !=
update_shape.dimensions(dimension)) {
return Unimplemented(
"Dynamic dimension of update window dims that are not the "
"same as corresponding operand dim is not supported: "
"%s : %d : %d : %d",
hlo->ToString(), i, update_shape.dimensions(dimension),
operand_shape.dimensions(dim_in_operand));
}
HloInstruction* base_dynamic_size = parent_->GetDynamicSize(
hlo->mutable_operand(0), {}, dim_in_operand);
if (base_dynamic_size == nullptr ||
!operand_shape.is_dynamic_dimension(dim_in_operand)) {
return absl::OkStatus();
}
if (base_dynamic_size != operand_dynamic_size) {
return Unimplemented(
"Dynamic dimension size of update window dims that are not "
"the same as corresponding operand dim is not supported: "
"%s.\n Dynamic dim size of base: %s, dynamic dim size of "
"update: %s",
hlo->ToString(), base_dynamic_size->ToString(),
operand_dynamic_size->ToString());
}
}
}
}
return absl::OkStatus();
});
}
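// While: the dynamic sizes of the loop operand become extra tuple elements
// kept live through the loop (WhileUtil::MakeInstructionsLiveIn). Inference
// is re-run on the rewritten body and condition, the body root is rebuilt so
// updated sizes are carried across iterations, and the sizes are finally read
// back out of the while's result tuple.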
absl::Status DynamicDimensionInferenceVisitor::HandleWhile(
HloInstruction* hlo) {
if (!CanInfer(hlo)) {
return absl::OkStatus();
}
Shape original_shape = hlo->shape();
ShapeTree<absl::flat_hash_map<int64_t, int64_t>> dynamic_output_mapping(
original_shape);
std::vector<HloInstruction*> operands_to_add;
const int original_tuple_count = original_shape.tuple_shapes_size();
int operand_count = original_tuple_count;
DynamicParameterBinding binding_for_while;
TF_RETURN_IF_ERROR(ForEachOperandDynamicDimension(
hlo,
[&](HloInstruction* operand, ShapeIndex index, int64_t dim,
int64_t operand_num, HloInstruction* dynamic_size) -> absl::Status {
TF_RET_CHECK(operand_num == 0);
operands_to_add.push_back(dynamic_size);
dynamic_output_mapping.mutable_element(index)->emplace(dim,
operand_count);
DynamicParameterBinding::DynamicDimension dynamic_dimension{
0,
index,
dim,
};
DynamicParameterBinding::DynamicSizeParameter dynamic_size_param{
0,
{operand_count},
};
TF_RETURN_IF_ERROR(
binding_for_while.Bind(dynamic_size_param, dynamic_dimension));
++operand_count;
return absl::OkStatus();
}));
if (operands_to_add.empty()) {
return absl::OkStatus();
}
HloInstruction* old_tuple_operand = hlo->mutable_operand(0);
HloInstruction* old_body_root = hlo->while_body()->root_instruction();
TF_ASSIGN_OR_RETURN(WhileUtil::MakeInstructionsLiveInResult result,
WhileUtil::MakeInstructionsLiveIn(hlo, operands_to_add));
TF_RET_CHECK(result.replacement_instr->opcode() == HloOpcode::kTuple);
HloInstruction* new_tuple_operand =
result.new_while_instr->mutable_operand(0);
parent_->CopyMapping(old_tuple_operand,
new_tuple_operand);
hlo = result.new_while_instr;
SetVisited(*hlo);
for (auto [old_inst, new_inst] : result.while_body_instruction_map) {
parent_->CopyMapping(
old_inst,
new_inst,
&result.while_body_instruction_map);
}
parent_->CopyMapping(old_body_root,
hlo->while_body()->root_instruction(),
&result.while_body_instruction_map);
for (auto [old_inst, new_inst] : result.while_condition_instruction_map) {
parent_->CopyMapping(
old_inst,
new_inst,
&result.while_condition_instruction_map);
}
TF_RETURN_IF_ERROR(DynamicDimensionInferenceVisitor::Run(
hlo->while_body(), dataflow_analysis_,
binding_for_while, parent_, custom_call_handler_,
shape_check_mode_, assertion_generator_)
.status());
TF_RETURN_IF_ERROR(DynamicDimensionInferenceVisitor::Run(
hlo->while_condition(), dataflow_analysis_,
binding_for_while, parent_, custom_call_handler_,
shape_check_mode_, assertion_generator_)
.status());
HloInstruction* body_root = hlo->while_body()->root_instruction();
std::vector<HloInstruction*> new_root_operands(body_root->operand_count(),
nullptr);
for (int i = 0; i < original_tuple_count; ++i) {
new_root_operands[i] =
body_root->AddInstruction(HloInstruction::CreateGetTupleElement(
body_root->shape().tuple_shapes(i), body_root, i));
}
TF_RETURN_IF_ERROR(dynamic_output_mapping.ForEachElementWithStatus(
[&](const ShapeIndex& index,
const absl::flat_hash_map<int64_t, int64_t>& dim_to_size)
-> absl::Status {
for (auto [dimension, output_index] : dim_to_size) {
TF_RET_CHECK(new_root_operands[output_index] == nullptr);
HloInstruction* dynamic_size =
parent_->GetDynamicSize(body_root, index, dimension);
TF_RET_CHECK(dynamic_size != nullptr);
new_root_operands[output_index] = dynamic_size;
}
return absl::OkStatus();
}));
for (auto operand : new_root_operands) {
TF_RET_CHECK(operand != nullptr);
}
HloInstruction* new_body_root = hlo->while_body()->AddInstruction(
HloInstruction::CreateTuple(new_root_operands));
for (int i = 0; i < original_tuple_count; ++i) {
TF_RETURN_IF_ERROR(ForEachDynamicDimension(
body_root,
[&](ShapeIndex index, int64_t dimension,
HloInstruction* dynamic_size) -> absl::Status {
SetDynamicSize(new_body_root, index, dimension, dynamic_size);
if (index.empty() || index.front() != i) {
return absl::OkStatus();
}
index.pop_front();
SetDynamicSize(new_root_operands[i], index, dimension, dynamic_size);
return absl::OkStatus();
}));
}
hlo->while_body()->set_root_instruction(new_body_root);
MarkAsChanged();
return dynamic_output_mapping.ForEachElementWithStatus(
[&](const ShapeIndex& index,
const absl::flat_hash_map<int64_t, int64_t>& dim_to_size)
-> absl::Status {
for (auto [dimension, output_index] : dim_to_size) {
HloInstruction* dynamic_size = hlo->AddInstruction(
HloInstruction::CreateGetTupleElement(hlo, output_index));
SetDynamicSize(result.replacement_instr, index, dimension,
dynamic_size);
ShapeUtil::GetMutableSubshape(hlo->mutable_shape(), index)
->set_dynamic_dimension(dimension, false);
TF_RET_CHECK(!index.empty());
HloInstruction* gte =
result.replacement_instr->mutable_operand(index.front());
TF_RET_CHECK(gte->opcode() == HloOpcode::kGetTupleElement);
TF_RET_CHECK(gte->operand(0) == hlo);
ShapeUtil::GetMutableSubshape(gte->mutable_shape(),
ShapeIndexView(index).subspan(1))
->set_dynamic_dimension(dimension, false);
}
return absl::OkStatus();
});
}
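// Entry-computation parameters are handled by inserting PadToStatic; for
// nested computations the dynamic sizes come from the parameter binding set
// up by the caller's handler.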
absl::Status DynamicDimensionInferenceVisitor::HandleParameter(
HloInstruction* hlo) {
if (hlo->parent()->IsEntryComputation()) {
TF_RET_CHECK(param_bindings_.empty());
return InsertPadToStaticOnInstruction(hlo);
}
return param_bindings_.ForEachBinding(
[&](const DynamicParameterBinding::DynamicSizeParameter& dynamic_size,
const DynamicParameterBinding::DynamicDimension& dynamic_dimension)
-> absl::Status {
if (dynamic_dimension.parameter_num == hlo->parameter_number()) {
SetDynamicSize(
hlo, dynamic_dimension.parameter_index,
dynamic_dimension.dimension,
TupleUtil::AddGetTupleElements(HloPosition{
hlo->parent()->parameter_instruction(
dynamic_size.parameter_num),
dynamic_size.parameter_index,
}));
}
return absl::OkStatus();
});
}
absl::Status DynamicDimensionInferenceVisitor::HandleInfeed(
HloInstruction* hlo) {
return InsertPadToStaticOnInstruction(hlo);
}
absl::Status DynamicDimensionInferenceVisitor::ForEachDynamicDimension(
HloInstruction* inst, const DynamicDimensionFn& fn) {
auto iter = parent_->per_hlo_dynamic_dimensions_.find(inst);
if (iter != parent_->per_hlo_dynamic_dimensions_.end()) {
for (auto& dynamic_dimension : iter->second) {
HloInstruction* dynamic_size = parent_->GetDynamicSize(
dynamic_dimension.inst, dynamic_dimension.index,
dynamic_dimension.dim);
TF_RETURN_IF_ERROR(
fn(dynamic_dimension.index, dynamic_dimension.dim, dynamic_size));
}
}
return absl::OkStatus();
}
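// Returns true if the leaf value of `instr` at `shape_index` has a use that
// cannot consume a dynamic shape directly, in which case it must be padded to
// a static shape via a PadToStatic custom call.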
absl::StatusOr<bool> DynamicDimensionInferenceVisitor::RequiresPadToStatic(
HloInstruction* instr, ShapeIndex shape_index) {
TF_RET_CHECK(ShapeUtil::IsLeafIndex(instr->shape(), shape_index))
<< instr->shape() << " @ " << shape_index;
if (ShapeUtil::GetSubshape(instr->shape(), shape_index).is_static()) {
return false;
}
auto uses =
dataflow_analysis_.GetValueDefinedAt(instr, shape_index).GetUses();
for (const auto& use : uses) {
if (use.instruction->opcode() == HloOpcode::kAsyncStart ||
use.instruction->opcode() == HloOpcode::kAsyncUpdate ||
use.instruction->opcode() == HloOpcode::kAsyncDone ||
use.instruction->opcode() == HloOpcode::kCall ||
use.instruction->opcode() == HloOpcode::kTuple ||
use.instruction->opcode() == HloOpcode::kGetTupleElement ||
use.instruction->opcode() == HloOpcode::kConditional) {
continue;
}
if (use.instruction->opcode() == HloOpcode::kWhile) {
TF_RET_CHECK(use.operand_number == 0);
HloInstruction* root = use.instruction->while_body()->root_instruction();
if (parent_->HasDynamicDimension(root, use.operand_index)) {
return true;
}
continue;
}
if (use.instruction->opcode() == HloOpcode::kSetDimensionSize) {
TF_RET_CHECK(use.operand_number == 0);
return true;
}
if (use.instruction->opcode() == HloOpcode::kGetDimensionSize) {
return true;
}
if (use.instruction->opcode() != HloOpcode::kCustomCall ||
use.instruction->custom_call_target() != "PadToStatic") {
if (parent_->op_supports_dynamism_handler_ == nullptr) {
return true;
}
if (parent_->op_supports_dynamism_handler_(use.instruction) ==
OpDynamismSupport::kNoSupport) {
return true;
}
}
}
return false;
}
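// Rewrites every dynamic leaf of `inst` that requires it into a PadToStatic
// custom call, records the dynamic sizes produced by the custom call,
// reassembles the original tuple structure from the padded leaves, and
// redirects all users (and the computation root, if necessary) to the result.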
absl::Status DynamicDimensionInferenceVisitor::InsertPadToStaticOnInstruction(
HloInstruction* inst) {
if (inst->shape().is_static()) {
return absl::OkStatus();
}
ShapeTree<bool> needs_pad(inst->shape(), false);
bool any_needs_pad = false;
TF_RETURN_IF_ERROR(ShapeUtil::ForEachSubshapeWithStatus(
inst->shape(), [&](const Shape& subshape, const ShapeIndex& shape_index) {
if (subshape.IsTuple()) {
return absl::OkStatus();
}
TF_ASSIGN_OR_RETURN(bool do_pad,
RequiresPadToStatic(inst, shape_index));
if (do_pad) {
*needs_pad.mutable_element(shape_index) = true;
any_needs_pad = true;
}
return absl::OkStatus();
}));
if (!any_needs_pad) {
return absl::OkStatus();
}
auto users = inst->users();
ShapeTree<HloInstruction*> gtes =
TupleUtil::DisassembleTupleInstruction(inst);
ShapeTree<HloInstruction*> padded(inst->shape(), nullptr);
TF_RETURN_IF_ERROR(ShapeUtil::ForEachSubshapePostOrderWithStatus(
inst->shape(),
[&](const Shape& subshape,
const ShapeIndex& shape_index) -> absl::Status {
HloInstruction* element = gtes.element(shape_index);
SetVisited(*gtes.element(shape_index));
if (subshape.IsTuple()) {
absl::InlinedVector<HloInstruction*, 2> children;
ShapeIndex child_index = shape_index;
for (int i = 0; i < subshape.tuple_shapes_size(); ++i) {
child_index.push_back(i);
children.push_back(padded.element(child_index));
child_index.pop_back();
}
HloInstruction* tuple =
element->AddInstruction(HloInstruction::CreateVariadic(
subshape, HloOpcode::kTuple, children));
TF_CHECK_OK(ForEachOperandDynamicDimension(
tuple,
[&](HloInstruction* operand, ShapeIndex index, int64_t dimension,
int64_t operand_index, HloInstruction* dynamic_size) {
index.push_front(operand_index);
SetDynamicSize(tuple, index, dimension, dynamic_size);
return absl::OkStatus();
}));
*padded.mutable_element(shape_index) = tuple;
return absl::OkStatus();
}
if (needs_pad.element(shape_index)) {
Shape data_output_shape =
ShapeUtil::MakeStaticShape(element->shape());
Shape output_shape = ShapeUtil::MakeTupleShape({data_output_shape});
for (int64_t i = 0; i < element->shape().rank(); ++i) {
ShapeUtil::AppendShapeToTuple(ShapeUtil::MakeScalarShape(S32),
&output_shape);
}
HloInstruction* pad_to_static = inst->parent()->AddInstruction(
HloInstruction::CreateCustomCall(output_shape, {element},
"PadToStatic"),
absl::StrCat(element->name(), ".padded"));
SetVisited(*pad_to_static);
HloInstruction* data_output = inst->parent()->AddInstruction(
HloInstruction::CreateGetTupleElement(data_output_shape,
pad_to_static, 0),
absl::StrCat(element->name(), ".data"));
SetVisited(*data_output);
for (int64_t i = 0; i < element->shape().rank(); ++i) {
if (!element->shape().is_dynamic_dimension(i)) {
continue;
}
HloInstruction* dynamic_size_output =
inst->parent()->AddInstruction(
HloInstruction::CreateGetTupleElement(
output_shape.tuple_shapes(i + 1), pad_to_static, i + 1),
absl::StrCat(element->name(), ".size"));
SetVisited(*dynamic_size_output);
SetDynamicSize(data_output, {}, i, dynamic_size_output,
false);
}
*padded.mutable_element(shape_index) = data_output;
} else {
*padded.mutable_element(shape_index) = element;
}
return absl::OkStatus();
}));
HloInstruction* result = padded.element({});
for (auto user : users) {
for (int64_t i : user->OperandIndices(inst)) {
TF_RETURN_IF_ERROR(user->ReplaceOperandWith(i, result));
}
}
if (inst->IsRoot()) {
inst->parent()->set_root_instruction(result);
}
MarkAsChanged();
return absl::OkStatus();
}
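// Emits a check that two dimension sizes are equal, according to the
// configured shape-check mode: ignore, fail at compile time, or accumulate a
// runtime predicate into shape_assertion_.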
absl::Status DynamicDimensionInferenceVisitor::InsertShapeCheck(
HloInstruction* dim1, HloInstruction* dim2,
bool support_implicit_broadcast) {
switch (shape_check_mode_) {
case DynamicDimensionInference::kIgnore:
return absl::OkStatus();
case DynamicDimensionInference::kCompileTime:
return InvalidArgument(
"Fail to proof the equality of two dimensions at compile time: "
"%s vs %s",
dim1->ToString(), dim2->ToString());
case DynamicDimensionInference::kRuntime: {
TF_ASSIGN_OR_RETURN(
HloInstruction * assertion,
MakeCompareHlo(Comparison::Direction::kEq, dim1, dim2));
if (shape_assertion_ == nullptr) {
shape_assertion_ = assertion;
} else {
TF_ASSIGN_OR_RETURN(
shape_assertion_,
MakeBinaryHlo(HloOpcode::kAnd, shape_assertion_, assertion));
}
return absl::OkStatus();
}
default:
LOG(FATAL) << "Unreachable";
}
}
absl::Status DynamicDimensionInferenceVisitor::ForEachDynamicDimensionInOperand(
HloInstruction* inst, int64_t operand_index, OperandDynamicDimensionFn fn) {
auto iter =
parent_->per_hlo_dynamic_dimensions_.find(inst->operand(operand_index));
if (iter != parent_->per_hlo_dynamic_dimensions_.end()) {
for (auto& dynamic_dimension : iter->second) {
HloInstruction* dynamic_size = parent_->GetDynamicSize(
dynamic_dimension.inst, dynamic_dimension.index,
dynamic_dimension.dim);
TF_RETURN_IF_ERROR(fn(dynamic_dimension.inst, dynamic_dimension.index,
dynamic_dimension.dim, operand_index,
dynamic_size));
}
}
return absl::OkStatus();
}
absl::Status DynamicDimensionInferenceVisitor::ForEachOperandDynamicDimension(
HloInstruction* inst, OperandDynamicDimensionFn fn) {
for (int64_t operand_index = 0; operand_index < inst->operand_count();
++operand_index) {
TF_RETURN_IF_ERROR(
ForEachDynamicDimensionInOperand(inst, operand_index, fn));
}
return absl::OkStatus();
}
void DynamicDimensionInference::SetDynamicSize(HloInstruction* inst,
const ShapeIndex& index,
int64_t dim,
HloInstruction* size) {
CHECK_NE(inst, nullptr);
CHECK_NE(size, nullptr);
VLOG(1) << "Set dimension inst " << inst->ToString() << " index "
<< index.ToString() << "@" << dim << " to " << size->ToShortString();
const Shape& subshape = ShapeUtil::GetSubshape(inst->shape(), index);
CHECK(!subshape.IsTuple()) << "Can't set a tuple shape to dynamic dimension";
CHECK(dim < subshape.rank() && dim >= 0)
<< "Asked to set invalid dynamic dimension. Shape: "
<< subshape.ToString() << ", Dimension: " << dim;
DynamicDimension dynamic_dimension{inst, index, dim};
auto [it, inserted] = dynamic_mapping_.try_emplace(dynamic_dimension, size);
if (!inserted) {
CHECK_EQ(size, it->second) << "old: " << it->second->ToShortString()
<< ", new: " << size->ToShortString();
}
auto iter = per_hlo_dynamic_dimensions_.try_emplace(inst);
iter.first->second.emplace(dynamic_dimension);
}
void DynamicDimensionInference::CopyMapping(
HloInstruction* from, HloInstruction* to,
const absl::flat_hash_map<HloInstruction*, HloInstruction*>*
dynamic_size_map) {
auto iter = per_hlo_dynamic_dimensions_.find(from);
if (iter != per_hlo_dynamic_dimensions_.end()) {
for (auto& dynamic_dimension : iter->second) {
HloInstruction* dynamic_size =
GetDynamicSize(dynamic_dimension.inst, dynamic_dimension.index,
dynamic_dimension.dim);
if (dynamic_size_map != nullptr) {
dynamic_size = dynamic_size_map->at(dynamic_size);
}
SetDynamicSize(to, dynamic_dimension.index, dynamic_dimension.dim,
dynamic_size);
}
}
}
absl::StatusOr<DynamicDimensionInference> DynamicDimensionInference::Run(
HloModule* module, OpSupportsDynamismHandler op_supports_dynamism_handler,
CustomCallInferenceHandler custom_call_handler,
ShapeCheckMode shape_check_mode,
const AssertionGenerator& assertion_generator,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
DynamicDimensionInference inference(
module, std::move(op_supports_dynamism_handler),
std::move(custom_call_handler), shape_check_mode, assertion_generator,
execution_threads);
TF_RETURN_IF_ERROR(inference.AnalyzeDynamicDimensions());
return std::move(inference);
}
std::string DynamicDimensionInference::ToString() const {
std::vector<std::string> pieces;
pieces.push_back("DynamicDimensionInference: ");
for (const auto& mapping : dynamic_mapping_) {
const DynamicDimension& dynamic_dimension = mapping.first;
pieces.push_back(absl::StrFormat(
" -- instruction %s at %s has dim %lld as dynamic"
" dimension, which is represented by instruction %s",
dynamic_dimension.inst->ToString(), dynamic_dimension.index.ToString(),
dynamic_dimension.dim, mapping.second->ToString()));
}
return absl::StrJoin(pieces, "\n");
}
DynamicDimensionInference::DynamicDimensionInference(
HloModule* module, OpSupportsDynamismHandler op_supports_dynamism_handler,
CustomCallInferenceHandler custom_call_handler,
ShapeCheckMode shape_check_mode, AssertionGenerator assertion_generator,
const absl::flat_hash_set<absl::string_view>& execution_threads)
: module_(module),
op_supports_dynamism_handler_(std::move(op_supports_dynamism_handler)),
custom_call_handler_(std::move(custom_call_handler)),
shape_check_mode_(shape_check_mode),
assertion_generator_(assertion_generator),
execution_threads_(execution_threads) {}
absl::Status DynamicDimensionInference::AnalyzeDynamicDimensions() {
TF_ASSIGN_OR_RETURN(
std::unique_ptr<HloDataflowAnalysis> dataflow_analysis,
HloDataflowAnalysis::Run(*module_, false,
true,
nullptr,
nullptr, execution_threads_));
for (HloComputation* computation : module_->MakeComputationPostOrder()) {
if (!HloInstruction::IsThreadIncluded(computation->execution_thread(),
execution_threads_)) {
continue;
}
TF_ASSIGN_OR_RETURN(
bool changed,
DynamicDimensionInferenceVisitor::Run(
computation, *dataflow_analysis, {}, this, custom_call_handler_,
shape_check_mode_, assertion_generator_));
changed_ |= changed;
}
return absl::OkStatus();
}
void DynamicDimensionInference::ReplaceAllDynamicDimensionUsesWith(
HloInstruction* replace, HloInstruction* with) {
CHECK(Shape::Equal().IgnoreLayout()(replace->shape(),
ShapeUtil::MakeScalarShape(S32)));
CHECK(Shape::Equal().IgnoreLayout()(with->shape(),
ShapeUtil::MakeScalarShape(S32)));
for (auto& kv : dynamic_mapping_) {
if (kv.second == replace) {
kv.second = with;
}
}
}
absl::Status DynamicDimensionInference::ForwardDynamicSize(
HloInstruction* inst, HloInstruction* new_inst, const ShapeIndex& index) {
TF_RET_CHECK(ShapeUtil::Compatible(inst->shape(), new_inst->shape()));
for (int64_t dim = 0; dim < inst->shape().rank(); ++dim) {
DynamicDimension dynamic_dimension_new{new_inst, index, dim};
DynamicDimension dynamic_dimension{inst, index, dim};
auto iter = dynamic_mapping_.find(dynamic_dimension);
if (iter != dynamic_mapping_.end()) {
dynamic_mapping_.insert({dynamic_dimension_new, iter->second});
auto iter = per_hlo_dynamic_dimensions_.try_emplace(new_inst);
iter.first->second.emplace(dynamic_dimension_new);
}
}
return absl::OkStatus();
}
bool DynamicDimensionInference::HasDynamicDimension(
HloInstruction* inst, ShapeIndexView index) const {
bool has_dynamic_dim = false;
ShapeUtil::ForEachSubshape(inst->shape(), [&](const Shape& subshape,
const ShapeIndex& subindex) {
if (subshape.IsTuple()) {
return;
}
if (ShapeIndexView(subindex).subspan(0, index.size()) != index) {
return;
}
for (int64_t i = 0; i < subshape.dimensions_size(); ++i) {
HloInstruction* operand_dynamic_size = GetDynamicSize(inst, subindex, i);
if (operand_dynamic_size != nullptr) {
has_dynamic_dim = true;
}
}
});
return has_dynamic_dim;
}
Shape DynamicDimensionInference::GetDynamicShape(HloInstruction* inst) {
Shape shape = inst->shape();
ShapeUtil::ForEachMutableSubshape(
&shape, [&](Shape* subshape, const ShapeIndex& index) {
if (!subshape->IsArray()) {
return;
}
for (int64_t dimension = 0; dimension < subshape->rank(); ++dimension) {
if (GetDynamicSize(inst, index, dimension) != nullptr) {
subshape->set_dynamic_dimension(dimension, true);
}
}
});
return shape;
}
HloInstruction* DynamicDimensionInference::GetDynamicSize(
HloInstruction* inst, const ShapeIndex& index, int64_t dim) const {
auto iter = dynamic_mapping_.find(DynamicDimension{inst, index, dim});
if (iter != dynamic_mapping_.end()) {
return iter->second;
}
return nullptr;
}
const HloInstruction* DynamicDimensionInference::GetDynamicSize(
const HloInstruction* inst, const ShapeIndex& index, int64_t dim) const {
return GetDynamicSize(const_cast<HloInstruction*>(inst), index, dim);
}
std::vector<HloInstruction*> DynamicDimensionInference::GetDynamicSizes(
HloInstruction* inst, const ShapeIndex& index) const {
CHECK(ShapeUtil::IndexIsValid(inst->shape(), index));
const int64_t rank = ShapeUtil::GetSubshape(inst->shape(), index).rank();
std::vector<HloInstruction*> result(rank, nullptr);
for (int64_t i = 0; i < rank; ++i) {
result[i] = GetDynamicSize(inst, index, i);
}
return result;
}
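// Dynamic dimensions of an instruction can be inferred only if every dynamic
// dimension of its operands already has a recorded size; instructions with a
// fully static shape (and no called computations or custom-call semantics)
// need no inference at all.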
bool DynamicDimensionInference::CanInfer(HloInstruction* hlo) {
if (hlo->shape().is_static() && hlo->called_computations().empty() &&
hlo->opcode() != HloOpcode::kCustomCall) {
return false;
}
bool ok = true;
for (int64_t operand_index = 0; operand_index < hlo->operand_count();
++operand_index) {
ShapeUtil::ForEachSubshape(
hlo->operand(operand_index)->shape(),
[&](const Shape& subshape, const ShapeIndex& shape_index) {
if (!subshape.IsArray()) {
return;
}
for (int64_t dimension = 0; dimension < subshape.rank();
++dimension) {
bool shape_is_dynamic = subshape.is_dynamic_dimension(dimension);
bool dynamic_size_recorded =
GetDynamicSize(hlo->operand(operand_index), shape_index,
dimension) != nullptr;
if (shape_is_dynamic && !dynamic_size_recorded) {
VLOG(2) << "cannot infer " << hlo->ToShortString()
<< " because operand " << operand_index << " ("
<< hlo->operand(operand_index)->ToShortString() << ")"
<< " subshape " << shape_index.ToString()
<< " is missing dynamic size for dimension " << dimension;
ok = false;
}
CHECK(hlo->operand(operand_index)->opcode() ==
HloOpcode::kSetDimensionSize ||
hlo->operand(operand_index)->opcode() ==
HloOpcode::kCustomCall ||
!shape_is_dynamic || !dynamic_size_recorded);
}
});
}
return ok;
}
}
#include "xla/service/dynamic_dimension_inference.h"
#include "xla/hlo/builder/xla_builder.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/literal.h"
#include "xla/service/hlo_runner.h"
#include "xla/shape_util.h"
#include "xla/test.h"
#include "xla/test_helpers.h"
#include "xla/tests/filecheck.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test_benchmark.h"
namespace op = xla::testing::opcode_matchers;
namespace xla {
namespace {
class DynamicDimensionInferenceTest : public HloTestBase {
protected:
DynamicDimensionInferenceTest() : HloTestBase() {
module_ = CreateNewVerifiedModule();
}
absl::Status RunInference(
OpSupportsDynamismHandler op_supports_dynamism_handler = nullptr,
DynamicDimensionInference::CustomCallInferenceHandler handler = nullptr,
DynamicDimensionInference::ShapeCheckMode shape_check_mode =
DynamicDimensionInference::ShapeCheckMode::kIgnore,
const DynamicDimensionInference::AssertionGenerator& assertion_generator =
nullptr) {
TF_ASSIGN_OR_RETURN(DynamicDimensionInference inference,
DynamicDimensionInference::Run(
module_.get(), op_supports_dynamism_handler,
handler, shape_check_mode, assertion_generator));
inference_ = std::make_unique<DynamicDimensionInference>(inference);
return absl::OkStatus();
}
HloComputation* GetAdd() {
auto embedded_builder = HloComputation::Builder("add");
auto lhs = embedded_builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(F32, {}), "lhs"));
auto rhs = embedded_builder.AddInstruction(HloInstruction::CreateParameter(
1, ShapeUtil::MakeShape(F32, {}), "rhs"));
embedded_builder.AddInstruction(
HloInstruction::CreateBinary(lhs->shape(), HloOpcode::kAdd, lhs, rhs));
return module_->AddEmbeddedComputation(embedded_builder.Build());
}
HloComputation* GetAddTuple() {
auto embedded_builder = HloComputation::Builder("add");
auto lhs = embedded_builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(F32, {}), "lhs"));
auto lhs_1 =
embedded_builder.AddInstruction(HloInstruction::CreateParameter(
1, ShapeUtil::MakeShape(F32, {}), "lhs.1"));
auto rhs = embedded_builder.AddInstruction(HloInstruction::CreateParameter(
2, ShapeUtil::MakeShape(F32, {}), "rhs"));
auto rhs_1 =
embedded_builder.AddInstruction(HloInstruction::CreateParameter(
3, ShapeUtil::MakeShape(F32, {}), "rhs.1"));
auto add = embedded_builder.AddInstruction(
HloInstruction::CreateBinary(lhs->shape(), HloOpcode::kAdd, lhs, rhs));
auto add_1 = embedded_builder.AddInstruction(HloInstruction::CreateBinary(
lhs->shape(), HloOpcode::kAdd, lhs_1, rhs_1));
embedded_builder.AddInstruction(HloInstruction::CreateTuple({add, add_1}));
return module_->AddEmbeddedComputation(embedded_builder.Build());
}
HloComputation* GetGe() {
auto embedded_builder = HloComputation::Builder("ge");
auto lhs = embedded_builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(F32, {}), "lhs"));
auto rhs = embedded_builder.AddInstruction(HloInstruction::CreateParameter(
1, ShapeUtil::MakeShape(F32, {}), "rhs"));
embedded_builder.AddInstruction(HloInstruction::CreateCompare(
ShapeUtil::MakeShape(PRED, {}), lhs, rhs, ComparisonDirection::kGe));
return module_->AddEmbeddedComputation(embedded_builder.Build());
}
std::unique_ptr<HloModule> module_;
std::unique_ptr<DynamicDimensionInference> inference_;
const Shape scalar_shape_ = ShapeUtil::MakeShape(S32, {});
};
TEST_F(DynamicDimensionInferenceTest, ParamTest) {
auto builder = HloComputation::Builder(TestName());
auto input_shape = ShapeUtil::MakeShape(F32, {1, 2, 2});
auto dynamic_shape =
ShapeUtil::MakeShape(F32, {1, 2, 2}, {false, true, false});
auto param = builder.AddInstruction(
HloInstruction::CreateParameter(0, input_shape, "param"));
auto param2 = builder.AddInstruction(
HloInstruction::CreateParameter(1, scalar_shape_, "param"));
auto result = builder.AddInstruction(
HloInstruction::CreateSetDimensionSize(dynamic_shape, param, param2, 1));
module_->AddEntryComputation(builder.Build());
SCOPED_TRACE(module_->ToString());
TF_ASSERT_OK(RunInference());
EXPECT_EQ(inference_->GetDynamicSize(result, {}, 1), param2);
EXPECT_EQ(inference_->GetDynamicSize(param, {}, 0), nullptr);
EXPECT_EQ(inference_->GetDynamicSize(param2, {}, 0), nullptr);
}
TEST_F(DynamicDimensionInferenceTest, ElementwiseTest) {
auto builder = HloComputation::Builder(TestName());
auto input_shape = ShapeUtil::MakeShape(F32, {1, 2, 2});
auto dynamic_shape =
ShapeUtil::MakeShape(F32, {1, 2, 2}, {false, true, false});
auto data_param = builder.AddInstruction(
HloInstruction::CreateParameter(0, input_shape, "data_param"));
auto size_param = builder.AddInstruction(
HloInstruction::CreateParameter(1, scalar_shape_, "size_param"));
auto dynamic_param =
builder.AddInstruction(HloInstruction::CreateSetDimensionSize(
dynamic_shape, data_param, size_param, 1));
auto* negate = builder.AddInstruction(HloInstruction::CreateUnary(
dynamic_shape, HloOpcode::kNegate, dynamic_param));
module_->AddEntryComputation(builder.Build());
SCOPED_TRACE(module_->ToString());
TF_ASSERT_OK(RunInference());
EXPECT_EQ(inference_->GetDynamicSize(negate, {}, 1), size_param);
}
TEST_F(DynamicDimensionInferenceTest, ReduceTestI) {
auto builder = HloComputation::Builder(TestName());
auto input_shape = ShapeUtil::MakeShape(F32, {1, 2, 2});
auto reduce_shape = ShapeUtil::MakeShape(F32, {2}, {true});
auto dynamic_shape =
ShapeUtil::MakeShape(F32, {1, 2, 2}, {false, true, false});
auto data_param = builder.AddInstruction(
HloInstruction::CreateParameter(0, input_shape, "data_param"));
auto size_param = builder.AddInstruction(
HloInstruction::CreateParameter(1, scalar_shape_, "size_param"));
auto dynamic_param =
builder.AddInstruction(HloInstruction::CreateSetDimensionSize(
dynamic_shape, data_param, size_param, 1));
auto negate = builder.AddInstruction(HloInstruction::CreateUnary(
dynamic_shape, HloOpcode::kNegate, dynamic_param));
auto init = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(0.0)));
auto reduce = builder.AddInstruction(HloInstruction::CreateReduce(
reduce_shape, negate, init, {0, 2}, GetAdd()));
module_->AddEntryComputation(builder.Build());
SCOPED_TRACE(module_->ToString());
TF_ASSERT_OK(RunInference());
EXPECT_EQ(inference_->GetDynamicSize(reduce, {}, 0), size_param);
}
TEST_F(DynamicDimensionInferenceTest, ReduceTestII) {
auto builder = HloComputation::Builder(TestName());
auto input_shape = ShapeUtil::MakeShape(F32, {1, 2, 2});
auto reduce_shape = ShapeUtil::MakeShape(F32, {1, 2}, {false, true});
auto dynamic_shape =
ShapeUtil::MakeShape(F32, {1, 2, 2}, {false, false, true});
auto data_param = builder.AddInstruction(
HloInstruction::CreateParameter(0, input_shape, "data_param"));
auto size_param = builder.AddInstruction(
HloInstruction::CreateParameter(1, scalar_shape_, "size_param"));
auto dynamic_param =
builder.AddInstruction(HloInstruction::CreateSetDimensionSize(
dynamic_shape, data_param, size_param, 2));
auto negate = builder.AddInstruction(HloInstruction::CreateUnary(
dynamic_shape, HloOpcode::kNegate, dynamic_param));
auto init = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(0.0)));
auto reduce = builder.AddInstruction(
HloInstruction::CreateReduce(reduce_shape, negate, init, {1}, GetAdd()));
module_->AddEntryComputation(builder.Build());
SCOPED_TRACE(module_->ToString());
TF_ASSERT_OK(RunInference());
EXPECT_EQ(inference_->GetDynamicSize(reduce, {}, 1), size_param);
EXPECT_EQ(inference_->GetDynamicSize(reduce, {}, 0), nullptr);
}
TEST_F(DynamicDimensionInferenceTest, VariadicReduce) {
auto builder = HloComputation::Builder(TestName());
auto input_shape = ShapeUtil::MakeShape(F32, {1, 2, 2});
auto reduce_shape = ShapeUtil::MakeShape(F32, {1, 2}, {false, true});
auto dynamic_shape =
ShapeUtil::MakeShape(F32, {1, 2, 2}, {false, false, true});
auto data_param_1 = builder.AddInstruction(
HloInstruction::CreateParameter(0, input_shape, "data_param"));
auto data_param_2 = builder.AddInstruction(
HloInstruction::CreateParameter(1, input_shape, "data_param.2"));
auto size_param = builder.AddInstruction(
HloInstruction::CreateParameter(2, scalar_shape_, "size_param"));
auto data_param_dynamic_1 =
builder.AddInstruction(HloInstruction::CreateSetDimensionSize(
dynamic_shape, data_param_1, size_param, 2));
auto data_param_dynamic_2 =
builder.AddInstruction(HloInstruction::CreateSetDimensionSize(
dynamic_shape, data_param_2, size_param, 2));
auto dynamic_negate_1 = builder.AddInstruction(HloInstruction::CreateUnary(
dynamic_shape, HloOpcode::kNegate, data_param_dynamic_1));
auto dynamic_negate_2 = builder.AddInstruction(HloInstruction::CreateUnary(
dynamic_shape, HloOpcode::kNegate, data_param_dynamic_2));
auto init = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(0.0)));
auto reduce = builder.AddInstruction(HloInstruction::CreateReduce(
ShapeUtil::MakeTupleShape({reduce_shape, reduce_shape}),
{dynamic_negate_1, dynamic_negate_2}, {init, init}, {1}, GetAddTuple()));
module_->AddEntryComputation(builder.Build());
SCOPED_TRACE(module_->ToString());
TF_ASSERT_OK(RunInference());
EXPECT_EQ(inference_->GetDynamicSize(reduce, {0}, 1), size_param);
EXPECT_EQ(inference_->GetDynamicSize(reduce, {1}, 1), size_param);
EXPECT_EQ(inference_->GetDynamicSize(reduce, {0}, 0), nullptr);
EXPECT_EQ(inference_->GetDynamicSize(reduce, {1}, 0), nullptr);
}
TEST_F(DynamicDimensionInferenceTest, DotTest) {
auto builder = HloComputation::Builder(TestName());
constexpr int xdim = 3;
constexpr int ydim = 2;
constexpr int zdim = 1;
auto xy_shape = ShapeUtil::MakeShape(F32, {xdim, ydim});
auto yz_shape = ShapeUtil::MakeShape(F32, {ydim, zdim});
auto xy_dynamic_shape = ShapeUtil::MakeShape(F32, {xdim, ydim}, {true, true});
auto yz_dynamic_shape =
ShapeUtil::MakeShape(F32, {ydim, zdim}, {true, false});
auto xz_dynamic_shape =
ShapeUtil::MakeShape(F32, {xdim, zdim}, {true, false});
auto* a_param = builder.AddInstruction(HloInstruction::CreateParameter(
0, xy_shape, "A"));
auto* b_param = builder.AddInstruction(HloInstruction::CreateParameter(
1, yz_shape, "B"));
auto* size_param = builder.AddInstruction(HloInstruction::CreateParameter(
2, scalar_shape_, "size_param"));
a_param = builder.AddInstruction(HloInstruction::CreateSetDimensionSize(
ShapeUtil::MakeShape(F32, xy_shape.dimensions(), {true, false}), a_param,
size_param, 0));
a_param = builder.AddInstruction(HloInstruction::CreateSetDimensionSize(
xy_dynamic_shape, a_param, size_param, 1));
b_param = builder.AddInstruction(HloInstruction::CreateSetDimensionSize(
yz_dynamic_shape, b_param, size_param, 0));
DotDimensionNumbers dot_dnums;
dot_dnums.add_lhs_contracting_dimensions(1);
dot_dnums.add_rhs_contracting_dimensions(0);
auto dot = builder.AddInstruction(
HloInstruction::CreateDot(xz_dynamic_shape, a_param, b_param, dot_dnums,
HloTestBase::DefaultPrecisionConfig(2)));
module_->AddEntryComputation(builder.Build());
SCOPED_TRACE(module_->ToString());
TF_ASSERT_OK(RunInference());
EXPECT_EQ(inference_->GetDynamicSize(dot, {}, 0), size_param);
EXPECT_EQ(inference_->GetDynamicSize(dot, {}, 1), nullptr);
}
TEST_F(DynamicDimensionInferenceTest, DotTestBatch) {
auto builder = HloComputation::Builder(TestName());
auto lhs_shape = ShapeUtil::MakeShape(F32, {4, 128, 2, 8});
auto rhs_shape = ShapeUtil::MakeShape(F32, {4, 128, 2, 8});
auto output_shape =
ShapeUtil::MakeShape(F32, {4, 2, 128, 128}, {true, false, false, false});
auto lhs_shape_dynamic =
ShapeUtil::MakeShape(F32, {4, 128, 2, 8}, {true, false, false, false});
auto* a_param = builder.AddInstruction(HloInstruction::CreateParameter(
0, lhs_shape, "A"));
auto* b_param = builder.AddInstruction(HloInstruction::CreateParameter(
1, rhs_shape, "B"));
auto* size_param = builder.AddInstruction(HloInstruction::CreateParameter(
2, scalar_shape_, "size_param"));
a_param = builder.AddInstruction(HloInstruction::CreateSetDimensionSize(
lhs_shape_dynamic, a_param, size_param, 0));
DotDimensionNumbers dot_dnums;
dot_dnums.add_lhs_contracting_dimensions(3);
dot_dnums.add_rhs_contracting_dimensions(3);
dot_dnums.add_lhs_batch_dimensions(0);
dot_dnums.add_lhs_batch_dimensions(2);
dot_dnums.add_rhs_batch_dimensions(0);
dot_dnums.add_rhs_batch_dimensions(2);
auto dot = builder.AddInstruction(
HloInstruction::CreateDot(output_shape, a_param, b_param, dot_dnums,
HloTestBase::DefaultPrecisionConfig(2)));
module_->AddEntryComputation(builder.Build());
SCOPED_TRACE(module_->ToString());
TF_ASSERT_OK(RunInference());
EXPECT_EQ(inference_->GetDynamicSize(dot, {}, 0), size_param);
EXPECT_EQ(inference_->GetDynamicSize(dot, {}, 1), nullptr);
EXPECT_EQ(inference_->GetDynamicSize(dot, {}, 2), nullptr);
EXPECT_EQ(inference_->GetDynamicSize(dot, {}, 3), nullptr);
}
TEST_F(DynamicDimensionInferenceTest, DotTestMultiContracting) {
auto builder = HloComputation::Builder(TestName());
auto lhs_shape = ShapeUtil::MakeShape(F32, {2, 2, 8, 64});
auto rhs_shape = ShapeUtil::MakeShape(F32, {2, 2, 512});
auto output_shape = ShapeUtil::MakeShape(F32, {8, 64, 512});
auto lhs_shape_dynamic =
ShapeUtil::MakeShape(F32, {2, 2, 8, 64}, {true, true, false, false});
auto rhs_shape_dynamic =
ShapeUtil::MakeShape(F32, {2, 2, 512}, {true, true, false});
auto* a_param = builder.AddInstruction(HloInstruction::CreateParameter(
0, lhs_shape, "A"));
auto* b_param = builder.AddInstruction(HloInstruction::CreateParameter(
1, rhs_shape, "B"));
auto* size_param = builder.AddInstruction(HloInstruction::CreateParameter(
2, scalar_shape_, "size_param"));
a_param = builder.AddInstruction(HloInstruction::CreateSetDimensionSize(
ShapeUtil::MakeShape(F32, lhs_shape.dimensions(),
{true, false, false, false}),
a_param, size_param, 0));
a_param = builder.AddInstruction(HloInstruction::CreateSetDimensionSize(
lhs_shape_dynamic, a_param, size_param, 1));
b_param = builder.AddInstruction(HloInstruction::CreateSetDimensionSize(
ShapeUtil::MakeShape(F32, rhs_shape.dimensions(), {true, false, false}),
b_param, size_param, 0));
b_param = builder.AddInstruction(HloInstruction::CreateSetDimensionSize(
rhs_shape_dynamic, b_param, size_param, 1));
DotDimensionNumbers dot_dnums;
dot_dnums.add_lhs_contracting_dimensions(0);
dot_dnums.add_lhs_contracting_dimensions(1);
dot_dnums.add_rhs_contracting_dimensions(0);
dot_dnums.add_rhs_contracting_dimensions(1);
auto dot = builder.AddInstruction(
HloInstruction::CreateDot(output_shape, a_param, b_param, dot_dnums,
HloTestBase::DefaultPrecisionConfig(2)));
module_->AddEntryComputation(builder.Build());
SCOPED_TRACE(module_->ToString());
TF_ASSERT_OK(RunInference());
EXPECT_EQ(inference_->GetDynamicSize(dot, {}, 0), nullptr);
EXPECT_EQ(inference_->GetDynamicSize(dot, {}, 1), nullptr);
EXPECT_EQ(inference_->GetDynamicSize(dot, {}, 2), nullptr);
}
TEST_F(DynamicDimensionInferenceTest, ConvolutionTest) {
auto builder = HloComputation::Builder(TestName());
constexpr int xdim = 3;
constexpr int ydim = 2;
constexpr int zdim = 1;
auto xy_shape = ShapeUtil::MakeShape(F32, {xdim, ydim});
auto yz_shape = ShapeUtil::MakeShape(F32, {ydim, zdim});
auto xy_shape_dynamic = ShapeUtil::MakeShape(F32, {xdim, ydim}, {true, true});
auto zx_shape_dynamic =
ShapeUtil::MakeShape(F32, {zdim, xdim}, {false, true});
auto* a_param = builder.AddInstruction(HloInstruction::CreateParameter(
0, xy_shape, "A"));
auto* b_param = builder.AddInstruction(HloInstruction::CreateParameter(
1, yz_shape, "B"));
auto* size_param = builder.AddInstruction(HloInstruction::CreateParameter(
2, scalar_shape_, "size_param"));
a_param = builder.AddInstruction(HloInstruction::CreateSetDimensionSize(
ShapeUtil::MakeShape(F32, xy_shape.dimensions(), {true, false}), a_param,
size_param, 0));
a_param = builder.AddInstruction(HloInstruction::CreateSetDimensionSize(
xy_shape_dynamic, a_param, size_param, 1));
auto dnums = XlaBuilder::CreateDefaultConvDimensionNumbers(0);
dnums.set_kernel_input_feature_dimension(0);
dnums.set_kernel_output_feature_dimension(1);
dnums.set_input_batch_dimension(0);
dnums.set_output_batch_dimension(1);
dnums.set_output_feature_dimension(0);
Window window;
auto* conv = builder.AddInstruction(HloInstruction::CreateConvolve(
    zx_shape_dynamic, a_param, b_param, /*feature_group_count=*/1,
    /*batch_group_count=*/1, window, dnums,
    HloTestBase::DefaultPrecisionConfig(2)));
module_->AddEntryComputation(builder.Build());
SCOPED_TRACE(module_->ToString());
TF_ASSERT_OK(RunInference());
EXPECT_EQ(inference_->GetDynamicSize(conv, {}, 1), size_param);
EXPECT_EQ(inference_->GetDynamicSize(conv, {}, 0), nullptr);
}
TEST_F(DynamicDimensionInferenceTest, TransposeTest) {
auto builder = HloComputation::Builder(TestName());
auto input_shape = ShapeUtil::MakeShape(F32, {1, 2, 3});
auto output_shape = ShapeUtil::MakeShape(F32, {3, 2, 1}, {true, true, true});
auto dynamic_shape = ShapeUtil::MakeShape(F32, {1, 2, 3}, {true, true, true});
auto* a_param = builder.AddInstruction(HloInstruction::CreateParameter(
0, input_shape, "A"));
auto* size_param_1 = builder.AddInstruction(HloInstruction::CreateParameter(
1, scalar_shape_, "size_param"));
auto* size_param_2 = builder.AddInstruction(HloInstruction::CreateParameter(
2, scalar_shape_, "size_param"));
auto* size_param_3 = builder.AddInstruction(HloInstruction::CreateParameter(
3, scalar_shape_, "size_param"));
a_param = builder.AddInstruction(HloInstruction::CreateSetDimensionSize(
ShapeUtil::MakeShape(F32, {1, 2, 3}, {true, false, false}), a_param,
size_param_1, 0));
a_param = builder.AddInstruction(HloInstruction::CreateSetDimensionSize(
ShapeUtil::MakeShape(F32, {1, 2, 3}, {true, true, false}), a_param,
size_param_2, 1));
a_param = builder.AddInstruction(HloInstruction::CreateSetDimensionSize(
dynamic_shape, a_param, size_param_3, 2));
auto* transpose = builder.AddInstruction(
HloInstruction::CreateTranspose(output_shape, a_param, {2, 1, 0}));
module_->AddEntryComputation(builder.Build());
SCOPED_TRACE(module_->ToString());
TF_ASSERT_OK(RunInference());
EXPECT_EQ(inference_->GetDynamicSize(transpose, {}, 0), size_param_3);
EXPECT_EQ(inference_->GetDynamicSize(transpose, {}, 1), size_param_2);
EXPECT_EQ(inference_->GetDynamicSize(transpose, {}, 2), size_param_1);
}
TEST_F(DynamicDimensionInferenceTest, NonDescendingTransposeTest) {
auto builder = HloComputation::Builder(TestName());
auto input_shape = ShapeUtil::MakeShape(F32, {1, 2, 3});
auto output_shape = ShapeUtil::MakeShape(F32, {3, 1, 2}, {true, true, true});
auto dynamic_shape = ShapeUtil::MakeShape(F32, {1, 2, 3}, {true, true, true});
auto* a_param = builder.AddInstruction(HloInstruction::CreateParameter(
0, input_shape, "A"));
auto* size_param_1 = builder.AddInstruction(HloInstruction::CreateParameter(
1, scalar_shape_, "size_param"));
auto* size_param_2 = builder.AddInstruction(HloInstruction::CreateParameter(
2, scalar_shape_, "size_param"));
auto* size_param_3 = builder.AddInstruction(HloInstruction::CreateParameter(
3, scalar_shape_, "size_param"));
a_param = builder.AddInstruction(HloInstruction::CreateSetDimensionSize(
ShapeUtil::MakeShape(F32, {1, 2, 3}, {true, false, false}), a_param,
size_param_1, 0));
a_param = builder.AddInstruction(HloInstruction::CreateSetDimensionSize(
ShapeUtil::MakeShape(F32, {1, 2, 3}, {true, true, false}), a_param,
size_param_2, 1));
a_param = builder.AddInstruction(HloInstruction::CreateSetDimensionSize(
dynamic_shape, a_param, size_param_3, 2));
auto* transpose = builder.AddInstruction(
HloInstruction::CreateTranspose(output_shape, a_param, {2, 0, 1}));
module_->AddEntryComputation(builder.Build());
SCOPED_TRACE(module_->ToString());
TF_ASSERT_OK(RunInference());
EXPECT_EQ(inference_->GetDynamicSize(transpose, {}, 0), size_param_3);
EXPECT_EQ(inference_->GetDynamicSize(transpose, {}, 1), size_param_1);
EXPECT_EQ(inference_->GetDynamicSize(transpose, {}, 2), size_param_2);
}
TEST_F(DynamicDimensionInferenceTest, ReshapeTest) {
auto builder = HloComputation::Builder(TestName());
auto input_shape = ShapeUtil::MakeShape(F32, {2, 3, 4, 5, 6});
auto output_shape = ShapeUtil::MakeShape(
F32, {6, 4, 1, 5, 2, 3}, {false, true, false, true, false, false});
auto dynamic_shape = ShapeUtil::MakeShape(F32, {2, 3, 4, 5, 6},
{false, false, true, true, false});
auto* a_param = builder.AddInstruction(HloInstruction::CreateParameter(
0, input_shape, "A"));
auto* size_param = builder.AddInstruction(HloInstruction::CreateParameter(
1, scalar_shape_, "size_param"));
a_param = builder.AddInstruction(HloInstruction::CreateSetDimensionSize(
ShapeUtil::MakeShape(F32, {2, 3, 4, 5, 6},
{false, false, true, false, false}),
a_param, size_param, 2));
a_param = builder.AddInstruction(HloInstruction::CreateSetDimensionSize(
dynamic_shape, a_param, size_param, 3));
auto* reshape = builder.AddInstruction(
HloInstruction::CreateReshape(output_shape, a_param));
module_->AddEntryComputation(builder.Build());
SCOPED_TRACE(module_->ToString());
TF_ASSERT_OK(RunInference());
EXPECT_EQ(inference_->GetDynamicSize(reshape, {}, 0), nullptr);
EXPECT_EQ(inference_->GetDynamicSize(reshape, {}, 1), size_param);
EXPECT_EQ(inference_->GetDynamicSize(reshape, {}, 2), nullptr);
EXPECT_EQ(inference_->GetDynamicSize(reshape, {}, 3), size_param);
EXPECT_EQ(inference_->GetDynamicSize(reshape, {}, 4), nullptr);
EXPECT_EQ(inference_->GetDynamicSize(reshape, {}, 5), nullptr);
}
TEST_F(DynamicDimensionInferenceTest, ReshapeInferredDimensionTest) {
auto builder = HloComputation::Builder(TestName());
auto input_shape = ShapeUtil::MakeShape(F32, {4, 5});
auto output_shape =
ShapeUtil::MakeShape(F32, {1, 4, 5}, {true, false, false});
auto dynamic_shape = ShapeUtil::MakeShape(F32, {4, 5}, {true, false});
auto* a_param = builder.AddInstruction(HloInstruction::CreateParameter(
0, input_shape, "A"));
auto* size_param = builder.AddInstruction(HloInstruction::CreateParameter(
1, scalar_shape_, "size_param"));
a_param = builder.AddInstruction(HloInstruction::CreateSetDimensionSize(
dynamic_shape, a_param, size_param, 0));
auto* reshape = builder.AddInstruction(HloInstruction::CreateReshape(
    output_shape, a_param, /*inferred_dimension=*/0));
module_->AddEntryComputation(builder.Build());
SCOPED_TRACE(module_->ToString());
TF_ASSERT_OK(RunInference());
EXPECT_NE(inference_->GetDynamicSize(reshape, {}, 0), nullptr);
}
TEST_F(DynamicDimensionInferenceTest, ReshapeTestMajorDimension) {
auto builder = HloComputation::Builder(TestName());
auto input_shape = ShapeUtil::MakeShape(F32, {32, 10, 4});
auto output_shape = ShapeUtil::MakeShape(F32, {320, 4}, {true, false});
auto dynamic_shape =
ShapeUtil::MakeShape(F32, {32, 10, 4}, {true, false, false});
auto* a_param = builder.AddInstruction(HloInstruction::CreateParameter(
0, input_shape, "A"));
auto* size_param = builder.AddInstruction(HloInstruction::CreateParameter(
1, scalar_shape_, "size_param"));
a_param = builder.AddInstruction(HloInstruction::CreateSetDimensionSize(
dynamic_shape, a_param, size_param, 0));
auto* reshape = builder.AddInstruction(
HloInstruction::CreateReshape(output_shape, a_param));
module_->AddEntryComputation(builder.Build());
SCOPED_TRACE(module_->ToString());
absl::Status status = RunInference();
EXPECT_NE(inference_->GetDynamicSize(reshape, {}, 0), nullptr);
}
TEST_F(DynamicDimensionInferenceTest, ReshapeIntoScalar) {
auto builder = HloComputation::Builder(TestName());
auto input_shape = ShapeUtil::MakeShape(F32, {1});
auto output_shape = ShapeUtil::MakeShape(F32, {});
auto dynamic_shape = ShapeUtil::MakeShape(F32, {1}, {true});
auto* a_param = builder.AddInstruction(HloInstruction::CreateParameter(
0, input_shape, "A"));
auto* size_param = builder.AddInstruction(HloInstruction::CreateParameter(
1, scalar_shape_, "size_param"));
a_param = builder.AddInstruction(HloInstruction::CreateSetDimensionSize(
dynamic_shape, a_param, size_param, 0));
builder.AddInstruction(HloInstruction::CreateReshape(output_shape, a_param));
module_->AddEntryComputation(builder.Build());
SCOPED_TRACE(module_->ToString());
TF_CHECK_OK(RunInference());
}
TEST_F(DynamicDimensionInferenceTest, GatherTest) {
const std::string hlo_text = R"(
HloModule TensorFlowGatherV2
ENTRY main {
operand = s32[20,10]{1,0} parameter(0)
indices = s32[32,20] parameter(1)
dynamic_size = s32[] parameter(2)
indices_dynamic = s32[<=32,20] set-dimension-size(indices, dynamic_size), dimensions={0}
ROOT gather = s32[<=32,20,10]{2,1,0} gather(%operand, %indices_dynamic),
offset_dims={2},
collapsed_slice_dims={0},
start_index_map={0},
index_vector_dim=2,
slice_sizes={1,10}
}
)";
TF_ASSERT_OK_AND_ASSIGN(module_, ParseAndReturnVerifiedModule(hlo_text));
SCOPED_TRACE(module_->ToString());
TF_ASSERT_OK(RunInference());
EXPECT_EQ(inference_->GetDynamicSize(
module_->entry_computation()->root_instruction(), {}, 0),
module_->entry_computation()->parameter_instruction(2));
}
TEST_F(DynamicDimensionInferenceTest, BroadcastTest) {
auto builder = HloComputation::Builder(TestName());
auto input_shape = ShapeUtil::MakeShape(F32, {2});
auto output_shape =
ShapeUtil::MakeShape(F32, {3, 2, 4}, {false, true, false});
auto dynamic_shape = ShapeUtil::MakeShape(F32, {2}, {true});
auto* a_param = builder.AddInstruction(HloInstruction::CreateParameter(
0, input_shape, "A"));
auto* size_param = builder.AddInstruction(HloInstruction::CreateParameter(
1, scalar_shape_, "size_param"));
a_param = builder.AddInstruction(HloInstruction::CreateSetDimensionSize(
dynamic_shape, a_param, size_param, 0));
auto* broadcast = builder.AddInstruction(
HloInstruction::CreateBroadcast(output_shape, a_param, {1}));
module_->AddEntryComputation(builder.Build());
SCOPED_TRACE(module_->ToString());
TF_ASSERT_OK(RunInference());
EXPECT_EQ(inference_->GetDynamicSize(broadcast, {}, 0), nullptr);
EXPECT_EQ(inference_->GetDynamicSize(broadcast, {}, 1), size_param);
EXPECT_EQ(inference_->GetDynamicSize(broadcast, {}, 2), nullptr);
}
TEST_F(DynamicDimensionInferenceTest, WhileTest) {
auto builder = HloComputation::Builder(TestName());
auto input_shape = ShapeUtil::MakeShape(F32, {2, 4, 4});
auto dynamic_shape =
ShapeUtil::MakeShape(F32, {2, 4, 4}, {true, false, false});
auto tuple_shape = ShapeUtil::MakeTupleShape({input_shape, input_shape});
auto dynamic_tuple_shape =
ShapeUtil::MakeTupleShape({dynamic_shape, dynamic_shape});
auto body_builder = HloComputation::Builder("body");
auto body_param = body_builder.AddInstruction(
HloInstruction::CreateParameter(0, dynamic_tuple_shape, "param"));
auto gte_0 = body_builder.AddInstruction(
HloInstruction::CreateGetTupleElement(dynamic_shape, body_param, 0));
auto gte_1 = body_builder.AddInstruction(
HloInstruction::CreateGetTupleElement(dynamic_shape, body_param, 1));
auto add = body_builder.AddInstruction(HloInstruction::CreateBinary(
dynamic_shape, HloOpcode::kAdd, gte_0, gte_1));
body_builder.AddInstruction(HloInstruction::CreateTuple({add, add}));
HloComputation* body = module_->AddEmbeddedComputation(body_builder.Build());
auto cond_builder = HloComputation::Builder("condition");
cond_builder.AddInstruction(
HloInstruction::CreateParameter(0, dynamic_tuple_shape, "param"));
cond_builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(false)));
HloComputation* condition =
module_->AddEmbeddedComputation(cond_builder.Build());
auto* a_param = builder.AddInstruction(HloInstruction::CreateParameter(
0, tuple_shape, "A"));
auto* size_param = builder.AddInstruction(HloInstruction::CreateParameter(
1, scalar_shape_, "size_param"));
auto* a_0 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(input_shape, a_param, 0));
a_0 = builder.AddInstruction(HloInstruction::CreateSetDimensionSize(
dynamic_shape, a_0, size_param, 0));
auto* a_1 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(input_shape, a_param, 0));
a_1 = builder.AddInstruction(HloInstruction::CreateSetDimensionSize(
dynamic_shape, a_1, size_param, 0));
a_param = builder.AddInstruction(HloInstruction::CreateTuple({a_0, a_1}));
builder.AddInstruction(HloInstruction::CreateWhile(dynamic_tuple_shape,
condition, body, a_param));
module_->AddEntryComputation(builder.Build());
TF_ASSERT_OK(RunInference());
HloInstruction* while_hlo = nullptr;
for (HloInstruction* inst : module_->entry_computation()->instructions()) {
if (inst->opcode() == HloOpcode::kWhile) {
while_hlo = inst;
}
}
ASSERT_NE(while_hlo, nullptr);
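// Dynamic dimension inference threads the dynamic sizes through the loop
// state, so the original 2-element while tuple is expected to grow to 4
// elements.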
EXPECT_EQ(while_hlo->shape().tuple_shapes_size(), 4);
HloInstruction* add_inst = nullptr;
for (HloInstruction* inst : while_hlo->while_body()->instructions()) {
if (inst->opcode() == HloOpcode::kAdd) {
add_inst = inst;
}
}
EXPECT_NE(add_inst, nullptr);
EXPECT_NE(inference_->GetDynamicSize(add_inst, {}, 0), nullptr);
EXPECT_NE(inference_->GetDynamicSize(
module_->entry_computation()->root_instruction(), {0}, 0),
nullptr);
EXPECT_NE(inference_->GetDynamicSize(
module_->entry_computation()->root_instruction(), {1}, 0),
nullptr);
}
TEST_F(DynamicDimensionInferenceTest, ConditionalInputTest) {
auto builder = HloComputation::Builder(TestName());
auto input_shape = ShapeUtil::MakeShape(F32, {2, 4, 4});
auto dynamic_shape =
ShapeUtil::MakeShape(F32, {2, 4, 4}, {true, false, false});
auto output_shape = ShapeUtil::MakeShape(F32, {2, 2, 2});
auto tuple_shape_1 = ShapeUtil::MakeTupleShape({input_shape});
auto tuple_shape_2 = ShapeUtil::MakeTupleShape({input_shape, input_shape});
auto tuple_shape_3 =
ShapeUtil::MakeTupleShape({input_shape, input_shape, input_shape});
auto tuple_shape_2_dynamic =
ShapeUtil::MakeTupleShape({dynamic_shape, dynamic_shape});
auto tuple_shape_3_dynamic =
ShapeUtil::MakeTupleShape({input_shape, dynamic_shape, dynamic_shape});
auto true_builder = HloComputation::Builder("true");
{
auto true_param = true_builder.AddInstruction(
HloInstruction::CreateParameter(0, tuple_shape_2_dynamic, "param"));
auto gte_0 = true_builder.AddInstruction(
HloInstruction::CreateGetTupleElement(dynamic_shape, true_param, 0));
auto gte_1 = true_builder.AddInstruction(
HloInstruction::CreateGetTupleElement(dynamic_shape, true_param, 1));
auto add = true_builder.AddInstruction(HloInstruction::CreateBinary(
dynamic_shape, HloOpcode::kAdd, gte_0, gte_1));
true_builder.AddInstruction(HloInstruction::CreateTuple({add}));
}
HloComputation* true_branch =
module_->AddEmbeddedComputation(true_builder.Build());
auto false_builder = HloComputation::Builder("false");
{
auto false_param = false_builder.AddInstruction(
HloInstruction::CreateParameter(0, tuple_shape_3_dynamic, "param"));
auto gte_0 = false_builder.AddInstruction(
HloInstruction::CreateGetTupleElement(dynamic_shape, false_param, 1));
auto gte_1 = false_builder.AddInstruction(
HloInstruction::CreateGetTupleElement(dynamic_shape, false_param, 2));
auto add = false_builder.AddInstruction(HloInstruction::CreateBinary(
dynamic_shape, HloOpcode::kAdd, gte_0, gte_1));
false_builder.AddInstruction(HloInstruction::CreateTuple({add}));
}
HloComputation* false_branch =
module_->AddEmbeddedComputation(false_builder.Build());
auto* pred_param = builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeScalarShape(PRED), "pred"));
auto* tuple_2_param = builder.AddInstruction(HloInstruction::CreateParameter(
1, tuple_shape_2, "tuple_2_param"));
auto* tuple_3_param = builder.AddInstruction(HloInstruction::CreateParameter(
2, tuple_shape_3, "tuple_3_param"));
auto* size_param = builder.AddInstruction(HloInstruction::CreateParameter(
3, scalar_shape_, "size_param"));
auto* param_2_0 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(input_shape, tuple_2_param, 0));
param_2_0 = builder.AddInstruction(HloInstruction::CreateSetDimensionSize(
dynamic_shape, param_2_0, size_param, 0));
auto* param_2_1 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(input_shape, tuple_2_param, 1));
param_2_1 = builder.AddInstruction(HloInstruction::CreateSetDimensionSize(
dynamic_shape, param_2_1, size_param, 0));
tuple_2_param = builder.AddInstruction(
HloInstruction::CreateTuple({param_2_0, param_2_1}));
auto* param_3_0 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(input_shape, tuple_3_param, 0));
auto* param_3_1 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(input_shape, tuple_3_param, 1));
param_3_1 = builder.AddInstruction(HloInstruction::CreateSetDimensionSize(
dynamic_shape, param_3_1, size_param, 0));
auto* param_3_2 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(input_shape, tuple_3_param, 2));
param_3_2 = builder.AddInstruction(HloInstruction::CreateSetDimensionSize(
    dynamic_shape, param_3_2, size_param, 0));
tuple_3_param = builder.AddInstruction(
HloInstruction::CreateTuple({param_3_0, param_3_1, param_3_2}));
builder.AddInstruction(HloInstruction::CreateConditional(
tuple_shape_1, pred_param, tuple_2_param, true_branch, tuple_3_param,
false_branch));
module_->AddEntryComputation(builder.Build());
TF_ASSERT_OK(RunInference());
HloInstruction* conditional_hlo = nullptr;
for (HloInstruction* inst : module_->entry_computation()->instructions()) {
if (inst->opcode() == HloOpcode::kConditional) {
conditional_hlo = inst;
}
}
ASSERT_NE(conditional_hlo, nullptr);
EXPECT_EQ(conditional_hlo->shape().tuple_shapes_size(), 2);
HloInstruction* add_true_branch = nullptr;
for (HloInstruction* inst :
conditional_hlo->true_computation()->instructions()) {
if (inst->opcode() == HloOpcode::kAdd) {
add_true_branch = inst;
}
}
EXPECT_NE(add_true_branch, nullptr);
EXPECT_NE(inference_->GetDynamicSize(add_true_branch, {}, 0), nullptr);
HloInstruction* add_false_branch = nullptr;
for (HloInstruction* inst :
conditional_hlo->false_computation()->instructions()) {
if (inst->opcode() == HloOpcode::kAdd) {
add_false_branch = inst;
}
}
EXPECT_NE(add_false_branch, nullptr);
EXPECT_NE(inference_->GetDynamicSize(add_false_branch, {}, 0), nullptr);
EXPECT_NE(inference_->GetDynamicSize(conditional_hlo, {0}, 0), nullptr);
}
TEST_F(DynamicDimensionInferenceTest, ReduceWindowBatchTest) {
auto builder = HloComputation::Builder(TestName());
auto input_shape = ShapeUtil::MakeShape(F32, {2, 4, 4});
auto output_shape =
ShapeUtil::MakeShape(F32, {2, 2, 2}, {true, false, false});
auto dynamic_shape =
ShapeUtil::MakeShape(F32, {2, 4, 4}, {true, false, false});
Window window;
WindowDimension* batch_dim = window.add_dimensions();
batch_dim->set_size(1);
batch_dim->set_stride(1);
batch_dim->set_padding_low(0);
batch_dim->set_padding_high(0);
batch_dim->set_window_dilation(1);
batch_dim->set_base_dilation(1);
for (int64_t i = 0; i < 2; ++i) {
WindowDimension* dim = window.add_dimensions();
dim->set_size(2);
dim->set_stride(2);
dim->set_padding_low(0);
dim->set_padding_high(0);
dim->set_window_dilation(1);
dim->set_base_dilation(1);
}
auto* a_param = builder.AddInstruction(HloInstruction::CreateParameter(
0, input_shape, "A"));
auto* size_param = builder.AddInstruction(HloInstruction::CreateParameter(
1, scalar_shape_, "size_param"));
a_param = builder.AddInstruction(HloInstruction::CreateSetDimensionSize(
dynamic_shape, a_param, size_param, 0));
auto init = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(0.0)));
auto* reduce_window =
builder.AddInstruction(HloInstruction::CreateReduceWindow(
output_shape, a_param, init, window, GetAdd()));
module_->AddEntryComputation(builder.Build());
SCOPED_TRACE(module_->ToString());
TF_ASSERT_OK(RunInference());
EXPECT_EQ(inference_->GetDynamicSize(reduce_window, {}, 0), size_param);
}
TEST_F(DynamicDimensionInferenceTest, SelectAndScatterTest) {
auto builder = HloComputation::Builder(TestName());
auto input_shape = ShapeUtil::MakeShape(F32, {2, 4, 4});
auto source_shape = ShapeUtil::MakeShape(F32, {2, 2, 2});
auto input_shape_dynamic =
ShapeUtil::MakeShape(F32, {2, 4, 4}, {true, false, false});
auto source_shape_dynamic =
ShapeUtil::MakeShape(F32, {2, 2, 2}, {true, false, false});
Window window;
WindowDimension* batch_dim = window.add_dimensions();
batch_dim->set_size(1);
batch_dim->set_stride(1);
batch_dim->set_padding_low(0);
batch_dim->set_padding_high(0);
batch_dim->set_window_dilation(1);
batch_dim->set_base_dilation(1);
for (int64_t i = 0; i < 2; ++i) {
WindowDimension* dim = window.add_dimensions();
dim->set_size(2);
dim->set_stride(2);
dim->set_padding_low(0);
dim->set_padding_high(0);
dim->set_window_dilation(1);
dim->set_base_dilation(1);
}
auto* a_param = builder.AddInstruction(HloInstruction::CreateParameter(
0, input_shape, "A"));
auto* size_param = builder.AddInstruction(HloInstruction::CreateParameter(
1, scalar_shape_, "size_param"));
auto* source = builder.AddInstruction(HloInstruction::CreateParameter(
2, source_shape, "B"));
a_param = builder.AddInstruction(HloInstruction::CreateSetDimensionSize(
input_shape_dynamic, a_param, size_param, 0));
source = builder.AddInstruction(HloInstruction::CreateSetDimensionSize(
source_shape_dynamic, source, size_param, 0));
auto init = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(0.0)));
auto* sns = builder.AddInstruction(HloInstruction::CreateSelectAndScatter(
input_shape_dynamic, a_param, GetGe(), window, source, init, GetAdd()));
module_->AddEntryComputation(builder.Build());
SCOPED_TRACE(module_->ToString());
TF_ASSERT_OK(RunInference());
EXPECT_EQ(inference_->GetDynamicSize(sns, {}, 0), size_param);
}
TEST_F(DynamicDimensionInferenceTest, ConcatTest) {
auto builder = HloComputation::Builder(TestName());
auto data_param = builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(F32, {5, 7}), "data_param_1"));
auto data_param_2 = builder.AddInstruction(HloInstruction::CreateParameter(
1, ShapeUtil::MakeShape(F32, {5, 8}), "data_param_2"));
auto size_param = builder.AddInstruction(
HloInstruction::CreateParameter(2, scalar_shape_, "size_param"));
data_param = builder.AddInstruction(HloInstruction::CreateSetDimensionSize(
ShapeUtil::MakeShape(F32, {5, 7}, {true, false}), data_param, size_param,
0));
data_param_2 = builder.AddInstruction(HloInstruction::CreateSetDimensionSize(
ShapeUtil::MakeShape(F32, {5, 8}, {true, false}), data_param_2,
size_param, 0));
auto* concat = builder.AddInstruction(HloInstruction::CreateConcatenate(
ShapeUtil::MakeShape(F32, {5, 15}, {true, false}),
{data_param, data_param_2}, 1));
module_->AddEntryComputation(builder.Build());
TF_ASSERT_OK(RunInference());
EXPECT_EQ(inference_->GetDynamicSize(concat, {}, 0), size_param);
}
TEST_F(DynamicDimensionInferenceTest, SliceTest) {
auto builder = HloComputation::Builder(TestName());
auto dynamic_shape = ShapeUtil::MakeShape(F32, {5, 7}, {false, true});
auto data_param = builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(F32, {5, 7}), "data_param"));
auto size_param = builder.AddInstruction(
HloInstruction::CreateParameter(1, scalar_shape_, "size_param"));
data_param = builder.AddInstruction(HloInstruction::CreateSetDimensionSize(
dynamic_shape, data_param, size_param, 1));
auto* slice = builder.AddInstruction(HloInstruction::CreateSlice(
    dynamic_shape, data_param, /*start_indices=*/{0, 0},
    /*limit_indices=*/{5, 7}, /*strides=*/{1, 1}));
module_->AddEntryComputation(builder.Build());
TF_ASSERT_OK(RunInference());
EXPECT_EQ(inference_->GetDynamicSize(slice, {}, 1), size_param);
}
TEST_F(DynamicDimensionInferenceTest, DynamicSliceTest) {
auto builder = HloComputation::Builder(TestName());
auto data_param = builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(F32, {5, 7}), "data_param"));
auto size_param = builder.AddInstruction(
HloInstruction::CreateParameter(1, scalar_shape_, "size_param"));
std::vector<HloInstruction*> params;
for (int i = 0; i < 2; ++i) {
params.push_back(builder.AddInstruction(HloInstruction::CreateParameter(
i + 2, ShapeUtil::MakeShape(S32, {}), "slice_indices")));
}
data_param = builder.AddInstruction(HloInstruction::CreateSetDimensionSize(
ShapeUtil::MakeShape(F32, {5, 7}, {true, false}), data_param, size_param,
0));
auto* slice = builder.AddInstruction(HloInstruction::CreateDynamicSlice(
    ShapeUtil::MakeShape(F32, {5, 1}, {true, false}), data_param, params,
    /*slice_sizes=*/{5, 1}));
module_->AddEntryComputation(builder.Build());
TF_ASSERT_OK(RunInference());
EXPECT_EQ(inference_->GetDynamicSize(slice, {}, 0), size_param);
}
TEST_F(DynamicDimensionInferenceTest, SortTest) {
auto builder = HloComputation::Builder(TestName());
auto dynamic_shape = ShapeUtil::MakeShape(F32, {5, 7}, {true, false});
auto data_param = builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(F32, {5, 7}), "data_param"));
auto size_param = builder.AddInstruction(
HloInstruction::CreateParameter(1, scalar_shape_, "size_param"));
auto compare_builder = HloComputation::Builder("condition");
compare_builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(F32, {}), "param1"));
compare_builder.AddInstruction(HloInstruction::CreateParameter(
1, ShapeUtil::MakeShape(F32, {}), "param2"));
compare_builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(false)));
HloComputation* compare =
module_->AddEmbeddedComputation(compare_builder.Build());
data_param = builder.AddInstruction(HloInstruction::CreateSetDimensionSize(
dynamic_shape, data_param, size_param, 0));
auto* sort = builder.AddInstruction(
    HloInstruction::CreateSort(dynamic_shape, /*dimension=*/1, {data_param},
                               compare, /*is_stable=*/false));
module_->AddEntryComputation(builder.Build());
TF_ASSERT_OK(RunInference());
EXPECT_EQ(inference_->GetDynamicSize(sort, {}, 0), size_param);
}
TEST_F(DynamicDimensionInferenceTest, MultiValueSortTest) {
auto builder = HloComputation::Builder(TestName());
auto shape = ShapeUtil::MakeShape(F32, {5, 7});
auto dynamic_shape = ShapeUtil::MakeShape(F32, {5, 7}, {true, false});
auto data_param = builder.AddInstruction(
HloInstruction::CreateParameter(0, shape, "data_param"));
auto size_param = builder.AddInstruction(
HloInstruction::CreateParameter(1, scalar_shape_, "size_param"));
auto compare_builder = HloComputation::Builder("condition");
compare_builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(F32, {}), "param1"));
compare_builder.AddInstruction(HloInstruction::CreateParameter(
1, ShapeUtil::MakeShape(F32, {}), "param2"));
compare_builder.AddInstruction(HloInstruction::CreateParameter(
2, ShapeUtil::MakeShape(F32, {}), "param3"));
compare_builder.AddInstruction(HloInstruction::CreateParameter(
3, ShapeUtil::MakeShape(F32, {}), "param4"));
compare_builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(false)));
HloComputation* compare =
module_->AddEmbeddedComputation(compare_builder.Build());
data_param = builder.AddInstruction(HloInstruction::CreateSetDimensionSize(
dynamic_shape, data_param, size_param, 0));
auto* sort = builder.AddInstruction(HloInstruction::CreateSort(
    ShapeUtil::MakeTupleShape({dynamic_shape, dynamic_shape}),
    /*dimension=*/1, {data_param, data_param}, compare,
    /*is_stable=*/false));
module_->AddEntryComputation(builder.Build());
TF_ASSERT_OK(RunInference());
EXPECT_EQ(inference_->GetDynamicSize(sort, {0}, 0), size_param);
EXPECT_EQ(inference_->GetDynamicSize(sort, {1}, 0), size_param);
}
TEST_F(DynamicDimensionInferenceTest, DynamicSliceSingleElementTest) {
auto builder = HloComputation::Builder(TestName());
auto data_param = builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(F32, {5, 7}), "data_param"));
auto* size_param = builder.AddInstruction(
HloInstruction::CreateParameter(1, scalar_shape_, "size_param"));
std::vector<HloInstruction*> params;
for (int i = 0; i < 2; ++i) {
params.push_back(builder.AddInstruction(HloInstruction::CreateParameter(
i + 2, ShapeUtil::MakeShape(S32, {}), "slice_indices")));
}
data_param = builder.AddInstruction(HloInstruction::CreateSetDimensionSize(
ShapeUtil::MakeShape(F32, {5, 7}, {true, false}), data_param, size_param,
0));
auto* slice = builder.AddInstruction(HloInstruction::CreateDynamicSlice(
    ShapeUtil::MakeShape(F32, {1, 1}), data_param, params,
    /*slice_sizes=*/{1, 1}));
module_->AddEntryComputation(builder.Build());
TF_ASSERT_OK(RunInference());
EXPECT_EQ(inference_->GetDynamicSize(slice, {}, 0), nullptr);
}
TEST_F(DynamicDimensionInferenceTest, InfersCustomOp) {
auto builder = HloComputation::Builder(TestName());
auto data_param = builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(F32, {5, 7}), "data_param"));
auto* size_param = builder.AddInstruction(
HloInstruction::CreateParameter(1, scalar_shape_, "size_param"));
data_param = builder.AddInstruction(HloInstruction::CreateSetDimensionSize(
ShapeUtil::MakeShape(F32, {5, 7}, {true, false}), data_param, size_param,
0));
builder.AddInstruction(HloInstruction::CreateCustomCall(
ShapeUtil::MakeShape(F32, {1, 1}), {data_param}, "MyCustomOp", ""));
module_->AddEntryComputation(builder.Build());
bool handler_called = false;
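// "MyCustomOp" is unknown to dynamic dimension inference, so a custom-call
// handler is supplied; the test only verifies that this handler is invoked.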
auto handler = [&](HloInstruction* hlo,
DynamicDimensionInference* inference) {
CHECK(inference != nullptr);
CHECK(Cast<HloCustomCallInstruction>(hlo) != nullptr);
handler_called = true;
return absl::OkStatus();
};
TF_ASSERT_OK(RunInference(nullptr, handler));
EXPECT_TRUE(handler_called);
}
TEST_F(DynamicDimensionInferenceTest, DynamicReshapeOp) {
auto builder = HloComputation::Builder(TestName());
auto input = builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(F32, {9}), "data_input"));
auto six = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32_t>(6)));
auto dynamic_input =
builder.AddInstruction(HloInstruction::CreateSetDimensionSize(
ShapeUtil::MakeShape(F32, {9}, {true}), input, six, 0));
auto dynamic_size = builder.AddInstruction(HloInstruction::CreateParameter(
1, ShapeUtil::MakeShape(S32, {}), "size_param"));
auto three = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32_t>(3)));
auto dynamic_reshape =
builder.AddInstruction(HloInstruction::CreateDynamicReshape(
ShapeUtil::MakeShape(F32, {3, 3}, {false, true}), dynamic_input,
{three, dynamic_size}));
module_->AddEntryComputation(builder.Build());
TF_ASSERT_OK(RunInference());
EXPECT_EQ(inference_->GetDynamicSize(dynamic_reshape, {}, 0), nullptr);
EXPECT_EQ(inference_->GetDynamicSize(dynamic_reshape, {}, 1), dynamic_size);
}
TEST_F(DynamicDimensionInferenceTest, ReshapeOpWithMultipleDynamicDimensions) {
auto builder = HloComputation::Builder(TestName());
auto input = builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(F32, {9, 2}), "data_input"));
auto six = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32_t>(6)));
input = builder.AddInstruction(HloInstruction::CreateSetDimensionSize(
ShapeUtil::MakeShape(F32, {9, 2}, {true, false}), input, six, 0));
auto one = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32_t>(1)));
input = builder.AddInstruction(HloInstruction::CreateSetDimensionSize(
ShapeUtil::MakeShape(F32, {9, 2}, {true, true}), input, one, 1));
auto dynamic_reshape = builder.AddInstruction(HloInstruction::CreateReshape(
ShapeUtil::MakeShape(F32, {9, 1, 2}, {true, false, true}), input));
module_->AddEntryComputation(builder.Build());
TF_ASSERT_OK(RunInference());
EXPECT_EQ(inference_->GetDynamicSize(dynamic_reshape, {}, 0), six);
EXPECT_EQ(inference_->GetDynamicSize(dynamic_reshape, {}, 1), nullptr);
EXPECT_EQ(inference_->GetDynamicSize(dynamic_reshape, {}, 2), one);
}
TEST_F(DynamicDimensionInferenceTest, HandleMapInDynamicDimensionInference) {
const char* module_str = R"(
HloModule test_module
%scatter-combiner.285 (p0.286: c128[], p1.287: c128[]) -> c128[] {
%p0.286 = c128[] parameter(0)
%p1.287 = c128[] parameter(1)
ROOT %add.288 = c128[] add(c128[] %p0.286, c128[] %p1.287)
}
%while_body {
%reshape.8 = s32[] parameter(4)
%reshape.7 = c128[1]{0} parameter(3)
%reduce = pred[] parameter(2)
%concatenate = s32[1]{0} parameter(1)
%slice.4 = s32[1]{0} slice(s32[1]{0} %concatenate), slice={[0:1]}
%broadcast.7 = pred[1]{0} broadcast(pred[] %reduce), dimensions={}
%param.1 = (s32[],c128[<=1]{0},s32[1]{0},c128[1]{0}) parameter(0)
%get-tuple-element.2 = c128[<=1]{0} get-tuple-element((s32[],c128[<=1]{0},s32[1]{0},c128[1]{0}) %param.1), index=1
%dynamic-slice.2 = c128[1]{0} dynamic-slice(c128[<=1]{0} %get-tuple-element.2,s32[] %reshape.8), dynamic_slice_sizes={1}
%map = c128[1]{0} map(c128[1]{0} %dynamic-slice.2,c128[1]{0} %reshape.7), dimensions={0}, to_apply=%scatter-combiner.285
%select = c128[1]{0} select(pred[1]{0} %broadcast.7,c128[1]{0} %map,c128[1]{0} %dynamic-slice.2)
%reshape.9 = s32[] reshape(s32[1]{0} %slice.4)
%dynamic-update-slice = c128[<=1]{0} dynamic-update-slice(c128[<=1]{0} %get-tuple-element.2,c128[1]{0} %select,s32[] %reshape.9)
})";
TF_ASSERT_OK_AND_ASSIGN(module_, ParseAndReturnUnverifiedModule(module_str));
TF_ASSERT_OK(RunInference());
}
TEST_F(DynamicDimensionInferenceTest, RuntimeShapeCheck) {
const char* hlo = R"(
HloModule module
ENTRY computation {
a = f32[20,20] parameter(0)
a_size_1 = s32[] parameter(1)
a_size_2 = s32[] parameter(2)
a_dynamic_1 = f32[<=20,20] set-dimension-size(a, a_size_1), dimensions={0}
a_dynamic_2 = f32[<=20,<=20] set-dimension-size(a_dynamic_1, a_size_2), dimensions={1}
b = f32[20,20] parameter(3)
b_size_1 = s32[] parameter(4)
b_size_2 = s32[] parameter(5)
b_dynamic_1 = f32[<=20,20] set-dimension-size(b, b_size_1), dimensions={0}
b_dynamic_2 = f32[<=20,<=20] set-dimension-size(b_dynamic_1, b_size_2), dimensions={1}
ROOT f = add(a_dynamic_2, b_dynamic_2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(module_, ParseAndReturnVerifiedModule(hlo));
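// ShapeCheckMode::kRuntime asks the pass to emit runtime shape checks; the
// assertion generator below wraps each generated constraint in an
// "__xla__assert" custom call.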
TF_ASSERT_OK(RunInference(
    nullptr, nullptr, DynamicDimensionInference::ShapeCheckMode::kRuntime,
    [&](HloInstruction* constraint) {
      constraint->parent()->AddInstruction(HloInstruction::CreateCustomCall(
          ShapeUtil::MakeTokenShape(), {constraint}, "__xla__assert",
          /*opaque=*/std::string{}, API_VERSION_STATUS_RETURNING));
    }));
absl::StatusOr<bool> filecheck_result = RunFileCheck(module_->ToString({}),
R"(
)");
TF_ASSERT_OK(filecheck_result.status());
EXPECT_TRUE(*filecheck_result);
}
TEST_F(DynamicDimensionInferenceTest, NestedControlFlow) {
const char* hlo = R"(
HloModule tfcompile.377, entry_computation_layout={(s32[], f32[250]{0}, pred[], pred[], s32[], pred[], s32[], pred[])->(f32[3]{0})}
cond_2_Sum-reduction.17 {
x.18 = f32[] parameter(0)
y.19 = f32[] parameter(1)
ROOT add.20 = f32[] add(x.18, y.19)
}
cond_2_cond_true_214__.21 {
arg_tuple.22 = () parameter(0)
constant.23 = s32[] constant(1)
reshape.24 = s32[] reshape(constant.23)
ROOT tuple.25 = (s32[]) tuple(constant.23)
}
cond_2_cond_false_215__.26 {
arg_tuple.27 = () parameter(0)
constant.28 = s32[] constant(0)
reshape.29 = s32[] reshape(constant.28)
ROOT tuple.30 = (s32[]) tuple(constant.28)
}
cond_2_true_195__.31 {
arg_tuple.32 = (s32[], f32[250]{0}) parameter(0)
get-tuple-element.33 = s32[] get-tuple-element(arg_tuple.32), index=0
constant.35 = s32[] constant(20)
minimum.36 = s32[] minimum(get-tuple-element.33, constant.35)
reshape.37 = s32[1]{0} reshape(minimum.36)
concatenate.38 = s32[1]{0} concatenate(reshape.37), dimensions={0}
slice.48 = s32[1]{0} slice(concatenate.38), slice={[0:1]}
reshape.49 = s32[] reshape(reshape.37)
constant.43 = s32[] constant(0)
compare.50 = pred[] compare(minimum.36, constant.43), direction=LT
constant.44 = s32[] constant(250)
add.51 = s32[] add(constant.44, minimum.36)
select.52 = s32[] select(compare.50, add.51, minimum.36)
constant.45 = s32[1]{0} constant({0})
slice.46 = s32[1]{0} slice(constant.45), slice={[0:1]}
reshape.47 = s32[] reshape(slice.46)
subtract.53 = s32[] subtract(select.52, reshape.47)
maximum.54 = s32[] maximum(subtract.53, constant.43)
convert.55 = s32[] convert(maximum.54)
get-tuple-element.34 = f32[250]{0} get-tuple-element(arg_tuple.32), index=1
constant.39 = f32[] constant(0)
pad.40 = f32[500]{0} pad(get-tuple-element.34, constant.39), padding=0_250
constant.41 = s32[] constant(500)
set-dimension-size.42 = f32[500]{0} set-dimension-size(pad.40, constant.41), dimensions={0}
dynamic-slice.56 = f32[250]{0} dynamic-slice(set-dimension-size.42, reshape.47), dynamic_slice_sizes={250}
reshape.57 = f32[250]{0} reshape(dynamic-slice.56)
set-dimension-size.58 = f32[<=250]{0} set-dimension-size(dynamic-slice.56, maximum.54), dimensions={0}
constant.59 = f32[] constant(1)
broadcast.60 = f32[250]{0} broadcast(constant.59), dimensions={}
compare.61 = pred[<=250]{0} compare(set-dimension-size.58, broadcast.60), direction=GE
convert.62 = f32[<=250]{0} convert(compare.61)
convert.63 = f32[<=250]{0} convert(convert.62)
constant.64 = f32[] constant(0)
convert.65 = f32[] convert(constant.64)
reduce.66 = f32[] reduce(convert.62, constant.64), dimensions={0}, to_apply=cond_2_Sum-reduction.17
convert.67 = f32[] convert(reduce.66)
reshape.73 = f32[] reshape(reduce.66)
constant.68 = f32[] constant(6)
compare.69 = pred[] compare(reduce.66, constant.68), direction=GE
tuple.70 = () tuple()
conditional.71 = (s32[]) conditional(compare.69, tuple.70, tuple.70), true_computation=cond_2_cond_true_214__.21, false_computation=cond_2_cond_false_215__.26
get-tuple-element.72 = s32[] get-tuple-element(conditional.71), index=0
reshape.74 = s32[] reshape(get-tuple-element.72)
ROOT tuple.75 = (f32[], s32[]) tuple(reduce.66, get-tuple-element.72)
}
cond_2_false_196__.76 {
arg_tuple.77 = (s32[], f32[250]{0}) parameter(0)
constant.80 = f32[] constant(0)
reshape.82 = f32[] reshape(constant.80)
constant.81 = s32[] constant(0)
reshape.83 = s32[] reshape(constant.81)
ROOT tuple.84 = (f32[], s32[]) tuple(constant.80, constant.81)
}
cond_true_10__.85 {
arg_tuple.86 = (pred[], pred[], pred[]) parameter(0)
get-tuple-element.87 = pred[] get-tuple-element(arg_tuple.86), index=0
reshape.90 = pred[] reshape(get-tuple-element.87)
ROOT tuple.91 = (pred[]) tuple(get-tuple-element.87)
}
cond_cond_true_16__.92 {
arg_tuple.93 = (pred[], pred[]) parameter(0)
get-tuple-element.94 = pred[] get-tuple-element(arg_tuple.93), index=0
reshape.96 = pred[] reshape(get-tuple-element.94)
ROOT tuple.97 = (pred[]) tuple(get-tuple-element.94)
}
cond_cond_false_17__.98 {
arg_tuple.99 = (pred[], pred[]) parameter(0)
get-tuple-element.101 = pred[] get-tuple-element(arg_tuple.99), index=1
reshape.102 = pred[] reshape(get-tuple-element.101)
ROOT tuple.103 = (pred[]) tuple(get-tuple-element.101)
}
cond_false_11__.104 {
arg_tuple.105 = (pred[], pred[], pred[]) parameter(0)
get-tuple-element.107 = pred[] get-tuple-element(arg_tuple.105), index=1
get-tuple-element.108 = pred[] get-tuple-element(arg_tuple.105), index=2
tuple.109 = (pred[], pred[]) tuple(get-tuple-element.107, get-tuple-element.108)
conditional.110 = (pred[]) conditional(get-tuple-element.107, tuple.109, tuple.109), true_computation=cond_cond_true_16__.92, false_computation=cond_cond_false_17__.98
get-tuple-element.111 = pred[] get-tuple-element(conditional.110), index=0
reshape.112 = pred[] reshape(get-tuple-element.111)
ROOT tuple.113 = (pred[]) tuple(get-tuple-element.111)
}
cond_1_map_while_cond_true_82__.114 {
arg_tuple.115 = (f32[]) parameter(0)
constant.117 = f32[] constant(0)
reshape.118 = f32[] reshape(constant.117)
ROOT tuple.119 = (f32[]) tuple(constant.117)
}
cond_1_map_while_cond_cond_true_91__.120 {
constant.123 = f32[] constant(0.1)
arg_tuple.121 = (f32[]) parameter(0)
get-tuple-element.122 = f32[] get-tuple-element(arg_tuple.121), index=0
multiply.124 = f32[] multiply(constant.123, get-tuple-element.122)
constant.125 = f32[] constant(0)
add.126 = f32[] add(multiply.124, constant.125)
constant.127 = f32[] constant(0.9)
divide.128 = f32[] divide(add.126, constant.127)
reshape.129 = f32[] reshape(divide.128)
ROOT tuple.130 = (f32[]) tuple(divide.128)
}
cond_1_map_while_cond_cond_cond_true_106__.131 {
constant.134 = f32[] constant(0.8)
arg_tuple.132 = (f32[]) parameter(0)
get-tuple-element.133 = f32[] get-tuple-element(arg_tuple.132), index=0
multiply.135 = f32[] multiply(constant.134, get-tuple-element.133)
constant.136 = f32[] constant(-0.711)
add.137 = f32[] add(multiply.135, constant.136)
constant.138 = f32[] constant(0.09)
divide.139 = f32[] divide(add.137, constant.138)
reshape.140 = f32[] reshape(divide.139)
ROOT tuple.141 = (f32[]) tuple(divide.139)
}
cond_1_map_while_cond_cond_cond_cond_true_121__.142 {
constant.145 = f32[] constant(0.2)
arg_tuple.143 = (f32[]) parameter(0)
get-tuple-element.144 = f32[] get-tuple-element(arg_tuple.143), index=0
multiply.146 = f32[] multiply(constant.145, get-tuple-element.144)
constant.147 = f32[] constant(-0.18)
add.148 = f32[] add(multiply.146, constant.147)
constant.149 = f32[] constant(0.02)
divide.150 = f32[] divide(add.148, constant.149)
reshape.151 = f32[] reshape(divide.150)
ROOT tuple.152 = (f32[]) tuple(divide.150)
}
cond_1_map_while_cond_cond_cond_cond_cond_true_136__.153 {
constant.156 = f32[] constant(0.1)
arg_tuple.154 = (f32[]) parameter(0)
get-tuple-element.155 = f32[] get-tuple-element(arg_tuple.154), index=0
multiply.157 = f32[] multiply(constant.156, get-tuple-element.155)
constant.158 = f32[] constant(108.788)
add.159 = f32[] add(multiply.157, constant.158)
constant.160 = f32[] constant(98.99)
divide.161 = f32[] divide(add.159, constant.160)
reshape.162 = f32[] reshape(divide.161)
ROOT tuple.163 = (f32[]) tuple(divide.161)
}
cond_1_map_while_cond_cond_cond_cond_cond_false_137__.164 {
arg_tuple.165 = (f32[]) parameter(0)
constant.167 = f32[] constant(1.2)
reshape.168 = f32[] reshape(constant.167)
ROOT tuple.169 = (f32[]) tuple(constant.167)
}
cond_1_map_while_cond_cond_cond_cond_false_122__.170 {
arg_tuple.171 = (f32[]) parameter(0)
get-tuple-element.172 = f32[] get-tuple-element(arg_tuple.171), index=0
constant.173 = f32[] constant(100)
compare.174 = pred[] compare(get-tuple-element.172, constant.173), direction=LE
tuple.175 = (f32[]) tuple(get-tuple-element.172)
conditional.176 = (f32[]) conditional(compare.174, tuple.175, tuple.175), true_computation=cond_1_map_while_cond_cond_cond_cond_cond_true_136__.153, false_computation=cond_1_map_while_cond_cond_cond_cond_cond_false_137__.164
get-tuple-element.177 = f32[] get-tuple-element(conditional.176), index=0
reshape.178 = f32[] reshape(get-tuple-element.177)
ROOT tuple.179 = (f32[]) tuple(get-tuple-element.177)
}
cond_1_map_while_cond_cond_cond_false_107__.180 {
arg_tuple.181 = (f32[]) parameter(0)
get-tuple-element.182 = f32[] get-tuple-element(arg_tuple.181), index=0
constant.183 = f32[] constant(1.01)
compare.184 = pred[] compare(get-tuple-element.182, constant.183), direction=LE
tuple.185 = (f32[]) tuple(get-tuple-element.182)
conditional.186 = (f32[]) conditional(compare.184, tuple.185, tuple.185), true_computation=cond_1_map_while_cond_cond_cond_cond_true_121__.142, false_computation=cond_1_map_while_cond_cond_cond_cond_false_122__.170
get-tuple-element.187 = f32[] get-tuple-element(conditional.186), index=0
reshape.188 = f32[] reshape(get-tuple-element.187)
ROOT tuple.189 = (f32[]) tuple(get-tuple-element.187)
}
cond_1_map_while_cond_cond_false_92__.190 {
arg_tuple.191 = (f32[]) parameter(0)
get-tuple-element.192 = f32[] get-tuple-element(arg_tuple.191), index=0
constant.193 = f32[] constant(0.99)
compare.194 = pred[] compare(get-tuple-element.192, constant.193), direction=LE
tuple.195 = (f32[]) tuple(get-tuple-element.192)
conditional.196 = (f32[]) conditional(compare.194, tuple.195, tuple.195), true_computation=cond_1_map_while_cond_cond_cond_true_106__.131, false_computation=cond_1_map_while_cond_cond_cond_false_107__.180
get-tuple-element.197 = f32[] get-tuple-element(conditional.196), index=0
reshape.198 = f32[] reshape(get-tuple-element.197)
ROOT tuple.199 = (f32[]) tuple(get-tuple-element.197)
}
cond_1_map_while_cond_false_83__.200 {
arg_tuple.201 = (f32[]) parameter(0)
get-tuple-element.202 = f32[] get-tuple-element(arg_tuple.201), index=0
constant.203 = f32[] constant(0.9)
compare.204 = pred[] compare(get-tuple-element.202, constant.203), direction=LE
tuple.205 = (f32[]) tuple(get-tuple-element.202)
conditional.206 = (f32[]) conditional(compare.204, tuple.205, tuple.205), true_computation=cond_1_map_while_cond_cond_true_91__.120, false_computation=cond_1_map_while_cond_cond_false_92__.190
get-tuple-element.207 = f32[] get-tuple-element(conditional.206), index=0
reshape.208 = f32[] reshape(get-tuple-element.207)
ROOT tuple.209 = (f32[]) tuple(get-tuple-element.207)
}
cond_1_map_while_body_59__.210 {
arg_tuple.211 = (s32[], s32[], s32[], (f32[<=250]{0}, s32[]), s32[], (f32[<=250]{0}, s32[])) parameter(0)
get-tuple-element.212 = s32[] get-tuple-element(arg_tuple.211), index=0
constant.218 = s32[] constant(1)
add.219 = s32[] add(get-tuple-element.212, constant.218)
reshape.239 = s32[] reshape(add.219)
get-tuple-element.213 = s32[] get-tuple-element(arg_tuple.211), index=1
reshape.240 = s32[] reshape(get-tuple-element.213)
get-tuple-element.214 = s32[] get-tuple-element(arg_tuple.211), index=2
constant.220 = s32[] constant(1)
add.221 = s32[] add(get-tuple-element.214, constant.220)
reshape.241 = s32[] reshape(add.221)
get-tuple-element.216 = s32[] get-tuple-element(arg_tuple.211), index=4
reshape.242 = s32[] reshape(get-tuple-element.216)
get-tuple-element.215 = (f32[<=250]{0}, s32[]) get-tuple-element(arg_tuple.211), index=3
get-tuple-element.235 = f32[<=250]{0} get-tuple-element(get-tuple-element.215), index=0
get-tuple-element.217 = (f32[<=250]{0}, s32[]) get-tuple-element(arg_tuple.211), index=5
get-tuple-element.223 = f32[<=250]{0} get-tuple-element(get-tuple-element.217), index=0
dynamic-slice.224 = f32[1]{0} dynamic-slice(get-tuple-element.223, get-tuple-element.214), dynamic_slice_sizes={1}
reshape.225 = f32[] reshape(dynamic-slice.224)
constant.226 = f32[] constant(0)
compare.227 = pred[] compare(reshape.225, constant.226), direction=LE
tuple.228 = (f32[]) tuple(reshape.225)
conditional.229 = (f32[]) conditional(compare.227, tuple.228, tuple.228), true_computation=cond_1_map_while_cond_true_82__.114, false_computation=cond_1_map_while_cond_false_83__.200
get-tuple-element.230 = f32[] get-tuple-element(conditional.229), index=0
reshape.233 = f32[1]{0} reshape(get-tuple-element.230)
dynamic-update-slice.236 = f32[<=250]{0} dynamic-update-slice(get-tuple-element.235, reshape.233, get-tuple-element.214)
get-tuple-element.237 = s32[] get-tuple-element(get-tuple-element.215), index=1
tuple.238 = (f32[<=250]{0}, s32[]) tuple(dynamic-update-slice.236, get-tuple-element.237)
ROOT tuple.243 = (s32[], s32[], s32[], (f32[<=250]{0}, s32[]), s32[], (f32[<=250]{0}, s32[])) tuple(add.219, get-tuple-element.213, add.221, tuple.238, get-tuple-element.216, get-tuple-element.217)
}
cond_wrapper.257 {
inputs.258 = (s32[], s32[], s32[], (f32[<=250]{0}, s32[]), s32[], (f32[<=250]{0}, s32[])) parameter(0)
get-tuple-element.0 = s32[] get-tuple-element(inputs.258), index=0
get-tuple-element.1 = s32[] get-tuple-element(inputs.258), index=1
compare.0 = pred[] compare(get-tuple-element.0, get-tuple-element.1), direction=LT
get-tuple-element.2 = s32[] get-tuple-element(inputs.258), index=2
get-tuple-element.3 = s32[] get-tuple-element(inputs.258), index=4
compare.1 = pred[] compare(get-tuple-element.2, get-tuple-element.3), direction=LT
and.0 = pred[] and(compare.0, compare.1)
tuple.0 = (pred[]) tuple(and.0)
ROOT get-tuple-element.260 = pred[] get-tuple-element(tuple.0), index=0
reshape.0 = pred[] reshape(and.0)
}
cond_1_Sum-reduction.261 {
x.262 = f32[] parameter(0)
y.263 = f32[] parameter(1)
ROOT add.264 = f32[] add(x.262, y.263)
}
cond_1_true_36__.265 {
arg_tuple.266 = (s32[], f32[250]{0}) parameter(0)
get-tuple-element.267 = s32[] get-tuple-element(arg_tuple.266), index=0
reshape.269 = s32[1]{0} reshape(get-tuple-element.267)
concatenate.270 = s32[1]{0} concatenate(reshape.269), dimensions={0}
slice.280 = s32[1]{0} slice(concatenate.270), slice={[0:1]}
reshape.281 = s32[] reshape(reshape.269)
constant.275 = s32[] constant(0)
compare.282 = pred[] compare(get-tuple-element.267, constant.275), direction=LT
constant.276 = s32[] constant(250)
add.283 = s32[] add(constant.276, get-tuple-element.267)
select.284 = s32[] select(compare.282, add.283, get-tuple-element.267)
constant.277 = s32[1]{0} constant({0})
slice.278 = s32[1]{0} slice(constant.277), slice={[0:1]}
reshape.279 = s32[] reshape(slice.278)
subtract.285 = s32[] subtract(select.284, reshape.279)
maximum.286 = s32[] maximum(subtract.285, constant.275)
convert.287 = s32[] convert(maximum.286)
get-tuple-element.268 = f32[250]{0} get-tuple-element(arg_tuple.266), index=1
constant.271 = f32[] constant(0)
pad.272 = f32[500]{0} pad(get-tuple-element.268, constant.271), padding=0_250
constant.273 = s32[] constant(500)
set-dimension-size.274 = f32[500]{0} set-dimension-size(pad.272, constant.273), dimensions={0}
dynamic-slice.288 = f32[250]{0} dynamic-slice(set-dimension-size.274, reshape.279), dynamic_slice_sizes={250}
reshape.289 = f32[250]{0} reshape(dynamic-slice.288)
set-dimension-size.290 = f32[<=250]{0} set-dimension-size(dynamic-slice.288, maximum.286), dimensions={0}
get-dimension-size.291 = s32[] get-dimension-size(set-dimension-size.290), dimensions={0}
convert.292 = s32[] convert(get-dimension-size.291)
broadcast.293 = s32[1]{0} broadcast(get-dimension-size.291), dimensions={}
concatenate.294 = s32[1]{0} concatenate(broadcast.293), dimensions={0}
slice.295 = s32[1]{0} slice(concatenate.294), slice={[0:1]}
reshape.296 = s32[] reshape(broadcast.293)
constant.309 = s32[] constant(0)
constant.310 = s32[] constant(0)
constant.312 = f32[] constant(0)
broadcast.313 = f32[250]{0} broadcast(constant.312), dimensions={}
constant.302 = s32[] constant(0)
broadcast.303 = s32[250]{0} broadcast(constant.302), dimensions={}
set-dimension-size.304 = s32[<=250]{0} set-dimension-size(broadcast.303, get-dimension-size.291), dimensions={0}
get-dimension-size.311 = s32[] get-dimension-size(set-dimension-size.304), dimensions={0}
set-dimension-size.314 = f32[<=250]{0} set-dimension-size(broadcast.313, get-dimension-size.311), dimensions={0}
constant.315 = s32[] constant(0)
tuple.316 = (f32[<=250]{0}, s32[]) tuple(set-dimension-size.314, constant.315)
constant.305 = s32[] constant(250)
tuple.306 = (f32[<=250]{0}, s32[]) tuple(set-dimension-size.290, constant.305)
tuple.317 = (s32[], s32[], s32[], (f32[<=250]{0}, s32[]), s32[], (f32[<=250]{0}, s32[])) tuple(constant.309, get-dimension-size.291, constant.310, tuple.316, get-dimension-size.291, tuple.306)
while.318 = (s32[], s32[], s32[], (f32[<=250]{0}, s32[]), s32[], (f32[<=250]{0}, s32[])) while(tuple.317), condition=cond_wrapper.257, body=cond_1_map_while_body_59__.210
get-tuple-element.319 = s32[] get-tuple-element(while.318), index=0
get-tuple-element.320 = s32[] get-tuple-element(while.318), index=1
get-tuple-element.321 = s32[] get-tuple-element(while.318), index=2
get-tuple-element.322 = (f32[<=250]{0}, s32[]) get-tuple-element(while.318), index=3
get-tuple-element.323 = s32[] get-tuple-element(while.318), index=4
get-tuple-element.324 = (f32[<=250]{0}, s32[]) get-tuple-element(while.318), index=5
tuple.325 = (s32[], s32[], s32[], (f32[<=250]{0}, s32[]), s32[], (f32[<=250]{0}, s32[])) tuple(get-tuple-element.319, get-tuple-element.320, get-tuple-element.321, get-tuple-element.322, get-tuple-element.323, get-tuple-element.324)
get-tuple-element.329 = (f32[<=250]{0}, s32[]) get-tuple-element(tuple.325), index=3
get-tuple-element.332 = f32[<=250]{0} get-tuple-element(get-tuple-element.329), index=0
convert.333 = f32[<=250]{0} convert(get-tuple-element.332)
constant.334 = f32[] constant(0)
convert.335 = f32[] convert(constant.334)
reduce.336 = f32[] reduce(get-tuple-element.332, constant.334), dimensions={0}, to_apply=cond_1_Sum-reduction.261
convert.337 = f32[] convert(reduce.336)
reshape.338 = f32[] reshape(reduce.336)
ROOT tuple.339 = (f32[]) tuple(reduce.336)
}
cond_1_false_37__.340 {
arg_tuple.341 = (s32[], f32[250]{0}) parameter(0)
constant.344 = f32[] constant(0)
reshape.345 = f32[] reshape(constant.344)
ROOT tuple.346 = (f32[]) tuple(constant.344)
}
ENTRY tfcompile.377 {
arg6.7 = s32[] parameter(6), parameter_replication={false}
arg0.1 = s32[] parameter(0), parameter_replication={false}
reshape.9 = s32[] reshape(arg0.1)
arg1.2 = f32[250]{0} parameter(1), parameter_replication={false}
reshape.10 = f32[250]{0} reshape(arg1.2)
arg2.3 = pred[] parameter(2), parameter_replication={false}
reshape.11 = pred[] reshape(arg2.3)
arg3.4 = pred[] parameter(3), parameter_replication={false}
reshape.12 = pred[] reshape(arg3.4)
arg4.5 = s32[] parameter(4), parameter_replication={false}
reshape.13 = s32[] reshape(arg4.5)
arg5.6 = pred[] parameter(5), parameter_replication={false}
reshape.14 = pred[] reshape(arg5.6)
arg7.8 = pred[] parameter(7), parameter_replication={false}
reshape.16 = pred[] reshape(arg7.8)
tuple.1 = (s32[], f32[250]{0}) tuple(arg0.1, arg1.2)
conditional.0 = (f32[], s32[]) conditional(arg2.3, tuple.1, tuple.1), true_computation=cond_2_true_195__.31, false_computation=cond_2_false_196__.76
get-tuple-element.4 = f32[] get-tuple-element(conditional.0), index=0
reshape.1 = f32[1]{0} reshape(get-tuple-element.4)
get-tuple-element.5 = s32[] get-tuple-element(conditional.0), index=1
convert.0 = f32[] convert(get-tuple-element.5)
reshape.2 = f32[1]{0} reshape(convert.0)
tuple.2 = (pred[], pred[], pred[]) tuple(arg3.4, arg5.6, arg7.8)
conditional.1 = (pred[]) conditional(arg3.4, tuple.2, tuple.2), true_computation=cond_true_10__.85, false_computation=cond_false_11__.104
get-tuple-element.6 = pred[] get-tuple-element(conditional.1), index=0
tuple.3 = (s32[], f32[250]{0}) tuple(arg4.5, arg1.2)
conditional.2 = (f32[]) conditional(get-tuple-element.6, tuple.3, tuple.3), true_computation=cond_1_true_36__.265, false_computation=cond_1_false_37__.340
get-tuple-element.7 = f32[] get-tuple-element(conditional.2), index=0
reshape.3 = f32[1]{0} reshape(get-tuple-element.7)
concatenate.0 = f32[3]{0} concatenate(reshape.1, reshape.2, reshape.3), dimensions={0}
tuple.4 = (f32[3]{0}) tuple(concatenate.0)
get-tuple-element.374 = f32[3]{0} get-tuple-element(tuple.4), index=0
reshape.375 = f32[3]{0} reshape(get-tuple-element.374)
ROOT tuple.376 = (f32[3]{0}) tuple(get-tuple-element.374)
reshape.4 = f32[3]{0} reshape(concatenate.0)
}
)";
TF_ASSERT_OK_AND_ASSIGN(module_, ParseAndReturnVerifiedModule(hlo));
TF_ASSERT_OK(RunInference());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/dynamic_dimension_inference.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/dynamic_dimension_inference_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
4576ccbd-f7dd-4e17-a657-fa9fd05c7865 | cpp | google/arolla | operator_package | arolla/codegen/operator_package/operator_package.cc | arolla/codegen/operator_package/operator_package_test.cc | #include "arolla/codegen/operator_package/operator_package.h"
#include <set>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_join.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "google/protobuf/io/gzip_stream.h"
#include "google/protobuf/io/zero_copy_stream_impl_lite.h"
#include "arolla/codegen/operator_package/operator_package.pb.h"
#include "arolla/expr/expr_operator.h"
#include "arolla/expr/registered_expr_operator.h"
#include "arolla/qtype/qtype_traits.h"
#include "arolla/qtype/typed_value.h"
#include "arolla/serialization/decode.h"
#include "arolla/serialization/encode.h"
#include "arolla/serialization_codecs/generic/operator_codec.pb.h"
#include "arolla/util/status_macros_backport.h"
namespace arolla::operator_package {
using ::arolla::expr::ExprOperatorPtr;
using ::arolla::expr::ExprOperatorRegistry;
using ::arolla::expr::LookupOperator;
using ::arolla::serialization::Decode;
using ::arolla::serialization::Encode;
using ::arolla::serialization_codecs::OperatorV1Proto;
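// Parses an OperatorPackageProto from zlib-compressed data embedded into the
// binary.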
absl::Status ParseEmbeddedOperatorPackage(
absl::string_view embedded_zlib_data,
OperatorPackageProto* operator_package_proto) {
::google::protobuf::io::ArrayInputStream input_stream(embedded_zlib_data.data(),
embedded_zlib_data.size());
::google::protobuf::io::GzipInputStream gzip_input_stream(&input_stream);
if (!operator_package_proto->ParseFromZeroCopyStream(&gzip_input_stream) ||
gzip_input_stream.ZlibErrorMessage() != nullptr) {
return absl::InternalError("unable to parse an embedded operator package");
}
return absl::OkStatus();
}
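// Registers every operator from the package in the global
// ExprOperatorRegistry. Fails if the proto version is unsupported, if a
// required registered operator is missing, or if an operator with the same
// registration name is already present in the registry.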
absl::Status LoadOperatorPackageProto(
const OperatorPackageProto& operator_package_proto) {
if (operator_package_proto.version() != 1) {
return absl::InvalidArgumentError(
absl::StrFormat("expected operator_package_proto.version=1, got %d",
operator_package_proto.version()));
}
auto* const operator_registry = ExprOperatorRegistry::GetInstance();
auto check_registered_operator_presence = [&](absl::string_view name) {
return operator_registry->LookupOperatorOrNull(name) != nullptr;
};
std::set<absl::string_view> missing_operators;
for (absl::string_view operator_name :
operator_package_proto.required_registered_operators()) {
if (!check_registered_operator_presence(operator_name)) {
missing_operators.insert(operator_name);
}
}
if (!missing_operators.empty()) {
return absl::FailedPreconditionError(
"missing dependencies: M." + absl::StrJoin(missing_operators, ", M."));
}
std::set<absl::string_view> already_registered_operators;
for (const auto& operator_proto : operator_package_proto.operators()) {
if (check_registered_operator_presence(
operator_proto.registration_name())) {
already_registered_operators.insert(operator_proto.registration_name());
}
}
if (!already_registered_operators.empty()) {
return absl::FailedPreconditionError(
"already present in the registry: M." +
absl::StrJoin(already_registered_operators, ", M."));
}
for (int i = 0; i < operator_package_proto.operators_size(); ++i) {
const auto& operator_proto = operator_package_proto.operators(i);
ASSIGN_OR_RETURN(
auto decode_result, Decode(operator_proto.implementation()),
_ << "operators[" << i
<< "].registration_name=" << operator_proto.registration_name());
if (decode_result.values.size() != 1 || !decode_result.exprs.empty()) {
return absl::InvalidArgumentError(absl::StrFormat(
"expected to get a value, got %d values and %d exprs; "
"operators[%d].registration_name=%s",
decode_result.values.size(), decode_result.exprs.size(), i,
operator_proto.registration_name()));
}
const auto& qvalue = decode_result.values[0];
if (qvalue.GetType() != GetQType<ExprOperatorPtr>()) {
return absl::InvalidArgumentError(absl::StrFormat(
"expected to get %s, got %s; operators[%d].registration_name=%s",
GetQType<ExprOperatorPtr>()->name(), qvalue.GetType()->name(), i,
operator_proto.registration_name()));
}
RETURN_IF_ERROR(operator_registry
->Register(operator_proto.registration_name(),
qvalue.UnsafeAs<ExprOperatorPtr>())
.status());
}
return absl::OkStatus();
}
absl::StatusOr<OperatorPackageProto> DumpOperatorPackageProto(
absl::Span<const absl::string_view> operator_names) {
OperatorPackageProto result;
result.set_version(1);
std::set<absl::string_view> stored_operators;
for (const auto& op_name : operator_names) {
if (!stored_operators.emplace(op_name).second) {
return absl::InvalidArgumentError(
absl::StrFormat("operator `%s` is listed multiple times", op_name));
}
auto* op_proto = result.add_operators();
op_proto->set_registration_name(op_name.data(), op_name.size());
ASSIGN_OR_RETURN(const auto& op, LookupOperator(op_name));
ASSIGN_OR_RETURN(const auto& op_impl, op->GetImplementation());
ASSIGN_OR_RETURN(*op_proto->mutable_implementation(),
Encode({TypedValue::FromValue(op_impl)}, {}));
}
std::set<absl::string_view> required_registered_operators;
for (const auto& op_proto : result.operators()) {
if (required_registered_operators.count(op_proto.registration_name())) {
return absl::InvalidArgumentError(absl::StrFormat(
"expected the operator names to be given in topological order, but "
"`%s` is listed after it was already required by other operator",
op_proto.registration_name()));
}
for (const auto& decoding_step :
op_proto.implementation().decoding_steps()) {
if (!decoding_step.has_value() ||
!decoding_step.value().HasExtension(OperatorV1Proto::extension)) {
continue;
}
const auto& op_v1_proto =
decoding_step.value().GetExtension(OperatorV1Proto::extension);
if (!op_v1_proto.has_registered_operator_name()) {
continue;
}
required_registered_operators.emplace(
op_v1_proto.registered_operator_name());
}
}
for (const auto& op_proto : result.operators()) {
required_registered_operators.erase(op_proto.registration_name());
}
for (const auto& op_name : required_registered_operators) {
result.add_required_registered_operators(op_name.data(), op_name.size());
}
return result;
}
} | #include "arolla/codegen/operator_package/operator_package.h"
#include <cstdint>
#include <string>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/status/status.h"
#include "absl/status/status_matchers.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "arolla/codegen/operator_package/operator_package.pb.h"
#include "arolla/expr/expr.h"
#include "arolla/expr/expr_operator.h"
#include "arolla/expr/lambda_expr_operator.h"
#include "arolla/expr/registered_expr_operator.h"
#include "arolla/qtype/base_types.h"
#include "arolla/qtype/typed_value.h"
#include "arolla/serialization/encode.h"
namespace arolla::operator_package {
namespace {
using ::absl_testing::StatusIs;
using ::arolla::expr::CallOp;
using ::arolla::expr::ExprOperatorPtr;
using ::arolla::expr::LookupOperator;
using ::arolla::expr::Placeholder;
using ::arolla::expr::RegisterOperator;
using ::testing::ElementsAre;
using ::testing::HasSubstr;
using ::testing::IsEmpty;
using ::testing::SizeIs;
TEST(ParseEmbeddedOperatorPackageTest, TrivialOperatorPackage) {
OperatorPackageProto operator_package_proto;
ASSERT_OK(ParseEmbeddedOperatorPackage("x\x9c\xe3`\x04\x00\x00\x13\x00\n",
&operator_package_proto));
EXPECT_THAT(operator_package_proto.version(), 1);
}
TEST(ParseEmbeddedOperatorPackageTest, ZLibError) {
OperatorPackageProto operator_package_proto;
EXPECT_THAT(ParseEmbeddedOperatorPackage("abc", &operator_package_proto),
StatusIs(absl::StatusCode::kInternal,
"unable to parse an embedded operator package"));
}
TEST(ParseEmbeddedOperatorPackageTest, ProtoError) {
OperatorPackageProto operator_package_proto;
EXPECT_THAT(
ParseEmbeddedOperatorPackage("x\xda\xe3\x98\x06\x00\x00\xa8\x00\x9f",
&operator_package_proto),
StatusIs(absl::StatusCode::kInternal,
"unable to parse an embedded operator package"));
}
class LoadOperatorPackageProtoTest : public ::testing::Test {
protected:
template <typename Proto>
static absl::StatusOr<std::string> SerializeToString(const Proto& proto) {
if (std::string result; proto.SerializeToString(&result)) {
return result;
}
return absl::InvalidArgumentError("unable to serialize a proto message");
}
};
TEST(LoadOperatorPackageProtoTest, Registration) {
ASSERT_OK_AND_ASSIGN(ExprOperatorPtr op,
MakeLambdaOperator(Placeholder("x")));
OperatorPackageProto operator_package_proto;
operator_package_proto.set_version(1);
auto* operator_proto = operator_package_proto.add_operators();
operator_proto->set_registration_name("foo.bar.registration");
ASSERT_OK_AND_ASSIGN(*operator_proto->mutable_implementation(),
serialization::Encode({TypedValue::FromValue(op)}, {}));
EXPECT_OK(LoadOperatorPackageProto(operator_package_proto));
ASSERT_OK_AND_ASSIGN(auto reg_op, LookupOperator("foo.bar.registration"));
ASSERT_OK_AND_ASSIGN(auto op_impl, reg_op->GetImplementation());
ASSERT_NE(op_impl, nullptr);
EXPECT_EQ(op_impl->fingerprint(), op->fingerprint());
}
TEST(LoadOperatorPackageProtoTest, ErrorAlreadyRegistered) {
ASSERT_OK_AND_ASSIGN(ExprOperatorPtr op,
MakeLambdaOperator(Placeholder("x")));
OperatorPackageProto operator_package_proto;
operator_package_proto.set_version(1);
auto* operator_proto = operator_package_proto.add_operators();
operator_proto->set_registration_name("foo.bar.already_registered");
ASSERT_OK_AND_ASSIGN(*operator_proto->mutable_implementation(),
serialization::Encode({TypedValue::FromValue(op)}, {}));
EXPECT_OK(LoadOperatorPackageProto(operator_package_proto));
EXPECT_THAT(LoadOperatorPackageProto(operator_package_proto),
StatusIs(absl::StatusCode::kFailedPrecondition,
"already present in the registry: "
"M.foo.bar.already_registered"));
}
TEST(LoadOperatorPackageProtoTest, ErrorUnexpectedFormatVersion) {
OperatorPackageProto operator_package_proto;
EXPECT_THAT(LoadOperatorPackageProto(operator_package_proto),
StatusIs(absl::StatusCode::kInvalidArgument,
"expected operator_package_proto.version=1, got 0"));
}
TEST(LoadOperatorPackageProtoTest, ErrorMissingDependency) {
OperatorPackageProto operator_package_proto;
operator_package_proto.set_version(1);
operator_package_proto.add_required_registered_operators("foo.bar");
operator_package_proto.add_required_registered_operators("far.boo");
EXPECT_THAT(LoadOperatorPackageProto(operator_package_proto),
StatusIs(absl::StatusCode::kFailedPrecondition,
"missing dependencies: M.far.boo, M.foo.bar"));
}
TEST(LoadOperatorPackageProtoTest, ErrorBrokenOperatorImplementation) {
OperatorPackageProto operator_package_proto;
operator_package_proto.set_version(1);
operator_package_proto.add_operators()->set_registration_name("foo.bar");
EXPECT_THAT(LoadOperatorPackageProto(operator_package_proto),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("; operators[0].registration_name=foo.bar")));
}
TEST(LoadOperatorPackageProtoTest, ErrorNoValueInOperatorImplementation) {
OperatorPackageProto operator_package_proto;
operator_package_proto.set_version(1);
auto* operator_proto = operator_package_proto.add_operators();
operator_proto->set_registration_name("foo.bar");
ASSERT_OK_AND_ASSIGN(*operator_proto->mutable_implementation(),
serialization::Encode({}, {}));
EXPECT_THAT(
LoadOperatorPackageProto(operator_package_proto),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("expected to get a value, got 0 values and 0 exprs; "
"operators[0].registration_name=foo.bar")));
}
TEST(LoadOperatorPackageProtoTest,
ErrorUnexpectedValueInOperatorImplementation) {
OperatorPackageProto operator_package_proto;
operator_package_proto.set_version(1);
auto* operator_proto = operator_package_proto.add_operators();
operator_proto->set_registration_name("foo.bar");
ASSERT_OK_AND_ASSIGN(
*operator_proto->mutable_implementation(),
serialization::Encode({TypedValue::FromValue<int64_t>(0)}, {}));
EXPECT_THAT(LoadOperatorPackageProto(operator_package_proto),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("expected to get EXPR_OPERATOR, got INT64; "
"operators[0].registration_name=foo.bar")));
}
TEST(DumpOperatorPackageProtoTest, Empty) {
ASSERT_OK_AND_ASSIGN(auto operator_package_proto,
DumpOperatorPackageProto({}));
EXPECT_EQ(operator_package_proto.version(), 1);
EXPECT_THAT(operator_package_proto.required_registered_operators(),
IsEmpty());
EXPECT_THAT(operator_package_proto.operators(), IsEmpty());
}
TEST(DumpOperatorPackageProtoTest, Basics) {
constexpr absl::string_view kOp1Name =
"dump_operator_package_test.basics.op1";
constexpr absl::string_view kOp2Name =
"dump_operator_package_test.basics.op2";
ASSERT_OK(RegisterOperator(kOp1Name, MakeLambdaOperator(Placeholder("x"))));
ASSERT_OK(RegisterOperator(
kOp2Name, MakeLambdaOperator(CallOp(kOp1Name, {Placeholder("x")}))));
{
ASSERT_OK_AND_ASSIGN(auto operator_package_proto,
DumpOperatorPackageProto({kOp1Name, kOp2Name}));
EXPECT_THAT(operator_package_proto.required_registered_operators(),
IsEmpty());
EXPECT_THAT(operator_package_proto.operators(), SizeIs(2));
}
{
ASSERT_OK_AND_ASSIGN(auto operator_package_proto,
DumpOperatorPackageProto({kOp1Name}));
EXPECT_THAT(operator_package_proto.required_registered_operators(),
IsEmpty());
EXPECT_THAT(operator_package_proto.operators(), SizeIs(1));
}
{
ASSERT_OK_AND_ASSIGN(auto operator_package_proto,
DumpOperatorPackageProto({kOp2Name}));
EXPECT_THAT(operator_package_proto.required_registered_operators(),
ElementsAre(kOp1Name));
EXPECT_THAT(operator_package_proto.operators(), SizeIs(1));
}
{
EXPECT_THAT(DumpOperatorPackageProto({kOp1Name, kOp1Name}),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("listed multiple times")));
}
{
EXPECT_THAT(DumpOperatorPackageProto({kOp2Name, kOp1Name}),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("expected the operator names to be given in "
"topological order")));
}
}
}
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/codegen/operator_package/operator_package.cc | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/codegen/operator_package/operator_package_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
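A short, hedged usage sketch for the arolla operator-package record above (it is not part of the dataset row). It registers a trivial lambda operator under the invented name "sketch_pkg.identity", dumps it into an OperatorPackageProto with DumpOperatorPackageProto, and notes how LoadOperatorPackageProto would restore it in a fresh process; the operator name and the function name are assumptions made up for illustration.
#include "absl/status/status.h"
#include "arolla/codegen/operator_package/operator_package.h"
#include "arolla/codegen/operator_package/operator_package.pb.h"
#include "arolla/expr/expr.h"
#include "arolla/expr/lambda_expr_operator.h"
#include "arolla/expr/registered_expr_operator.h"
#include "arolla/util/status_macros_backport.h"
absl::Status DumpAndReloadSketch() {
  using ::arolla::expr::MakeLambdaOperator;
  using ::arolla::expr::Placeholder;
  using ::arolla::expr::RegisterOperator;
  // Register a trivial identity lambda under a hypothetical name.
  RETURN_IF_ERROR(RegisterOperator("sketch_pkg.identity",
                                   MakeLambdaOperator(Placeholder("x")))
                      .status());
  // Dump it into a package proto. Operator names must be listed in
  // topological order: dependencies before the operators that use them.
  ASSIGN_OR_RETURN(auto package_proto,
                   arolla::operator_package::DumpOperatorPackageProto(
                       {"sketch_pkg.identity"}));
  // In a fresh process (where the name is not registered yet), the package
  // would be restored with:
  //   arolla::operator_package::LoadOperatorPackageProto(package_proto);
  return absl::OkStatus();
}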
b4765acf-7156-479d-b560-41b2e7874fdd | cpp | tensorflow/tensorflow | tensor_array | tensorflow/lite/kernels/variants/tensor_array.cc | tensorflow/lite/kernels/variants/tensor_array_test.cc | #include "tensorflow/lite/kernels/variants/tensor_array.h"
#include <cstring>
#include "tensorflow/lite/array.h"
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/util.h"
namespace tflite {
namespace variants {
TensorArray::TensorArray(const TensorArray& other) {
TfLiteIntArray* copied_shape = TfLiteIntArrayCopy(other.element_shape_.get());
element_shape_ = IntArrayUniquePtr(copied_shape);
element_type_ = other.element_type_;
num_elements_ = other.num_elements_;
elements_ =
(RefCountedTensor*)malloc(sizeof(RefCountedTensor) * other.num_elements_);
other.AssignBuffer(elements_);
}
TensorArray& TensorArray::operator=(const TensorArray& other) {
TfLiteIntArray* copied_shape = TfLiteIntArrayCopy(other.element_shape_.get());
element_shape_ = IntArrayUniquePtr(copied_shape);
Resize(other.num_elements_);
Clear();
other.AssignBuffer(elements_);
return *this;
}
void TensorArray::Resize(int num_elements) {
if (num_elements == NumElements() || num_elements < 0) return;
if (num_elements > NumElements()) {
elements_ = (RefCountedTensor*)realloc(
elements_, num_elements * sizeof(RefCountedTensor));
for (int i = NumElements(); i < num_elements; ++i) {
elements_[i].count = nullptr;
elements_[i].tensor = nullptr;
}
} else {
for (int i = num_elements; i < NumElements(); ++i) {
Drop(i);
}
elements_ = (RefCountedTensor*)realloc(
elements_, num_elements * sizeof(RefCountedTensor));
}
num_elements_ = num_elements;
}
const TfLiteTensor* TensorArray::At(int index) const {
if (index < 0 || index >= NumElements()) {
return nullptr;
}
return elements_[index].tensor;
}
bool TensorArray::Set(int index, TensorUniquePtr tensor) {
if (index < 0 || index >= NumElements()) {
return false;
}
Drop(index);
int* c = (int*)malloc(sizeof(int));
*c = 1;
elements_[index].tensor = tensor.release();
elements_[index].count = c;
return true;
}
TensorArray::~TensorArray() {
Clear();
free(elements_);
elements_ = nullptr;
}
void TensorArray::Drop(int i) {
RefCountedTensor* t = elements_ + i;
int* count = t->count;
if (count == nullptr) {
return;
}
if (*count == 1) {
TfLiteTensorFree(t->tensor);
free(t->tensor);
free(t->count);
t->tensor = nullptr;
t->count = nullptr;
return;
}
(*count)--;
}
void TensorArray::Clear() {
for (int i = 0; i < num_elements_; ++i) {
Drop(i);
}
}
void TensorArray::AssignBuffer(RefCountedTensor* dst) const {
std::memcpy(dst, elements_, sizeof(RefCountedTensor) * num_elements_);
for (int i = 0; i < num_elements_; ++i) {
if (dst[i].count == nullptr) {
continue;
}
(*dst[i].count)++;
}
}
}
} | #include "tensorflow/lite/kernels/variants/tensor_array.h"
#include <memory>
#include <numeric>
#include <optional>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/array.h"
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/core/c/c_api_types.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/portable_type_to_tflitetype.h"
#include "tensorflow/lite/util.h"
namespace tflite {
namespace variants {
namespace {
template <typename T>
TensorUniquePtr MakeTensorWithData(std::vector<int> dims,
const std::vector<T>& data) {
TensorUniquePtr tensor =
BuildTfLiteTensor(typeToTfLiteType<T>(), dims, kTfLiteDynamic);
const int num_elements =
std::accumulate(dims.begin(), dims.end(), 1, std::multiplies<int>());
T* data_start = (T*)tensor->data.data;
for (int i = 0; i < num_elements; ++i) {
data_start[i] = data[i];
}
return tensor;
}
TensorArray MakeTensorArrayForTest(const std::vector<int>& dims) {
return TensorArray(kTfLiteInt32, BuildTfLiteArray(dims));
}
TEST(TensorArrayTest, InsertSingleElement) {
auto arr = MakeTensorArrayForTest({});
arr.Resize(2);
ASSERT_TRUE(arr.Set(0, MakeTensorWithData<int>({2}, {3, 4})));
const TfLiteTensor* added_tensor = arr.At(0);
ASSERT_TRUE(added_tensor != nullptr);
ASSERT_THAT(added_tensor, DimsAre({2}));
EXPECT_EQ(added_tensor->data.i32[0], 3);
EXPECT_EQ(added_tensor->data.i32[1], 4);
}
TEST(TensorArrayTest, ResizeToZero) {
auto arr = MakeTensorArrayForTest({});
arr.Resize(2);
EXPECT_EQ(arr.NumElements(), 2);
arr.Resize(0);
EXPECT_EQ(arr.NumElements(), 0);
}
TEST(TensorArrayTest, InsertOOB) {
auto arr = MakeTensorArrayForTest({});
TensorUniquePtr tensor = MakeTensorWithData<int>({2}, {3, 4});
arr.Resize(1);
ASSERT_FALSE(arr.Set(-1, std::move(tensor)));
EXPECT_FALSE(arr.At(0));
}
TEST(TensorArrayTest, InsertMultipleElements) {
auto arr = MakeTensorArrayForTest({});
arr.Resize(2);
EXPECT_TRUE(arr.Set(0, MakeTensorWithData<int>({2}, {3, 4})));
EXPECT_TRUE(arr.Set(1, MakeTensorWithData<int>({3}, {3, 4, 5})));
EXPECT_THAT(arr.At(0), DimsAre({2}));
EXPECT_THAT(arr.At(1), DimsAre({3}));
}
TEST(TensorArrayTest, InsertSameIndexTwiceDeletes) {
auto arr = MakeTensorArrayForTest({});
arr.Resize(2);
EXPECT_TRUE(arr.Set(0, MakeTensorWithData<int>({2}, {3, 2})));
EXPECT_TRUE(arr.Set(0, MakeTensorWithData<int>({3}, {3, 4, 5})));
EXPECT_THAT(arr.At(0), DimsAre({3}));
}
TEST(TensorArrayTest, ResizeUpWithElements) {
auto arr = MakeTensorArrayForTest({});
arr.Resize(1);
ASSERT_TRUE(arr.Set(0, MakeTensorWithData<int>({2}, {3, 4})));
arr.Resize(2);
EXPECT_THAT(arr.At(0), DimsAre({2}));
EXPECT_FALSE(arr.At(1));
EXPECT_EQ(arr.NumElements(), 2);
}
TEST(TensorArrayTest, ResizeDownDeletesElements) {
auto arr = MakeTensorArrayForTest({});
arr.Resize(2);
ASSERT_TRUE(arr.Set(1, MakeTensorWithData<int>({2}, {3, 4})));
arr.Resize(1);
EXPECT_EQ(arr.NumElements(), 1);
EXPECT_FALSE(arr.At(0));
}
TEST(TensorArrayTest, CopyListWithZeroLength) {
auto arr = MakeTensorArrayForTest({});
TensorArray arr2{arr};
EXPECT_EQ(arr.NumElements(), arr2.NumElements());
EXPECT_EQ(arr.NumElements(), 0);
}
TEST(TensorArrayTest, CopyAssignListWithZeroLength) {
auto arr = MakeTensorArrayForTest({});
arr = MakeTensorArrayForTest({2, 2});
EXPECT_EQ(arr.NumElements(), 0);
EXPECT_THAT(arr.ElementShape(), DimsAre({2, 2}));
}
TEST(TensorArrayTest, CopyEmptyList) {
auto arr = MakeTensorArrayForTest({});
arr.Resize(2);
TensorArray arr2{arr};
EXPECT_EQ(arr.NumElements(), arr2.NumElements());
EXPECT_EQ(arr.NumElements(), 2);
}
TEST(TensorArrayTest, CopyAssignToEmptyList) {
auto arr = MakeTensorArrayForTest({});
auto target_arr = MakeTensorArrayForTest({2, 2});
target_arr.Resize(2);
target_arr = arr;
EXPECT_EQ(target_arr.NumElements(), 0);
EXPECT_THAT(target_arr.ElementShape(), DimsAre({}));
}
TEST(TensorArrayTest, CopyListWithItem) {
std::optional<TensorArray> arr = TensorArray(kTfLiteInt32, {});
arr->Resize(1);
ASSERT_TRUE(arr->Set(0, MakeTensorWithData<int>({2}, {3, 4})));
TensorArray arr2{*arr};
EXPECT_EQ(arr->NumElements(), arr2.NumElements());
EXPECT_EQ(arr->At(0), arr2.At(0));
arr.reset();
EXPECT_THAT(arr2.At(0), DimsAre({2}));
}
TEST(TensorArrayTest, CopyAssignToListWithItem) {
auto target_arr = MakeTensorArrayForTest({});
target_arr.Resize(2);
ASSERT_TRUE(target_arr.Set(0, MakeTensorWithData<int>({2}, {3, 4})));
auto src_arr = MakeTensorArrayForTest({2, 2});
src_arr.Resize(1);
target_arr = src_arr;
EXPECT_EQ(target_arr.NumElements(), src_arr.NumElements());
EXPECT_EQ(target_arr.At(0), nullptr);
}
TEST(TensorArrayTest, CopyAssignFromListWithItem) {
auto target_arr = MakeTensorArrayForTest({2, 2});
target_arr.Resize(1);
auto src_arr = MakeTensorArrayForTest({});
src_arr.Resize(2);
ASSERT_TRUE(src_arr.Set(0, MakeTensorWithData<int>({2}, {3, 4})));
target_arr = src_arr;
EXPECT_EQ(target_arr.NumElements(), src_arr.NumElements());
EXPECT_EQ(src_arr.At(0), target_arr.At(0));
}
TEST(TensorArrayTest, DeleteEmptyTensorArray) {
TensorArray* arr = new TensorArray{kTfLiteInt32, {}};
delete arr;
}
TEST(TensorArrayTest, DeleteResizedEmptyTensorArray) {
TensorArray* arr = new TensorArray{kTfLiteInt32, {}};
arr->Resize(2);
delete arr;
}
TEST(OpaqueVariantTensorArrayDataTest, CastThroughVoidAndCopy) {
TensorArray* arr = new TensorArray{kTfLiteFloat32, {}};
arr->Resize(2);
ASSERT_TRUE(arr->Set(0, MakeTensorWithData<int>({2}, {3, 4})));
void* erased = static_cast<VariantData*>(arr);
VariantData* d = static_cast<VariantData*>(erased);
VariantData* copied_d = d->CloneTo(nullptr);
auto* copied_arr = static_cast<TensorArray*>(copied_d);
ASSERT_THAT(copied_arr->At(0), DimsAre({2}));
ASSERT_THAT(arr->At(0), DimsAre({2}));
ASSERT_EQ(arr->At(0), arr->At(0));
delete d;
delete copied_d;
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/variants/tensor_array.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/variants/tensor_array_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
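A brief, hedged usage sketch for the TensorArray record above (not part of the dataset row), modeled on the test helpers: build a dynamic int32 tensor, hand its ownership to slot 0 of the list, and read it back through a borrowed pointer. The function name is invented for illustration.
#include <utility>
#include <vector>
#include "tensorflow/lite/array.h"
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/kernels/variants/tensor_array.h"
#include "tensorflow/lite/util.h"
void TensorArraySketch() {
  // An empty element shape means the list does not constrain element shapes.
  tflite::variants::TensorArray list(
      kTfLiteInt32, tflite::BuildTfLiteArray(std::vector<int>{}));
  list.Resize(2);  // Two slots, both initially unset (At() returns nullptr).
  // Build a dynamically allocated 1-D tensor holding two int32 values.
  tflite::TensorUniquePtr t = tflite::BuildTfLiteTensor(
      kTfLiteInt32, std::vector<int>{2}, kTfLiteDynamic);
  t->data.i32[0] = 3;
  t->data.i32[1] = 4;
  const bool ok = list.Set(0, std::move(t));  // The list takes ownership.
  const TfLiteTensor* element = list.At(0);   // Borrowed; still owned by list.
  (void)ok;
  (void)element;
}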
f87db812-983d-4651-9b5e-a77f29660ffd | cpp | tensorflow/tensorflow | prefetched_split_provider | tensorflow/core/data/service/snapshot/prefetched_split_provider.cc | tensorflow/core/data/service/snapshot/prefetched_split_provider_test.cc | #include "tensorflow/core/data/service/snapshot/prefetched_split_provider.h"
#include <cstddef>
#include <cstdint>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include "absl/base/thread_annotations.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/synchronization/mutex.h"
#include "xla/tsl/lib/io/compression.h"
#include "tensorflow/core/data/service/snapshot/file_utils.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/tensor.h"
#include "tsl/platform/env.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/path.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/threadpool.h"
namespace tensorflow {
namespace data {
PrefetchedSplitProvider::PrefetchedSplitProvider(
std::unique_ptr<SplitProvider> split_provider, const std::string& directory,
tsl::Env* env, size_t num_write_threads, size_t buffer_size_per_thread)
: env_(env),
directory_(directory),
num_write_threads_(num_write_threads),
buffer_size_(num_write_threads_ * buffer_size_per_thread),
split_provider_(std::move(split_provider)) {
absl::Status status = InitDirs();
if (!status.ok()) {
UpdateStatus(std::move(status));
return;
}
absl::MutexLock l(&mu_);
thread_pool_ = RunPrefetchThreads();
}
PrefetchedSplitProvider::~PrefetchedSplitProvider() { Cancel(); }
absl::StatusOr<std::optional<Tensor>> PrefetchedSplitProvider::GetNext(
const std::string& split_path) ABSL_LOCKS_EXCLUDED(mu_) {
absl::MutexLock l(&mu_);
while (status_.ok() &&
(buffer_.empty() || buffer_.begin()->index != split_index_to_read_) &&
(finished_threads_ < num_write_threads_ || reset_)) {
ready_to_pop_.Wait(&mu_);
}
TF_RETURN_IF_ERROR(status_);
if (buffer_.empty()) {
return std::nullopt;
}
if (buffer_.begin()->index != split_index_to_read_) {
return absl::InternalError(absl::StrCat(
"Failed to get tf.data snapshot split. Expected split ",
split_index_to_read_, ", got split ", buffer_.begin()->index,
". This is likely a tf.data bug."));
}
auto it = buffer_.begin();
SplitAndIndex split = std::move(*it);
buffer_.erase(it);
TF_RETURN_IF_ERROR(env_->RenameFile(split.SplitPath(directory_), split_path));
++split_index_to_read_;
ready_to_push_.Signal();
return std::move(split.split);
}
std::unique_ptr<tsl::thread::ThreadPool>
PrefetchedSplitProvider::RunPrefetchThreads() {
auto thread_pool = std::make_unique<tsl::thread::ThreadPool>(
env_, tsl::ThreadOptions{}, "tf_data_prefetch_splits_thread",
num_write_threads_);
for (size_t i = 0; i < num_write_threads_; ++i) {
thread_pool->Schedule([this]() { PrefetchLoop(); });
}
return thread_pool;
}
void PrefetchedSplitProvider::PrefetchLoop() ABSL_LOCKS_EXCLUDED(mu_) {
while (ShouldPrefetchSplit()) {
absl::StatusOr<bool> has_next = PrefetchSplit();
if (!has_next.status().ok()) {
UpdateStatus(has_next.status());
break;
}
if (!*has_next) {
break;
}
}
absl::MutexLock l(&mu_);
if (++finished_threads_ >= num_write_threads_) {
ready_to_pop_.SignalAll();
}
}
bool PrefetchedSplitProvider::ShouldPrefetchSplit() const
ABSL_LOCKS_EXCLUDED(mu_) {
absl::MutexLock l(&mu_);
return status_.ok() && !reset_;
}
absl::StatusOr<bool> PrefetchedSplitProvider::PrefetchSplit()
ABSL_LOCKS_EXCLUDED(mu_) {
TF_ASSIGN_OR_RETURN(std::optional<SplitAndIndex> split,
GetSplitFromProvider());
if (!split.has_value()) {
return false;
}
TF_RETURN_IF_ERROR(
AtomicallyWriteTFRecords(split->SplitPath(directory_), {split->split},
tsl::io::compression::kNone, env_));
absl::MutexLock l(&mu_);
buffer_.insert(std::move(*split));
ready_to_pop_.Signal();
return true;
}
absl::StatusOr<std::optional<PrefetchedSplitProvider::SplitAndIndex>>
PrefetchedSplitProvider::GetSplitFromProvider() ABSL_LOCKS_EXCLUDED(mu_) {
absl::MutexLock l(&mu_);
while (status_.ok() && buffer_.size() >= buffer_size_ && !reset_) {
ready_to_push_.Wait(&mu_);
}
TF_RETURN_IF_ERROR(status_);
if (reset_) {
return std::nullopt;
}
Tensor split;
bool end_of_splits = false;
TF_RETURN_IF_ERROR(split_provider_->GetNext(&split, &end_of_splits));
if (end_of_splits) {
return std::nullopt;
}
return SplitAndIndex{split, split_index_to_write_++};
}
absl::Status PrefetchedSplitProvider::Reset() ABSL_LOCKS_EXCLUDED(mu_) {
std::unique_ptr<tsl::thread::ThreadPool> thread_pool;
{
absl::MutexLock l(&mu_);
reset_ = true;
ready_to_push_.SignalAll();
ready_to_pop_.SignalAll();
thread_pool = std::move(thread_pool_);
}
thread_pool.reset();
TF_RETURN_IF_ERROR(split_provider_->Reset());
absl::MutexLock l(&mu_);
TF_RETURN_IF_ERROR(status_);
reset_ = false;
split_index_to_read_ = 0;
split_index_to_write_ = 0;
finished_threads_ = 0;
buffer_.clear();
TF_RETURN_IF_ERROR(InitDirs());
thread_pool_ = RunPrefetchThreads();
return absl::OkStatus();
}
void PrefetchedSplitProvider::Cancel() {
UpdateStatus(
absl::CancelledError("tf.data prefetched split provider is shut down."));
std::unique_ptr<tsl::thread::ThreadPool> thread_pool;
{
absl::MutexLock l(&mu_);
thread_pool = std::move(thread_pool_);
}
}
absl::Status PrefetchedSplitProvider::InitDirs() {
if (env_->FileExists(directory_).ok()) {
int64_t undeleted_files, undeleted_dirs;
TF_RETURN_IF_ERROR(
env_->DeleteRecursively(directory_, &undeleted_files, &undeleted_dirs));
}
return env_->RecursivelyCreateDir(directory_);
}
void PrefetchedSplitProvider::UpdateStatus(absl::Status status)
ABSL_LOCKS_EXCLUDED(mu_) {
if (status.ok()) {
return;
}
absl::MutexLock l(&mu_);
status_.Update(std::move(status));
ready_to_push_.SignalAll();
ready_to_pop_.SignalAll();
}
}
} | #include "tensorflow/core/data/service/snapshot/prefetched_split_provider.h"
#include <cstddef>
#include <cstdint>
#include <iterator>
#include <memory>
#include <numeric>
#include <optional>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/memory/memory.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/synchronization/mutex.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/tsl/lib/io/compression.h"
#include "tensorflow/core/data/service/common.pb.h"
#include "tensorflow/core/data/service/split_provider.h"
#include "tensorflow/core/data/service/test_util.h"
#include "tensorflow/core/data/snapshot_utils.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/tensor.h"
#include "tsl/platform/env.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/path.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace tensorflow {
namespace data {
namespace {
using ::testing::ElementsAreArray;
using ::testing::IsSupersetOf;
using ::testing::UnorderedElementsAreArray;
using ::tsl::testing::IsOkAndHolds;
using ::tsl::testing::StatusIs;
absl::StatusOr<std::vector<std::string>> TestDirs(size_t num_dirs) {
std::vector<std::string> test_dirs;
std::string base_dir;
if (!tsl::Env::Default()->LocalTempFilename(&base_dir)) {
return absl::FailedPreconditionError("Failed to create local temp file.");
}
TF_RETURN_IF_ERROR(tsl::Env::Default()->RecursivelyCreateDir(base_dir));
for (size_t i = 0; i < num_dirs; ++i) {
std::string test_dir =
tsl::io::JoinPath(base_dir, absl::StrCat("test_dir_", i));
TF_RETURN_IF_ERROR(tsl::Env::Default()->RecursivelyCreateDir(test_dir));
test_dirs.push_back(std::move(test_dir));
}
return test_dirs;
}
absl::StatusOr<std::unique_ptr<SplitProvider>> RangeSplitProvider(
int64_t range) {
DatasetDef range_dataset = testing::RangeDataset(range);
std::vector<std::unique_ptr<SplitProvider>> split_providers;
TF_RETURN_IF_ERROR(CreateSplitProviders(range_dataset, split_providers));
if (split_providers.size() != 1) {
return absl::InternalError(
absl::StrCat("Range dataset should have one split provider, got ",
split_providers.size(), "."));
}
return std::move(split_providers[0]);
}
template <class T>
T GetValue(const Tensor& tensor) {
return tensor.unaligned_flat<T>().data()[0];
}
template <class T>
absl::StatusOr<T> GetValueFromFile(const std::string& filename) {
snapshot_util::TFRecordReaderImpl reader(filename,
tsl::io::compression::kNone);
TF_RETURN_IF_ERROR(reader.Initialize(tsl::Env::Default()));
TF_ASSIGN_OR_RETURN(std::vector<Tensor> tensors, reader.GetTensors());
if (tensors.size() != 1) {
return absl::InternalError(absl::StrCat(
"A snapshot split file is expected to contain 1 tensor. Got ",
tensors.size(), " tensors from ", filename, "."));
}
return GetValue<T>(tensors[0]);
}
template <class T>
absl::StatusOr<std::vector<T>> GetSplits(
PrefetchedSplitProvider& prefetched_split_provider,
const std::string& test_dir) {
std::vector<T> splits;
for (size_t i = 0;; ++i) {
std::string target_split_path =
tsl::io::JoinPath(test_dir, absl::StrCat("split_", i));
TF_ASSIGN_OR_RETURN(std::optional<Tensor> split,
prefetched_split_provider.GetNext(target_split_path));
if (!split.has_value()) {
return splits;
}
T split_value = GetValue<T>(*split);
TF_ASSIGN_OR_RETURN(T split_from_file,
GetValueFromFile<T>(target_split_path));
if (split_value != split_from_file) {
return absl::InternalError(
absl::StrCat("Inconsistent splits. From buffer: ", split_value,
", from file: ", split_from_file, "."));
}
splits.push_back(split_value);
}
return splits;
}
std::vector<int64_t> Range(int64_t range) {
std::vector<int64_t> result(range);
std::iota(result.begin(), result.end(), 0);
return result;
}
class PrefetchedSplitProviderParamTest
: public ::testing::TestWithParam<
std::tuple<int64_t, size_t, size_t, size_t>> {
protected:
int64_t NumElements() const { return std::get<0>(GetParam()); }
size_t NumClients() const { return std::get<1>(GetParam()); }
size_t NumWriteThreads() const { return std::get<2>(GetParam()); }
size_t BufferSizePerThread() const { return std::get<3>(GetParam()); }
};
TEST_P(PrefetchedSplitProviderParamTest, GetSplits) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<SplitProvider> split_provider,
RangeSplitProvider(NumElements()));
TF_ASSERT_OK_AND_ASSIGN(std::vector<std::string> test_dirs,
TestDirs(2));
PrefetchedSplitProvider prefetched_split_provider(
std::move(split_provider), test_dirs[0], tsl::Env::Default(),
NumWriteThreads(), BufferSizePerThread());
EXPECT_THAT(GetSplits<int64_t>(prefetched_split_provider, test_dirs[1]),
IsOkAndHolds(ElementsAreArray(Range(NumElements()))));
}
TEST_P(PrefetchedSplitProviderParamTest, ConcurrentGetSplits) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<SplitProvider> split_provider,
RangeSplitProvider(NumElements()));
TF_ASSERT_OK_AND_ASSIGN(std::vector<std::string> test_dirs,
TestDirs(1 + NumClients()));
PrefetchedSplitProvider prefetched_split_provider(
std::move(split_provider), test_dirs[0], tsl::Env::Default(),
NumWriteThreads(), BufferSizePerThread());
absl::Mutex mu;
std::vector<int64_t> splits;
std::vector<std::unique_ptr<tsl::Thread>> client_threads;
for (int i = 0; i < NumClients(); ++i) {
client_threads.push_back(absl::WrapUnique(tsl::Env::Default()->StartThread(
{}, absl::StrCat("Client_", i),
[i, &prefetched_split_provider, &splits, &test_dirs, &mu]() {
TF_ASSERT_OK_AND_ASSIGN(
std::vector<int64_t> splits_per_thread,
GetSplits<int64_t>(prefetched_split_provider, test_dirs[1 + i]));
EXPECT_TRUE(absl::c_is_sorted(splits_per_thread));
absl::MutexLock l(&mu);
absl::c_move(splits_per_thread, std::back_inserter(splits));
})));
}
client_threads.clear();
EXPECT_THAT(splits, UnorderedElementsAreArray(Range(NumElements())));
}
TEST_P(PrefetchedSplitProviderParamTest, Reset) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<SplitProvider> split_provider,
RangeSplitProvider(NumElements()));
TF_ASSERT_OK_AND_ASSIGN(std::vector<std::string> test_dirs,
TestDirs(2));
PrefetchedSplitProvider prefetched_split_provider(
std::move(split_provider), test_dirs[0], tsl::Env::Default(),
NumWriteThreads(), BufferSizePerThread());
for (int i = 0; i < 3; ++i) {
EXPECT_THAT(GetSplits<int64_t>(prefetched_split_provider, test_dirs[1]),
IsOkAndHolds(ElementsAreArray(Range(NumElements()))));
TF_EXPECT_OK(prefetched_split_provider.Reset());
}
}
TEST_P(PrefetchedSplitProviderParamTest, ConcurrentGetSplitsAndReset) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<SplitProvider> split_provider,
RangeSplitProvider(NumElements()));
TF_ASSERT_OK_AND_ASSIGN(std::vector<std::string> test_dirs,
TestDirs(1 + NumClients()));
PrefetchedSplitProvider prefetched_split_provider(
std::move(split_provider), test_dirs[0], tsl::Env::Default(),
NumWriteThreads(), BufferSizePerThread());
absl::Mutex mu;
std::vector<int64_t> splits;
std::vector<std::unique_ptr<tsl::Thread>> client_threads;
for (int i = 0; i < NumClients(); ++i) {
client_threads.push_back(absl::WrapUnique(tsl::Env::Default()->StartThread(
{}, absl::StrCat("Client_", i),
[i, &prefetched_split_provider, &splits, &test_dirs, &mu]() {
TF_ASSERT_OK_AND_ASSIGN(
std::vector<int64_t> splits_per_thread,
GetSplits<int64_t>(prefetched_split_provider, test_dirs[1 + i]));
absl::MutexLock l(&mu);
absl::c_move(splits_per_thread, std::back_inserter(splits));
})));
}
TF_EXPECT_OK(prefetched_split_provider.Reset());
client_threads.clear();
EXPECT_THAT(splits, IsSupersetOf(Range(NumElements())));
}
INSTANTIATE_TEST_SUITE_P(
PrefetchedSplitProviderParams, PrefetchedSplitProviderParamTest,
::testing::Combine(
::testing::Values(0, 10, 1000),
::testing::Values(1, 5),
::testing::Values(1, 10),
::testing::Values(1, 10000)));
TEST(PrefetchedSplitProviderTest, Cancellation) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<SplitProvider> split_provider,
RangeSplitProvider(999999));
TF_ASSERT_OK_AND_ASSIGN(std::vector<std::string> test_dirs,
TestDirs(2));
PrefetchedSplitProvider prefetched_split_provider(
std::move(split_provider), test_dirs[0], tsl::Env::Default(),
2, 1);
std::unique_ptr<tsl::Thread> client_thread =
absl::WrapUnique(tsl::Env::Default()->StartThread(
{}, "client_thread",
[&prefetched_split_provider, &test_dirs]() {
EXPECT_THAT(
GetSplits<int64_t>(prefetched_split_provider, test_dirs[1]),
StatusIs(absl::StatusCode::kCancelled));
}));
prefetched_split_provider.Cancel();
client_thread.reset();
}
TEST(PrefetchedSplitProviderTest, ShutdownWithUnreadSplits) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<SplitProvider> split_provider,
RangeSplitProvider(100));
TF_ASSERT_OK_AND_ASSIGN(std::vector<std::string> test_dirs,
TestDirs(2));
PrefetchedSplitProvider prefetched_split_provider(
std::move(split_provider), test_dirs[0], tsl::Env::Default());
TF_EXPECT_OK(prefetched_split_provider.Reset());
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/data/service/snapshot/prefetched_split_provider.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/data/service/snapshot/prefetched_split_provider_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
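A hedged consumption-loop sketch for the PrefetchedSplitProvider record above (not part of the dataset row). It assumes `split_provider` was created elsewhere (for example via CreateSplitProviders, as the test does) and that `snapshot_dir` and `splits_dir` are writable directories; the function and parameter names are invented for illustration.
#include <cstddef>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "tensorflow/core/data/service/snapshot/prefetched_split_provider.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/tensor.h"
#include "tsl/platform/env.h"
#include "tsl/platform/path.h"
#include "tsl/platform/statusor.h"
absl::Status ConsumeSplitsSketch(
    std::unique_ptr<tensorflow::data::SplitProvider> split_provider,
    const std::string& snapshot_dir, const std::string& splits_dir) {
  // The provider prefetches splits into `snapshot_dir` using its default
  // thread count and per-thread buffer size.
  tensorflow::data::PrefetchedSplitProvider prefetched(
      std::move(split_provider), snapshot_dir, tsl::Env::Default());
  for (size_t i = 0;; ++i) {
    // Each split is returned as a Tensor and also moved to `split_path`
    // as a TFRecord file.
    const std::string split_path =
        tsl::io::JoinPath(splits_dir, absl::StrCat("split_", i));
    TF_ASSIGN_OR_RETURN(std::optional<tensorflow::Tensor> split,
                        prefetched.GetNext(split_path));
    if (!split.has_value()) break;  // The underlying provider is exhausted.
  }
  return absl::OkStatus();
}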
67a73e33-84d3-4cb9-abee-05f1219dfecb | cpp | abseil/abseil-cpp | cordz_update_tracker | absl/strings/internal/cordz_update_tracker.h | absl/strings/internal/cordz_update_tracker_test.cc | #ifndef ABSL_STRINGS_INTERNAL_CORDZ_UPDATE_TRACKER_H_
#define ABSL_STRINGS_INTERNAL_CORDZ_UPDATE_TRACKER_H_
#include <atomic>
#include <cstdint>
#include "absl/base/config.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace cord_internal {
class CordzUpdateTracker {
public:
enum MethodIdentifier {
kUnknown,
kAppendCord,
kAppendCordBuffer,
kAppendExternalMemory,
kAppendString,
kAssignCord,
kAssignString,
kClear,
kConstructorCord,
kConstructorString,
kCordReader,
kFlatten,
kGetAppendBuffer,
kGetAppendRegion,
kMakeCordFromExternal,
kMoveAppendCord,
kMoveAssignCord,
kMovePrependCord,
kPrependCord,
kPrependCordBuffer,
kPrependString,
kRemovePrefix,
kRemoveSuffix,
kSetExpectedChecksum,
kSubCord,
kNumMethods,
};
constexpr CordzUpdateTracker() noexcept : values_{} {}
CordzUpdateTracker(const CordzUpdateTracker& rhs) noexcept { *this = rhs; }
CordzUpdateTracker& operator=(const CordzUpdateTracker& rhs) noexcept {
for (int i = 0; i < kNumMethods; ++i) {
values_[i].store(rhs.values_[i].load(std::memory_order_relaxed),
std::memory_order_relaxed);
}
return *this;
}
int64_t Value(MethodIdentifier method) const {
return values_[method].load(std::memory_order_relaxed);
}
void LossyAdd(MethodIdentifier method, int64_t n = 1) {
auto& value = values_[method];
value.store(value.load(std::memory_order_relaxed) + n,
std::memory_order_relaxed);
}
void LossyAdd(const CordzUpdateTracker& src) {
for (int i = 0; i < kNumMethods; ++i) {
MethodIdentifier method = static_cast<MethodIdentifier>(i);
if (int64_t value = src.Value(method)) {
LossyAdd(method, value);
}
}
}
private:
class Counter : public std::atomic<int64_t> {
public:
constexpr Counter() noexcept : std::atomic<int64_t>(0) {}
};
Counter values_[kNumMethods];
};
}
ABSL_NAMESPACE_END
}
#endif | #include "absl/strings/internal/cordz_update_tracker.h"
#include <array>
#include <thread>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/base/attributes.h"
#include "absl/base/config.h"
#include "absl/synchronization/notification.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace cord_internal {
namespace {
using ::testing::AnyOf;
using ::testing::Eq;
using Method = CordzUpdateTracker::MethodIdentifier;
using Methods = std::array<Method, Method::kNumMethods>;
Methods AllMethods() {
return Methods{Method::kUnknown,
Method::kAppendCord,
Method::kAppendCordBuffer,
Method::kAppendExternalMemory,
Method::kAppendString,
Method::kAssignCord,
Method::kAssignString,
Method::kClear,
Method::kConstructorCord,
Method::kConstructorString,
Method::kCordReader,
Method::kFlatten,
Method::kGetAppendBuffer,
Method::kGetAppendRegion,
Method::kMakeCordFromExternal,
Method::kMoveAppendCord,
Method::kMoveAssignCord,
Method::kMovePrependCord,
Method::kPrependCord,
Method::kPrependCordBuffer,
Method::kPrependString,
Method::kRemovePrefix,
Method::kRemoveSuffix,
Method::kSetExpectedChecksum,
Method::kSubCord};
}
TEST(CordzUpdateTracker, IsConstExprAndInitializesToZero) {
constexpr CordzUpdateTracker tracker;
for (Method method : AllMethods()) {
ASSERT_THAT(tracker.Value(method), Eq(0));
}
}
TEST(CordzUpdateTracker, LossyAdd) {
int64_t n = 1;
CordzUpdateTracker tracker;
for (Method method : AllMethods()) {
tracker.LossyAdd(method, n);
EXPECT_THAT(tracker.Value(method), Eq(n));
n += 2;
}
}
TEST(CordzUpdateTracker, CopyConstructor) {
int64_t n = 1;
CordzUpdateTracker src;
for (Method method : AllMethods()) {
src.LossyAdd(method, n);
n += 2;
}
n = 1;
CordzUpdateTracker tracker(src);
for (Method method : AllMethods()) {
EXPECT_THAT(tracker.Value(method), Eq(n));
n += 2;
}
}
TEST(CordzUpdateTracker, OperatorAssign) {
int64_t n = 1;
CordzUpdateTracker src;
CordzUpdateTracker tracker;
for (Method method : AllMethods()) {
src.LossyAdd(method, n);
n += 2;
}
n = 1;
tracker = src;
for (Method method : AllMethods()) {
EXPECT_THAT(tracker.Value(method), Eq(n));
n += 2;
}
}
TEST(CordzUpdateTracker, ThreadSanitizedValueCheck) {
absl::Notification done;
CordzUpdateTracker tracker;
std::thread reader([&done, &tracker] {
while (!done.HasBeenNotified()) {
int n = 1;
for (Method method : AllMethods()) {
EXPECT_THAT(tracker.Value(method), AnyOf(Eq(n), Eq(0)));
n += 2;
}
}
int n = 1;
for (Method method : AllMethods()) {
EXPECT_THAT(tracker.Value(method), Eq(n));
n += 2;
}
});
int64_t n = 1;
for (Method method : AllMethods()) {
tracker.LossyAdd(method, n);
n += 2;
}
done.Notify();
reader.join();
}
}
}
ABSL_NAMESPACE_END
} | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/strings/internal/cordz_update_tracker.h | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/strings/internal/cordz_update_tracker_test.cc | 03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4 |
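A short usage sketch for the CordzUpdateTracker record above (not part of the dataset row); the function name is invented. Counters are bumped with LossyAdd from Cord mutation paths and read back with Value; updates are deliberately lossy (plain relaxed load/store rather than read-modify-write), which is what the ThreadSanitizedValueCheck test exercises.
#include <cstdint>
#include "absl/strings/internal/cordz_update_tracker.h"
int64_t TrackerSketch() {
  using Tracker = absl::cord_internal::CordzUpdateTracker;
  Tracker tracker;
  tracker.LossyAdd(Tracker::kAppendCord);    // One Append(Cord) call.
  tracker.LossyAdd(Tracker::kSubCord, 3);    // Three SubCord calls.
  Tracker snapshot(tracker);                 // Copy preserves all counters.
  return snapshot.Value(Tracker::kSubCord);  // == 3
}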
b8f1a831-4754-45ce-a70d-ceae8ef287fa | cpp | tensorflow/tensorflow | summary_file_writer | tensorflow/core/summary/summary_file_writer.cc | tensorflow/core/summary/summary_file_writer_test.cc | #include "tensorflow/core/summary/summary_file_writer.h"
#include <memory>
#include "absl/strings/match.h"
#include "absl/strings/str_cat.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/resource_mgr.h"
#include "tensorflow/core/framework/summary.pb.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/lib/io/path.h"
#include "tensorflow/core/summary/summary_converter.h"
#include "tensorflow/core/util/events_writer.h"
namespace tensorflow {
namespace {
class SummaryFileWriter : public SummaryWriterInterface {
public:
SummaryFileWriter(int max_queue, int flush_millis, Env* env)
: SummaryWriterInterface(),
is_initialized_(false),
max_queue_(max_queue),
flush_millis_(flush_millis),
env_(env) {}
Status Initialize(const string& logdir, const string& filename_suffix) {
const Status is_dir = env_->IsDirectory(logdir);
if (!is_dir.ok()) {
if (is_dir.code() != tensorflow::error::NOT_FOUND) {
return is_dir;
}
TF_RETURN_IF_ERROR(env_->RecursivelyCreateDir(logdir));
}
int32_t pid = env_->GetProcessId();
static std::atomic<int64_t> file_id_counter(0);
string sep = absl::StartsWith(filename_suffix, ".") ? "" : ".";
const string uniquified_filename_suffix = absl::StrCat(
".", pid, ".", file_id_counter.fetch_add(1), sep, filename_suffix);
mutex_lock ml(mu_);
events_writer_ =
std::make_unique<EventsWriter>(io::JoinPath(logdir, "events"));
TF_RETURN_WITH_CONTEXT_IF_ERROR(
events_writer_->InitWithSuffix(uniquified_filename_suffix),
"Could not initialize events writer.");
last_flush_ = env_->NowMicros();
is_initialized_ = true;
return absl::OkStatus();
}
Status Flush() override {
mutex_lock ml(mu_);
if (!is_initialized_) {
return errors::FailedPrecondition("Class was not properly initialized.");
}
return InternalFlush();
}
~SummaryFileWriter() override {
(void)Flush();
}
Status WriteTensor(int64_t global_step, Tensor t, const string& tag,
const string& serialized_metadata) override {
std::unique_ptr<Event> e{new Event};
e->set_step(global_step);
e->set_wall_time(GetWallTime());
Summary::Value* v = e->mutable_summary()->add_value();
if (t.dtype() == DT_STRING) {
t.AsProtoField(v->mutable_tensor());
} else {
t.AsProtoTensorContent(v->mutable_tensor());
}
v->set_tag(tag);
if (!serialized_metadata.empty()) {
v->mutable_metadata()->ParseFromString(serialized_metadata);
}
return WriteEvent(std::move(e));
}
Status WriteScalar(int64_t global_step, Tensor t,
const string& tag) override {
std::unique_ptr<Event> e{new Event};
e->set_step(global_step);
e->set_wall_time(GetWallTime());
TF_RETURN_IF_ERROR(
AddTensorAsScalarToSummary(t, tag, e->mutable_summary()));
return WriteEvent(std::move(e));
}
Status WriteHistogram(int64_t global_step, Tensor t,
const string& tag) override {
std::unique_ptr<Event> e{new Event};
e->set_step(global_step);
e->set_wall_time(GetWallTime());
TF_RETURN_IF_ERROR(
AddTensorAsHistogramToSummary(t, tag, e->mutable_summary()));
return WriteEvent(std::move(e));
}
Status WriteImage(int64_t global_step, Tensor t, const string& tag,
int max_images, Tensor bad_color) override {
std::unique_ptr<Event> e{new Event};
e->set_step(global_step);
e->set_wall_time(GetWallTime());
TF_RETURN_IF_ERROR(AddTensorAsImageToSummary(t, tag, max_images, bad_color,
e->mutable_summary()));
return WriteEvent(std::move(e));
}
Status WriteAudio(int64_t global_step, Tensor t, const string& tag,
int max_outputs, float sample_rate) override {
std::unique_ptr<Event> e{new Event};
e->set_step(global_step);
e->set_wall_time(GetWallTime());
TF_RETURN_IF_ERROR(AddTensorAsAudioToSummary(
t, tag, max_outputs, sample_rate, e->mutable_summary()));
return WriteEvent(std::move(e));
}
Status WriteGraph(int64_t global_step,
std::unique_ptr<GraphDef> graph) override {
std::unique_ptr<Event> e{new Event};
e->set_step(global_step);
e->set_wall_time(GetWallTime());
graph->SerializeToString(e->mutable_graph_def());
return WriteEvent(std::move(e));
}
Status WriteEvent(std::unique_ptr<Event> event) override {
mutex_lock ml(mu_);
queue_.emplace_back(std::move(event));
if (queue_.size() > max_queue_ ||
env_->NowMicros() - last_flush_ > 1000 * flush_millis_) {
return InternalFlush();
}
return absl::OkStatus();
}
string DebugString() const override { return "SummaryFileWriter"; }
private:
double GetWallTime() {
return static_cast<double>(env_->NowMicros()) / 1.0e6;
}
Status InternalFlush() TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
for (const std::unique_ptr<Event>& e : queue_) {
events_writer_->WriteEvent(*e);
}
queue_.clear();
TF_RETURN_WITH_CONTEXT_IF_ERROR(events_writer_->Flush(),
"Could not flush events file.");
last_flush_ = env_->NowMicros();
return absl::OkStatus();
}
bool is_initialized_;
const int max_queue_;
const int flush_millis_;
uint64 last_flush_;
Env* env_;
mutex mu_;
std::vector<std::unique_ptr<Event>> queue_ TF_GUARDED_BY(mu_);
std::unique_ptr<EventsWriter> events_writer_ TF_GUARDED_BY(mu_);
std::vector<std::pair<string, SummaryMetadata>> registered_summaries_
TF_GUARDED_BY(mu_);
};
}
Status CreateSummaryFileWriter(int max_queue, int flush_millis,
const string& logdir,
const string& filename_suffix, Env* env,
SummaryWriterInterface** result) {
SummaryFileWriter* w = new SummaryFileWriter(max_queue, flush_millis, env);
const Status s = w->Initialize(logdir, filename_suffix);
if (!s.ok()) {
w->Unref();
*result = nullptr;
return s;
}
*result = w;
return absl::OkStatus();
}
} | #include "tensorflow/core/summary/summary_file_writer.h"
#include "tensorflow/core/framework/summary.pb.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/refcount.h"
#include "tensorflow/core/lib/io/path.h"
#include "tensorflow/core/lib/io/record_reader.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/util/event.pb.h"
namespace tensorflow {
namespace {
class FakeClockEnv : public EnvWrapper {
public:
FakeClockEnv() : EnvWrapper(Env::Default()), current_millis_(0) {}
void AdvanceByMillis(const uint64 millis) { current_millis_ += millis; }
uint64 NowMicros() const override { return current_millis_ * 1000; }
uint64 NowSeconds() const override { return current_millis_ * 1000; }
private:
uint64 current_millis_;
};
class SummaryFileWriterTest : public ::testing::Test {
protected:
Status SummaryTestHelper(
const string& test_name,
const std::function<Status(SummaryWriterInterface*)>& writer_fn,
const std::function<void(const Event&)>& test_fn) {
static std::set<string>* tests = new std::set<string>();
CHECK(tests->insert(test_name).second) << ": " << test_name;
SummaryWriterInterface* writer;
TF_CHECK_OK(CreateSummaryFileWriter(1, 1, testing::TmpDir(), test_name,
&env_, &writer));
core::ScopedUnref deleter(writer);
TF_CHECK_OK(writer_fn(writer));
TF_CHECK_OK(writer->Flush());
std::vector<string> files;
TF_CHECK_OK(env_.GetChildren(testing::TmpDir(), &files));
bool found = false;
for (const string& f : files) {
if (absl::StrContains(f, test_name)) {
if (found) {
return errors::Unknown("Found more than one file for ", test_name);
}
found = true;
std::unique_ptr<RandomAccessFile> read_file;
TF_CHECK_OK(env_.NewRandomAccessFile(io::JoinPath(testing::TmpDir(), f),
&read_file));
io::RecordReader reader(read_file.get(), io::RecordReaderOptions());
tstring record;
uint64 offset = 0;
TF_CHECK_OK(
reader.ReadRecord(&offset,
&record));
TF_CHECK_OK(reader.ReadRecord(&offset, &record));
Event e;
e.ParseFromString(record);
test_fn(e);
}
}
if (!found) {
return errors::Unknown("Found no file for ", test_name);
}
return absl::OkStatus();
}
FakeClockEnv env_;
};
TEST_F(SummaryFileWriterTest, WriteTensor) {
TF_CHECK_OK(SummaryTestHelper("tensor_test",
[](SummaryWriterInterface* writer) {
Tensor one(DT_FLOAT, TensorShape({}));
one.scalar<float>()() = 1.0;
TF_RETURN_IF_ERROR(writer->WriteTensor(
2, one, "name",
SummaryMetadata().SerializeAsString()));
TF_RETURN_IF_ERROR(writer->Flush());
return absl::OkStatus();
},
[](const Event& e) {
EXPECT_EQ(e.step(), 2);
CHECK_EQ(e.summary().value_size(), 1);
EXPECT_EQ(e.summary().value(0).tag(), "name");
}));
TF_CHECK_OK(SummaryTestHelper(
"string_tensor_test",
[](SummaryWriterInterface* writer) {
Tensor hello(DT_STRING, TensorShape({}));
hello.scalar<tstring>()() = "hello";
TF_RETURN_IF_ERROR(writer->WriteTensor(
2, hello, "name", SummaryMetadata().SerializeAsString()));
TF_RETURN_IF_ERROR(writer->Flush());
return absl::OkStatus();
},
[](const Event& e) {
EXPECT_EQ(e.step(), 2);
CHECK_EQ(e.summary().value_size(), 1);
EXPECT_EQ(e.summary().value(0).tag(), "name");
EXPECT_EQ(e.summary().value(0).tensor().dtype(), DT_STRING);
EXPECT_EQ(e.summary().value(0).tensor().string_val()[0], "hello");
}));
}
TEST_F(SummaryFileWriterTest, WriteScalar) {
TF_CHECK_OK(SummaryTestHelper(
"scalar_test",
[](SummaryWriterInterface* writer) {
Tensor one(DT_FLOAT, TensorShape({}));
one.scalar<float>()() = 1.0;
TF_RETURN_IF_ERROR(writer->WriteScalar(2, one, "name"));
TF_RETURN_IF_ERROR(writer->Flush());
return absl::OkStatus();
},
[](const Event& e) {
EXPECT_EQ(e.step(), 2);
CHECK_EQ(e.summary().value_size(), 1);
EXPECT_EQ(e.summary().value(0).tag(), "name");
EXPECT_EQ(e.summary().value(0).simple_value(), 1.0);
}));
}
TEST_F(SummaryFileWriterTest, WriteHistogram) {
TF_CHECK_OK(SummaryTestHelper("hist_test",
[](SummaryWriterInterface* writer) {
Tensor one(DT_FLOAT, TensorShape({}));
one.scalar<float>()() = 1.0;
TF_RETURN_IF_ERROR(
writer->WriteHistogram(2, one, "name"));
TF_RETURN_IF_ERROR(writer->Flush());
return absl::OkStatus();
},
[](const Event& e) {
EXPECT_EQ(e.step(), 2);
CHECK_EQ(e.summary().value_size(), 1);
EXPECT_EQ(e.summary().value(0).tag(), "name");
EXPECT_TRUE(e.summary().value(0).has_histo());
}));
}
namespace {
template <typename T>
static Status CreateImage(SummaryWriterInterface* writer) {
Tensor bad_color(DT_UINT8, TensorShape({1}));
bad_color.scalar<uint8>()() = 0;
Tensor one(DataTypeToEnum<T>::v(), TensorShape({1, 1, 1, 1}));
one.scalar<T>()() = T(1);
TF_RETURN_IF_ERROR(writer->WriteImage(2, one, "name", 1, bad_color));
TF_RETURN_IF_ERROR(writer->Flush());
return absl::OkStatus();
}
static void CheckImage(const Event& e) {
EXPECT_EQ(e.step(), 2);
CHECK_EQ(e.summary().value_size(), 1);
EXPECT_EQ(e.summary().value(0).tag(), "name/image");
CHECK(e.summary().value(0).has_image());
EXPECT_EQ(e.summary().value(0).image().height(), 1);
EXPECT_EQ(e.summary().value(0).image().width(), 1);
EXPECT_EQ(e.summary().value(0).image().colorspace(), 1);
}
}
TEST_F(SummaryFileWriterTest, WriteImageUInt8) {
TF_CHECK_OK(
SummaryTestHelper("image_test_uint8", CreateImage<uint8>, CheckImage));
}
TEST_F(SummaryFileWriterTest, WriteImageFloat) {
TF_CHECK_OK(
SummaryTestHelper("image_test_float", CreateImage<float>, CheckImage));
}
TEST_F(SummaryFileWriterTest, WriteImageHalf) {
TF_CHECK_OK(SummaryTestHelper("image_test_half", CreateImage<Eigen::half>,
CheckImage));
}
TEST_F(SummaryFileWriterTest, WriteImageDouble) {
TF_CHECK_OK(
SummaryTestHelper("image_test_double", CreateImage<double>, CheckImage));
}
TEST_F(SummaryFileWriterTest, WriteAudio) {
TF_CHECK_OK(SummaryTestHelper(
"audio_test",
[](SummaryWriterInterface* writer) {
Tensor one(DT_FLOAT, TensorShape({1, 1}));
one.scalar<float>()() = 1.0;
TF_RETURN_IF_ERROR(writer->WriteAudio(2, one, "name", 1, 1));
TF_RETURN_IF_ERROR(writer->Flush());
return absl::OkStatus();
},
[](const Event& e) {
EXPECT_EQ(e.step(), 2);
CHECK_EQ(e.summary().value_size(), 1);
EXPECT_EQ(e.summary().value(0).tag(), "name/audio");
CHECK(e.summary().value(0).has_audio());
}));
}
TEST_F(SummaryFileWriterTest, WriteEvent) {
TF_CHECK_OK(
SummaryTestHelper("event_test",
[](SummaryWriterInterface* writer) {
std::unique_ptr<Event> e{new Event};
e->set_step(7);
e->mutable_summary()->add_value()->set_tag("hi");
TF_RETURN_IF_ERROR(writer->WriteEvent(std::move(e)));
TF_RETURN_IF_ERROR(writer->Flush());
return absl::OkStatus();
},
[](const Event& e) {
EXPECT_EQ(e.step(), 7);
CHECK_EQ(e.summary().value_size(), 1);
EXPECT_EQ(e.summary().value(0).tag(), "hi");
}));
}
TEST_F(SummaryFileWriterTest, WallTime) {
env_.AdvanceByMillis(7023);
TF_CHECK_OK(SummaryTestHelper(
"wall_time_test",
[](SummaryWriterInterface* writer) {
Tensor one(DT_FLOAT, TensorShape({}));
one.scalar<float>()() = 1.0;
TF_RETURN_IF_ERROR(writer->WriteScalar(2, one, "name"));
TF_RETURN_IF_ERROR(writer->Flush());
return absl::OkStatus();
},
[](const Event& e) { EXPECT_EQ(e.wall_time(), 7.023); }));
}
TEST_F(SummaryFileWriterTest, AvoidFilenameCollision) {
string test_name = "avoid_filename_collision_test";
int num_files = 10;
for (int i = 0; i < num_files; i++) {
SummaryWriterInterface* writer;
TF_CHECK_OK(CreateSummaryFileWriter(1, 1, testing::TmpDir(), test_name,
&env_, &writer));
core::ScopedUnref deleter(writer);
}
std::vector<string> files;
TF_CHECK_OK(env_.GetChildren(testing::TmpDir(), &files));
files.erase(std::remove_if(files.begin(), files.end(),
[test_name](string f) {
return !absl::StrContains(f, test_name);
}),
files.end());
EXPECT_EQ(num_files, files.size())
<< "files = [" << absl::StrJoin(files, ", ") << "]";
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/summary/summary_file_writer.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/summary/summary_file_writer_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
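A minimal, hedged usage sketch for the summary_file_writer record above (not part of the dataset row): create a file writer under a caller-supplied log directory, write one scalar summary, and flush. The function name, tag, and filename suffix are invented for illustration.
#include <string>
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/refcount.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/summary/summary_file_writer.h"
tensorflow::Status WriteOneScalarSketch(const std::string& logdir) {
  tensorflow::SummaryWriterInterface* writer = nullptr;
  TF_RETURN_IF_ERROR(tensorflow::CreateSummaryFileWriter(
      /*max_queue=*/1, /*flush_millis=*/1000, logdir,
      /*filename_suffix=*/"v2", tensorflow::Env::Default(), &writer));
  tensorflow::core::ScopedUnref unref(writer);  // The writer is ref-counted.
  tensorflow::Tensor value(tensorflow::DT_FLOAT, tensorflow::TensorShape({}));
  value.scalar<float>()() = 0.5f;
  TF_RETURN_IF_ERROR(writer->WriteScalar(/*global_step=*/1, value, "loss"));
  return writer->Flush();
}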
05b3a5fb-d17e-46f2-a604-6a0d2c9a26bd | cpp | tensorflow/tensorflow | se_gpu_pjrt_client | third_party/xla/xla/pjrt/gpu/se_gpu_pjrt_client.cc | third_party/xla/xla/pjrt/gpu/se_gpu_pjrt_client_test.cc | #include "xla/pjrt/gpu/se_gpu_pjrt_client.h"
#include <array>
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <map>
#include <memory>
#include <optional>
#include <string>
#include <string_view>
#include <utility>
#include <variant>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/base/thread_annotations.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/inlined_vector.h"
#include "absl/functional/any_invocable.h"
#include "absl/functional/bind_front.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/memory/memory.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
#include "absl/synchronization/mutex.h"
#include "absl/time/time.h"
#include "absl/types/span.h"
#include "xla/client/local_client.h"
#include "xla/client/xla_computation.h"
#include "xla/layout.h"
#include "xla/layout_util.h"
#include "xla/literal.h"
#include "xla/pjrt/distributed/in_memory_key_value_store.h"
#include "xla/pjrt/distributed/key_value_store_interface.h"
#include "xla/pjrt/distributed/topology_util.h"
#include "xla/pjrt/event_pool.h"
#include "xla/pjrt/gpu/gpu_helpers.h"
#include "xla/pjrt/gpu/gpu_topology.h"
#include "xla/pjrt/gpu/gpu_topology.pb.h"
#include "xla/pjrt/host_memory_spaces.h"
#include "xla/pjrt/local_device_state.h"
#include "xla/pjrt/pjrt_client.h"
#include "xla/pjrt/pjrt_compiler.h"
#include "xla/pjrt/pjrt_device_description.h"
#include "xla/pjrt/pjrt_executable.h"
#include "xla/pjrt/pjrt_future.h"
#include "xla/pjrt/pjrt_stream_executor_client.h"
#include "xla/pjrt/stream_executor_executable.h"
#include "xla/pjrt/tracked_device_buffer.h"
#include "xla/service/compiler.h"
#include "xla/service/computation_placer.h"
#include "xla/service/global_device_id.h"
#include "xla/service/shaped_buffer.h"
#include "xla/service/transfer_manager.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/stream_executor/device_description.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/stream_executor/device_memory_allocator.h"
#include "xla/stream_executor/platform.h"
#include "xla/stream_executor/stream.h"
#include "xla/stream_executor/stream_executor.h"
#include "xla/tsl/framework/allocator.h"
#include "xla/tsl/lib/strings/proto_serialization.h"
#include "tsl/platform/casts.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/fingerprint.h"
#include "tsl/platform/status.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/threadpool.h"
#include "tsl/profiler/lib/connected_traceme.h"
#include "tsl/profiler/lib/nvtx_utils.h"
#include "tsl/profiler/lib/traceme.h"
#if defined(GOOGLE_CUDA) || defined(TENSORFLOW_USE_ROCM)
#include "xla/debug_options_flags.h"
#include "xla/pjrt/compile_options.pb.h"
#include "xla/pjrt/gpu/gpu_metrics.h"
#include "xla/pjrt/gpu/nccl_id_store.h"
#include "xla/pjrt/stream_executor_executable.pb.h"
#include "xla/service/gpu/gpu_compiler.h"
#include "xla/service/gpu/gpu_memory_space_assignment.h"
#include "xla/xla.pb.h"
#endif
#if GOOGLE_CUDA
#include "third_party/gpus/cuda/include/cuda.h"
#include "third_party/gpus/cuda/include/cuda_runtime_api.h"
#include "xla/stream_executor/gpu/gpu_cudamallocasync_allocator.h"
#elif TENSORFLOW_USE_ROCM
#include "rocm/rocm_config.h"
#endif
#include "xla/service/gpu/gpu_executable_run_options.h"
#include "xla/stream_executor/integrations/device_mem_allocator.h"
#include "xla/stream_executor/integrations/tf_allocator_adapter.h"
#include "xla/util.h"
namespace xla {
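// Implements PjRtClient::AsyncHostToDeviceTransferManager for the
// StreamExecutor GPU backend. Destination device buffers are allocated up
// front in Create(), and host data is later copied into them asynchronously
// on the device's host-to-device stream.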
class AsyncHostToDeviceTransferManager
: public xla::PjRtClient::AsyncHostToDeviceTransferManager {
public:
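  // Allocates one destination device buffer per shape spec (tuples are not
  // supported) and creates a definition event for each buffer; the event
  // fires once the last transfer into that buffer completes.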
static absl::StatusOr<std::unique_ptr<AsyncHostToDeviceTransferManager>>
Create(absl::Span<const PjRtClient::ShapeSpec> shape_specs,
std::optional<absl::Span<const Layout>> device_layouts,
PjRtStreamExecutorDevice* device, PjRtStreamExecutorClient* client,
PjRtMemorySpace* memory_space) {
if (device_layouts != std::nullopt &&
device_layouts->size() != shape_specs.size()) {
return InvalidArgument(
"Number of layouts %d does not match the number of shapes %d",
device_layouts->size(), shape_specs.size());
}
absl::InlinedVector<std::unique_ptr<PjRtBuffer>, 4> buffers;
absl::InlinedVector<std::shared_ptr<TrackedDeviceBuffer>, 4> buffer_ptrs;
absl::InlinedVector<std::shared_ptr<BufferSequencingEvent>, 4>
definition_events;
absl::InlinedVector<Shape, 4> device_shapes;
buffers.reserve(shape_specs.size());
buffer_ptrs.reserve(shape_specs.size());
definition_events.reserve(shape_specs.size());
device_shapes.reserve(shape_specs.size());
for (int i = 0; i < shape_specs.size(); ++i) {
const PjRtClient::ShapeSpec& shape_spec = shape_specs[i];
if (shape_spec.element_type == TUPLE) {
return Unimplemented(
"Async buffer transfer of tuples not implemented.");
}
definition_events.push_back(
std::make_shared<BufferSequencingEvent>(client->thread_pool()));
Shape& device_shape = device_shapes.emplace_back(
ShapeUtil::MakeShape(shape_spec.element_type, shape_spec.dims));
if (device_layouts == std::nullopt) {
TF_ASSIGN_OR_RETURN(device_shape,
client->client()
->backend()
.transfer_manager()
->ChooseCompactLayoutForShape(device_shape));
} else {
*device_shape.mutable_layout() = (*device_layouts)[i];
}
LocalDeviceState* local_device = device->local_device_state();
se::Stream* h2d_stream = local_device->host_to_device_stream();
TF_ASSIGN_OR_RETURN(auto buffer,
AllocateDestinationBuffer(
device_shape, device, local_device, h2d_stream,
true, client,
definition_events.back(), memory_space));
auto* se_buffer =
tensorflow::down_cast<PjRtStreamExecutorBuffer*>(buffer.get());
DCHECK(se_buffer);
auto hold = se_buffer->GetBufferWithUsageHold();
buffer_ptrs.push_back(hold.buffer());
buffers.push_back(std::move(buffer));
}
return std::make_unique<AsyncHostToDeviceTransferManager>(
std::move(buffers), std::move(buffer_ptrs),
std::move(definition_events), std::move(device_shapes), device);
}
AsyncHostToDeviceTransferManager(
absl::InlinedVector<std::unique_ptr<PjRtBuffer>, 4> buffers,
absl::InlinedVector<std::shared_ptr<TrackedDeviceBuffer>, 4> buffer_ptrs,
absl::InlinedVector<std::shared_ptr<BufferSequencingEvent>, 4>
definition_events,
absl::InlinedVector<Shape, 4> device_shapes,
PjRtStreamExecutorDevice* device)
: buffers_(std::move(buffers)),
buffer_ptrs_(std::move(buffer_ptrs)),
definition_events_(std::move(definition_events)),
device_shapes_(std::move(device_shapes)),
remaining_buffer_count_(buffer_ptrs_.size()),
transfers_in_flight_(0),
device_(device) {
buffer_sizes_.reserve(buffer_ptrs_.size());
for (const auto& ptr : buffer_ptrs_) {
DCHECK_EQ(ptr->device_memory().size(), 1);
buffer_sizes_.push_back(ptr->device_memory()[0].size());
}
last_transfer_started_.resize(buffer_ptrs_.size(), false);
}
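  // Blocks until every transfer enqueued through this manager has completed,
  // so the stream callbacks created below never outlive this object.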
~AsyncHostToDeviceTransferManager() override {
auto transfers_finished = [this]() {
mu_.AssertHeld();
return transfers_in_flight_ == 0;
};
{
absl::MutexLock l(&mu_);
mu_.Await(absl::Condition(&transfers_finished));
}
}
size_t buffer_count() const override { return buffers_.size(); };
size_t buffer_size(int buffer_index) const override {
DCHECK_LT(buffer_index, buffer_sizes_.size());
return buffer_sizes_[buffer_index];
}
PjRtDevice* device() const override { return device_; }
std::unique_ptr<PjRtBuffer> RetrieveBuffer(int buffer_index) override {
DCHECK_LT(buffer_index, buffers_.size());
return std::move(buffers_[buffer_index]);
};
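  // Copies a literal into the pre-allocated device buffer. The H2D copy is
  // scheduled on the client's thread pool, so this call returns without
  // waiting for the transfer to finish.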
absl::Status TransferLiteralToBuffer(
int buffer_index, const LiteralSlice& literal,
absl::AnyInvocable<void() &&> on_done) override {
tsl::profiler::TraceMe traceme(
"AsyncHostToDeviceTransferManager::TransferLiteralToBuffer");
auto* stream = device_->local_device_state()->host_to_device_stream();
auto* se_client =
tensorflow::down_cast<PjRtStreamExecutorClient*>(device_->client());
DCHECK(se_client);
TransferManager* transfer_manager =
se_client->client()->backend().transfer_manager();
std::shared_ptr<TrackedDeviceBuffer> buffer;
{
absl::MutexLock l(&mu_);
DCHECK_LT(buffer_index, buffer_ptrs_.size());
if (last_transfer_started_[buffer_index]) {
return InvalidArgument(
"TransferLiteralToBuffer requested for buffer index %d which has "
"already been fully transferred",
buffer_index);
}
last_transfer_started_[buffer_index] = true;
buffer = buffer_ptrs_[buffer_index];
DCHECK(buffer);
if (buffer->device_memory().empty()) {
return InvalidArgument(
"TransferLiteralToBuffer requested for buffer index %d which has "
"been donated. Async transfer of donated buffers is not supported "
"in SE:GPU",
buffer_index);
}
DCHECK_EQ(buffer->device_memory().size(), 1);
++transfers_in_flight_;
}
auto transfer_h2d = [this, buffer_index, stream, transfer_manager, literal,
device_buffer = buffer.get(),
local_device =
std::move(device_->local_device_state()),
on_done = std::move(on_done)]() mutable {
tsl::profiler::TraceMe traceme(
"AsyncHostToDeviceTransferManager::TransferLiteralToBuffer::transfer_"
"h2d");
auto event = local_device->event_pool().AllocateEvent(stream->parent());
ShapedBuffer buffer =
device_buffer->AsShapedBuffer(device_shapes_[buffer_index]);
TF_CHECK_OK(transfer_manager->TransferLiteralToDeviceAsync(
stream, literal, buffer));
local_device->event_pool().ThenRecordEvent(stream, event.value());
auto cleanup = [this, buffer_index, stream, on_done = std::move(on_done),
event = std::move(event).value()]() mutable {
CleanUp(buffer_index, std::move(event), stream,
true, std::move(on_done));
};
auto status = stream->DoHostCallback(std::move(cleanup));
if (!status.ok()) {
LOG(ERROR) << "DoHostCallback failed: " << status;
}
};
se_client->thread_pool()->Schedule(
([ptr = new absl::AnyInvocable<void()>(std::move(transfer_h2d))]() {
(*ptr)();
delete ptr;
}));
return absl::OkStatus();
}
absl::Status TransferRawDataToBuffer(
int buffer_index, absl::string_view data,
absl::AnyInvocable<void() &&> on_done) override {
    return TransferRawDataToSubBuffer(buffer_index, data.data(), /*offset=*/0,
                                      /*transfer_size=*/data.size(),
                                      /*is_last_transfer=*/true,
                                      std::move(on_done));
}
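  // Copies a raw byte range into a slice of the destination buffer. If the
  // client stages host-to-device transfers, the bytes are first copied into a
  // staging buffer (from the client's host memory allocator) via a stream
  // host callback before the device Memcpy is enqueued.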
absl::Status TransferRawDataToSubBuffer(
int buffer_index, const void* data, int64_t offset, int64_t transfer_size,
bool is_last_transfer, absl::AnyInvocable<void() &&> on_done) override {
auto* stream = device_->local_device_state()->host_to_device_stream();
auto* client =
tensorflow::down_cast<PjRtStreamExecutorClient*>(device_->client());
bool should_stage_host_to_device_transfers =
client->should_stage_host_to_device_transfers();
std::shared_ptr<void> staging_buffer;
if (should_stage_host_to_device_transfers) {
auto* host_memory_allocator = client->host_memory_allocator();
if (host_memory_allocator == nullptr) {
return InvalidArgument(
"host_memory_allocator should be initialized for staging buffer "
"transfer.");
}
void* ptr = host_memory_allocator->AllocateRaw(
tsl::Allocator::kAllocatorAlignment, transfer_size);
staging_buffer = std::shared_ptr<void>(
ptr, [host_memory_allocator = host_memory_allocator](void* ptr) {
host_memory_allocator->DeallocateRaw(ptr);
});
}
absl::ReleasableMutexLock l(&mu_);
DCHECK_LT(buffer_index, buffer_ptrs_.size());
if (last_transfer_started_[buffer_index]) {
return InvalidArgument(
"TransferRawData requested for buffer index %d which has "
"already been fully transferred",
buffer_index);
}
if (is_last_transfer) {
last_transfer_started_[buffer_index] = true;
}
DCHECK(buffer_ptrs_[buffer_index]);
if (buffer_ptrs_[buffer_index]->device_memory().empty()) {
return InvalidArgument(
"TransferRawDataToSubBuffer requested for buffer index %d which has "
"been donated. Async transfer of donated buffers is not supported "
"in SE:GPU",
buffer_index);
}
DCHECK_EQ(buffer_ptrs_[buffer_index]->device_memory().size(), 1);
auto& buffer_memory = buffer_ptrs_[buffer_index]->device_memory()[0];
se::DeviceMemoryBase sub_buffer;
CHECK_LE(offset, buffer_memory.size());
CHECK_LE(transfer_size, buffer_memory.size() - offset);
if (transfer_size < buffer_memory.size()) {
sub_buffer = buffer_memory.GetByteSlice(offset, transfer_size);
} else {
sub_buffer = buffer_memory;
}
++transfers_in_flight_;
l.Release();
auto event = device_->local_device_state()->event_pool().AllocateEvent(
stream->parent());
if (transfer_size != 0) {
if (staging_buffer != nullptr) {
auto copy_to_staging_buffer = [data, transfer_size,
staging_buffer]() mutable {
std::memcpy(staging_buffer.get(), data, transfer_size);
};
if (auto status =
stream->DoHostCallback(std::move(copy_to_staging_buffer));
!status.ok()) {
return status;
}
if (auto status = stream->Memcpy(&sub_buffer, staging_buffer.get(),
transfer_size);
!status.ok()) {
return status;
}
} else if (auto status = stream->Memcpy(&sub_buffer, data, transfer_size);
!status.ok()) {
return status;
}
}
device_->local_device_state()->event_pool().ThenRecordEvent(stream,
event.value());
auto cleanup = [this, buffer_index, event = std::move(event).value(),
stream, is_last_transfer, on_done = std::move(on_done),
staging_buffer = std::move(staging_buffer)]() mutable {
CleanUp(buffer_index, std::move(event), stream, is_last_transfer,
std::move(on_done));
};
return stream->DoHostCallback(std::move(cleanup));
}
void SetBufferError(int buffer_index, absl::Status error) override {
{
absl::MutexLock l(&mu_);
CHECK(!definition_events_[buffer_index]->IsDefined());
definition_events_[buffer_index]->SetDefinedStatus(error);
}
VLOG(1) << "SetBufferError sets the " << buffer_index
<< "th buffer error: " << error;
}
void AddTransferMetadata(const TransferMetadata& meta) override {}
private:
absl::Mutex mu_;
absl::InlinedVector<std::unique_ptr<PjRtBuffer>, 4> buffers_;
absl::InlinedVector<size_t, 4> buffer_sizes_;
absl::InlinedVector<std::shared_ptr<TrackedDeviceBuffer>, 4> buffer_ptrs_
ABSL_GUARDED_BY(mu_);
absl::InlinedVector<bool, 4> last_transfer_started_ ABSL_GUARDED_BY(mu_);
absl::InlinedVector<std::shared_ptr<BufferSequencingEvent>, 4>
definition_events_ ABSL_GUARDED_BY(mu_);
const absl::InlinedVector<Shape, 4> device_shapes_;
size_t remaining_buffer_count_ ABSL_GUARDED_BY(mu_);
int transfers_in_flight_ ABSL_GUARDED_BY(mu_);
PjRtStreamExecutorDevice* device_;
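  // Runs as a host callback on the H2D stream once a transfer finishes:
  // decrements the in-flight count and, for the last transfer into a buffer,
  // drops the buffer reference and fires its definition event.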
void CleanUp(int buffer_index, EventPool::Handle event, se::Stream* stream,
bool is_last_transfer, absl::AnyInvocable<void() &&> on_done) {
{
absl::MutexLock l(&mu_);
CHECK_GT(transfers_in_flight_, 0);
--transfers_in_flight_;
if (is_last_transfer) {
CHECK(buffer_ptrs_[buffer_index]);
buffer_ptrs_[buffer_index] = nullptr;
CHECK_GT(remaining_buffer_count_, 0);
--remaining_buffer_count_;
definition_events_[buffer_index]->SetSequencingEvent(std::move(event),
stream);
if (remaining_buffer_count_ == 0) {
VLOG(1) << "TransferLiteralToBuffer for all buffers is done.";
}
}
}
std::move(on_done)();
}
};
StreamExecutorGpuClient::StreamExecutorGpuClient(
std::string platform_name, LocalClient* client,
std::vector<std::unique_ptr<PjRtStreamExecutorDevice>> devices,
int process_index, std::unique_ptr<se::DeviceMemoryAllocator> allocator,
std::unique_ptr<tsl::Allocator> host_memory_allocator,
bool should_stage_host_to_device_transfers,
std::unique_ptr<gpu::GpuExecutableRunOptions> gpu_run_options,
std::shared_ptr<KeyValueStoreInterface> kv_store,
std::shared_ptr<const GpuTopology> gpu_topology)
: xla::PjRtStreamExecutorClient(
platform_name, client, std::move(devices), process_index,
std::move(allocator), std::move(host_memory_allocator),
should_stage_host_to_device_transfers, std::move(gpu_run_options)),
topology_(xla::StreamExecutorGpuTopologyDescription::Create(
tsl::Fingerprint64(platform_name), platform_name,
std::move(gpu_topology))),
kv_store_(std::move(kv_store)) {
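  // Attach a device (HBM) memory space and a pinned-host memory space to each
  // addressable device, then expose all memory spaces sorted by id.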
for (auto* device : addressable_devices()) {
const int id = device->id();
auto memory_space =
std::make_unique<StreamExecutorGpuHbmMemorySpace>(id, device);
tensorflow::down_cast<PjRtStreamExecutorDevice*>(device)->AttachMemorySpace(
memory_space.get());
owned_memory_spaces_.push_back(std::move(memory_space));
const size_t basePinnedId = devices.size();
auto pinned = std::make_unique<PinnedHostMemorySpace>(basePinnedId, device);
tensorflow::down_cast<PjRtStreamExecutorDevice*>(device)->AttachMemorySpace(
pinned.get());
owned_memory_spaces_.push_back(std::move(pinned));
}
for (const std::unique_ptr<PjRtMemorySpace>& memory_space :
owned_memory_spaces_) {
memory_spaces_.push_back(memory_space.get());
}
absl::c_sort(memory_spaces_,
[](const PjRtMemorySpace* a, const PjRtMemorySpace* b) {
return a->id() < b->id();
});
}
absl::string_view StreamExecutorGpuClient::platform_version() const {
#define STRINGIFY2(X) #X
#define STRINGIFY(X) STRINGIFY2(X)
#if TENSORFLOW_USE_ROCM && defined(TF_ROCM_VERSION)
return "rocm " STRINGIFY(TF_ROCM_VERSION);
#elif GOOGLE_CUDA && defined(CUDART_VERSION)
return "cuda " STRINGIFY(CUDART_VERSION);
#else
return "<unknown>";
#endif
}
absl::StatusOr<std::unique_ptr<PjRtClient::AsyncHostToDeviceTransferManager>>
StreamExecutorGpuClient::CreateBuffersForAsyncHostToDevice(
absl::Span<const PjRtClient::ShapeSpec> shape_specs,
std::optional<absl::Span<const Layout>> device_layouts,
PjRtDevice* device) {
auto* stream_executor_device =
tensorflow::down_cast<PjRtStreamExecutorDevice*>(device);
return xla::AsyncHostToDeviceTransferManager::Create(
shape_specs, std::move(device_layouts), stream_executor_device, this,
nullptr);
}
absl::StatusOr<std::unique_ptr<PjRtClient::AsyncHostToDeviceTransferManager>>
StreamExecutorGpuClient::CreateBuffersForAsyncHostToDevice(
absl::Span<const Shape> shapes, PjRtDevice* device) {
absl::InlinedVector<PjRtClient::ShapeSpec, 4> shape_specs;
shape_specs.reserve(shapes.size());
for (const auto& shape : shapes) {
shape_specs.emplace_back(PjRtClient::ShapeSpec{
shape.element_type(),
DimensionVector(shape.dimensions().begin(), shape.dimensions().end())});
}
  return CreateBuffersForAsyncHostToDevice(
      shape_specs, /*device_layouts=*/std::nullopt, device);
}
absl::StatusOr<std::unique_ptr<PjRtClient::AsyncHostToDeviceTransferManager>>
StreamExecutorGpuClient::CreateBuffersForAsyncHostToDevice(
absl::Span<const PjRtClient::ShapeSpec> shape_specs,
std::optional<absl::Span<const Layout>> device_layouts,
PjRtMemorySpace* memory_space) {
CHECK_EQ(memory_space->devices().size(), 1);
PjRtDevice* device = memory_space->devices()[0];
auto* stream_executor_device =
tensorflow::down_cast<PjRtStreamExecutorDevice*>(device);
return xla::AsyncHostToDeviceTransferManager::Create(
shape_specs, std::move(device_layouts), stream_executor_device, this,
memory_space);
}
absl::StatusOr<std::unique_ptr<PjRtClient::AsyncHostToDeviceTransferManager>>
StreamExecutorGpuClient::CreateBuffersForAsyncHostToDevice(
absl::Span<const Shape> shapes, PjRtMemorySpace* memory_space) {
absl::InlinedVector<PjRtClient::ShapeSpec, 4> shape_specs;
shape_specs.reserve(shapes.size());
for (const auto& shape : shapes) {
shape_specs.emplace_back(PjRtClient::ShapeSpec{
shape.element_type(),
DimensionVector(shape.dimensions().begin(), shape.dimensions().end())});
}
  return CreateBuffersForAsyncHostToDevice(
      shape_specs, /*device_layouts=*/std::nullopt, memory_space);
}
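// For the common single-partition case, map replicas directly onto the
// client's addressable devices; otherwise fall back to the base
// implementation.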
absl::StatusOr<xla::DeviceAssignment>
StreamExecutorGpuClient::GetDefaultDeviceAssignment(int num_replicas,
int num_partitions) const {
if (num_partitions == 1 && num_replicas <= addressable_devices().size()) {
xla::DeviceAssignment assignment(num_replicas, 1);
for (int i = 0; i < num_replicas; ++i) {
assignment(i, 0) = addressable_devices().at(i)->id();
}
return assignment;
}
return PjRtStreamExecutorClient::GetDefaultDeviceAssignment(num_replicas,
num_partitions);
}
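// Asynchronously copies a byte range of a device buffer to host memory. The
// copy runs once the buffer's definition event and the destination pointer
// future are both ready, optionally bouncing through a staging buffer.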
PjRtFuture<> StreamExecutorGpuClient::CopyRawSubBufferToHost(
PjRtBuffer* pjrt_buffer, PjRtFuture<void*> dst, int64_t offset,
int64_t transfer_size) {
auto* buffer = tensorflow::down_cast<PjRtStreamExecutorBuffer*>(pjrt_buffer);
DCHECK(buffer);
PjRtStreamExecutorDevice* device = buffer->device();
LocalDeviceState* local_device = device->local_device_state();
se::Stream* stream = local_device->GetDeviceToHostStream();
PjRtStreamExecutorBuffer::ScopedHold hold(buffer->GetBufferWithUsageHold());
if (!hold.ok()) {
return PjRtFuture<>(hold.status());
}
auto device_buffer = hold.buffer();
if (device_buffer->device_memory().size() != 1) {
return PjRtFuture<>(InvalidArgument("Copy raw buffer called on tuple"));
}
auto promise = PjRtFuture<>::CreatePromise();
auto usage_event =
std::make_shared<BufferSequencingEvent>(this->thread_pool());
hold.ConvertUsageHold(stream, usage_event, true);
auto async_copy = [this, promise, offset, transfer_size, stream, local_device,
device_buffer, usage_event = std::move(usage_event)](
absl::StatusOr<void*> dst) mutable {
absl::StatusOr<EventPool::Handle> event =
local_device->event_pool().AllocateEvent(stream->parent());
if (!event.ok()) {
promise.Set(event.status());
return;
}
absl::Status defined_status =
device_buffer->definition_events()[0]->GetDefinedStatus();
if (!defined_status.ok()) {
promise.Set(defined_status);
return;
}
auto& device_memory = device_buffer->device_memory()[0];
if (offset < 0 || offset > device_memory.size() ||
device_memory.size() - offset < transfer_size) {
promise.Set(
InvalidArgument("Copy raw buffer called on buffer size %lld with "
"invalid offset %lld, transfer size %lld",
device_memory.size(), offset, transfer_size));
return;
}
std::unique_ptr<se::DeviceMemoryBase> sub_buffer;
if (transfer_size < device_memory.size()) {
sub_buffer = std::make_unique<se::DeviceMemoryBase>(
device_memory.GetByteSlice(offset, transfer_size));
} else {
sub_buffer = std::make_unique<se::DeviceMemoryBase>(device_memory);
}
WaitForBufferDefinitionEventsOnStream(*device_buffer, stream);
if (transfer_size != 0) {
if (should_stage_host_to_device_transfers()) {
if (host_memory_allocator() == nullptr) {
promise.Set(InvalidArgument(
"host_memory_allocator should be initialized for staging buffer "
"transfer."));
return;
}
void* ptr = host_memory_allocator()->AllocateRaw(
tsl::Allocator::kAllocatorAlignment, transfer_size);
std::shared_ptr<void> staging_buffer = std::shared_ptr<void>(
ptr, [host_memory_allocator = host_memory_allocator()](void* ptr) {
host_memory_allocator->DeallocateRaw(ptr);
});
if (auto status = stream->Memcpy(staging_buffer.get(), *sub_buffer,
transfer_size);
!status.ok()) {
promise.Set(std::move(status));
return;
}
auto copy_to_staging_buffer = [dst, transfer_size,
staging_buffer]() mutable {
std::memcpy(*dst, staging_buffer.get(), transfer_size);
};
if (auto status = stream->DoHostCallback(copy_to_staging_buffer);
!status.ok()) {
promise.Set(std::move(status));
return;
}
} else {
auto status = stream->Memcpy(*dst, *sub_buffer, transfer_size);
if (!status.ok()) {
promise.Set(std::move(status));
return;
}
}
}
local_device->event_pool().ThenRecordEvent(stream, event.value());
usage_event->SetSequencingEvent(std::move(event).value(), stream);
auto callback_status = local_device->ThenExecuteCallback(
stream, [promise, device_buffer = std::move(device_buffer)]() mutable {
promise.Set();
});
if (!callback_status.ok()) {
promise.Set(std::move(callback_status));
return;
}
};
device_buffer->definition_events()[0]->ExecuteOrAddToFutureTasks(
absl::StrFormat("async_copy_raw_sub_buffer_to_host_%p", &async_copy),
[this, dst, async_copy = std::move(async_copy)]() mutable {
dst.OnReady([this, async_copy = std::move(async_copy)](
absl::StatusOr<void*> dst) {
thread_pool()->Schedule(absl::bind_front(async_copy, std::move(dst)));
});
});
return PjRtFuture<>(
std::move(promise),
[]() {
tsl::profiler::TraceMeProducer traceme(
"StreamExecutorGpuClient::CopyRawSubBufferToHost");
VLOG(1) << "StreamExecutorGpuClient::CopyRawSubBufferToHost";
return PjRtFutureHelpers::ProfilingKeys(
{traceme.GetContextId()});
},
[](PjRtFutureHelpers::ProfilingKeys keys) {
tsl::profiler::TraceMeConsumer traceme(
"StreamExecutorGpuClient::CopyRawSubBufferToHost",
keys.traceme_context_id);
});
}
absl::StatusOr<std::unique_ptr<PjRtLoadedExecutable>>
StreamExecutorGpuClient::Compile(const XlaComputation& computation,
CompileOptions options) {
options.executable_build_options.set_key_value_store(kv_store_);
auto executable = PjRtStreamExecutorClient::Compile(computation, options);
#if defined(GOOGLE_CUDA) || defined(TENSORFLOW_USE_ROCM)
for (const PjRtDevice* device : addressable_devices()) {
LocalDeviceState* local_device_state =
tensorflow::down_cast<const PjRtStreamExecutorDevice*>(device)
->local_device_state();
int64_t free_memory, total_memory;
if (local_device_state != nullptr) {
se::StreamExecutor* executor = local_device_state->executor();
int device_ordinal = executor->device_ordinal();
if (executor->DeviceMemoryUsage(&free_memory, &total_memory)) {
gpu_metrics::RecordFreeGpuSystemMemory(device_ordinal, free_memory);
} else {
LOG(ERROR) << "Failed to query available memory for GPU "
<< device_ordinal;
}
}
}
#endif
return executable;
}
absl::StatusOr<std::unique_ptr<PjRtLoadedExecutable>>
StreamExecutorGpuClient::LoadSerialized(absl::string_view serialized,
std::optional<CompileOptions> options,
const LoadOptions& load_options) {
return PjRtStreamExecutorClient::DeserializeExecutable(serialized, options);
}
absl::StatusOr<std::unique_ptr<PjRtLoadedExecutable>>
StreamExecutorGpuClient::Load(std::unique_ptr<PjRtExecutable> executable) {
auto se_executable = absl::WrapUnique(
tensorflow::down_cast<StreamExecutorExecutable*>(executable.release()));
CompileOptions compile_options = se_executable->compile_options();
CompileOptions input_options = compile_options;
TF_RETURN_IF_ERROR(compile_options.ApplyAllOptionOverrides());
TF_ASSIGN_OR_RETURN(ExecutableExtras extras,
GetExecutableExtras(&compile_options));
std::vector<std::unique_ptr<LocalExecutable>> local_executables;
local_executables.reserve(se_executable->aot_executables().size());
for (std::unique_ptr<xla::AotCompilationResult>& aot_executable :
se_executable->aot_executables()) {
TF_ASSIGN_OR_RETURN(std::string serialized,
aot_executable->SerializeAsString());
TF_ASSIGN_OR_RETURN(
std::unique_ptr<LocalExecutable> local_executable,
client()->Load(serialized, compile_options.executable_build_options));
local_executables.push_back(std::move(local_executable));
}
bool parameter_is_tupled_arguments =
compile_options.parameter_is_tupled_arguments;
auto ret = std::make_unique<PjRtStreamExecutorLoadedExecutable>(
std::move(local_executables), parameter_is_tupled_arguments,
std::move(extras.device_assignment), std::move(input_options),
std::move(extras.addressable_device_logical_ids),
std::move(extras.addressable_devices), this);
TF_RETURN_IF_ERROR(ret->SetUpDonation(parameter_is_tupled_arguments));
return std::unique_ptr<PjRtLoadedExecutable>(std::move(ret));
}
namespace {
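// Creates a cudaMallocAsync-based allocator for a single device, sized as a
// fraction of its total memory. Only available when built against
// CUDA >= 11.2; the fallback overload below returns an error otherwise.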
#if defined(GOOGLE_CUDA) && CUDA_VERSION >= 11020
absl::StatusOr<std::unique_ptr<se::GpuCudaMallocAsyncAllocator>>
CreateCudaAsyncAllocator(const LocalDeviceState& device, double memory_fraction,
bool reserve_memory, bool create_new_pool,
bool sync_mode, bool compute_stats = true) {
se::StreamExecutor* executor = device.executor();
int device_ordinal = executor->device_ordinal();
int64_t free_memory;
int64_t total_memory;
if (!executor->DeviceMemoryUsage(&free_memory, &total_memory)) {
return Unavailable("Failed to query available memory from device %i",
device_ordinal);
}
size_t allocator_memory = total_memory * memory_fraction;
if (reserve_memory) {
LOG(INFO) << "XLA backend allocating " << allocator_memory
<< " bytes on device " << device_ordinal
<< " for CudaAsyncAllocator.";
} else {
LOG(INFO) << "XLA backend will use up to " << allocator_memory
<< " bytes on device " << device_ordinal
<< " for CudaAsyncAllocator.";
}
auto allocator = std::make_unique<se::GpuCudaMallocAsyncAllocator>(
tsl::PlatformDeviceId(device_ordinal),
create_new_pool,
allocator_memory,
reserve_memory,
reserve_memory ? allocator_memory : 0,
sync_mode,
compute_stats);
allocator->SetStreamAndPreallocateMemory(
device.compute_stream()->platform_specific_handle().stream);
return allocator;
}
#else
absl::StatusOr<std::unique_ptr<tsl::Allocator>> CreateCudaAsyncAllocator(
const LocalDeviceState& device, double memory_fraction, bool reserve_memory,
bool create_new_pool, bool sync_mode, bool compute_stats = true) {
return FailedPrecondition("CUDA async allocator requires CUDA >= 11.2");
}
#endif
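// Creates one LocalDeviceState per stream executor owned by the local client,
// keyed by device ordinal.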
absl::StatusOr<std::map<int, std::unique_ptr<LocalDeviceState>>>
BuildLocalDeviceStates(LocalClient* xla_client) {
std::map<int, std::unique_ptr<LocalDeviceState>> addressable_devices;
for (se::StreamExecutor* executor :
xla_client->backend().stream_executors()) {
addressable_devices.emplace(
executor->device_ordinal(),
std::make_unique<LocalDeviceState>(
            executor, xla_client, LocalDeviceState::kComputeSynchronized,
            32, true, true));
}
return std::move(addressable_devices);
}
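// Builds the device memory allocator requested by the allocator config:
// cudaMallocAsync, BFC (also the default), or the platform allocator. For the
// non-platform kinds, a collective-memory BFC allocator and a host allocator
// are added per device and everything is wrapped in a MultiDeviceAdapter.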
absl::StatusOr<std::unique_ptr<se::DeviceMemoryAllocator>>
GetStreamExecutorGpuDeviceAllocator(
se::Platform* platform, const GpuAllocatorConfig& allocator_config,
const std::map<int, std::unique_ptr<LocalDeviceState>>&
addressable_devices) {
std::vector<se::MultiDeviceAdapter::AllocatorInfo> allocators;
switch (allocator_config.kind) {
case GpuAllocatorConfig::Kind::kCudaAsync: {
for (const auto& ordinal_and_device : addressable_devices) {
TF_ASSIGN_OR_RETURN(
auto async_allocator,
CreateCudaAsyncAllocator(
*(ordinal_and_device.second), allocator_config.memory_fraction,
allocator_config.preallocate, false, false, true));
allocators.emplace_back(std::move(async_allocator),
ordinal_and_device.second->compute_stream(),
0);
}
break;
}
case GpuAllocatorConfig::Kind::kDefault:
case GpuAllocatorConfig::Kind::kBFC: {
LOG(INFO) << "Using BFC allocator.";
for (const auto& ordinal_and_device : addressable_devices) {
TF_ASSIGN_OR_RETURN(
auto bfc_allocator,
CreateBFCAllocator(ordinal_and_device.second->executor(),
allocator_config.memory_fraction,
allocator_config.preallocate,
allocator_config.gpu_system_memory_size));
allocators.emplace_back(std::move(bfc_allocator),
ordinal_and_device.second->compute_stream(),
0);
}
break;
}
case GpuAllocatorConfig::Kind::kPlatform:
LOG(INFO) << "Using platform allocator.";
if (allocator_config.collective_memory_size != 0) {
LOG(WARNING)
<< "collective_memory_size is non-zero, but allocator kind is set "
"to \"platform\". Collective memory will not be allocated.";
}
return nullptr;
}
for (const auto& ordinal_and_device : addressable_devices) {
TF_ASSIGN_OR_RETURN(
auto collective_bfc_allocator,
CreateCollectiveBFCAllocator(
ordinal_and_device.second->executor(),
1.0 - allocator_config.memory_fraction,
allocator_config.collective_memory_size));
allocators.emplace_back(std::move(collective_bfc_allocator),
ordinal_and_device.second->compute_stream(),
1);
}
for (const auto& ordinal_and_device : addressable_devices) {
auto host_allocator =
GetGpuHostAllocator(ordinal_and_device.second->executor());
allocators.emplace_back(std::move(host_allocator),
ordinal_and_device.second->compute_stream(),
static_cast<int>(se::MemoryType::kHost));
}
#if defined(GOOGLE_CUDA) && CUDA_VERSION >= 11020
const auto& debug_options = xla::GetDebugOptionsFromFlags();
if (debug_options.xla_gpu_temp_buffer_use_separate_color()) {
for (const auto& ordinal_and_device : addressable_devices) {
TF_ASSIGN_OR_RETURN(
auto async_allocator,
CreateCudaAsyncAllocator(*(ordinal_and_device.second), 1.0, false,
true, true, true));
allocators.emplace_back(
std::move(async_allocator),
ordinal_and_device.second->compute_stream(),
gpu::kTempBufferMemorySpaceColor);
}
}
#endif
return std::make_unique<se::MultiDeviceAdapter>(platform,
std::move(allocators));
}
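// Names the device and its launcher thread for the profiler, encoding the
// global device id, local ordinal, process, and slice index.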
void NameDeviceAndLauncherThread(const LocalTopologyProto& node,
const DeviceProto& device_proto,
WorkerThread* launcher_thread) {
auto suffix = absl::StrFormat(":#global=%d,local=%d,process=%d,slice=%d#",
device_proto.global_device_id(),
device_proto.local_device_ordinal(),
node.node_id(), device_proto.slice_index());
tsl::profiler::NameDevice(device_proto.local_device_ordinal(),
absl::StrCat("Xla", suffix));
launcher_thread->Schedule([name = absl::StrCat("XlaLauncher", suffix)] {
tsl::profiler::NameCurrentThread(name);
});
}
}
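// Builds the cross-process device list. Each node publishes its local
// topology (through the KV store, or synthesized locally when NCCL is
// mocked), the global topology is assembled, and devices owned by other
// nodes are created without a LocalDeviceState.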
absl::StatusOr<DeviceTopologyPair> BuildDistributedDevices(
std::string_view platform_name,
std::map<int, std::unique_ptr<LocalDeviceState>> local_device_states,
int node_id, int num_nodes,
gpu::GpuExecutableRunOptions* gpu_executable_run_options,
std::shared_ptr<KeyValueStoreInterface> kv_store, bool enable_mock_nccl,
absl::Duration get_local_topology_timeout,
absl::Duration get_global_topology_timeout) {
std::vector<std::unique_ptr<PjRtStreamExecutorDevice>> devices;
LocalTopologyProto local_topology;
local_topology.set_node_id(node_id);
std::string boot_id_str;
auto boot_id_str_or_status = GetBootIdString();
if (!boot_id_str_or_status.ok()) {
LOG(INFO) << boot_id_str_or_status.status();
} else {
boot_id_str = boot_id_str_or_status.value();
}
local_topology.set_boot_id(boot_id_str);
for (const auto& ordinal_and_device : local_device_states) {
const se::Platform* platform =
ordinal_and_device.second->executor()->GetPlatform();
TF_ASSIGN_OR_RETURN(
std::unique_ptr<xla::se::DeviceDescription> desc,
platform->DescriptionForDevice(
ordinal_and_device.second->local_hardware_id().value()));
DeviceProto* device_proto = local_topology.add_devices();
device_proto->set_local_device_ordinal(ordinal_and_device.first);
device_proto->set_name(desc->name());
device_proto->set_vendor(desc->device_vendor());
device_proto->set_compute_capability(
MakeComputeCapabilityString(desc.get()));
device_proto->set_core_count(desc->core_count());
}
GlobalTopologyProto global_topology;
if (enable_mock_nccl) {
std::vector<LocalTopologyProto> local_topologies(num_nodes, local_topology);
for (int i = 0; i < num_nodes; ++i) {
local_topologies[i].set_node_id(i);
local_topologies[i].set_boot_id(absl::StrCat(i));
}
global_topology = BuildGlobalTopology(absl::MakeSpan(local_topologies),
true);
} else {
TF_RETURN_IF_ERROR(ExchangeTopologies(
platform_name, node_id, num_nodes, get_local_topology_timeout,
get_global_topology_timeout, kv_store.get(), local_topology,
&global_topology, true));
}
std::map<int, GlobalDeviceId> gpu_device_ids;
absl::flat_hash_map<GlobalDeviceId, int> device_to_node;
for (const LocalTopologyProto& node : global_topology.nodes()) {
for (const DeviceProto& device_proto : node.devices()) {
GlobalDeviceId global_device_id(device_proto.global_device_id());
device_to_node[global_device_id] = node.node_id();
std::unique_ptr<LocalDeviceState> local_device;
if (node.node_id() == node_id) {
auto it = local_device_states.find(device_proto.local_device_ordinal());
TF_RET_CHECK(it != local_device_states.end())
<< device_proto.local_device_ordinal();
TF_RET_CHECK(it->second != nullptr);
local_device = std::move(it->second);
gpu_device_ids[device_proto.local_device_ordinal()] = global_device_id;
NameDeviceAndLauncherThread(node, device_proto,
local_device->execute_thread());
}
auto device = std::make_unique<StreamExecutorGpuDevice>(
device_proto.global_device_id(), std::move(local_device),
device_proto.name(), device_proto.vendor(),
device_proto.compute_capability(), device_proto.core_count(),
node.node_id(), device_proto.slice_index());
devices.push_back(std::move(device));
}
}
for (const auto& device : local_device_states) {
TF_RET_CHECK(device.second == nullptr);
}
gpu_executable_run_options->set_gpu_global_device_ids(
std::move(gpu_device_ids));
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
if (num_nodes > 1) {
auto nccl_id_store = std::make_shared<NcclIdStore>(node_id, device_to_node,
std::move(kv_store));
gpu_executable_run_options->set_nccl_clique_id_callback(
[nccl_id_store](const gpu::NcclCliqueKey& key) {
return nccl_id_store->GetNcclUniqueId(key);
});
}
#endif
TF_ASSIGN_OR_RETURN(GpuTopologyProto gpu_topology,
BuildGpuTopology(global_topology));
return std::make_pair(std::move(devices), gpu_topology);
}
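// Renders the compute capability as "major.minor" for CUDA devices and as the
// gfx version string for ROCm devices.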
std::string MakeComputeCapabilityString(const se::DeviceDescription* desc) {
se::GpuComputeCapability cc = desc->gpu_compute_capability();
if (std::holds_alternative<se::CudaComputeCapability>(cc)) {
auto nvcc = std::get<se::CudaComputeCapability>(cc);
return absl::StrCat(nvcc.major, ".", nvcc.minor);
} else if (std::holds_alternative<se::RocmComputeCapability>(cc)) {
auto rocmcc = std::get<se::RocmComputeCapability>(cc);
return rocmcc.gfx_version();
} else {
return "unknown";
}
}
StreamExecutorGpuDevice::StreamExecutorGpuDevice(
int id, std::unique_ptr<LocalDeviceState> local_device_state,
std::string device_kind, std::string device_vendor,
std::string compute_capability, int core_count, int node_id,
int slice_index)
: PjRtStreamExecutorDevice(id, std::move(local_device_state),
std::move(device_kind), node_id),
device_vendor_(std::move(device_vendor)),
slice_index_(slice_index) {
std::array<int, 1> coords = {local_device_id().value()};
description().SetCoords(coords);
std::vector<int64_t> v_coords(description().coords().begin(),
description().coords().end());
description().SetAttributes(
{{"coords", xla::PjRtDeviceAttribute(v_coords)},
{"device_vendor", device_vendor_},
{"slice_index", static_cast<int64_t>(slice_index)},
{"compute_capability", xla::PjRtDeviceAttribute(compute_capability)},
{"core_count", static_cast<int64_t>(core_count)}});
description().SetToString(absl::StrFormat(
"StreamExecutorGpuDevice(device_kind=%s, id=%i, process_index=%i, "
"slice_index=%i))",
description().device_kind(), id, process_index(), slice_index));
description().SetDebugString(absl::StrFormat("%s_%i(process=%i,(%i))",
description().device_kind(), id,
process_index(), v_coords[0]));
}
int StreamExecutorGpuDevice::slice_index() const { return slice_index_; }
absl::string_view StreamExecutorGpuDevice::device_vendor() const {
return device_vendor_;
}
absl::StatusOr<tsl::AllocatorStats> StreamExecutorGpuDevice::GetAllocatorStats()
const {
if (!IsAddressable()) {
return FailedPrecondition(
"GetAllocatorStats() is allowed only for addressable devices");
}
auto* allocator_adapter = dynamic_cast<se::MultiDeviceAdapter*>(
tensorflow::down_cast<PjRtStreamExecutorClient*>(client())->allocator());
if (!allocator_adapter) {
return Unimplemented(
"GetAllocatorStats() is only implemented with MultiDeviceAdapter "
"allocator");
}
TF_ASSIGN_OR_RETURN(auto allocator, allocator_adapter->GetAllocator(
local_device_id().value()));
auto stats = allocator->GetStats();
TF_RET_CHECK(stats.has_value());
return stats.value();
}
absl::Span<int const> StreamExecutorGpuDevice::coords() const {
return description().coords();
}
absl::StatusOr<PjRtMemorySpace*> StreamExecutorGpuDevice::default_memory_space()
const {
return memory_space_by_kind_id(StreamExecutorGpuHbmMemorySpace::kKindId);
}
const int StreamExecutorGpuHbmMemorySpace::kKindId = []() {
uint32_t kind_id = tsl::Fingerprint32(StreamExecutorGpuHbmMemorySpace::kKind);
return static_cast<int>(kind_id);
}();
StreamExecutorGpuHbmMemorySpace::StreamExecutorGpuHbmMemorySpace(
int id, PjRtDevice* device)
: PjRtStreamExecutorMemorySpace(id, device, kKind, kKindId) {}
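// Top-level factory: creates the XLA local client and per-device states,
// configures allocators, builds the (possibly distributed) device topology,
// and wraps everything in a StreamExecutorGpuClient.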
absl::StatusOr<std::unique_ptr<PjRtClient>> GetStreamExecutorGpuClient(
const GpuClientOptions& options) {
#if TENSORFLOW_USE_ROCM
auto pjrt_platform_name = xla::RocmName();
#elif TENSORFLOW_USE_SYCL
auto pjrt_platform_name = xla::SyclName();
#else
auto pjrt_platform_name = xla::CudaName();
#endif
TF_ASSIGN_OR_RETURN(
LocalClient * xla_client,
GetGpuXlaClient(options.platform_name, options.allowed_devices));
std::map<int, std::unique_ptr<LocalDeviceState>> local_device_states;
TF_ASSIGN_OR_RETURN(local_device_states, BuildLocalDeviceStates(xla_client));
EnablePeerAccess(xla_client->backend().stream_executors());
TF_ASSIGN_OR_RETURN(auto allocator,
GetStreamExecutorGpuDeviceAllocator(
xla_client->platform(), options.allocator_config,
local_device_states));
auto host_memory_allocator =
GetGpuHostAllocator(local_device_states.begin()->second->executor());
auto gpu_run_options = std::make_unique<gpu::GpuExecutableRunOptions>();
if (options.enable_mock_nccl) {
gpu_run_options->set_enable_mock_nccl_collectives();
}
std::shared_ptr<KeyValueStoreInterface> kv_store = options.kv_store;
if (options.enable_mock_nccl) {
kv_store = std::make_shared<InMemoryKeyValueStore>();
}
TF_RET_CHECK(options.num_nodes == 1 || kv_store != nullptr);
TF_ASSIGN_OR_RETURN(
DeviceTopologyPair device_topology_pair,
BuildDistributedDevices(pjrt_platform_name,
std::move(local_device_states), options.node_id,
options.num_nodes, gpu_run_options.get(),
kv_store, options.enable_mock_nccl));
auto gpu_topology = std::shared_ptr<const GpuTopology>(
GpuTopology::FromProto(device_topology_pair.second));
return std::unique_ptr<PjRtClient>(std::make_unique<StreamExecutorGpuClient>(
pjrt_platform_name, xla_client, std::move(device_topology_pair.first),
options.node_id, std::move(allocator), std::move(host_memory_allocator),
options.should_stage_host_to_device_transfers, std::move(gpu_run_options),
std::move(kv_store), std::move(gpu_topology)));
}
absl::StatusOr<std::string> StreamExecutorGpuTopologyDescription::Serialize()
const {
std::string result;
if (!tsl::SerializeToStringDeterministic(gpu_topology_->ToProto(), &result)) {
return absl::InternalError("Failed to serialize gpu_topology");
}
return result;
}
absl::StatusOr<Layout> StreamExecutorGpuTopologyDescription::GetDefaultLayout(
PrimitiveType element_type, absl::Span<const int64_t> dims) const {
Shape shape = ShapeUtil::MakeShape(element_type, dims);
return LayoutUtil::GetWithDefaultLayout(shape).layout();
}
std::vector<std::unique_ptr<PjRtStreamExecutorDevice>> BuildLocalDevices(
std::map<int, std::unique_ptr<LocalDeviceState>> local_device_states,
int node_id) {
std::vector<std::unique_ptr<PjRtStreamExecutorDevice>> devices;
for (auto& ordinal_and_device : local_device_states) {
const se::DeviceDescription& desc =
ordinal_and_device.second->executor()->GetDeviceDescription();
auto device = std::make_unique<StreamExecutorGpuDevice>(
ordinal_and_device.first, std::move(ordinal_and_device.second),
desc.name(), desc.device_vendor(), MakeComputeCapabilityString(&desc),
desc.core_count(), node_id);
devices.push_back(std::move(device));
}
return devices;
}
} | #include "xla/pjrt/gpu/se_gpu_pjrt_client.h"
#include <stdlib.h>
#include <array>
#include <cstdint>
#include <cstring>
#include <memory>
#include <numeric>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/match.h"
#include "absl/strings/string_view.h"
#include "absl/synchronization/mutex.h"
#include "absl/time/clock.h"
#include "absl/time/time.h"
#include "absl/types/span.h"
#include "xla/client/xla_computation.h"
#include "xla/ffi/ffi.h"
#include "xla/ffi/ffi_api.h"
#include "xla/layout.h"
#include "xla/literal.h"
#include "xla/literal_util.h"
#include "xla/pjrt/distributed/in_memory_key_value_store.h"
#include "xla/pjrt/gpu/gpu_topology.h"
#include "xla/pjrt/host_memory_spaces.h"
#include "xla/pjrt/mlir_to_hlo.h"
#include "xla/pjrt/pjrt_client.h"
#include "xla/pjrt/pjrt_executable.h"
#include "xla/pjrt/pjrt_future.h"
#include "xla/pjrt/pjrt_stream_executor_client.h"
#include "xla/service/hlo_parser.h"
#include "xla/service/platform_util.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/stream_executor/stream.h"
#include "xla/test.h"
#include "xla/tests/literal_test_util.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/env.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/status.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/threadpool.h"
namespace xla {
namespace {
using ::testing::ElementsAre;
using ::testing::ElementsAreArray;
using ::testing::Eq;
using ::testing::FloatEq;
using ::testing::Ge;
using ::testing::Gt;
using ::testing::HasSubstr;
using ::testing::SizeIs;
using ::tsl::testing::IsOkAndHolds;
using ::tsl::testing::StatusIs;
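// Test helper that parses an HLO module from text and compiles it with the
// given client.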
absl::StatusOr<std::unique_ptr<xla::PjRtLoadedExecutable>> CompileExecutable(
absl::string_view program, xla::PjRtClient& client,
xla::CompileOptions compile_options = xla::CompileOptions()) {
TF_ASSIGN_OR_RETURN(auto hlo_module,
ParseAndReturnUnverifiedModule(program, {}));
xla::XlaComputation xla_computation(hlo_module->ToProto());
return client.Compile(xla_computation, compile_options);
}
absl::StatusOr<std::shared_ptr<xla::Literal>> ExtractSingleResult(
absl::StatusOr<std::vector<std::vector<std::unique_ptr<xla::PjRtBuffer>>>>&
result) {
TF_RETURN_IF_ERROR(result.status());
TF_RET_CHECK(result->size() == 1);
std::vector<std::unique_ptr<xla::PjRtBuffer>>& result_buffers = (*result)[0];
TF_RET_CHECK(result_buffers.size() == 1);
auto literal_or = result_buffers[0]->ToLiteralSync();
if (!literal_or.status().ok()) return literal_or.status();
return *literal_or;
}
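// HLO program used by the send/recv tests: sends a constant to the host and
// receives the result back through host transfers.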
static constexpr char const* kProgram = R"(HloModule HostTransfer
ENTRY SendRecvSynchronous() -> f32[2] {
in_chain = token[] after-all()
data = f32[2] constant({2, 3})
send = (f32[2], u32[], token[]) send(data, in_chain),
channel_id=1,
is_host_transfer=true,
frontend_attributes={
_xla_host_transfer_handler_name="undef",
_xla_host_transfer_rendezvous="undef"
}
send-done = token[] send-done(send),
channel_id=1, is_host_transfer=true
recv = (f32[2], u32[], token[]) recv(send-done),
channel_id=2,
is_host_transfer=true,
frontend_attributes={
_xla_host_transfer_handler_name="undef",
_xla_host_transfer_rendezvous="undef"
}
recv-done = (f32[2], token[]) recv-done(recv),
channel_id=2, is_host_transfer=true
ROOT result = f32[2] get-tuple-element(recv-done), index=0
})";
TEST(StreamExecutorGpuClientTest, MemorySpace) {
TF_ASSERT_OK_AND_ASSIGN(auto client,
GetStreamExecutorGpuClient(GpuClientOptions()));
ASSERT_GE(client->devices().size(), 1);
for (auto* device : client->devices()) {
TF_ASSERT_OK_AND_ASSIGN(auto* memory_space, device->default_memory_space());
EXPECT_EQ(memory_space->kind(), StreamExecutorGpuHbmMemorySpace::kKind);
EXPECT_EQ(memory_space->kind_id(),
StreamExecutorGpuHbmMemorySpace::kKindId);
EXPECT_THAT(
device->memory_space_by_kind(StreamExecutorGpuHbmMemorySpace::kKind),
IsOkAndHolds(memory_space));
EXPECT_EQ(device->memory_spaces().size(), 2);
auto* pinned = device->memory_spaces()[1];
EXPECT_EQ(pinned->kind_id(), PinnedHostMemorySpace::kKindId);
EXPECT_THAT(device->memory_space_by_kind(PinnedHostMemorySpace::kKind),
IsOkAndHolds(pinned));
}
}
TEST(StreamExecutorGpuClientTest, PropagateError) {
TF_ASSERT_OK_AND_ASSIGN(auto client,
GetStreamExecutorGpuClient(GpuClientOptions()));
auto shape = xla::ShapeUtil::MakeScalarShape(xla::F32);
absl::Status input_error = absl::InvalidArgumentError("input error");
TF_ASSERT_OK_AND_ASSIGN(
auto buffer,
client->CreateErrorBuffer(
input_error, shape,
*client->addressable_devices()[0]->default_memory_space()));
static constexpr char const* kAddProgram =
R"(
HloModule Add.6, entry_computation_layout={(f32[], f32[])->(f32[], f32[])}
ENTRY %Add.6 (a.1: f32[], b.2: f32[]) -> (f32[], f32[]) {
%a.1 = f32[] parameter(0)
%b.2 = f32[] parameter(1)
%add.3 = f32[] add(f32[] %a.1, f32[] %b.2)
%add.4 = f32[] add(f32[] %add.3, f32[] %add.3)
ROOT %tuple.5 = (f32[], f32[]) tuple(f32[] %add.3, f32[] %add.4)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto executable,
CompileExecutable(kAddProgram, *client));
TF_ASSERT_OK_AND_ASSIGN(
auto result,
executable->Execute({{buffer.get(), buffer.get()}}, {}));
ASSERT_EQ(result.size(), 1);
ASSERT_EQ(result[0].size(), 1);
EXPECT_EQ(result[0][0]->GetReadyFuture().Await(), input_error);
}
TEST(StreamExecutorGpuClientTest, SendRecvChunked) {
TF_ASSERT_OK_AND_ASSIGN(auto client,
GetStreamExecutorGpuClient(GpuClientOptions()));
TF_ASSERT_OK_AND_ASSIGN(auto executable,
CompileExecutable(kProgram, *client));
std::array<float, 2> sent_value = {0.0f, 0.0f};
SendCallback send_callback = {
1, [&](const PjRtTransferMetadata& m, PjRtChunk chunk,
int64_t total_size_in_bytes, bool done) {
float* data = reinterpret_cast<float*>(chunk.data());
sent_value[0] = data[0];
sent_value[1] = data[1];
return absl::OkStatus();
}};
RecvCallback recv_callback = {
2, [&](const PjRtTransferMetadata& m,
std::unique_ptr<CopyToDeviceStream> stream) {
auto chunk0 = PjRtChunk::AllocateDefault(sizeof(float));
*reinterpret_cast<float*>(chunk0.data()) = 5.0f;
TF_CHECK_OK(stream->AddChunk(std::move(chunk0)).Await());
auto chunk1 = PjRtChunk::AllocateDefault(sizeof(float));
*reinterpret_cast<float*>(chunk1.data()) = 6.0f;
TF_CHECK_OK(stream->AddChunk(std::move(chunk1)).Await());
return absl::OkStatus();
}};
std::vector<std::vector<SendCallback>> send_callbacks = {{send_callback}};
std::vector<std::vector<RecvCallback>> recv_callbacks = {{recv_callback}};
ExecuteOptions opts;
opts.send_callbacks = send_callbacks;
opts.recv_callbacks = recv_callbacks;
auto result = executable->Execute({{}}, opts);
TF_ASSERT_OK_AND_ASSIGN(std::shared_ptr<xla::Literal> result_literal,
ExtractSingleResult(result));
EXPECT_EQ(sent_value[0], 2.0f);
EXPECT_EQ(sent_value[1], 3.0f);
EXPECT_TRUE(LiteralTestUtil::Equal(LiteralUtil::CreateR1<float>({5.0f, 6.0f}),
*result_literal));
}
TEST(StreamExecutorGpuClientTest, SendErrorNoDeadLock) {
TF_ASSERT_OK_AND_ASSIGN(auto client,
GetStreamExecutorGpuClient(GpuClientOptions()));
TF_ASSERT_OK_AND_ASSIGN(auto executable,
CompileExecutable(kProgram, *client));
SendCallback send_callback = {
1,
[&](const PjRtTransferMetadata&, PjRtChunk, int64_t, bool) {
return Internal("Uh-oh, can send chunk to host");
}};
RecvCallback recv_callback = {
2, [&](const PjRtTransferMetadata& m,
std::unique_ptr<CopyToDeviceStream> stream) {
return absl::OkStatus();
}};
std::vector<std::vector<SendCallback>> send_callbacks = {{send_callback}};
std::vector<std::vector<RecvCallback>> recv_callbacks = {{recv_callback}};
ExecuteOptions opts;
opts.send_callbacks = send_callbacks;
opts.recv_callbacks = recv_callbacks;
auto result = executable->Execute({{}}, opts);
  EXPECT_TRUE(absl::StrContains(result.status().message(),
                                "Uh-oh, can't send chunk to host"));
}
TEST(StreamExecutorGpuClientTest, RecvErrorNoDeadLock) {
TF_ASSERT_OK_AND_ASSIGN(auto client,
GetStreamExecutorGpuClient(GpuClientOptions()));
TF_ASSERT_OK_AND_ASSIGN(auto executable,
CompileExecutable(kProgram, *client));
SendCallback send_callback = {
1, [&](const PjRtTransferMetadata&, PjRtChunk, int64_t,
bool) { return absl::OkStatus(); }};
RecvCallback recv_callback = {
2, [&](const PjRtTransferMetadata& m,
std::unique_ptr<CopyToDeviceStream> stream) {
auto chunk = PjRtChunk::AllocateDefault(10 * sizeof(float));
stream->AddChunk(std::move(chunk)).Await().IgnoreError();
return absl::OkStatus();
}};
std::vector<std::vector<SendCallback>> send_callbacks = {{send_callback}};
std::vector<std::vector<RecvCallback>> recv_callbacks = {{recv_callback}};
ExecuteOptions opts;
opts.send_callbacks = send_callbacks;
opts.recv_callbacks = recv_callbacks;
auto result = executable->Execute({{}}, opts);
EXPECT_TRUE(absl::StrContains(result.status().message(),
"Adding chunk of size 40 would overflow buffer "
"of size 8 (0 already transferred)"));
}
struct MemsetValue {
explicit MemsetValue(float value) : value(value) {}
float value;
};
static absl::Status MemsetFromValue(
se::Stream* stream, ffi::Result<ffi::BufferR1<PrimitiveType::F32>> result,
MemsetValue* memset_value) {
uint32_t pattern;
std::memcpy(&pattern, &memset_value->value, sizeof(pattern));
se::DeviceMemoryBase base = result->device_memory();
return stream->Memset32(&base, pattern, base.size());
}
XLA_FFI_DEFINE_HANDLER(kMemsetFromValue, MemsetFromValue,
ffi::Ffi::Bind()
.Ctx<ffi::Stream>()
.Ret<ffi::BufferR1<PrimitiveType::F32>>()
.Ctx<ffi::UserData<MemsetValue>>());
XLA_FFI_REGISTER_HANDLER(ffi::GetXlaFfiApi(), "MemsetFromValue",
PlatformUtil::CanonicalPlatformName("GPU").value(),
kMemsetFromValue);
TEST(StreamExecutorGpuClientTest, ForwardUserDataToFfiHandler) {
static constexpr char const* kProgram = R"(
HloModule ffi_handler
ENTRY main {
ROOT %custom-call = f32[4] custom-call(),
custom_call_target="MemsetFromValue",
api_version=API_VERSION_TYPED_FFI
})";
TF_ASSERT_OK_AND_ASSIGN(auto client,
GetStreamExecutorGpuClient(GpuClientOptions()));
TF_ASSERT_OK_AND_ASSIGN(auto executable,
CompileExecutable(kProgram, *client));
ExecuteContext context;
TF_ASSERT_OK(context.ffi_context().Emplace<MemsetValue>(42.0f));
ExecuteOptions opts;
opts.context = &context;
auto result = executable->Execute({{}}, opts);
TF_ASSERT_OK_AND_ASSIGN(std::shared_ptr<xla::Literal> result_literal,
ExtractSingleResult(result));
EXPECT_TRUE(LiteralTestUtil::Equal(
LiteralUtil::CreateR1<float>({42.0f, 42.0f, 42.0f, 42.0f}),
*result_literal));
}
TEST(StreamExecutorGpuClientTest, ToLiteralAsync) {
TF_ASSERT_OK_AND_ASSIGN(auto client,
GetStreamExecutorGpuClient(GpuClientOptions()));
ASSERT_GE(client->addressable_devices().size(), 1);
auto src_literal = LiteralUtil::CreateR1<float>({41.0f, 42.0f, 43.0f, 44.0f});
TF_ASSERT_OK_AND_ASSIGN(
auto transfer_manager,
client->CreateBuffersForAsyncHostToDevice(
{src_literal.shape()}, client->addressable_devices()[0]));
auto buffer = transfer_manager->RetrieveBuffer(0);
absl::Mutex mu;
auto literal = std::make_shared<Literal>(
ShapeUtil::DeviceShapeToHostShape(buffer->on_device_shape()));
bool got_literal = false;
TF_ASSERT_OK(
transfer_manager->TransferLiteralToBuffer(0, src_literal, [&]() {}));
buffer->ToLiteral(literal.get()).OnReady([&](absl::Status s) {
absl::MutexLock l(&mu);
TF_ASSERT_OK(s);
got_literal = true;
});
buffer.reset();
{
absl::MutexLock l(&mu);
mu.Await(absl::Condition(&got_literal));
}
ASSERT_TRUE(ShapeUtil::Compatible(src_literal.shape(), literal->shape()));
ASSERT_EQ(src_literal.data<float>(),
literal->Relayout(src_literal.shape().layout()).data<float>());
}
TEST(StreamExecutorGpuClientTest, ToLiteralAsyncWithNonCompactLayout) {
TF_ASSERT_OK_AND_ASSIGN(auto client,
GetStreamExecutorGpuClient(GpuClientOptions()));
ASSERT_GE(client->addressable_devices().size(), 1);
xla::Shape transposed_shape = xla::ShapeUtil::MakeShapeWithDenseLayout(
xla::S32, {2, 3}, {0, 1});
xla::Literal src_literal = xla::LiteralUtil::CreateR2WithLayout<int32_t>(
{{3, 14, 25}, {36, 47, 58}}, transposed_shape.layout());
PjRtClient::ShapeSpec spec;
spec.element_type = src_literal.shape().element_type();
spec.dims = DimensionVector(src_literal.shape().dimensions().begin(),
src_literal.shape().dimensions().end());
TF_ASSERT_OK_AND_ASSIGN(
auto transfer_manager,
client->CreateBuffersForAsyncHostToDevice(
{spec},
std::make_optional<absl::Span<const Layout>>(
{transposed_shape.layout()}),
client->addressable_devices()[0]->memory_spaces()[0]));
auto buffer = transfer_manager->RetrieveBuffer(0);
absl::Mutex mu;
auto literal = std::make_shared<Literal>(
ShapeUtil::DeviceShapeToHostShape(buffer->on_device_shape()));
bool got_literal = false;
TF_ASSERT_OK(
transfer_manager->TransferLiteralToBuffer(0, src_literal, [&]() {}));
buffer->ToLiteral(literal.get()).OnReady([&](absl::Status s) {
absl::MutexLock l(&mu);
TF_ASSERT_OK(s);
got_literal = true;
});
buffer.reset();
{
absl::MutexLock l(&mu);
mu.Await(absl::Condition(&got_literal));
}
ASSERT_TRUE(ShapeUtil::Compatible(src_literal.shape(), literal->shape()));
ASSERT_EQ(src_literal.data<int32_t>(),
literal->Relayout(src_literal.shape().layout()).data<int32_t>());
}
TEST(StreamExecutorGpuClientTest, ToLiteralAsyncBeforeBufferReady) {
TF_ASSERT_OK_AND_ASSIGN(auto client,
GetStreamExecutorGpuClient(GpuClientOptions()));
ASSERT_GE(client->addressable_devices().size(), 1);
auto src_literal = LiteralUtil::CreateR1<float>({41.0f, 42.0f, 43.0f, 44.0f});
TF_ASSERT_OK_AND_ASSIGN(
auto transfer_manager,
client->CreateBuffersForAsyncHostToDevice(
{src_literal.shape()}, client->addressable_devices()[0]));
auto buffer = transfer_manager->RetrieveBuffer(0);
absl::Mutex mu;
auto literal = std::make_shared<Literal>(
ShapeUtil::DeviceShapeToHostShape(buffer->on_device_shape()));
bool got_literal = false;
buffer->ToLiteral(literal.get()).OnReady([&](absl::Status s) {
absl::MutexLock l(&mu);
TF_ASSERT_OK(s);
got_literal = true;
});
absl::SleepFor(absl::Milliseconds(10));
ASSERT_FALSE(got_literal);
TF_ASSERT_OK(
transfer_manager->TransferLiteralToBuffer(0, src_literal, [&]() {}));
buffer.reset();
{
absl::MutexLock l(&mu);
mu.Await(absl::Condition(&got_literal));
}
ASSERT_TRUE(ShapeUtil::Compatible(src_literal.shape(), literal->shape()));
ASSERT_EQ(src_literal.data<float>(),
literal->Relayout(src_literal.shape().layout()).data<float>());
}
TEST(StreamExecutorGpuClientTest, FromHostAsync) {
TF_ASSERT_OK_AND_ASSIGN(auto client,
GetStreamExecutorGpuClient(GpuClientOptions()));
ASSERT_GE(client->addressable_devices().size(), 1);
std::vector<Literal> src_literals;
std::vector<Shape> src_shapes;
for (int i = 0; i < 4; ++i) {
std::vector<float> data(i + 1);
std::iota(data.begin(), data.end(), static_cast<float>(i + 10));
src_literals.emplace_back(LiteralUtil::CreateR1<float>(data));
src_shapes.push_back(src_literals.back().shape());
}
TF_ASSERT_OK_AND_ASSIGN(auto transfer_manager,
client->CreateBuffersForAsyncHostToDevice(
src_shapes, client->addressable_devices()[0]));
std::vector<std::unique_ptr<PjRtBuffer>> buffers;
for (int i = 0; i < src_shapes.size(); ++i) {
buffers.emplace_back(transfer_manager->RetrieveBuffer(i));
}
for (int i = 0; i < src_shapes.size(); ++i) {
TF_ASSERT_OK(transfer_manager->TransferRawDataToBuffer(
i,
absl::string_view(static_cast<char*>(src_literals[i].untyped_data()),
src_literals[i].size_bytes()),
[&]() {}));
}
absl::Mutex mu;
std::vector<std::shared_ptr<Literal>> literals;
int got_literal_count = 0;
int got_callback_count = 0;
for (auto& buffer : buffers) {
literals.push_back(std::make_shared<Literal>(
ShapeUtil::DeviceShapeToHostShape(buffer->on_device_shape())));
buffer->ToLiteral(literals.back().get()).OnReady([&](absl::Status s) {
absl::MutexLock l(&mu);
TF_ASSERT_OK(s);
++got_literal_count;
});
buffer->GetReadyFuture().OnReady([&](absl::Status s) {
absl::MutexLock l(&mu);
TF_ASSERT_OK(s);
++got_callback_count;
});
buffer.reset();
}
{
auto done = [&]() {
return got_literal_count == src_literals.size() &&
got_callback_count == src_literals.size();
};
absl::MutexLock l(&mu);
mu.Await(absl::Condition(&done));
}
for (int i = 0; i < src_literals.size(); ++i) {
ASSERT_TRUE(
ShapeUtil::Compatible(src_literals[i].shape(), literals[i]->shape()));
ASSERT_EQ(
src_literals[i].data<float>(),
literals[i]->Relayout(src_literals[i].shape().layout()).data<float>());
}
}
TEST(StreamExecutorGpuClientTest, FromHostAsyncPinnedHost) {
TF_ASSERT_OK_AND_ASSIGN(auto client,
GetStreamExecutorGpuClient(GpuClientOptions()));
ASSERT_GE(client->addressable_devices().size(), 1);
TF_ASSERT_OK_AND_ASSIGN(
auto* pinned_memory_space,
client->addressable_devices()[0]->memory_space_by_kind(
PinnedHostMemorySpace::kKind));
std::vector<Literal> src_literals;
std::vector<Shape> src_shapes;
for (int i = 0; i < 4; ++i) {
std::vector<float> data(i + 1);
std::iota(data.begin(), data.end(), static_cast<float>(i + 10));
src_literals.emplace_back(LiteralUtil::CreateR1<float>(data));
src_shapes.push_back(src_literals.back().shape());
}
TF_ASSERT_OK_AND_ASSIGN(auto transfer_manager,
client->CreateBuffersForAsyncHostToDevice(
src_shapes, pinned_memory_space));
std::vector<std::unique_ptr<PjRtBuffer>> buffers;
for (int i = 0; i < src_shapes.size(); ++i) {
buffers.emplace_back(transfer_manager->RetrieveBuffer(i));
}
for (int i = 0; i < src_shapes.size(); ++i) {
TF_ASSERT_OK(transfer_manager->TransferRawDataToBuffer(
i,
absl::string_view(static_cast<char*>(src_literals[i].untyped_data()),
src_literals[i].size_bytes()),
[&]() {}));
}
absl::Mutex mu;
std::vector<std::shared_ptr<Literal>> literals;
int got_literal_count = 0;
int got_callback_count = 0;
for (auto& buffer : buffers) {
literals.push_back(std::make_shared<Literal>(
ShapeUtil::DeviceShapeToHostShape(buffer->on_device_shape())));
buffer->ToLiteral(literals.back().get()).OnReady([&](absl::Status s) {
absl::MutexLock l(&mu);
TF_ASSERT_OK(s);
++got_literal_count;
});
buffer->GetReadyFuture().OnReady([&](absl::Status s) {
absl::MutexLock l(&mu);
TF_ASSERT_OK(s);
++got_callback_count;
});
buffer.reset();
}
{
auto done = [&]() {
return got_literal_count == src_literals.size() &&
got_callback_count == src_literals.size();
};
absl::MutexLock l(&mu);
mu.Await(absl::Condition(&done));
}
for (int i = 0; i < src_literals.size(); ++i) {
ASSERT_TRUE(
ShapeUtil::Compatible(src_literals[i].shape(), literals[i]->shape()));
ASSERT_EQ(
src_literals[i].data<float>(),
literals[i]->Relayout(src_literals[i].shape().layout()).data<float>());
}
}
TEST(StreamExecutorGpuClientTest, FromHostAsyncPinnedHostChunked) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<PjRtClient> client,
GetStreamExecutorGpuClient(GpuClientOptions()));
ASSERT_THAT(client->addressable_devices(), SizeIs(Gt(0)));
TF_ASSERT_OK_AND_ASSIGN(
PjRtMemorySpace * memspace,
client->addressable_devices()[0]->memory_space_by_kind(
PinnedHostMemorySpace::kKind));
std::vector<float> data{1, 3, 5, 7, 11, 13, 17, 19};
Shape shape = ShapeUtil::MakeShape(F32, {static_cast<int64_t>(data.size())});
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<PjRtClient::AsyncHostToDeviceTransferManager> txm,
client->CreateBuffersForAsyncHostToDevice({shape}, memspace));
std::unique_ptr<PjRtBuffer> buf = txm->RetrieveBuffer(0);
ASSERT_THAT(buf->GetReadyFuture().IsReady(), Eq(false));
absl::string_view raw_view(reinterpret_cast<char*>(data.data()),
data.size() * sizeof(data[0]));
int offset = 0;
while (true) {
int end = offset + 3;
if (end > raw_view.size()) {
end = raw_view.size();
}
int sz = end - offset;
bool reaches_end = end == raw_view.size();
TF_ASSERT_OK(txm->TransferRawDataToSubBuffer(
0, raw_view.data() + offset, offset, sz, reaches_end,
[]() {}));
if (reaches_end) {
break;
}
offset = end;
}
TF_ASSERT_OK_AND_ASSIGN(std::shared_ptr<Literal> lit, buf->ToLiteralSync());
EXPECT_THAT(lit->data<float>(), ElementsAreArray(data));
}
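// Every retrieved buffer is dropped before its transfer is fulfilled;
// fulfilling the pending transfers afterwards must not deadlock.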
TEST(StreamExecutorGpuClientTest, DeleteBufferThenFulfillBufferNoDeadLock) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<PjRtClient> client,
GetStreamExecutorGpuClient(GpuClientOptions()));
ASSERT_THAT(client->addressable_devices(), SizeIs(Gt(0)));
TF_ASSERT_OK_AND_ASSIGN(
PjRtMemorySpace * memspace,
client->addressable_devices()[0]->memory_space_by_kind(
PinnedHostMemorySpace::kKind));
std::vector<float> data{1, 3, 5, 7, 11, 13, 17, 19};
Shape shape = ShapeUtil::MakeShape(F32, {static_cast<int64_t>(data.size())});
std::vector<std::unique_ptr<PjRtClient::AsyncHostToDeviceTransferManager>>
txms;
for (int i = 0; i < 10000; ++i) {
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<PjRtClient::AsyncHostToDeviceTransferManager> txm,
client->CreateBuffersForAsyncHostToDevice({shape}, memspace));
std::unique_ptr<PjRtBuffer> buf = txm->RetrieveBuffer(0);
ASSERT_THAT(buf->GetReadyFuture().IsReady(), Eq(false));
txms.push_back(std::move(txm));
}
absl::string_view raw_view(reinterpret_cast<char*>(data.data()),
data.size() * sizeof(data[0]));
for (auto& txm : txms) {
int offset = 0;
while (true) {
int end = offset + 3;
if (end > raw_view.size()) {
end = raw_view.size();
}
int sz = end - offset;
bool reaches_end = end == raw_view.size();
TF_ASSERT_OK(txm->TransferRawDataToSubBuffer(
0, raw_view.data() + offset, offset, sz, reaches_end,
[]() {}));
if (reaches_end) {
break;
}
offset = end;
}
}
}
TEST(StreamExecutorGpuClientTest, CopyRawToHostFullBuffer) {
TF_ASSERT_OK_AND_ASSIGN(auto client,
GetStreamExecutorGpuClient(GpuClientOptions()));
auto literal = xla::LiteralUtil::CreateR1<float>({41.0f, 42.0f});
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<PjRtBuffer> buffer,
client->BufferFromHostLiteral(literal, client->addressable_devices()[0]));
void* dst = aligned_alloc(buffer->GetOnDeviceSizeInBytes().value(), 0);
auto result =
buffer->CopyRawToHost(dst, 0, buffer->GetOnDeviceSizeInBytes().value());
TF_EXPECT_OK(result.Await());
EXPECT_EQ(*(static_cast<float*>(dst)), 41.0f);
EXPECT_EQ(*(static_cast<float*>(dst) + 1), 42.0f);
free(dst);
}
TEST(StreamExecutorGpuClientTest, CopyRawToHostSubBuffer) {
TF_ASSERT_OK_AND_ASSIGN(auto client,
GetStreamExecutorGpuClient(GpuClientOptions()));
auto literal = xla::LiteralUtil::CreateR1<float>({41.0f, 42.0f});
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<PjRtBuffer> buffer,
client->BufferFromHostLiteral(literal, client->addressable_devices()[0]));
void* dst = aligned_alloc(buffer->GetOnDeviceSizeInBytes().value(), 0);
auto result = buffer->CopyRawToHost(dst, 0, sizeof(float));
TF_EXPECT_OK(result.Await());
EXPECT_EQ(*(static_cast<float*>(dst)), 41.0f);
free(dst);
}
TEST(StreamExecutorGpuClientTest, CopyRawToHostOutOfRange) {
TF_ASSERT_OK_AND_ASSIGN(auto client,
GetStreamExecutorGpuClient(GpuClientOptions()));
auto literal = xla::LiteralUtil::CreateR1<float>({41.0f, 42.0f});
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<PjRtBuffer> buffer,
client->BufferFromHostLiteral(literal, client->addressable_devices()[0]));
void* dst = aligned_alloc(buffer->GetOnDeviceSizeInBytes().value(), 0);
auto result =
buffer->CopyRawToHost(dst, 1, buffer->GetOnDeviceSizeInBytes().value());
EXPECT_THAT(result.Await(), StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("invalid offset 1")));
free(dst);
}
TEST(StreamExecutorGpuClientTest, CopyRawToHostFuture) {
TF_ASSERT_OK_AND_ASSIGN(auto client,
GetStreamExecutorGpuClient(GpuClientOptions()));
auto literal = xla::LiteralUtil::CreateR1<float>({41.0f, 42.0f});
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<PjRtBuffer> buffer,
client->BufferFromHostLiteral(literal, client->addressable_devices()[0]));
auto dst_promise = xla::PjRtFuture<void*>::CreatePromise();
xla::PjRtFuture<void*> dst_future(dst_promise);
TF_ASSERT_OK_AND_ASSIGN(int64_t size, buffer->GetOnDeviceSizeInBytes());
auto ready = buffer->GetReadyFuture();
auto result = buffer->CopyRawToHostFuture(dst_future, 0, size);
buffer.reset();
ready.OnReady([dst_promise = std::move(dst_promise),
size](absl::Status status) mutable {
dst_promise.Set(aligned_alloc(size, 0));
});
TF_EXPECT_OK(result.Await());
TF_ASSERT_OK_AND_ASSIGN(auto* dst, dst_future.Await());
EXPECT_EQ(*(static_cast<float*>(dst)), 41.0f);
EXPECT_EQ(*(static_cast<float*>(dst) + 1), 42.0f);
free(dst);
}
TEST(StreamExecutorGpuClientTest, AsyncCopyToDevice) {
TF_ASSERT_OK_AND_ASSIGN(auto client,
GetStreamExecutorGpuClient(GpuClientOptions()));
ASSERT_GE(client->addressable_devices().size(), 2);
auto* d0 = client->addressable_devices()[0];
auto* d1 = client->addressable_devices()[1];
auto src_literal = LiteralUtil::CreateR1<float>({41.0f, 42.0f, 43.0f, 44.0f});
TF_ASSERT_OK_AND_ASSIGN(
auto transfer_manager,
client->CreateBuffersForAsyncHostToDevice({src_literal.shape()}, d0));
auto src_buffer = transfer_manager->RetrieveBuffer(0);
auto local_recv_buffer = *src_buffer->CopyToDevice(d1);
TF_ASSERT_OK(
transfer_manager->TransferLiteralToBuffer(0, src_literal, []() {}));
auto literal = std::make_shared<Literal>(src_literal.shape());
auto local_recv_literal = local_recv_buffer->ToLiteral(literal.get());
TF_EXPECT_OK(local_recv_literal.Await());
ASSERT_TRUE(ShapeUtil::Compatible(src_literal.shape(), literal->shape()));
ASSERT_EQ(src_literal.data<float>(),
literal->Relayout(src_literal.shape().layout()).data<float>());
}
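// Transfers data into some buffers and injects errors into the rest; every
// buffer's ready future must still resolve, either to OK or to the injected
// error.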
TEST(StreamExecutorGpuClientTest, CreateMixOfErrorBuffers) {
TF_ASSERT_OK_AND_ASSIGN(auto client,
GetStreamExecutorGpuClient(GpuClientOptions()));
ASSERT_GE(client->addressable_devices().size(), 1);
std::vector<Literal> src_literals;
std::vector<Shape> src_shapes;
for (int i = 0; i < 4; ++i) {
std::vector<float> data(i + 1);
std::iota(data.begin(), data.end(), static_cast<float>(i + 10));
src_literals.emplace_back(LiteralUtil::CreateR1<float>(data));
src_shapes.push_back(src_literals.back().shape());
}
TF_ASSERT_OK_AND_ASSIGN(
auto transfer_manager,
client->CreateBuffersForAsyncHostToDevice(
src_shapes, client->addressable_devices()[0]->memory_spaces()[0]));
std::vector<std::unique_ptr<PjRtBuffer>> buffers;
for (int i = 0; i < src_shapes.size(); ++i) {
buffers.emplace_back(transfer_manager->RetrieveBuffer(i));
}
absl::Mutex mu;
int got_callback_count = 0;
for (int i = 0; i < 4; ++i) {
auto& buffer = buffers[i];
if (i == 0 || i == 3) {
TF_ASSERT_OK(transfer_manager->TransferLiteralToBuffer(i, src_literals[i],
[&]() {}));
buffer->GetReadyFuture().OnReady([&](absl::Status s) {
absl::MutexLock l(&mu);
TF_ASSERT_OK(s);
++got_callback_count;
});
} else {
absl::Status error = Internal("error %d", i);
transfer_manager->SetBufferError(i, error);
buffer->GetReadyFuture().OnReady(
[error, &mu, &got_callback_count](absl::Status s) {
absl::MutexLock l(&mu);
ASSERT_EQ(s, error);
++got_callback_count;
});
}
buffer.reset();
}
{
auto done = [&]() { return got_callback_count == src_literals.size(); };
absl::MutexLock l(&mu);
QCHECK(mu.AwaitWithTimeout(absl::Condition(&done), absl::Seconds(60)));
}
}
TEST(GpuTopology, FromProto) {
GpuTopologyProto msg;
ASSERT_TRUE(tsl::protobuf::TextFormat::ParseFromString(
R"pb(
device_ids: [ 3, 2, 1 ]
platform_version: "platform_version"
num_slices: 2
num_hosts_per_slice: 1
num_devices_per_host: 3
)pb",
&msg));
std::unique_ptr<const GpuTopology> gpu_topology = GpuTopology::FromProto(msg);
EXPECT_THAT(gpu_topology->device_ids(), ElementsAre(3, 2, 1));
EXPECT_THAT(gpu_topology->platform_version(), "platform_version");
EXPECT_THAT(gpu_topology->num_slices(), 2);
EXPECT_THAT(gpu_topology->num_hosts_per_slice(), 1);
EXPECT_THAT(gpu_topology->num_devices_per_host(), 3);
}
TEST(GpuTopology, ToProto) {
GpuTopology gpu_topology({3, 2, 1},
"platform_version",
2,
1,
3);
GpuTopologyProto msg = gpu_topology.ToProto();
EXPECT_THAT(msg.device_ids(), ElementsAre(3, 2, 1));
EXPECT_THAT(msg.platform_version(), "platform_version");
EXPECT_THAT(msg.num_slices(), 2);
EXPECT_THAT(msg.num_hosts_per_slice(), 1);
EXPECT_THAT(msg.num_devices_per_host(), 3);
}
TEST(StreamExecutorGpuClientTest, DistributedInit) {
auto kv_store = std::make_shared<InMemoryKeyValueStore>();
tsl::thread::ThreadPool thread_pool(tsl::Env::Default(), "DistributeInit", 4);
int num_nodes = 2;
for (int i = 0; i < num_nodes; i++) {
thread_pool.Schedule([kv_store, i, num_nodes] {
GpuClientOptions options;
options.node_id = i;
options.num_nodes = num_nodes;
options.kv_store = kv_store;
TF_ASSERT_OK_AND_ASSIGN(auto client, GetStreamExecutorGpuClient(options));
EXPECT_TRUE(client->platform_name() == "cuda" ||
client->platform_name() == "rocm");
EXPECT_EQ(client->addressable_device_count(), 2);
EXPECT_EQ(client->device_count(), 4);
});
}
}
TEST(StreamExecutorGpuClientTest, GetAllocatorStatsTest) {
TF_ASSERT_OK_AND_ASSIGN(auto client,
GetStreamExecutorGpuClient(GpuClientOptions()));
ASSERT_GE(client->addressable_devices().size(), 2);
for (auto device : client->addressable_devices()) {
const xla::Literal literal = xla::LiteralUtil::CreateR0<int32_t>(0);
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<PjRtBuffer> buffer,
client->BufferFromHostLiteral(literal, device));
auto stats = device->GetAllocatorStats();
TF_ASSERT_OK(stats.status());
ASSERT_GT(stats.value().peak_bytes_in_use, 0);
}
}
TEST(StreamExecutorGpuClientTest, GpuDeviceDescriptionTest) {
TF_ASSERT_OK_AND_ASSIGN(auto client,
GetStreamExecutorGpuClient(GpuClientOptions()));
for (int device_index = 0; device_index < client->device_count();
device_index++) {
auto coords =
static_cast<PjRtStreamExecutorDevice*>(client->devices()[device_index])
->description()
.coords();
EXPECT_EQ(coords[0], device_index);
}
}
TEST(StreamExecutorGpuClientTest, MockNcclClientTest) {
const int num_nodes = 4;
GpuClientOptions options;
options.num_nodes = num_nodes;
options.enable_mock_nccl = true;
TF_ASSERT_OK_AND_ASSIGN(auto client, GetStreamExecutorGpuClient(options));
auto devices_per_host = client->addressable_device_count();
EXPECT_EQ(devices_per_host, 2);
EXPECT_EQ(client->device_count(), devices_per_host * num_nodes);
for (int i = 0; i < client->device_count(); i++) {
auto device = client->devices()[i];
auto slice_index =
std::get<int64_t>(device->Attributes().at("slice_index"));
auto host_index = device->process_index();
EXPECT_EQ(slice_index, host_index);
}
}
TEST(StreamExecutorGpuClientTest, BufferFromHostBufferPinnedMemory) {
TF_ASSERT_OK_AND_ASSIGN(auto client,
GetStreamExecutorGpuClient(GpuClientOptions()));
std::vector<int32_t> data{1, 2, 3, 4};
Shape shape = ShapeUtil::MakeShape(S32, {4});
TF_ASSERT_OK_AND_ASSIGN(
auto* pinned_memory_space,
client->addressable_devices()[0]->memory_space_by_kind(
PinnedHostMemorySpace::kKind));
TF_ASSERT_OK_AND_ASSIGN(
auto buffer,
client->BufferFromHostBuffer(
data.data(), shape.element_type(), shape.dimensions(),
std::nullopt,
PjRtClient::HostBufferSemantics::kImmutableOnlyDuringCall, nullptr,
pinned_memory_space, nullptr));
EXPECT_EQ(buffer->memory_space()->kind(), "pinned_host");
EXPECT_TRUE(buffer->IsOnCpu());
TF_ASSERT_OK_AND_ASSIGN(auto literal, buffer->ToLiteralSync());
std::vector<int32_t> expected{1, 2, 3, 4};
EXPECT_TRUE(LiteralTestUtil::Equal(LiteralUtil::CreateR1<int32_t>(expected),
*literal));
}
TEST(StreamExecutorGpuClientTest, CopyToPinnedHostMemorySpace) {
TF_ASSERT_OK_AND_ASSIGN(auto client,
GetStreamExecutorGpuClient(GpuClientOptions()));
std::vector<int32_t> data{1, 2, 3, 4};
Shape shape = ShapeUtil::MakeShape(S32, {4});
auto device = client->addressable_devices()[0];
TF_ASSERT_OK_AND_ASSIGN(
auto buffer,
client->BufferFromHostBuffer(
data.data(), shape.element_type(), shape.dimensions(),
std::nullopt,
PjRtClient::HostBufferSemantics::kImmutableOnlyDuringCall, nullptr,
device));
EXPECT_EQ(buffer->memory_space()->kind(), "device");
auto* pinned_memory_space = device->memory_spaces()[1];
EXPECT_EQ(pinned_memory_space->kind_id(), PinnedHostMemorySpace::kKindId);
TF_ASSERT_OK_AND_ASSIGN(auto result,
buffer->CopyToMemorySpace(pinned_memory_space));
EXPECT_EQ(result->memory_space()->kind(), "pinned_host");
EXPECT_TRUE(result->IsOnCpu());
TF_ASSERT_OK_AND_ASSIGN(auto literal, result->ToLiteralSync());
std::vector<int32_t> expected{1, 2, 3, 4};
EXPECT_TRUE(LiteralTestUtil::Equal(LiteralUtil::CreateR1<int32_t>(expected),
*literal));
}
TEST(StreamExecutorGpuClientTest, CopyToPinnedHostMemorySpaceInt4) {
TF_ASSERT_OK_AND_ASSIGN(auto client,
GetStreamExecutorGpuClient(GpuClientOptions()));
std::vector<int8_t> data{1, 2, 3, 4};
Shape shape = ShapeUtil::MakeShape(S4, {4});
auto device = client->addressable_devices()[0];
TF_ASSERT_OK_AND_ASSIGN(
auto buffer,
client->BufferFromHostBuffer(
data.data(), shape.element_type(), shape.dimensions(),
std::nullopt,
PjRtClient::HostBufferSemantics::kImmutableOnlyDuringCall, nullptr,
device));
EXPECT_EQ(buffer->memory_space()->kind(), "device");
auto* pinned_memory_space = device->memory_spaces()[1];
EXPECT_EQ(pinned_memory_space->kind_id(), PinnedHostMemorySpace::kKindId);
TF_ASSERT_OK_AND_ASSIGN(auto result,
buffer->CopyToMemorySpace(pinned_memory_space));
EXPECT_EQ(result->memory_space()->kind(), "pinned_host");
EXPECT_TRUE(result->IsOnCpu());
TF_ASSERT_OK_AND_ASSIGN(auto literal, result->ToLiteralSync());
std::vector<xla::s4> expected{xla::s4(1), xla::s4(2), xla::s4(3), xla::s4(4)};
EXPECT_TRUE(LiteralTestUtil::Equal(LiteralUtil::CreateR1<xla::s4>(expected),
*literal));
}
TEST(StreamExecutorGpuClientTest, OpaqueDeviceMemoryDataPointer) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<PjRtClient> client,
GetStreamExecutorGpuClient(GpuClientOptions()));
ASSERT_THAT(client->addressable_devices(), SizeIs(Gt(0)));
PjRtDevice* device = client->addressable_devices()[0];
TF_ASSERT_OK_AND_ASSIGN(
PjRtMemorySpace * memspace,
device->memory_space_by_kind(PinnedHostMemorySpace::kKind));
std::vector<float> float_data{12.0, 34.0, 56.0, 78.0};
Shape shape = ShapeUtil::MakeShapeWithType<float>({4});
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<PjRtBuffer> buf,
client->BufferFromHostBuffer(
static_cast<const void*>(float_data.data()), shape.element_type(),
shape.dimensions(), std::nullopt,
PjRtClient::HostBufferSemantics::kImmutableOnlyDuringCall,
nullptr, memspace,
nullptr));
ASSERT_THAT(buf->IsOnCpu(), true);
TF_ASSERT_OK_AND_ASSIGN(size_t buf_sz, buf->GetOnDeviceSizeInBytes());
ASSERT_THAT(buf_sz, Ge(sizeof(float) * 4));
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<PjRtBuffer::ExternalReference> ref,
buf->AcquireExternalReference());
TF_ASSERT_OK(buf->GetReadyFuture().Await());
const float* float_ptr =
reinterpret_cast<const float*>(ref->OpaqueDeviceMemoryDataPointer());
EXPECT_THAT(*float_ptr, FloatEq(12.0));
EXPECT_THAT(*(float_ptr + 1), FloatEq(34.0));
EXPECT_THAT(*(float_ptr + 2), FloatEq(56.0));
EXPECT_THAT(*(float_ptr + 3), FloatEq(78.0));
TF_ASSERT_OK_AND_ASSIGN(PjRtMemorySpace * default_ms,
device->default_memory_space());
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<PjRtClient::AsyncHostToDeviceTransferManager> txm,
client->CreateBuffersForAsyncHostToDevice({shape}, default_ms));
TF_ASSERT_OK(txm->TransferRawDataToBuffer(
0,
absl::string_view(
static_cast<const char*>(ref->OpaqueDeviceMemoryDataPointer()),
buf_sz),
[]() {}));
std::unique_ptr<PjRtBuffer> hbm_buf = txm->RetrieveBuffer(0);
EXPECT_THAT(hbm_buf->GetOnDeviceSizeInBytes(), IsOkAndHolds(buf_sz));
EXPECT_THAT(hbm_buf->HostShape(), IsOkAndHolds(shape));
TF_ASSERT_OK(hbm_buf->GetReadyFuture().Await());
TF_ASSERT_OK_AND_ASSIGN(std::shared_ptr<xla::Literal> literal,
hbm_buf->ToLiteralSync());
EXPECT_THAT(literal->data<float>(), ElementsAreArray(float_data));
}
namespace {
absl::StatusOr<std::unique_ptr<PjRtBuffer>> CreateDeviceBufferForTest(
xla::PjRtClient* client) {
auto device = client->addressable_devices()[0];
TF_EXPECT_OK(device->default_memory_space());
std::vector<int32_t> data{1, 2, 3, 4};
Shape shape = ShapeUtil::MakeShapeWithDenseLayout(S32, {4}, {0});
TF_ASSIGN_OR_RETURN(
auto input, client->BufferFromHostBuffer(
data.data(), shape.element_type(), shape.dimensions(),
std::nullopt,
PjRtClient::HostBufferSemantics::kImmutableOnlyDuringCall,
nullptr, device));
EXPECT_EQ(input->memory_space()->kind(), "device");
return input;
}
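// HLO programs shared by the tests below. The annotate_device_placement custom
// call with _xla_buffer_placement="pinned_host" places the annotated value in
// pinned host memory; the all-reduce module exercises collective memory space
// outputs.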
constexpr char const* kD2HProgram = R"(
HloModule f
ENTRY main.5 {
p = s32[4]{0} parameter(0)
ROOT cc = s32[4] custom-call(p),
custom_call_target="annotate_device_placement",
frontend_attributes={_xla_buffer_placement="pinned_host"}
}
)";
constexpr char const* kD2HProgramTupleOutput = R"(
HloModule f
ENTRY main.5 {
p = s32[4]{0} parameter(0)
cc = s32[4] custom-call(p),
custom_call_target="annotate_device_placement",
frontend_attributes={_xla_buffer_placement="pinned_host"}
ROOT tuple = (s32[4]{0}, s32[4]{0}) tuple(s32[4]{0} p, s32[4]{0} cc)
}
)";
constexpr char const* kCollectiveMemorySpaceOutput = R"(
HloModule jit__psum, entry_computation_layout={(s32[1,4]{1,0})->s32[4]{0}}
region_0.3 {
Arg_0.0 = s32[] parameter(0)
Arg_1.0 = s32[] parameter(1)
ROOT add.0 = s32[] add(Arg_0.0, Arg_1.0)
}
ENTRY main.10_spmd {
param = s32[1,4]{1,0} parameter(0)
reshape = s32[4]{0} reshape(param)
ROOT all-reduce = s32[4]{0} all-reduce(reshape), channel_id=1, to_apply=region_0.3
}
)";
}
TEST(StreamExecutorGpuClientTest, ExecutePinnedHostOutputTest) {
TF_ASSERT_OK_AND_ASSIGN(auto client,
GetStreamExecutorGpuClient(GpuClientOptions()));
TF_ASSERT_OK_AND_ASSIGN(auto input, CreateDeviceBufferForTest(client.get()));
TF_ASSERT_OK_AND_ASSIGN(auto executable,
CompileExecutable(kD2HProgram, *client));
TF_ASSERT_OK_AND_ASSIGN(
auto result, executable->Execute({{input.get()}}, ExecuteOptions()));
std::vector<std::unique_ptr<xla::PjRtBuffer>>& result_buffers = result[0];
EXPECT_EQ(result_buffers[0]->memory_space()->kind(), "pinned_host");
TF_ASSERT_OK_AND_ASSIGN(auto memory_stats,
executable->GetCompiledMemoryStats());
EXPECT_EQ(memory_stats.output_size_in_bytes, 0);
EXPECT_EQ(memory_stats.host_output_size_in_bytes, 16);
}
TEST(StreamExecutorGpuClientTest, ExecutePinnedHostOutputTupleTest) {
TF_ASSERT_OK_AND_ASSIGN(auto client,
GetStreamExecutorGpuClient(GpuClientOptions()));
TF_ASSERT_OK_AND_ASSIGN(auto input, CreateDeviceBufferForTest(client.get()));
Shape host_shape = input->on_device_shape();
host_shape.mutable_layout()->set_memory_space(Layout::kHostMemorySpace);
Shape out_shape =
ShapeUtil::MakeTupleShape({input->on_device_shape(), host_shape});
xla::CompileOptions options;
options.executable_build_options.set_result_layout(out_shape);
TF_ASSERT_OK_AND_ASSIGN(
auto executable,
CompileExecutable(kD2HProgramTupleOutput, *client, options));
ExecuteOptions execute_options;
execute_options.untuple_result = true;
TF_ASSERT_OK_AND_ASSIGN(
auto result, executable->Execute({{input.get()}}, execute_options));
std::vector<std::unique_ptr<xla::PjRtBuffer>>& result_buffers = result[0];
EXPECT_EQ(result_buffers.size(), 2);
EXPECT_EQ(result_buffers[0]->memory_space()->kind(), "device");
EXPECT_EQ(result_buffers[1]->memory_space()->kind(), "pinned_host");
}
TEST(StreamExecutorGpuClientTest, ExecutablePinnedHostOutputMemoryKindTest) {
TF_ASSERT_OK_AND_ASSIGN(auto client,
GetStreamExecutorGpuClient(GpuClientOptions()));
TF_ASSERT_OK_AND_ASSIGN(auto executable,
CompileExecutable(kD2HProgram, *client));
TF_ASSERT_OK_AND_ASSIGN(auto memory_kinds,
executable->GetOutputMemoryKinds());
EXPECT_EQ(memory_kinds.size(), 1);
EXPECT_EQ(memory_kinds[0].size(), 1);
EXPECT_EQ(memory_kinds[0][0], "pinned_host");
}
TEST(StreamExecutorGpuClientTest,
ExecutableCollectiveMemoryOutputMemoryKindTest) {
TF_ASSERT_OK_AND_ASSIGN(auto client,
GetStreamExecutorGpuClient(GpuClientOptions()));
xla::CompileOptions options;
options.executable_build_options.mutable_debug_options()
->set_xla_gpu_enable_nccl_user_buffers(true);
TF_ASSERT_OK_AND_ASSIGN(
auto executable,
CompileExecutable(kCollectiveMemorySpaceOutput, *client, options));
std::vector<int32_t> data{1, 2, 3, 4};
Shape shape = ShapeUtil::MakeShapeWithDenseLayout(S32, {1, 4},
{1, 0});
shape.mutable_layout()->set_memory_space(Layout::kDefaultMemorySpace);
auto device = client->addressable_devices()[0];
TF_EXPECT_OK(device->default_memory_space());
TF_ASSERT_OK_AND_ASSIGN(
auto input, client->BufferFromHostBuffer(
data.data(), shape.element_type(), shape.dimensions(),
std::nullopt,
PjRtClient::HostBufferSemantics::kImmutableOnlyDuringCall,
nullptr, device));
EXPECT_EQ(input->memory_space()->kind(), "device");
TF_ASSERT_OK_AND_ASSIGN(auto memory_kinds,
executable->GetOutputMemoryKinds());
EXPECT_EQ(memory_kinds.size(), 1);
EXPECT_EQ(memory_kinds[0].size(), 1);
EXPECT_EQ(memory_kinds[0][0], "device");
TF_ASSERT_OK_AND_ASSIGN(
auto result, executable->Execute({{input.get()}}, ExecuteOptions()));
std::vector<std::unique_ptr<xla::PjRtBuffer>>& result_buffers = result[0];
EXPECT_EQ(result_buffers[0]->memory_space()->kind(), "device");
Shape result_shape = result_buffers[0]->on_device_shape();
auto memory_space = result_shape.layout().memory_space();
EXPECT_EQ(memory_space, 1);
}
TEST(StreamExecutorGpuClientTest,
ExecutablePinnedHostTupleOutputMemoryKindTest) {
TF_ASSERT_OK_AND_ASSIGN(auto client,
GetStreamExecutorGpuClient(GpuClientOptions()));
Shape shape = ShapeUtil::MakeShapeWithDenseLayout(S32, {4}, {0});
Shape host_shape = shape;
host_shape.mutable_layout()->set_memory_space(Layout::kHostMemorySpace);
Shape out_shape = ShapeUtil::MakeTupleShape({shape, host_shape});
xla::CompileOptions options;
options.executable_build_options.set_result_layout(out_shape);
TF_ASSERT_OK_AND_ASSIGN(
auto executable,
CompileExecutable(kD2HProgramTupleOutput, *client, options));
TF_ASSERT_OK_AND_ASSIGN(auto memory_kinds,
executable->GetOutputMemoryKinds());
EXPECT_EQ(memory_kinds.size(), 1);
EXPECT_EQ(memory_kinds[0].size(), 2);
EXPECT_EQ(memory_kinds[0][0], "device");
EXPECT_EQ(memory_kinds[0][1], "pinned_host");
}
TEST(StreamExecutorGpuClientTest, MlirParameterHostMemorySpaceIsSetInHlo) {
constexpr char kMlirH2D[] =
R"(
func.func public @main(%arg0: tensor<8x2xi32> {
mhlo.layout_mode = "{1,0}",
mhlo.memory_kind = "pinned_host",
mhlo.sharding = "{devices=[2,2]<=[4]}"
}) -> (tensor<8x2xi32> {
jax.result_info = "",
mhlo.layout_mode = "default",
mhlo.memory_kind = "device",
mhlo.sharding = "{devices=[2,2]<=[4]}"}) {
%0 = stablehlo.custom_call @annotate_device_placement(%arg0) {
has_side_effect = true,
mhlo.frontend_attributes = {_xla_buffer_placement = "device"}
} : (tensor<8x2xi32>) -> tensor<8x2xi32>
return %0 : tensor<8x2xi32>
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto client,
GetStreamExecutorGpuClient(GpuClientOptions()));
mlir::MLIRContext context;
TF_ASSERT_OK_AND_ASSIGN(auto module,
xla::ParseMlirModuleString(kMlirH2D, context));
TF_ASSERT_OK_AND_ASSIGN(auto executable, client->Compile(*module, {}));
TF_ASSERT_OK_AND_ASSIGN(auto modules, executable->GetHloModules());
auto first_param_layout =
modules[0]->entry_computation_layout().parameter_layout(0).layout();
EXPECT_EQ(first_param_layout.memory_space(), Layout::kHostMemorySpace);
auto result_layout =
modules[0]->entry_computation_layout().result_layout().layout();
EXPECT_EQ(result_layout.memory_space(), Layout::kDefaultMemorySpace);
}
TEST(StreamExecutorGpuClientTest, MlirResultHostMemorySpaceIsSetInHlo) {
constexpr char kMlirD2H[] =
R"(
func.func public @main(%arg0: tensor<8x2xi32> {
mhlo.layout_mode = "{1,0}",
mhlo.memory_kind = "device",
mhlo.sharding = "{devices=[2,2]<=[4]}"
}) -> (tensor<8x2xi32> {
jax.result_info = "",
mhlo.layout_mode = "default",
mhlo.memory_kind = "pinned_host",
mhlo.sharding = "{devices=[2,2]<=[4]}"}) {
%0 = stablehlo.custom_call @annotate_device_placement(%arg0) {
has_side_effect = true,
mhlo.frontend_attributes = {_xla_buffer_placement = "pinned_host"}
} : (tensor<8x2xi32>) -> tensor<8x2xi32>
return %0 : tensor<8x2xi32>
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto client,
GetStreamExecutorGpuClient(GpuClientOptions()));
mlir::MLIRContext context;
TF_ASSERT_OK_AND_ASSIGN(auto module,
xla::ParseMlirModuleString(kMlirD2H, context));
TF_ASSERT_OK_AND_ASSIGN(auto executable, client->Compile(*module, {}));
TF_ASSERT_OK_AND_ASSIGN(auto modules, executable->GetHloModules());
auto first_param_layout =
modules[0]->entry_computation_layout().parameter_layout(0).layout();
EXPECT_EQ(first_param_layout.memory_space(), Layout::kDefaultMemorySpace);
auto result_layout =
modules[0]->entry_computation_layout().result_layout().layout();
EXPECT_EQ(result_layout.memory_space(), Layout::kHostMemorySpace);
}
TEST(StreamExecutorGpuClientTest, MlirAutoResultLayoutIsSet) {
constexpr char kMlirWithParameterLayout[] =
R"(
func.func public @main(%arg0: tensor<2x4x2xi32> {
mhlo.layout_mode = "{2, 1, 0}"
}) -> (tensor<2x2x4xi32> {
jax.result_info = "",
mhlo.layout_mode = "auto"}) {
%0 = stablehlo.transpose %arg0, dims = [0, 2, 1]
: (tensor<2x4x2xi32>) -> tensor<2x2x4xi32>
return %0 : tensor<2x2x4xi32>
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto client,
GetStreamExecutorGpuClient(GpuClientOptions()));
mlir::MLIRContext context;
TF_ASSERT_OK_AND_ASSIGN(auto module, xla::ParseMlirModuleString(
kMlirWithParameterLayout, context));
TF_ASSERT_OK_AND_ASSIGN(auto executable, client->Compile(*module, {}));
TF_ASSERT_OK_AND_ASSIGN(auto modules, executable->GetHloModules());
auto result_layout =
modules[0]->entry_computation_layout().result_layout().layout();
EXPECT_EQ(result_layout, Layout({1, 2, 0}));
}
TEST(StreamExecutorGpuClientTest, MlirAutoParameterLayoutIsSet) {
constexpr char kMlirWithParameterLayout[] =
R"(
func.func public @main(%arg0: tensor<2x4x2xi32> {
mhlo.layout_mode = "auto"
}) -> (tensor<2x2x4xi32> {
jax.result_info = "",
mhlo.layout_mode = "{2, 1, 0}"}) {
%0 = stablehlo.transpose %arg0, dims = [0, 2, 1]
: (tensor<2x4x2xi32>) -> tensor<2x2x4xi32>
return %0 : tensor<2x2x4xi32>
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto client,
GetStreamExecutorGpuClient(GpuClientOptions()));
mlir::MLIRContext context;
TF_ASSERT_OK_AND_ASSIGN(auto module, xla::ParseMlirModuleString(
kMlirWithParameterLayout, context));
TF_ASSERT_OK_AND_ASSIGN(auto executable, client->Compile(*module, {}));
TF_ASSERT_OK_AND_ASSIGN(auto modules, executable->GetHloModules());
auto first_param_layout =
modules[0]->entry_computation_layout().parameter_layout(0).layout();
EXPECT_EQ(first_param_layout, Layout({1, 2, 0}));
}
TEST(StreamExecutorGpuClientTest, MlirParameterLayoutIsSetInHlo) {
constexpr char kMlirWithParameterLayout[] =
R"(
func.func public @main(%arg0: tensor<2x2x2xi32> {
mhlo.layout_mode = "{0, 2, 1}"
}) -> (tensor<2x2x2xi32> {
jax.result_info = "",
mhlo.layout_mode = "default"}) {
return %arg0 : tensor<2x2x2xi32>
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto client,
GetStreamExecutorGpuClient(GpuClientOptions()));
mlir::MLIRContext context;
TF_ASSERT_OK_AND_ASSIGN(auto module, xla::ParseMlirModuleString(
kMlirWithParameterLayout, context));
TF_ASSERT_OK_AND_ASSIGN(auto executable, client->Compile(*module, {}));
TF_ASSERT_OK_AND_ASSIGN(auto modules, executable->GetHloModules());
auto first_param_layout =
modules[0]->entry_computation_layout().parameter_layout(0).layout();
EXPECT_EQ(first_param_layout, Layout({0, 2, 1}));
}
TEST(StreamExecutorGpuClientTest, MlirParameterLayoutFromOptionsIsSetInHlo) {
constexpr char kMlirCopy[] =
R"(
func.func public @main(%arg0: tensor<2x2x2xi32> {
mhlo.layout_mode = "default"
}) -> (tensor<2x2x2xi32> {
jax.result_info = "",
mhlo.layout_mode = "default"}) {
return %arg0 : tensor<2x2x2xi32>
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto client,
GetStreamExecutorGpuClient(GpuClientOptions()));
mlir::MLIRContext context;
TF_ASSERT_OK_AND_ASSIGN(auto module,
xla::ParseMlirModuleString(kMlirCopy, context));
xla::CompileOptions options;
options.argument_layouts = {
{ShapeUtil::MakeShapeWithDenseLayout(S32, {2, 2, 2}, {0, 2, 1})}};
TF_ASSERT_OK_AND_ASSIGN(auto executable, client->Compile(*module, options));
TF_ASSERT_OK_AND_ASSIGN(auto modules, executable->GetHloModules());
auto first_param_layout =
modules[0]->entry_computation_layout().parameter_layout(0).layout();
EXPECT_EQ(first_param_layout, Layout({0, 2, 1}));
}
TEST(StreamExecutorGpuClientTest,
MlirResultHostMemorySpaceIsSetInHloWithShardingPropagation) {
constexpr absl::string_view mlir_mul_explicit_sharding_layout_and_memory =
R"mlir(
module @jit_f attributes {
mhlo.num_partitions = 2 : i32,
mhlo.num_replicas = 1 : i32
} {
func.func public @main(%arg0: tensor<8x2xi32> {
mhlo.layout_mode = "{1,0}",
mhlo.memory_kind = "device",
mhlo.sharding = "{devices=[1,2]<=[2]}"
}) -> (tensor<8x2xi32> {
jax.result_info = "",
mhlo.layout_mode = "{0,1}",
mhlo.memory_kind = "pinned_host"
}) {
%c = stablehlo.constant dense<2> : tensor<i32>
%0 = stablehlo.broadcast_in_dim %c, dims = []
: (tensor<i32>) -> tensor<8x2xi32>
%1 = stablehlo.multiply %arg0, %0 : tensor<8x2xi32>
%2 = stablehlo.custom_call @Sharding(%1) {
mhlo.sharding = "{devices=[1,2]<=[2]}"
} : (tensor<8x2xi32>) -> tensor<8x2xi32>
%3 = stablehlo.custom_call @annotate_device_placement(%2) {
has_side_effect = true,
mhlo.frontend_attributes = {
_xla_buffer_placement = "pinned_host"
}
} : (tensor<8x2xi32>) -> tensor<8x2xi32>
return %3 : tensor<8x2xi32>
}
})mlir";
mlir::MLIRContext context;
TF_ASSERT_OK_AND_ASSIGN(
auto module, xla::ParseMlirModuleString(
mlir_mul_explicit_sharding_layout_and_memory, context));
TF_ASSERT_OK_AND_ASSIGN(auto client,
GetStreamExecutorGpuClient(GpuClientOptions()));
xla::CompileOptions options;
options.executable_build_options.set_num_partitions(2)
.set_use_spmd_partitioning(true)
.set_allow_spmd_sharding_propagation_to_output({true});
TF_ASSERT_OK_AND_ASSIGN(auto executable, client->Compile(*module, options));
TF_ASSERT_OK_AND_ASSIGN(auto modules, executable->GetHloModules());
auto first_param_layout =
modules[0]->entry_computation_layout().parameter_layout(0).layout();
EXPECT_EQ(first_param_layout.memory_space(), Layout::kDefaultMemorySpace);
auto result_layout =
modules[0]->entry_computation_layout().result_layout().layout();
EXPECT_EQ(result_layout,
Layout({0, 1}).set_memory_space(Layout::kHostMemorySpace));
EXPECT_EQ(executable->GetCompileOptions()
.value()
.executable_build_options.layout_canonicalization_callback(),
nullptr);
}
TEST(StreamExecutorGpuClientTest, GetDefaultLayout) {
TF_ASSERT_OK_AND_ASSIGN(auto client,
GetStreamExecutorGpuClient(GpuClientOptions()));
auto shape = ShapeUtil::MakeShape(S4, {2, 2});
TF_ASSERT_OK_AND_ASSIGN(
auto layout,
client->GetDefaultLayout(shape.element_type(), shape.dimensions()));
EXPECT_EQ(layout.element_size_in_bits(), 4);
}
}
}
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/pjrt/gpu/se_gpu_pjrt_client.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/pjrt/gpu/se_gpu_pjrt_client_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
bc8d5b74-26e7-4156-b57d-9d1ba2f0bdd2 | cpp | tensorflow/tensorflow | model_builder | tensorflow/lite/delegates/gpu/common/model_builder.cc | tensorflow/lite/delegates/gpu/common/model_builder_test.cc
#include "tensorflow/lite/delegates/gpu/common/model_builder.h"
#include <algorithm>
#include <cstdint>
#include <map>
#include <memory>
#include <set>
#include <string>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "absl/strings/string_view.h"
#include "tensorflow/lite/builtin_ops.h"
#include "tensorflow/lite/core/c/builtin_op_data.h"
#include "tensorflow/lite/core/c/c_api_types.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/delegates/gpu/common/custom_parsers.h"
#include "tensorflow/lite/delegates/gpu/common/data_type.h"
#include "tensorflow/lite/delegates/gpu/common/lstm_parser.h"
#include "tensorflow/lite/delegates/gpu/common/model.h"
#include "tensorflow/lite/delegates/gpu/common/model_builder_helper.h"
#include "tensorflow/lite/delegates/gpu/common/model_builder_internal.h"
#include "tensorflow/lite/delegates/gpu/common/model_transformer.h"
#include "tensorflow/lite/delegates/gpu/common/object_reader.h"
#include "tensorflow/lite/delegates/gpu/common/operation_parser.h"
#include "tensorflow/lite/delegates/gpu/common/operations.h"
#include "tensorflow/lite/delegates/gpu/common/shape.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
#include "tensorflow/lite/delegates/gpu/common/tensor.h"
#include "tensorflow/lite/delegates/gpu/common/transformations/model_transformations.h"
#include "tensorflow/lite/delegates/utils.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/tools/versioning/gpu_compatibility.h"
#include "tensorflow/lite/util.h"
namespace tflite {
namespace gpu {
namespace {
absl::Status GetFullyConnectedAttributes(int weights_tensor_id,
int bias_tensor_id,
ObjectReader* reader,
FullyConnectedAttributes* attr) {
Tensor<HW, DataType::FLOAT32> weights;
RETURN_IF_ERROR(reader->ReadTensor(weights_tensor_id, &weights));
attr->weights.data = std::move(weights.data);
attr->weights.id = weights.id;
attr->weights.shape.h = 1;
attr->weights.shape.w = 1;
attr->weights.shape.o = weights.shape.h;
attr->weights.shape.i = weights.shape.w;
reader->ReadTensor(bias_tensor_id, &attr->bias).IgnoreError();
return absl::OkStatus();
}
template <typename ParamsT>
absl::Status RetrieveBuiltinData(const TfLiteNode* tflite_node,
const ParamsT** tf_options) {
*tf_options = static_cast<const ParamsT*>(tflite_node->builtin_data);
if (!*tf_options) {
return absl::InternalError("Unable to retrieve builtin_data.");
}
return absl::OkStatus();
}
template <typename ParamsT>
absl::Status RetrieveCustomInitialData(const TfLiteNode* tflite_node,
const ParamsT** tf_options) {
*tf_options = static_cast<const ParamsT*>(tflite_node->custom_initial_data);
if (!*tf_options) {
return absl::InternalError("Unable to retrieve custom_initial_data.");
}
return absl::OkStatus();
}
absl::Status NewConstNode(TensorFloat32 t, GraphFloat32* graph, Value** value) {
ConstTensorAttributes attr;
attr.tensor = std::move(t);
Node* node = graph->NewNode();
node->operation.attributes = attr;
node->operation.type = ToString(OperationType::CONSTANT);
*value = graph->NewValue();
RETURN_IF_ERROR(graph->SetProducer(node->id, (*value)->id));
(*value)->tensor.ref = attr.tensor.id;
(*value)->tensor.type = attr.tensor.kType;
(*value)->tensor.shape = attr.tensor.shape;
return absl::OkStatus();
}
template <DataType DataTypeT, typename T>
absl::Status ParseInputsWithConstTensorImpl(
Node* node, ObjectReader* reader,
TensorOrScalarBase<DataTypeT, T>* tensor_or_scalar) {
const std::string& opname = node->operation.type;
const TfLiteTensor* input0 = reader->GetInputTensor(0);
if (!input0) {
return absl::InvalidArgumentError("Couldn't get the 1st input tensor for " +
opname);
}
const TfLiteTensor* input1 = reader->GetInputTensor(1);
if (!input1) {
return absl::InvalidArgumentError("Couldn't get the 2nd input tensor for " +
opname);
}
const bool constant_tensor0 = IsConstantTensor(input0);
const bool constant_tensor1 = IsConstantTensor(input1);
if (constant_tensor0 && constant_tensor1) {
return absl::InvalidArgumentError("No runtime input tensors for " + opname);
}
const bool runtime_tensor0 = !constant_tensor0;
const bool runtime_tensor1 = !constant_tensor1;
if (runtime_tensor0 && runtime_tensor1) {
RETURN_IF_ERROR(reader->AddInput(node, 0));
RETURN_IF_ERROR(reader->AddInput(node, 1));
} else {
int runtime_tensor = 0;
int constant_tensor = 1;
TfLiteIntArray* constant_dims = input1->dims;
if (constant_tensor0 && runtime_tensor1) {
runtime_tensor = 1;
constant_tensor = 0;
constant_dims = input0->dims;
}
RETURN_IF_ERROR(reader->AddInput(node, runtime_tensor));
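    // The constant operand is folded into the attributes: a single-element
    // constant becomes a scalar, a constant convertible to a 1-D vector
    // becomes a Linear tensor, a 2-D constant is wrapped as HWC with a unit
    // height, and anything else is kept as a full HWC tensor.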
if (constant_dims->size <= 0 || NumElements(constant_dims) == 1) {
Tensor<Scalar, DataTypeT> tensor;
RETURN_IF_ERROR(reader->ReadTensor(constant_tensor, &tensor));
*tensor_or_scalar = static_cast<T>(tensor.data[0]);
} else {
if (CheckIfLinearConvertible(constant_dims).ok()) {
Tensor<Linear, DataTypeT> tensor;
RETURN_IF_ERROR(reader->ReadTensor(constant_tensor, &tensor));
*tensor_or_scalar = std::move(tensor);
} else if (constant_dims->size == 2) {
Tensor<HW, DataTypeT> tensor_hw;
RETURN_IF_ERROR(reader->ReadTensor(constant_tensor, &tensor_hw));
Tensor<HWC, DataTypeT> tensor;
tensor.id = tensor_hw.id;
tensor.shape = HWC(1, tensor_hw.shape.h, tensor_hw.shape.w);
tensor.data = tensor_hw.data;
*tensor_or_scalar = std::move(tensor);
} else {
Tensor<HWC, DataTypeT> tensor;
RETURN_IF_ERROR(reader->ReadTensor(constant_tensor, &tensor));
if (tensor.data.size() == 1) {
*tensor_or_scalar = static_cast<T>(tensor.data[0]);
} else {
*tensor_or_scalar = std::move(tensor);
}
}
}
}
return absl::OkStatus();
}
absl::Status ParseInputsWithConstTensor(Node* node, ObjectReader* reader,
const TfLiteTensor* input0) {
switch (input0->type) {
case kTfLiteBool: {
ElementwiseAttributesBase<DataType::BOOL, bool> attr;
RETURN_IF_ERROR(
ParseInputsWithConstTensorImpl(node, reader, &attr.param));
attr.runtime_tensor_is_second =
IsConstantTensor(reader->GetInputTensor(0));
node->operation.attributes = std::move(attr);
return absl::OkStatus();
}
case kTfLiteInt32: {
ElementwiseAttributesBase<DataType::INT32, int32_t> attr;
RETURN_IF_ERROR(
ParseInputsWithConstTensorImpl(node, reader, &attr.param));
attr.runtime_tensor_is_second =
IsConstantTensor(reader->GetInputTensor(0));
node->operation.attributes = std::move(attr);
return absl::OkStatus();
}
default: {
ElementwiseAttributes attr;
RETURN_IF_ERROR(
ParseInputsWithConstTensorImpl(node, reader, &attr.param));
attr.runtime_tensor_is_second =
IsConstantTensor(reader->GetInputTensor(0));
node->operation.attributes = std::move(attr);
return absl::OkStatus();
}
}
}
absl::Status MaybeFuseActivationForElementwiseNode(
OperationType operation_type, const TfLiteNode* tflite_node,
GraphFloat32* graph, Node* node) {
TfLiteFusedActivation activation = kTfLiteActNone;
switch (operation_type) {
case OperationType::MUL: {
const TfLiteMulParams* tf_options;
if (RetrieveBuiltinData(tflite_node, &tf_options).ok()) {
activation = tf_options->activation;
}
break;
}
case OperationType::ADD: {
const TfLiteAddParams* tf_options;
if (RetrieveBuiltinData(tflite_node, &tf_options).ok()) {
activation = tf_options->activation;
}
break;
}
case OperationType::SUB: {
const TfLiteSubParams* tf_options;
if (RetrieveBuiltinData(tflite_node, &tf_options).ok()) {
activation = tf_options->activation;
}
break;
}
case OperationType::DIV: {
const TfLiteDivParams* tf_options;
if (RetrieveBuiltinData(tflite_node, &tf_options).ok()) {
activation = tf_options->activation;
}
break;
}
default:
activation = kTfLiteActNone;
}
if (activation) {
return MaybeFuseActivation(activation, graph, node);
}
return absl::OkStatus();
}
struct TensorInfo {
std::vector<std::pair<TfLiteNode*, TfLiteRegistration*>> producers;
std::vector<std::pair<TfLiteNode*, TfLiteRegistration*>> consumers;
};
absl::Status GetTensorInfo(const TfLiteContext* context, int tensor_id,
TensorInfo* result) {
TfLiteIntArray* execution_plan = nullptr;
if (context->GetExecutionPlan(const_cast<TfLiteContext*>(context),
&execution_plan) != kTfLiteOk) {
return absl::UnavailableError("Unable to get graph execution plan.");
}
for (int i = 0; i < execution_plan->size; ++i) {
const int node_index = execution_plan->data[i];
TfLiteNode* node = nullptr;
TfLiteRegistration* registration = nullptr;
if (context->GetNodeAndRegistration(const_cast<TfLiteContext*>(context),
node_index, &node,
®istration) != kTfLiteOk) {
return absl::UnavailableError(
"Unable to get node and registration for node.");
}
for (int j = 0; j < node->inputs->size; ++j) {
if (tensor_id == node->inputs->data[j]) {
result->consumers.push_back({node, registration});
}
}
for (int j = 0; j < node->outputs->size; ++j) {
if (tensor_id == node->outputs->data[j]) {
result->producers.push_back({node, registration});
}
}
}
return absl::OkStatus();
}
bool IsLogicalCode(int32_t builtin_code) {
return builtin_code == kTfLiteBuiltinGreater ||
builtin_code == kTfLiteBuiltinGreaterEqual ||
builtin_code == kTfLiteBuiltinLess ||
builtin_code == kTfLiteBuiltinLessEqual ||
builtin_code == kTfLiteBuiltinEqual ||
builtin_code == kTfLiteBuiltinNotEqual;
}
bool IsLogicalOp(tflite::gpu::OperationType op_type) {
return op_type == tflite::gpu::OperationType::GREATER ||
op_type == tflite::gpu::OperationType::GREATER_EQUAL ||
op_type == tflite::gpu::OperationType::LESS ||
op_type == tflite::gpu::OperationType::LESS_EQUAL ||
op_type == tflite::gpu::OperationType::EQUAL ||
op_type == tflite::gpu::OperationType::NOT_EQUAL;
}
class BatchedMatMulOperationParser : public TFLiteOperationParser {
public:
absl::Status IsSupported(const TfLiteContext* context,
const TfLiteNode* tflite_node,
const TfLiteRegistration* registration) final {
return CheckGpuDelegateCompatibility(context, tflite_node, registration);
}
absl::Status Parse(const TfLiteNode* tflite_node,
const TfLiteRegistration* registration,
GraphFloat32* graph, ObjectReader* reader) final {
if (reader->GetNumberOfRuntimeInputs() == 2) {
Node* node = graph->NewNode();
node->operation.type = ToString(OperationType::BATCHED_MATMUL);
RETURN_IF_ERROR(reader->AddInput(node, 0));
RETURN_IF_ERROR(reader->AddInput(node, 1));
RETURN_IF_ERROR(reader->AddOutputs(node));
return absl::OkStatus();
} else if (reader->GetNumberOfRuntimeInputs() == 1) {
const TfLiteTensor* second_input = reader->GetInputTensor(1);
if (!IsConstantTensor(second_input) || second_input->dims->size != 2) {
return absl::UnavailableError("Not supported batched mat mul case");
}
Node* node = graph->NewNode();
node->operation.type = ToString(OperationType::CONVOLUTION_2D);
RETURN_IF_ERROR(reader->AddInput(node, 0));
RETURN_IF_ERROR(reader->AddOutputs(node));
Tensor<HW, DataType::FLOAT32> weights;
RETURN_IF_ERROR(reader->ReadTensor(1, &weights));
Convolution2DAttributes attr;
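      // The constant 2-D rhs has shape (K, N); repack it transposed into 1x1
      // OHWI weights with O = N and I = K so the matmul runs as a 1x1
      // convolution over the K input channels.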
attr.weights.data.resize(weights.shape.w * weights.shape.h);
for (int i = 0; i < weights.shape.w; ++i) {
for (int j = 0; j < weights.shape.h; ++j) {
attr.weights.data[i * weights.shape.h + j] =
weights.data[j * weights.shape.w + i];
}
}
attr.weights.id = weights.id;
attr.weights.shape.h = 1;
attr.weights.shape.w = 1;
attr.weights.shape.o = weights.shape.w;
attr.weights.shape.i = weights.shape.h;
attr.strides = HW(1, 1);
attr.dilations = HW(1, 1);
attr.padding.appended = HW(0, 0);
attr.padding.prepended = HW(0, 0);
node->operation.attributes = std::move(attr);
return absl::OkStatus();
} else {
return absl::UnavailableError("Not supported batched mat mul case");
}
return absl::OkStatus();
}
};
class CastOperationParser : public TFLiteOperationParser {
public:
absl::Status IsSupported(const TfLiteContext* context,
const TfLiteNode* tflite_node,
const TfLiteRegistration* registration) final {
TfLiteType src_type = context->tensors[tflite_node->inputs->data[0]].type;
TfLiteType dst_type = context->tensors[tflite_node->outputs->data[0]].type;
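    // Casting bool to float is only accepted when the bool value is produced
    // by a single logical op (greater, less, equal, ...) and both the cast
    // input and output have exactly one consumer; everything else defers to
    // the generic compatibility check.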
if (src_type == kTfLiteBool &&
(dst_type == kTfLiteFloat16 || dst_type == kTfLiteFloat32)) {
TensorInfo input_tensor_info;
RETURN_IF_ERROR(GetTensorInfo(context, tflite_node->inputs->data[0],
&input_tensor_info));
if (input_tensor_info.producers.size() != 1 ||
input_tensor_info.consumers.size() != 1) {
return absl::UnavailableError("Not supported cast case");
}
TensorInfo output_tensor_info;
RETURN_IF_ERROR(GetTensorInfo(context, tflite_node->outputs->data[0],
&output_tensor_info));
if (output_tensor_info.consumers.size() != 1) {
return absl::UnavailableError(
"Cast from bool not supported for outputs");
}
if (IsLogicalCode(input_tensor_info.producers[0].second->builtin_code)) {
return absl::OkStatus();
}
}
return CheckGpuDelegateCompatibility(context, tflite_node, registration);
}
absl::Status Parse(const TfLiteNode* tflite_node,
const TfLiteRegistration* registration,
GraphFloat32* graph, ObjectReader* reader) final {
Node* node = graph->NewNode();
node->operation.type = ToString(OperationType::CAST);
RETURN_IF_ERROR(reader->AddInput(node, 0));
RETURN_IF_ERROR(reader->AddOutputs(node));
return absl::OkStatus();
}
};
class ClampOperationsParser : public TFLiteOperationParser {
public:
explicit ClampOperationsParser(float clamp_a, float clamp_b)
: clamp_a_(clamp_a), clamp_b_(clamp_b) {}
absl::Status IsSupported(const TfLiteContext* context,
const TfLiteNode* tflite_node,
const TfLiteRegistration* registration) final {
return absl::OkStatus();
}
absl::Status Parse(const TfLiteNode* tflite_node,
const TfLiteRegistration* registration,
GraphFloat32* graph, ObjectReader* reader) final {
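    // There is no dedicated clamp primitive, so clamp(x, a, b) is emulated as
    // add(-a) -> relu(activation_max = b - a) -> add(a).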
Node* node_sub = graph->NewNode();
Node* node_relu = graph->NewNode();
Node* node_add = graph->NewNode();
ElementwiseAttributes sub_attr;
sub_attr.param = -clamp_a_;
node_sub->operation.type = ToString(OperationType::ADD);
node_sub->operation.attributes = std::move(sub_attr);
ReLUAttributes relu_attr;
relu_attr.alpha = 0.0f;
relu_attr.activation_max = clamp_b_ - clamp_a_;
node_relu->operation.type = ToString(OperationType::RELU);
node_relu->operation.attributes = relu_attr;
ElementwiseAttributes add_attr;
add_attr.param = clamp_a_;
node_add->operation.type = ToString(OperationType::ADD);
node_add->operation.attributes = std::move(add_attr);
RETURN_IF_ERROR(reader->AddInput(node_sub, 0));
auto input = graph->FindInputs(node_sub->id)[0];
Value* v0 = graph->NewValue();
Value* v1 = graph->NewValue();
v0->tensor.type = input->tensor.type;
v0->tensor.shape = input->tensor.shape;
v1->tensor.type = input->tensor.type;
v1->tensor.shape = input->tensor.shape;
RETURN_IF_ERROR(graph->SetProducer(node_sub->id, v0->id));
RETURN_IF_ERROR(graph->AddConsumer(node_relu->id, v0->id));
RETURN_IF_ERROR(graph->SetProducer(node_relu->id, v1->id));
RETURN_IF_ERROR(graph->AddConsumer(node_add->id, v1->id));
RETURN_IF_ERROR(reader->AddOutputs(node_add));
return absl::OkStatus();
}
private:
const float clamp_a_, clamp_b_;
};
class ConcatenationOperationParser : public TFLiteOperationParser {
public:
absl::Status IsSupported(const TfLiteContext* context,
const TfLiteNode* tflite_node,
const TfLiteRegistration* registration) final {
RETURN_IF_ERROR(CheckMaxSupportedOpVersion(registration, 2));
return CheckGpuDelegateCompatibility(context, tflite_node, registration);
}
absl::Status Parse(const TfLiteNode* tflite_node,
const TfLiteRegistration* registration,
GraphFloat32* graph, ObjectReader* reader) final {
ConcatAttributes attr;
std::vector<const Value*> inputs;
for (uint32_t idx = 0; idx < tflite_node->inputs->size; ++idx) {
Value* value;
const auto status = reader->ReadValue(idx, &value);
if (status.ok()) {
inputs.push_back(value);
} else {
TensorFloat32 tensor;
RETURN_IF_ERROR(reader->ReadTensor(idx, &tensor));
Value* value;
RETURN_IF_ERROR(NewConstNode(std::move(tensor), graph, &value));
inputs.push_back(value);
}
}
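    // If the same value feeds the concatenation more than once, route the
    // duplicate through a COPY node so every input edge refers to a distinct
    // value.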
for (int i = 0; i < inputs.size(); ++i) {
for (int j = 0; j < i; ++j) {
if (inputs[i] == inputs[j]) {
Node* node_copy = graph->NewNode();
node_copy->operation.type = ToString(OperationType::COPY);
RETURN_IF_ERROR(graph->AddConsumer(node_copy->id, inputs[j]->id));
Value* copy_value = graph->NewValue();
copy_value->tensor.type = inputs[j]->tensor.type;
copy_value->tensor.shape = inputs[j]->tensor.shape;
RETURN_IF_ERROR(graph->SetProducer(node_copy->id, copy_value->id));
inputs[i] = copy_value;
break;
}
}
}
Node* node = graph->NewNode();
node->operation.type = ToString(OperationType::CONCAT);
RETURN_IF_ERROR(reader->AddOutputs(node));
for (int i = 0; i < inputs.size(); ++i) {
RETURN_IF_ERROR(graph->AddConsumer(node->id, inputs[i]->id));
}
std::vector<BHWC> input_shapes;
for (auto input : graph->FindInputs(node->id)) {
input_shapes.push_back(input->tensor.shape);
}
RETURN_IF_ERROR(SetAxis(input_shapes, &attr.axis));
BHWC output_shape = graph->FindOutputs(node->id)[0]->tensor.shape;
for (auto input : graph->FindInputs(node->id)) {
if (input->tensor.shape.h != output_shape.h) {
attr.axis = Axis::HEIGHT;
break;
}
if (input->tensor.shape.w != output_shape.w) {
attr.axis = Axis::WIDTH;
break;
}
if (input->tensor.shape.c != output_shape.c) {
attr.axis = Axis::CHANNELS;
break;
}
}
const TfLiteConcatenationParams* tf_options;
RETURN_IF_ERROR(RetrieveBuiltinData(tflite_node, &tf_options));
RETURN_IF_ERROR(MaybeFuseActivation(tf_options->activation, graph, node));
node->operation.attributes = attr;
return absl::OkStatus();
}
private:
absl::Status SetAxis(const std::vector<BHWC>& input_shapes, Axis* axis) {
*axis = Axis::BATCH;
for (int i = 1; i < input_shapes.size(); i++) {
if (input_shapes[0].h != input_shapes[i].h &&
input_shapes[0].w != input_shapes[i].w &&
input_shapes[0].c != input_shapes[i].c) {
*axis = Axis::HEIGHT;
break;
}
}
if (*axis == Axis::BATCH) return absl::OkStatus();
for (int i = 1; i < input_shapes.size(); i++) {
if (input_shapes[0].b != input_shapes[i].b &&
input_shapes[0].w != input_shapes[i].w &&
input_shapes[0].c != input_shapes[i].c) {
*axis = Axis::WIDTH;
break;
}
}
if (*axis == Axis::HEIGHT) return absl::OkStatus();
for (int i = 1; i < input_shapes.size(); i++) {
if (input_shapes[0].b != input_shapes[i].b &&
input_shapes[0].h != input_shapes[i].h &&
input_shapes[0].c != input_shapes[i].c) {
*axis = Axis::CHANNELS;
break;
}
}
if (*axis == Axis::WIDTH) return absl::OkStatus();
for (int i = 1; i < input_shapes.size(); i++) {
if (input_shapes[0].b != input_shapes[i].b &&
input_shapes[0].w != input_shapes[i].w &&
input_shapes[0].h != input_shapes[i].h) {
return absl::UnimplementedError(
"Can concatenate tensors only by batch, height, width, or "
"channels.");
}
}
return absl::OkStatus();
}
};
class Conv2DOperationParser : public TFLiteOperationParser {
public:
absl::Status IsSupported(const TfLiteContext* context,
const TfLiteNode* tflite_node,
const TfLiteRegistration* registration) final {
RETURN_IF_ERROR(CheckMaxSupportedOpVersion(registration, 6));
return CheckGpuDelegateCompatibility(context, tflite_node, registration);
}
absl::Status Parse(const TfLiteNode* tflite_node,
const TfLiteRegistration* registration,
GraphFloat32* graph, ObjectReader* reader) final {
const TfLiteConvParams* tf_options;
RETURN_IF_ERROR(RetrieveBuiltinData(tflite_node, &tf_options));
Convolution2DAttributes attr;
RETURN_IF_ERROR(ReadAttributes(tflite_node, tf_options, reader, &attr));
const int runtime_inputs = reader->GetNumberOfRuntimeInputs();
if (runtime_inputs == 2) {
const TfLiteTensor* src_tensor = reader->GetInputTensor(0);
const TfLiteTensor* weights_tensor = reader->GetInputTensor(1);
BHWC src_shape, weights_shape;
RETURN_IF_ERROR(ExtractTensorShape(*src_tensor, &src_shape));
RETURN_IF_ERROR(ExtractTensorShape(*weights_tensor, &weights_shape));
if (src_shape.c != weights_shape.c) {
return absl::InternalError(
"No support of CONVOLUTION_2D with runtime grouped weights.");
}
Node* node = graph->NewNode();
node->operation.type = ToString(OperationType::CONVOLUTION_2D);
node->operation.attributes = std::move(attr);
RETURN_IF_ERROR(reader->AddInput(node, 0));
RETURN_IF_ERROR(reader->AddInput(node, 1));
RETURN_IF_ERROR(reader->AddOutputs(node));
RETURN_IF_ERROR(MaybeFuseActivation(tf_options->activation, graph, node));
return absl::OkStatus();
} else {
BHWC src_shape, dst_shape;
RETURN_IF_ERROR(
ExtractTensorShape(*reader->GetInputTensor(0), &src_shape));
RETURN_IF_ERROR(
ExtractTensorShape(*reader->GetOutputTensor(0), &dst_shape));
const int src_group_size = attr.weights.shape.i;
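      // Filters that see a single input channel while the op preserves the
      // channel count describe a depthwise convolution with multiplier 1;
      // repack the OHWI(C, H, W, 1) weights as OHWI(1, H, W, C).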
if (attr.weights.shape.i == 1 && src_shape.c == dst_shape.c) {
DepthwiseConvolution2DAttributes dw_attr;
dw_attr.weights.id = attr.weights.id;
dw_attr.weights.shape =
OHWI(attr.weights.shape.i, attr.weights.shape.h,
attr.weights.shape.w, attr.weights.shape.o);
dw_attr.weights.data.resize(dw_attr.weights.shape.DimensionsProduct());
for (int o = 0; o < dw_attr.weights.shape.o; ++o) {
for (int h = 0; h < dw_attr.weights.shape.h; ++h) {
for (int w = 0; w < dw_attr.weights.shape.w; ++w) {
for (int i = 0; i < dw_attr.weights.shape.i; ++i) {
dw_attr.weights
.data[dw_attr.weights.shape.LinearIndex({o, h, w, i})] =
attr.weights
.data[attr.weights.shape.LinearIndex({i, h, w, o})];
}
}
}
}
dw_attr.bias = attr.bias;
dw_attr.strides = attr.strides;
dw_attr.dilations = attr.dilations;
dw_attr.padding = attr.padding;
Node* node = graph->NewNode();
node->operation.type = ToString(OperationType::DEPTHWISE_CONVOLUTION);
node->operation.attributes = std::move(dw_attr);
RETURN_IF_ERROR(reader->AddInput(node, 0));
RETURN_IF_ERROR(reader->AddOutputs(node));
RETURN_IF_ERROR(
MaybeFuseActivation(tf_options->activation, graph, node));
return absl::OkStatus();
}
const int dst_group_size = attr.weights.shape.o / attr.groups;
const bool supported_grouped_conv =
src_group_size % 4 == 0 && dst_group_size % 4 == 0;
if (attr.groups != 1 && !supported_grouped_conv) {
return ResolveGroupedConvolution(attr, tf_options, reader, graph);
} else {
Node* node = graph->NewNode();
node->operation.type = ToString(OperationType::CONVOLUTION_2D);
node->operation.attributes = std::move(attr);
RETURN_IF_ERROR(reader->AddInput(node, 0));
RETURN_IF_ERROR(reader->AddOutputs(node));
RETURN_IF_ERROR(
MaybeFuseActivation(tf_options->activation, graph, node));
return absl::OkStatus();
}
}
}
private:
absl::Status ReadAttributes(const TfLiteNode* tflite_node,
const TfLiteConvParams* tf_options,
ObjectReader* reader,
Convolution2DAttributes* attr) {
const TfLiteTensor* src_tensor = reader->GetInputTensor(0);
BHWC src_shape;
RETURN_IF_ERROR(ExtractTensorShape(*src_tensor, &src_shape));
const int runtime_inputs = reader->GetNumberOfRuntimeInputs();
if (runtime_inputs == 1) {
RETURN_IF_ERROR(reader->ReadTensor(1, &attr->weights));
attr->groups = src_shape.c / attr->weights.shape.i;
} else {
const TfLiteTensor* weights_tensor = reader->GetInputTensor(1);
if (!weights_tensor) {
return absl::InternalError("Expected second runtime tensor.");
}
BHWC weights_shape;
RETURN_IF_ERROR(ExtractTensorShape(*weights_tensor, &weights_shape));
attr->weights.shape = OHWI(weights_shape.b, weights_shape.h,
weights_shape.w, weights_shape.c);
attr->groups = 1;
}
reader->ReadTensor(2, &attr->bias).IgnoreError();
attr->strides = ToHW(tf_options->stride_height, tf_options->stride_width);
attr->dilations = HW(tf_options->dilation_height_factor,
tf_options->dilation_width_factor);
UpdatePadding(tf_options->padding, src_shape, attr);
return absl::OkStatus();
}
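  // Rewrites a grouped convolution that the direct path cannot handle (the
  // per-group channel counts are not multiples of 4) into a SPLIT over
  // channels, one CONVOLUTION_2D per group with that group's slice of the
  // weights and bias, and a CONCAT of the per-group results.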
absl::Status ResolveGroupedConvolution(const Convolution2DAttributes& attr,
const TfLiteConvParams* tf_options,
ObjectReader* reader,
GraphFloat32* graph) {
const TfLiteTensor* src_tensor = reader->GetInputTensor(0);
const TfLiteTensor* dst_tensor = reader->GetOutputTensor(0);
BHWC src_shape, dst_shape;
RETURN_IF_ERROR(ExtractTensorShape(*src_tensor, &src_shape));
RETURN_IF_ERROR(ExtractTensorShape(*dst_tensor, &dst_shape));
DataType src_type = DataType::FLOAT32;
if (src_tensor->type == kTfLiteFloat16) {
src_type = DataType::FLOAT16;
}
DataType dst_type = DataType::FLOAT32;
if (dst_tensor->type == kTfLiteFloat16) {
dst_type = DataType::FLOAT16;
}
const int src_group_size = attr.weights.shape.i;
const int dst_group_size = attr.weights.shape.o / attr.groups;
Node* split_node = graph->NewNode();
RETURN_IF_ERROR(reader->AddInput(split_node, 0));
{
SplitAttributes split_attr;
split_attr.axis = Axis::CHANNELS;
split_node->operation.type = ToString(OperationType::SPLIT);
split_node->operation.attributes = split_attr;
}
std::vector<Node*> conv_nodes(attr.groups);
std::vector<Value*> conv_src(attr.groups);
std::vector<Value*> conv_dst(attr.groups);
for (int i = 0; i < attr.groups; ++i) {
conv_nodes[i] = graph->NewNode();
conv_src[i] = graph->NewValue();
conv_dst[i] = graph->NewValue();
conv_src[i]->tensor.shape = src_shape;
conv_src[i]->tensor.type = src_type;
conv_src[i]->tensor.shape.c = src_group_size;
conv_dst[i]->tensor.shape = dst_shape;
conv_dst[i]->tensor.type = dst_type;
conv_dst[i]->tensor.shape.c = dst_group_size;
Convolution2DAttributes conv_attr;
conv_attr = attr;
conv_attr.groups = 1;
conv_attr.weights.id = -1;
conv_attr.weights.shape.o = dst_group_size;
conv_attr.weights.data.resize(
conv_attr.weights.shape.DimensionsProduct());
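// Copy this group's slice of the weights: group i owns output channels
// [i * dst_group_size, (i + 1) * dst_group_size) of the original filter.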
for (int out_i = 0; out_i < dst_group_size; ++out_i) {
for (int in_i = 0; in_i < src_group_size; ++in_i) {
for (int ky = 0; ky < attr.weights.shape.h; ++ky) {
for (int kx = 0; kx < attr.weights.shape.w; ++kx) {
const int src_index = attr.weights.shape.LinearIndex(
{{i * dst_group_size + out_i, ky, kx, in_i}});
const int dst_index =
conv_attr.weights.shape.LinearIndex({{out_i, ky, kx, in_i}});
conv_attr.weights.data[dst_index] = attr.weights.data[src_index];
}
}
}
}
conv_attr.bias.shape.v = dst_group_size;
conv_attr.bias.data.resize(conv_attr.bias.shape.DimensionsProduct());
for (int out_i = 0; out_i < dst_group_size; ++out_i) {
if (i * dst_group_size + out_i < attr.bias.data.size()) {
conv_attr.bias.data[out_i] =
attr.bias.data[i * dst_group_size + out_i];
} else {
conv_attr.bias.data[out_i] = 0.0f;
}
}
conv_nodes[i]->operation.type = ToString(OperationType::CONVOLUTION_2D);
conv_nodes[i]->operation.attributes = conv_attr;
RETURN_IF_ERROR(graph->SetProducer(split_node->id, conv_src[i]->id));
RETURN_IF_ERROR(graph->AddConsumer(conv_nodes[i]->id, conv_src[i]->id));
RETURN_IF_ERROR(graph->SetProducer(conv_nodes[i]->id, conv_dst[i]->id));
}
Node* concat_node = graph->NewNode();
{
ConcatAttributes concat_attr;
concat_attr.axis = Axis::CHANNELS;
concat_node->operation.type = ToString(OperationType::CONCAT);
concat_node->operation.attributes = concat_attr;
}
for (int i = 0; i < attr.groups; ++i) {
RETURN_IF_ERROR(graph->AddConsumer(concat_node->id, conv_dst[i]->id));
}
RETURN_IF_ERROR(reader->AddOutputs(concat_node));
RETURN_IF_ERROR(
MaybeFuseActivation(tf_options->activation, graph, concat_node));
return absl::OkStatus();
}
};
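// Parses TFLite CUMSUM. The TFLite axis (given relative to the tensor rank)
// is remapped onto the fixed BHWC axis order used by the GPU graph.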
class CumsumOperationParser : public TFLiteOperationParser {
public:
absl::Status IsSupported(const TfLiteContext* context,
const TfLiteNode* tflite_node,
const TfLiteRegistration* registration) final {
RETURN_IF_ERROR(CheckMaxSupportedOpVersion(registration, 1));
return CheckGpuDelegateCompatibility(context, tflite_node, registration);
}
absl::Status Parse(const TfLiteNode* tflite_node,
const TfLiteRegistration* registration,
GraphFloat32* graph, ObjectReader* reader) final {
Node* node = graph->NewNode();
CumsumAttributes attr;
const TfLiteTensor* input_tensor = reader->GetInputTensor(0);
const TfLiteTensor* axis_tensor = reader->GetInputTensor(1);
const TfLiteIntArray* shape = input_tensor->dims;
const int tflite_axis = GetTensorData<int32_t>(axis_tensor)[0];
const Axis axes[4] = {Axis::BATCH, Axis::HEIGHT, Axis::WIDTH,
Axis::CHANNELS};
attr.axis = axes[tflite_axis + 4 - shape->size];
node->operation.type = ToString(OperationType::CUMSUM);
node->operation.attributes = std::move(attr);
RETURN_IF_ERROR(reader->AddInput(node, 0));
RETURN_IF_ERROR(reader->AddOutputs(node));
return absl::OkStatus();
}
};
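// Parses TFLite DENSIFY: materializes a sparse constant input as a dense
// tensor stored in the node attributes.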
class DensifyOperationParser : public TFLiteOperationParser {
public:
absl::Status IsSupported(const TfLiteContext* context,
const TfLiteNode* tflite_node,
const TfLiteRegistration* registration) final {
RETURN_IF_ERROR(CheckMaxSupportedOpVersion(registration, 1));
return CheckGpuDelegateCompatibility(context, tflite_node, registration);
}
absl::Status Parse(const TfLiteNode* tflite_node,
const TfLiteRegistration* registration,
GraphFloat32* graph, ObjectReader* reader) final {
Node* node = graph->NewNode();
node->operation.type = ToString(OperationType::DENSIFY);
const TfLiteTensor* const_tensor = reader->GetInputTensor(0);
if (!const_tensor->sparsity) {
return absl::InvalidArgumentError("Input tensor must be sparse.");
}
TensorFloat32 sparse_tensor;
RETURN_IF_ERROR(reader->ReadTensor(0, &sparse_tensor));
DensifyAttributes attributes;
attributes.tensor = std::move(sparse_tensor);
node->operation.attributes = attributes;
return reader->AddOutputs(node);
}
};
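// Parses TFLite DEPTHWISE_CONV_2D. When depth_multiplier != 1 the weights
// are re-laid out so that per-output-channel filter planes are contiguous
// (see TransposeWeights below).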
class DepthwiseConvolutionOperationParser : public TFLiteOperationParser {
public:
absl::Status IsSupported(const TfLiteContext* context,
const TfLiteNode* tflite_node,
const TfLiteRegistration* registration) final {
RETURN_IF_ERROR(CheckMaxSupportedOpVersion(registration, 6));
return CheckGpuDelegateCompatibility(context, tflite_node, registration);
}
absl::Status Parse(const TfLiteNode* tflite_node,
const TfLiteRegistration* registration,
GraphFloat32* graph, ObjectReader* reader) final {
Node* node = graph->NewNode();
node->operation.type = ToString(OperationType::DEPTHWISE_CONVOLUTION);
RETURN_IF_ERROR(reader->AddInput(node, 0));
RETURN_IF_ERROR(reader->AddOutputs(node));
DepthwiseConvolution2DAttributes attr;
const int runtime_inputs = reader->GetNumberOfRuntimeInputs();
if (runtime_inputs == 2) {
RETURN_IF_ERROR(reader->AddInput(node, 1));
auto weights_shape = graph->FindInputs(node->id)[1]->tensor.shape;
attr.weights.shape = OHWI(weights_shape.b, weights_shape.h,
weights_shape.w, weights_shape.c);
} else {
RETURN_IF_ERROR(reader->ReadTensor(1, &attr.weights));
}
reader->ReadTensor(2, &attr.bias).IgnoreError();
const TfLiteDepthwiseConvParams* tf_options;
RETURN_IF_ERROR(RetrieveBuiltinData(tflite_node, &tf_options));
attr.strides = ToHW(tf_options->stride_height, tf_options->stride_width);
attr.dilations = HW(std::max(1, tf_options->dilation_height_factor),
std::max(1, tf_options->dilation_width_factor));
UpdatePadding(tf_options->padding,
graph->FindInputs(node->id)[0]->tensor.shape, &attr);
RETURN_IF_ERROR(MaybeFuseActivation(tf_options->activation, graph, node));
const int depth_multiplier = tf_options->depth_multiplier;
if (depth_multiplier != 1) {
const TfLiteTensor* input = reader->GetInputTensor(0);
const TfLiteTensor* filter = reader->GetInputTensor(1);
const TfLiteTensor* output = reader->GetOutputTensor(0);
TransposeWeights(input, filter, output, depth_multiplier, &attr);
}
node->operation.attributes = std::move(attr);
return absl::OkStatus();
}
private:
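// Re-lays out the depthwise filter so that each output channel's
// filter_height x filter_width plane is stored contiguously; the source
// layout keeps channels innermost.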
static void TransposeWeights(const TfLiteTensor* input,
const TfLiteTensor* filter,
const TfLiteTensor* output, int depth_multiplier,
DepthwiseConvolution2DAttributes* attr) {
const int input_depth = input->dims->data[3];
const int filter_height = filter->dims->data[1];
const int filter_width = filter->dims->data[2];
const int output_depth = output->dims->data[3];
Tensor<OHWI, DataType::FLOAT32> weights;
weights.id = attr->weights.id;
weights.shape =
OHWI(output_depth, filter_height, filter_width, input_depth);
weights.data.resize(weights.shape.DimensionsProduct());
float* dst = &weights.data[0];
for (int j = 0; j < output_depth; ++j) {
const float* src = attr->weights.data.data() + j;
for (int i = 0; i < filter_height * filter_width; ++i) {
*dst = *src;
dst++;
src += output_depth;
}
}
attr->weights = std::move(weights);
}
};
class DepthToSpaceOperationParser : public TFLiteOperationParser {
public:
absl::Status IsSupported(const TfLiteContext* context,
const TfLiteNode* tflite_node,
const TfLiteRegistration* registration) final {
return CheckGpuDelegateCompatibility(context, tflite_node, registration);
}
absl::Status Parse(const TfLiteNode* tflite_node,
const TfLiteRegistration* registration,
GraphFloat32* graph, ObjectReader* reader) final {
Node* node = graph->NewNode();
node->operation.type = ToString(OperationType::DEPTH_TO_SPACE);
RETURN_IF_ERROR(reader->AddInput(node, 0));
RETURN_IF_ERROR(reader->AddOutputs(node));
const TfLiteDepthToSpaceParams* tf_options;
RETURN_IF_ERROR(RetrieveBuiltinData(tflite_node, &tf_options));
SpaceToDepthAttributes attr;
attr.block_size = tf_options->block_size;
node->operation.attributes = attr;
return absl::OkStatus();
}
};
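// Parses TFLite DEQUANTIZE. A constant input becomes a CONSTANT node; a
// runtime input becomes QUANTIZE_AND_DEQUANTIZE using the input's recorded
// quantization range.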
class DequantizeOperationParser : public TFLiteOperationParser {
public:
absl::Status IsSupported(const TfLiteContext* context,
const TfLiteNode* tflite_node,
const TfLiteRegistration* registration) final {
RETURN_IF_ERROR(CheckMaxSupportedOpVersion(registration, 3));
return CheckGpuDelegateCompatibility(context, tflite_node, registration);
}
absl::Status Parse(const TfLiteNode* tflite_node,
const TfLiteRegistration* registration,
GraphFloat32* graph, ObjectReader* reader) final {
const int runtime_inputs = reader->GetNumberOfRuntimeInputs();
if (runtime_inputs == 0) {
ConstTensorAttributes attr;
RETURN_IF_ERROR(reader->ReadTensor(0, &attr.tensor));
Node* node = graph->NewNode();
node->operation.attributes = attr;
node->operation.type = ToString(OperationType::CONSTANT);
return reader->AddOutputs(node);
}
Node* node = graph->NewNode();
node->operation.type = ToString(OperationType::QUANTIZE_AND_DEQUANTIZE);
RETURN_IF_ERROR(reader->AddInput(node, 0));
RETURN_IF_ERROR(reader->AddOutputs(node));
auto input_value = graph->FindInputs(node->id)[0];
if (!input_value->quant_params) {
if (runtime_inputs == 1) {
return absl::OkStatus();
}
return absl::InvalidArgumentError(
"Encountered Dequantize input with no quant params");
}
QuantizeAndDequantizeAttributes attr;
attr.min = input_value->quant_params.value().min;
attr.max = input_value->quant_params.value().max;
attr.scale = input_value->quant_params.value().scale;
node->operation.attributes = attr;
return absl::OkStatus();
}
};
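// Parses unary and binary elementwise TFLite ops. Logical ops are accepted
// only when their single consumer is a CAST or SELECT that produces a float
// result.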
class ElementwiseOperationParser : public TFLiteOperationParser {
public:
explicit ElementwiseOperationParser(OperationType operation_type)
: operation_type_(operation_type) {}
absl::Status IsSupported(const TfLiteContext* context,
const TfLiteNode* tflite_node,
const TfLiteRegistration* registration) final {
const int kMaxSupportedOpVersion =
operation_type_ == OperationType::MUL ? 3 : 2;
RETURN_IF_ERROR(
CheckMaxSupportedOpVersion(registration, kMaxSupportedOpVersion));
if (IsLogicalOp(operation_type_)) {
TensorInfo output_tensor_info;
RETURN_IF_ERROR(GetTensorInfo(context, tflite_node->outputs->data[0],
&output_tensor_info));
if (output_tensor_info.producers.size() != 1 ||
output_tensor_info.consumers.size() != 1) {
return absl::UnavailableError(
"Logical op output must have exactly one producer and one consumer");
}
const auto& next_node = output_tensor_info.consumers[0];
TfLiteType dst_type =
context->tensors[next_node.first->outputs->data[0]].type;
int next_code = next_node.second->builtin_code;
if ((next_code == kTfLiteBuiltinCast ||
next_code == kTfLiteBuiltinSelect ||
next_code == kTfLiteBuiltinSelectV2) &&
(dst_type == kTfLiteFloat16 || dst_type == kTfLiteFloat32)) {
return absl::OkStatus();
} else {
return absl::UnimplementedError(
"Logical op result must feed a CAST or SELECT that produces a float tensor.");
}
}
return CheckGpuDelegateCompatibility(context, tflite_node, registration);
}
absl::Status Parse(const TfLiteNode* tflite_node,
const TfLiteRegistration* registration,
GraphFloat32* graph, ObjectReader* reader) final {
Node* node = graph->NewNode();
node->operation.type = ToString(operation_type_);
if (operation_type_ == OperationType::ADD) {
ElementwiseAttributes attr;
node->operation.attributes = std::move(attr);
}
if (IsOneArgumentOperation()) {
RETURN_IF_ERROR(reader->VerifyInputsConstsOutputs(tflite_node,
1,
0,
1));
RETURN_IF_ERROR(reader->AddInput(node, 0));
} else if (IsTwoArgumentOperation() &&
reader
->VerifyInputsConstsOutputs(tflite_node,
2,
0,
1)
.ok()) {
if (tflite_node->inputs->size != 2) {
return absl::InvalidArgumentError("Expected exactly two input tensors.");
}
const TfLiteTensor* input0 = reader->GetInputTensor(0);
const TfLiteTensor* input1 = reader->GetInputTensor(1);
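// Both operands are the same tensor: rewrite MUL(x, x) as SQUARE(x) and
// ADD(x, x) as MUL(x, 2).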
if (input0 == input1) {
if (operation_type_ == OperationType::MUL) {
node->operation.type = ToString(OperationType::SQUARE);
RETURN_IF_ERROR(reader->AddInput(node, 0));
} else if (operation_type_ == OperationType::ADD) {
node->operation.type = ToString(OperationType::MUL);
ElementwiseAttributes attr;
attr.param = 2.0f;
node->operation.attributes = std::move(attr);
RETURN_IF_ERROR(reader->AddInput(node, 0));
} else {
return absl::UnimplementedError(
"Repeated identical inputs are not supported for this operation.");
}
} else {
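// For commutative ADD/MUL, feed the operand with the larger (or equal)
// spatial dims as the first input so the smaller one ends up broadcast.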
int input_tensor0 = 0;
int input_tensor1 = 1;
if (operation_type_ == OperationType::MUL ||
operation_type_ == OperationType::ADD) {
BHWC shape0;
RETURN_IF_ERROR(ExtractTensorShape(*input0, &shape0));
BHWC shape1;
RETURN_IF_ERROR(ExtractTensorShape(*input1, &shape1));
if (shape0.h <= shape1.h && shape0.w <= shape1.w &&
shape0.c == shape1.c) {
input_tensor0 = 1;
input_tensor1 = 0;
}
}
RETURN_IF_ERROR(reader->AddInput(node, input_tensor0));
RETURN_IF_ERROR(reader->AddInput(node, input_tensor1));
}
} else if (IsTwoArgumentOperationWithConst()) {
RETURN_IF_ERROR(reader->VerifyInputsConstsOutputs(tflite_node,
1,
1,
1));
const TfLiteTensor* input_tensor0 = reader->GetInputTensor(0);
const TfLiteTensor* constant_tensor = IsConstantTensor(input_tensor0)
? input_tensor0
: reader->GetInputTensor(1);
RETURN_IF_ERROR(
ParseInputsWithConstTensor(node, reader, constant_tensor));
} else {
return absl::InvalidArgumentError("Incorrect operation type passed");
}
RETURN_IF_ERROR(reader->AddOutputs(node));
return MaybeFuseActivationForElementwiseNode(operation_type_, tflite_node,
graph, node);
}
private:
absl::Status GetActivation(const TfLiteNode* tflite_node,
TfLiteFusedActivation* activation) const {
if (operation_type_ == OperationType::DIV) {
const TfLiteDivParams* tf_options;
auto status = RetrieveBuiltinData(tflite_node, &tf_options);
*activation = status.ok() ? tf_options->activation : kTfLiteActNone;
return absl::OkStatus();
}
if (operation_type_ == OperationType::SUB) {
const TfLiteSubParams* tf_options;
auto status = RetrieveBuiltinData(tflite_node, &tf_options);
*activation = status.ok() ? tf_options->activation : kTfLiteActNone;
return absl::OkStatus();
}
*activation = kTfLiteActNone;
return absl::OkStatus();
}
bool IsOneArgumentOperation() const {
switch (operation_type_) {
case OperationType::ABS:
case OperationType::COPY:
case OperationType::COS:
case OperationType::ELU:
case OperationType::EXP:
case OperationType::FLOOR:
case OperationType::GELU:
case OperationType::LOG:
case OperationType::NEG:
case OperationType::RSQRT:
case OperationType::SIGMOID:
case OperationType::SIGN:
case OperationType::SIN:
case OperationType::SQRT:
case OperationType::SQUARE:
case OperationType::TANH:
return true;
default:
return false;
}
}
bool IsTwoArgumentOperation() const {
switch (operation_type_) {
case OperationType::ADD:
case OperationType::DIV:
case OperationType::EQUAL:
case OperationType::FLOOR_DIV:
case OperationType::FLOOR_MOD:
case OperationType::GREATER:
case OperationType::GREATER_EQUAL:
case OperationType::LESS:
case OperationType::LESS_EQUAL:
case OperationType::LOGICAL_AND:
case OperationType::MAXIMUM:
case OperationType::MINIMUM:
case OperationType::MUL:
case OperationType::NOT_EQUAL:
case OperationType::POW:
case OperationType::SQUARED_DIFF:
case OperationType::SUB:
return true;
default:
return false;
}
}
bool IsTwoArgumentOperationWithConst() const {
switch (operation_type_) {
case OperationType::ADD:
case OperationType::DIV:
case OperationType::EQUAL:
case OperationType::FLOOR_DIV:
case OperationType::FLOOR_MOD:
case OperationType::GREATER:
case OperationType::GREATER_EQUAL:
case OperationType::LESS:
case OperationType::LESS_EQUAL:
case OperationType::LOGICAL_AND:
case OperationType::MAXIMUM:
case OperationType::MINIMUM:
case OperationType::MUL:
case OperationType::NOT_EQUAL:
case OperationType::POW:
case OperationType::SQUARED_DIFF:
case OperationType::SUB:
return true;
default:
return false;
}
}
OperationType operation_type_;
};
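// Parses TFLite FULLY_CONNECTED. Two runtime inputs are lowered to
// CONVOLUTION_2D (batched matmul); constant weights become FULLY_CONNECTED,
// or CONVOLUTION_2D when the input has spatial extent.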
class FullyConnectedOperationParser : public TFLiteOperationParser {
public:
absl::Status IsSupported(const TfLiteContext* context,
const TfLiteNode* tflite_node,
const TfLiteRegistration* registration) final {
RETURN_IF_ERROR(CheckMaxSupportedOpVersion(registration, 9));
return CheckGpuDelegateCompatibility(context, tflite_node, registration);
}
absl::Status Parse(const TfLiteNode* tflite_node,
const TfLiteRegistration* registration,
GraphFloat32* graph, ObjectReader* reader) final {
const TfLiteFullyConnectedParams* tf_options;
RETURN_IF_ERROR(RetrieveBuiltinData(tflite_node, &tf_options));
if (reader->GetNumberOfRuntimeInputs() == 2) {
Node* node = graph->NewNode();
node->operation.type = ToString(OperationType::CONVOLUTION_2D);
RETURN_IF_ERROR(reader->AddInput(node, 0));
RETURN_IF_ERROR(reader->AddInput(node, 1));
const TfLiteTensor* input_tensor = reader->GetInputTensor(0);
BHWC input_shape;
RETURN_IF_ERROR(ExtractTensorShape(*input_tensor, &input_shape));
const TfLiteTensor* input2_tensor = reader->GetInputTensor(1);
BHWC input2_shape;
RETURN_IF_ERROR(ExtractTensorShape(*input2_tensor, &input2_shape));
const TfLiteTensor* output_tensor = reader->GetOutputTensor(0);
BHWC output_shape;
RETURN_IF_ERROR(ExtractTensorShape(*output_tensor, &output_shape));
BHWC output_ref_shape = input_shape;
output_ref_shape.c = input2_shape.b;
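// If the matmul's natural output shape (input shape with C replaced by the
// weights' batch) differs from the TFLite output, route the result through a
// RESHAPE node.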
if (output_ref_shape != output_shape) {
Value* copy_value = graph->NewValue();
auto input_value = graph->FindInputs(node->id)[0];
copy_value->tensor.type = input_value->tensor.type;
copy_value->tensor.shape = output_ref_shape;
Node* node_reshape = graph->NewNode();
node_reshape->operation.type = ToString(OperationType::RESHAPE);
ReshapeAttributes reshape_attr;
reshape_attr.new_shape = output_shape;
node_reshape->operation.attributes = reshape_attr;
RETURN_IF_ERROR(graph->SetProducer(node->id, copy_value->id));
RETURN_IF_ERROR(graph->AddConsumer(node_reshape->id, copy_value->id));
RETURN_IF_ERROR(reader->AddOutputs(node_reshape));
} else {
RETURN_IF_ERROR(reader->AddOutputs(node));
}
Convolution2DAttributes attr;
reader->ReadTensor(2, &attr.bias).IgnoreError();
attr.strides = HW(1, 1);
attr.dilations = HW(1, 1);
attr.padding.appended = HW(0, 0);
attr.padding.prepended = HW(0, 0);
RETURN_IF_ERROR(MaybeFuseActivation(tf_options->activation, graph, node));
node->operation.attributes = std::move(attr);
return absl::OkStatus();
}
Node* node = graph->NewNode();
RETURN_IF_ERROR(reader->AddInput(node, 0));
if (tf_options->weights_format !=
kTfLiteFullyConnectedWeightsFormatDefault) {
return absl::UnimplementedError(
"Unsupported FullyConnected weights format.");
}
FullyConnectedAttributes attr;
RETURN_IF_ERROR(GetFullyConnectedAttributes(1, 2, reader, &attr));
auto input = graph->FindInputs(node->id)[0];
if (input->tensor.shape.c != attr.weights.shape.i) {
return absl::UnimplementedError(
"Number of input channels must match the weights' input dimension.");
}
Node* conv = node;
if (input->tensor.shape.h != 1 || input->tensor.shape.w != 1) {
Convolution2DAttributes conv_attr;
conv_attr.strides = HW(1, 1);
conv_attr.dilations = HW(1, 1);
conv_attr.padding.appended = HW(0, 0);
conv_attr.padding.prepended = HW(0, 0);
conv_attr.weights = attr.weights;
conv_attr.bias = attr.bias;
conv->operation.type = ToString(OperationType::CONVOLUTION_2D);
conv->operation.attributes = std::move(conv_attr);
} else {
conv->operation.type = ToString(OperationType::FULLY_CONNECTED);
conv->operation.attributes = std::move(attr);
}
RETURN_IF_ERROR(reader->AddOutputs(conv));
RETURN_IF_ERROR(MaybeFuseActivation(tf_options->activation, graph, conv));
return absl::OkStatus();
}
};
class GatherOperationParser : public TFLiteOperationParser {
public:
absl::Status IsSupported(const TfLiteContext* context,
const TfLiteNode* tflite_node,
const TfLiteRegistration* registration) final {
RETURN_IF_ERROR(CheckMaxSupportedOpVersion(registration, 1));
return CheckGpuDelegateCompatibility(context, tflite_node, registration);
}
absl::Status Parse(const TfLiteNode* tflite_node,
const TfLiteRegistration* registration,
GraphFloat32* graph, ObjectReader* reader) final {
Node* node = graph->NewNode();
node->operation.type = ToString(OperationType::GATHER);
GatherAttributes attr;
const TfLiteTensor* input_tensor = reader->GetInputTensor(0);
const TfLiteGatherParams* tf_options;
RETURN_IF_ERROR(RetrieveBuiltinData(tflite_node, &tf_options));
RETURN_IF_ERROR(
ExtractAxisFromIndex(*input_tensor, tf_options->axis, &attr.axis));
RETURN_IF_ERROR(reader->AddInput(node, 0));
const TfLiteTensor* idx_tensor = reader->GetInputTensor(1);
if (!IsConstantTensor(idx_tensor)) {
RETURN_IF_ERROR(reader->AddInput(node, 1));
} else {
RETURN_IF_ERROR(reader->ReadTensor(1, &attr.indices));
}
node->operation.attributes = std::move(attr);
return reader->AddOutputs(node);
}
};
class HardSwishOperationParser : public TFLiteOperationParser {
public:
absl::Status IsSupported(const TfLiteContext* context,
const TfLiteNode* tflite_node,
const TfLiteRegistration* registration) final {
return CheckGpuDelegateCompatibility(context, tflite_node, registration);
}
absl::Status Parse(const TfLiteNode*, const TfLiteRegistration*,
GraphFloat32* graph, ObjectReader* reader) final {
Node* node = graph->NewNode();
node->operation.type = ToString(OperationType::HARD_SWISH);
RETURN_IF_ERROR(reader->AddInput(node, 0));
return reader->AddOutputs(node);
}
};
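// Parses TFLite LSTM. The basic kernel is expanded into CONCAT +
// FULLY_CONNECTED + LSTM nodes; the full kernel is delegated to
// ParseLSTMAttributes.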
class LSTMOperationParser : public TFLiteOperationParser {
public:
absl::Status IsSupported(const TfLiteContext* context,
const TfLiteNode* tflite_node,
const TfLiteRegistration* registration) final {
RETURN_IF_ERROR(CheckMaxSupportedOpVersion(registration, 4));
return CheckGpuDelegateCompatibility(context, tflite_node, registration);
}
absl::Status Parse(const TfLiteNode* tflite_node,
const TfLiteRegistration* registration,
GraphFloat32* graph, ObjectReader* reader) final {
const TfLiteLSTMParams* tf_options;
RETURN_IF_ERROR(RetrieveBuiltinData(tflite_node, &tf_options));
switch (tf_options->kernel_type) {
case kTfLiteLSTMFullKernel:
return ParseFull(tflite_node, registration, graph, reader, tf_options);
case kTfLiteLSTMBasicKernel:
return ParseBasic(tflite_node, registration, graph, reader, tf_options);
}
}
absl::flat_hash_map<int, ValueId> GetNewValueIdsForVariableInputNodes()
final {
return new_variable_input_value_map_;
}
private:
absl::Status ParseBasic(const TfLiteNode* tflite_node,
const TfLiteRegistration* registration,
GraphFloat32* graph, ObjectReader* reader,
const TfLiteLSTMParams* tf_options) {
if (tflite_node->inputs->size != 5) {
return absl::InvalidArgumentError("LSTM should have 5 input tensors");
}
if (tflite_node->outputs->size != 4) {
return absl::InvalidArgumentError("LSTM should have 4 output tensors");
}
RETURN_IF_ERROR(CheckBasicParameters(tf_options));
Node* concat_node = graph->NewNode();
concat_node->operation.type = ToString(OperationType::CONCAT);
ConcatAttributes concat_attr;
concat_attr.axis = Axis::CHANNELS;
concat_node->operation.attributes = concat_attr;
Node* fc_node = graph->NewNode();
fc_node->operation.type = ToString(OperationType::FULLY_CONNECTED);
FullyConnectedAttributes fc_attr;
RETURN_IF_ERROR(GetFullyConnectedAttributes(2, 3, reader, &fc_attr));
fc_node->operation.attributes = std::move(fc_attr);
Node* lstm_node = graph->NewNode();
lstm_node->operation.type = ToString(OperationType::LSTM);
LstmAttributes lstm_attr;
lstm_attr.kernel_type = LstmKernelType::BASIC;
lstm_node->operation.attributes = lstm_attr;
Value* concat_temp;
int concat_tensor_idx = tflite_node->outputs->data[2];
RETURN_IF_ERROR(
reader->ReadValueByTensorIdx(concat_tensor_idx, &concat_temp));
Value* activ_temp;
int activ_tensor_idx = tflite_node->outputs->data[3];
RETURN_IF_ERROR(
reader->ReadValueByTensorIdx(activ_tensor_idx, &activ_temp));
RETURN_IF_ERROR(reader->AddInput(concat_node, 0));
RETURN_IF_ERROR(reader->AddInput(concat_node, 1));
RETURN_IF_ERROR(graph->SetProducer(concat_node->id, concat_temp->id));
RETURN_IF_ERROR(graph->AddConsumer(fc_node->id, concat_temp->id));
RETURN_IF_ERROR(graph->SetProducer(fc_node->id, activ_temp->id));
RETURN_IF_ERROR(graph->AddConsumer(lstm_node->id, activ_temp->id));
RETURN_IF_ERROR(reader->AddInput(lstm_node, 4));
RETURN_IF_ERROR(reader->AddOutput(lstm_node, 1));
RETURN_IF_ERROR(reader->AddOutput(lstm_node, 0));
return absl::OkStatus();
}
absl::Status CheckBasicParameters(const TfLiteLSTMParams* tf_options) {
if (tf_options->activation != kTfLiteActTanh) {
return absl::UnimplementedError("Only TANH activation is supported.");
}
if (tf_options->cell_clip != 0.0f) {
return absl::UnimplementedError("cell_clip is not supported.");
}
if (tf_options->proj_clip != 0.0f) {
return absl::UnimplementedError("proj_clip is not supported.");
}
return absl::OkStatus();
}
absl::Status ParseFull(const TfLiteNode* tflite_node,
const TfLiteRegistration* registration,
GraphFloat32* graph, ObjectReader* reader,
const TfLiteLSTMParams* tf_options) {
RETURN_IF_ERROR(ParseLSTMAttributes(tflite_node, registration, graph,
reader, tf_options,
&new_variable_input_value_map_));
return absl::OkStatus();
}
absl::Status CheckFullParameters(const TfLiteLSTMParams* tf_options) {
if (tf_options->activation != kTfLiteActSigmoid &&
tf_options->activation != kTfLiteActTanh) {
return absl::UnimplementedError(
"Only sigmoid or tanh activation is supported.");
}
return absl::OkStatus();
}
absl::flat_hash_map<int, ValueId> new_variable_input_value_map_;
};
class OneHotOperationParser : public TFLiteOperationParser {
public:
absl::Status IsSupported(const TfLiteContext* context,
const TfLiteNode* tflite_node,
const TfLiteRegistration* registration) final {
RETURN_IF_ERROR(CheckMaxSupportedOpVersion(registration, 1));
return CheckGpuDelegateCompatibility(context, tflite_node, registration);
}
absl::Status Parse(const TfLiteNode* tflite_node,
const TfLiteRegistration* registration,
GraphFloat32* graph, ObjectReader* reader) final {
Node* node = graph->NewNode();
OneHotAttributes attr;
const TfLiteTensor* on_tensor = reader->GetInputTensor(2);
const TfLiteTensor* off_tensor = reader->GetInputTensor(3);
attr.on_value = GetTensorData<float>(on_tensor)[0];
attr.off_value = GetTensorData<float>(off_tensor)[0];
node->operation.type = ToString(OperationType::ONE_HOT);
node->operation.attributes = std::move(attr);
RETURN_IF_ERROR(reader->AddInput(node, 0));
RETURN_IF_ERROR(reader->AddOutputs(node));
return absl::OkStatus();
}
};
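// Parses TFLite PACK. A single input degenerates to RESHAPE; otherwise each
// input is reshaped (if needed) to the output shape with the pack axis set to
// 1, and the results are concatenated along that axis.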
class PackOperationParser : public TFLiteOperationParser {
public:
absl::Status IsSupported(const TfLiteContext* context,
const TfLiteNode* tflite_node,
const TfLiteRegistration* registration) final {
return CheckGpuDelegateCompatibility(context, tflite_node, registration);
}
absl::Status Parse(const TfLiteNode* tflite_node,
const TfLiteRegistration* registration,
GraphFloat32* graph, ObjectReader* reader) final {
if (tflite_node->inputs->size == 1) {
Node* node = graph->NewNode();
node->operation.type = ToString(OperationType::RESHAPE);
RETURN_IF_ERROR(reader->AddInput(node, 0));
RETURN_IF_ERROR(reader->AddOutputs(node));
ReshapeAttributes attr;
attr.new_shape = graph->FindOutputs(node->id)[0]->tensor.shape;
node->operation.attributes = attr;
return absl::OkStatus();
} else {
const TfLitePackParams* tf_options;
RETURN_IF_ERROR(RetrieveBuiltinData(tflite_node, &tf_options));
std::vector<const Value*> inputs;
for (uint32_t idx = 0; idx < tflite_node->inputs->size; ++idx) {
Value* value;
const auto status = reader->ReadValue(idx, &value);
if (status.ok()) {
inputs.push_back(value);
} else {
TensorFloat32 tensor;
RETURN_IF_ERROR(reader->ReadTensor(idx, &tensor));
Value* value;
RETURN_IF_ERROR(NewConstNode(std::move(tensor), graph, &value));
inputs.push_back(value);
}
}
const TfLiteTensor* output = reader->GetOutputTensor(0);
ConcatAttributes attr;
RETURN_IF_ERROR(
ExtractAxisFromIndex(*output, tf_options->axis, &attr.axis));
BHWC output_shape;
RETURN_IF_ERROR(ExtractTensorShape(*output, &output_shape));
BHWC input_required_shape = output_shape;
input_required_shape.set(attr.axis, 1);
for (int i = 0; i < inputs.size(); ++i) {
BHWC input_shape = inputs[i]->tensor.shape;
if (input_shape != input_required_shape) {
Node* node_reshape = graph->NewNode();
node_reshape->operation.type = ToString(OperationType::RESHAPE);
ReshapeAttributes reshape_attr;
reshape_attr.new_shape = input_required_shape;
node_reshape->operation.attributes = reshape_attr;
RETURN_IF_ERROR(graph->AddConsumer(node_reshape->id, inputs[i]->id));
Value* copy_value = graph->NewValue();
copy_value->tensor.type = inputs[i]->tensor.type;
copy_value->tensor.shape = input_required_shape;
RETURN_IF_ERROR(graph->SetProducer(node_reshape->id, copy_value->id));
inputs[i] = copy_value;
}
}
Node* node = graph->NewNode();
node->operation.type = ToString(OperationType::CONCAT);
RETURN_IF_ERROR(reader->AddOutputs(node));
for (const Value* input : inputs) {
RETURN_IF_ERROR(graph->AddConsumer(node->id, input->id));
}
node->operation.attributes = attr;
return absl::OkStatus();
}
}
};
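// Parses TFLite PRELU. Alpha may be per-channel (Linear) or a full HWC
// tensor and must match the corresponding input dimensions.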
class PReLUOperationParser : public TFLiteOperationParser {
public:
absl::Status IsSupported(const TfLiteContext* context,
const TfLiteNode* tflite_node,
const TfLiteRegistration* registration) final {
RETURN_IF_ERROR(CheckMaxSupportedOpVersion(registration, 1));
return absl::OkStatus();
}
absl::Status Parse(const TfLiteNode* tflite_node,
const TfLiteRegistration* registration,
GraphFloat32* graph, ObjectReader* reader) final {
Node* node = graph->NewNode();
node->operation.type = ToString(OperationType::PRELU);
RETURN_IF_ERROR(reader->AddInput(node, 0));
auto input_shape = graph->FindInputs(node->id)[0]->tensor.shape;
PReLUAttributes attr;
Tensor<Linear, DataType::FLOAT32> linear_alpha;
absl::Status status = reader->ReadTensor(1, &linear_alpha);
if (status.ok()) {
if (linear_alpha.shape.v != input_shape.c) {
return absl::InvalidArgumentError(
"Linear alpha shape does not match the number of input channels.");
}
attr.alpha = std::move(linear_alpha);
} else {
Tensor<HWC, DataType::FLOAT32> hwc_alpha;
RETURN_IF_ERROR(reader->ReadTensor(1, &hwc_alpha));
if (hwc_alpha.shape.h != input_shape.h ||
hwc_alpha.shape.w != input_shape.w ||
hwc_alpha.shape.c != input_shape.c) {
return absl::InvalidArgumentError(
"Alpha shape does not match input shape.");
}
attr.alpha = std::move(hwc_alpha);
}
node->operation.attributes = std::move(attr);
return reader->AddOutputs(node);
}
};
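// Parses TFLite PAD/PADV2 and MIRROR_PAD (mirror_pad_ selects REFLECT
// padding). The paddings tensor may describe a 4D or 3D input, and PADV2's
// constant value is read from the third input.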
class PadOperationParser : public TFLiteOperationParser {
public:
explicit PadOperationParser(bool mirror_pad) : mirror_pad_(mirror_pad) {}
absl::Status IsSupported(const TfLiteContext* context,
const TfLiteNode* tflite_node,
const TfLiteRegistration* registration) final {
RETURN_IF_ERROR(CheckMaxSupportedOpVersion(registration, 2));
return CheckGpuDelegateCompatibility(context, tflite_node, registration);
}
absl::Status Parse(const TfLiteNode* tflite_node,
const TfLiteRegistration* registration,
GraphFloat32* graph, ObjectReader* reader) final {
Node* node = graph->NewNode();
node->operation.type = ToString(OperationType::PAD);
RETURN_IF_ERROR(reader->AddInput(node, 0));
RETURN_IF_ERROR(reader->AddOutputs(node));
PadAttributes attr;
if (mirror_pad_) {
attr.type = PaddingContentType::REFLECT;
} else {
attr.type = PaddingContentType::ZEROS;
}
Tensor<HW, DataType::INT32> paddings;
RETURN_IF_ERROR(reader->ReadTensor(1, &paddings));
if (registration->builtin_code == kTfLiteBuiltinPadv2 &&
tflite_node->inputs->size == 3) {
const TfLiteTensor* const_tensor = reader->GetInputTensor(2);
attr.constant_values = GetTensorData<float>(const_tensor)[0];
}
if (paddings.shape.h == 4 && paddings.shape.w == 2) {
attr.prepended = BHWC(paddings.data[0], paddings.data[2],
paddings.data[4], paddings.data[6]);
attr.appended = BHWC(paddings.data[1], paddings.data[3], paddings.data[5],
paddings.data[7]);
} else if (paddings.shape.h == 3 && paddings.shape.w == 2) {
attr.prepended =
BHWC(1, paddings.data[0], paddings.data[2], paddings.data[4]);
attr.appended =
BHWC(1, paddings.data[1], paddings.data[3], paddings.data[5]);
} else {
return absl::InvalidArgumentError(
"Paddings tensor has unexpected shape.");
}
node->operation.attributes = attr;
return absl::OkStatus();
}
private:
bool mirror_pad_ = false;
};
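// Parses TFLite pooling ops; type_ selects the pooling type. A second
// output, when present, is treated as int32 pooling indices.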
class Pooling2DOperationParser : public TFLiteOperationParser {
public:
absl::Status IsSupported(const TfLiteContext* context,
const TfLiteNode* tflite_node,
const TfLiteRegistration* registration) final {
RETURN_IF_ERROR(CheckMaxSupportedOpVersion(registration, 2));
return CheckGpuDelegateCompatibility(context, tflite_node, registration);
}
explicit Pooling2DOperationParser(PoolingType type) : type_(type) {}
absl::Status Parse(const TfLiteNode* tflite_node,
const TfLiteRegistration* registration,
GraphFloat32* graph, ObjectReader* reader) final {
Node* node = graph->NewNode();
node->operation.type = ToString(OperationType::POOLING_2D);
RETURN_IF_ERROR(reader->AddInput(node, 0));
RETURN_IF_ERROR(reader->AddOutput(node, 0));
Pooling2DAttributes attr;
attr.type = type_;
auto input_shape = graph->FindInputs(node->id)[0]->tensor.shape;
const TfLitePoolParams* tf_options;
if (!RetrieveCustomInitialData(tflite_node, &tf_options).ok()) {
RETURN_IF_ERROR(RetrieveBuiltinData(tflite_node, &tf_options));
}
RETURN_IF_ERROR(MaybeFuseActivation(tf_options->activation, graph, node));
reader->AddOutput(node, 1).IgnoreError();
auto outputs = graph->FindOutputs(node->id);
attr.output_indices = outputs.size() == 2;
if (attr.output_indices) {
outputs[1]->tensor.type = DataType::INT32;
}
RETURN_IF_ERROR(ParsePoolingAttributes(tf_options, input_shape, &attr));
node->operation.attributes = attr;
return absl::OkStatus();
}
private:
const PoolingType type_;
};
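// Parses TFLite reduce ops. When keep_dims is false the reduction keeps the
// full rank internally and a trailing RESHAPE produces the squeezed output
// shape.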
class ReduceOperationParser : public TFLiteOperationParser {
public:
explicit ReduceOperationParser(OperationType operation_type)
: operation_type_(operation_type) {}
absl::Status IsSupported(const TfLiteContext* context,
const TfLiteNode* tflite_node,
const TfLiteRegistration* registration) final {
return CheckGpuDelegateCompatibility(context, tflite_node, registration);
}
absl::Status Parse(const TfLiteNode* tflite_node,
const TfLiteRegistration* registration,
GraphFloat32* graph, ObjectReader* reader) final {
Node* node = graph->NewNode();
node->operation.type = ToString(operation_type_);
RETURN_IF_ERROR(reader->AddInput(node, 0));
const TfLiteReducerParams* tf_options;
RETURN_IF_ERROR(RetrieveBuiltinData(tflite_node, &tf_options));
ReduceAttributes attr;
const TfLiteTensor* input = reader->GetInputTensor(0);
const TfLiteTensor* axes = reader->GetInputTensor(1);
for (int i = 0; i < NumElements(axes->dims); i++) {
Axis axis;
RETURN_IF_ERROR(ExtractAxisFromIndex(*input, axes->data.i32[i], &axis));
attr.dims.insert(axis);
}
node->operation.attributes = attr;
if (!tf_options->keep_dims) {
const auto& input_tensor = graph->FindInputs(node->id)[0]->tensor;
auto reduce_output_shape = input_tensor.shape;
for (auto axis : attr.dims) {
reduce_output_shape.set(axis, 1);
}
Node* node_reshape = graph->NewNode();
node_reshape->operation.type = ToString(OperationType::RESHAPE);
ReshapeAttributes reshape_attr;
const TfLiteTensor* output = reader->GetOutputTensor(0);
RETURN_IF_ERROR(ExtractTensorShape(*output, &reshape_attr.new_shape));
node_reshape->operation.attributes = reshape_attr;
Value* reduce_result = graph->NewValue();
reduce_result->tensor.type = input_tensor.type;
reduce_result->tensor.shape = reduce_output_shape;
RETURN_IF_ERROR(graph->SetProducer(node->id, reduce_result->id));
RETURN_IF_ERROR(graph->AddConsumer(node_reshape->id, reduce_result->id));
RETURN_IF_ERROR(reader->AddOutputs(node_reshape));
} else {
RETURN_IF_ERROR(reader->AddOutputs(node));
}
return absl::OkStatus();
}
private:
const OperationType operation_type_;
};
class QuantizeOperationParser : public TFLiteOperationParser {
public:
absl::Status IsSupported(const TfLiteContext* context,
const TfLiteNode* tflite_node,
const TfLiteRegistration* registration) final {
RETURN_IF_ERROR(CheckMaxSupportedOpVersion(registration, 2));
return CheckGpuDelegateCompatibility(context, tflite_node, registration);
}
absl::Status Parse(const TfLiteNode* tflite_node,
const TfLiteRegistration* registration,
GraphFloat32* graph, ObjectReader* reader) final {
Node* node = graph->NewNode();
node->operation.type = ToString(OperationType::QUANTIZE_AND_DEQUANTIZE);
RETURN_IF_ERROR(reader->AddInput(node, 0));
RETURN_IF_ERROR(reader->AddOutputs(node));
auto output_value = graph->FindOutputs(node->id)[0];
if (!output_value->quant_params) {
return absl::InvalidArgumentError(
"Encountered Quantize output with no quant params");
}
QuantizeAndDequantizeAttributes attr;
attr.min = output_value->quant_params.value().min;
attr.max = output_value->quant_params.value().max;
attr.scale = output_value->quant_params.value().scale;
node->operation.attributes = attr;
return absl::OkStatus();
}
};
class ReLUOperationParser : public TFLiteOperationParser {
public:
explicit ReLUOperationParser(int activation_min, int activation_max)
: activation_min_(activation_min), activation_max_(activation_max) {}
absl::Status IsSupported(const TfLiteContext* context,
const TfLiteNode* tflite_node,
const TfLiteRegistration* registration) final {
RETURN_IF_ERROR(CheckMaxSupportedOpVersion(registration, 2));
return absl::OkStatus();
}
absl::Status Parse(const TfLiteNode* tflite_node,
const TfLiteRegistration* registration,
GraphFloat32* graph, ObjectReader* reader) final {
Node* node = graph->NewNode();
node->operation.type = ToString(OperationType::RELU);
RETURN_IF_ERROR(reader->AddInput(node, 0));
ReLUAttributes attr;
const TfLiteLeakyReluParams* tf_options;
auto status = RetrieveBuiltinData(tflite_node, &tf_options);
attr.alpha = status.ok() ? tf_options->alpha : 0;
attr.activation_min = activation_min_;
attr.activation_max = activation_max_;
node->operation.attributes = attr;
return reader->AddOutputs(node);
}
private:
const int activation_min_;
const int activation_max_;
};
class ResamplerOperationParser : public TFLiteOperationParser {
public:
absl::Status IsSupported(const TfLiteContext* context,
const TfLiteNode* tflite_node,
const TfLiteRegistration* registration) final {
return CheckGpuDelegateCompatibility(context, tflite_node, registration);
}
absl::Status Parse(const TfLiteNode* tflite_node,
const TfLiteRegistration* registration,
GraphFloat32* graph, ObjectReader* reader) final {
Node* node = graph->NewNode();
RETURN_IF_ERROR(reader->AddInput(node, 0));
RETURN_IF_ERROR(reader->AddInput(node, 1));
RETURN_IF_ERROR(reader->AddOutputs(node));
node->operation.type = ToString(OperationType::RESAMPLER);
auto src_shape = graph->FindInputs(node->id)[0]->tensor.shape;
auto warp_shape = graph->FindInputs(node->id)[1]->tensor.shape;
auto output_value = graph->FindOutputs(node->id)[0];
output_value->tensor.shape =
BHWC(src_shape.b, warp_shape.h, warp_shape.w, src_shape.c);
return absl::OkStatus();
}
};
class ReshapeOperationParser : public TFLiteOperationParser {
public:
absl::Status IsSupported(const TfLiteContext* context,
const TfLiteNode* tflite_node,
const TfLiteRegistration* registration) final {
RETURN_IF_ERROR(CheckMaxSupportedOpVersion(registration, 1));
return CheckGpuDelegateCompatibility(context, tflite_node, registration);
}
absl::Status Parse(const TfLiteNode* tflite_node,
const TfLiteRegistration* registration,
GraphFloat32* graph, ObjectReader* reader) final {
Node* node = graph->NewNode();
node->operation.type = ToString(OperationType::RESHAPE);
RETURN_IF_ERROR(reader->AddInput(node, 0));
RETURN_IF_ERROR(reader->AddOutputs(node));
ReshapeAttributes attr;
attr.new_shape = graph->FindOutputs(node->id)[0]->tensor.shape;
node->operation.attributes = attr;
return absl::OkStatus();
}
};
class Resize2DOperationParser : public TFLiteOperationParser {
public:
explicit Resize2DOperationParser(SamplingType sampling_type)
: sampling_type_(sampling_type) {}
absl::Status IsSupported(const TfLiteContext* context,
const TfLiteNode* tflite_node,
const TfLiteRegistration* registration) final {
RETURN_IF_ERROR(CheckMaxSupportedOpVersion(registration, 3));
return CheckGpuDelegateCompatibility(context, tflite_node, registration);
}
absl::Status Parse(const TfLiteNode* tflite_node,
const TfLiteRegistration* registration,
GraphFloat32* graph, ObjectReader* reader) final {
Node* node = graph->NewNode();
node->operation.type = ToString(OperationType::RESIZE);
RETURN_IF_ERROR(reader->AddInput(node, 0));
RETURN_IF_ERROR(reader->AddOutputs(node));
Resize2DAttributes attr;
RETURN_IF_ERROR(GetAlignCornersValue(tflite_node, &attr.align_corners));
RETURN_IF_ERROR(
GetHalfPixelCentersValue(tflite_node, &attr.half_pixel_centers));
attr.type = sampling_type_;
attr.new_shape.CopyAllDefinedAxis(
graph->FindOutputs(node->id)[0]->tensor.shape);
node->operation.attributes = attr;
return absl::OkStatus();
}
private:
absl::Status GetAlignCornersValue(const TfLiteNode* tflite_node,
bool* align_corners) {
switch (sampling_type_) {
case SamplingType::BILINEAR:
return GetAlignCornersValueForType<TfLiteResizeBilinearParams>(
tflite_node, align_corners);
case SamplingType::NEAREST:
return GetAlignCornersValueForType<TfLiteResizeNearestNeighborParams>(
tflite_node, align_corners);
case SamplingType::UNKNOWN:
return absl::InternalError("Sampling type is not specified");
}
return absl::OkStatus();
}
template <class T>
absl::Status GetAlignCornersValueForType(const TfLiteNode* tflite_node,
bool* align_corners) {
const T* tf_options;
RETURN_IF_ERROR(RetrieveBuiltinData(tflite_node, &tf_options));
*align_corners = tf_options->align_corners;
return absl::OkStatus();
}
absl::Status GetHalfPixelCentersValue(const TfLiteNode* tflite_node,
bool* half_pixel_centers) {
if (sampling_type_ == SamplingType::BILINEAR) {
const TfLiteResizeBilinearParams* tf_options;
RETURN_IF_ERROR(RetrieveBuiltinData(tflite_node, &tf_options));
if (tf_options->align_corners && tf_options->half_pixel_centers) {
return absl::InternalError(
"If half_pixel_centers is True, align_corners must be False.");
}
*half_pixel_centers = tf_options->half_pixel_centers;
} else {
const TfLiteResizeNearestNeighborParams* tf_options;
RETURN_IF_ERROR(RetrieveBuiltinData(tflite_node, &tf_options));
*half_pixel_centers = tf_options->half_pixel_centers;
}
return absl::OkStatus();
}
SamplingType sampling_type_ = SamplingType::UNKNOWN;
};
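// Parses TFLite SELECT_V2. One-element condition and branch tensors are
// flagged for broadcast; constant branches are materialized as const nodes.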
class SelectV2OperationParser : public TFLiteOperationParser {
public:
absl::Status IsSupported(const TfLiteContext* context,
const TfLiteNode* tflite_node,
const TfLiteRegistration* registration) final {
RETURN_IF_ERROR(CheckMaxSupportedOpVersion(registration, 1));
return CheckGpuDelegateCompatibility(context, tflite_node, registration);
}
absl::Status Parse(const TfLiteNode* tflite_node,
const TfLiteRegistration* registration,
GraphFloat32* graph, ObjectReader* reader) final {
SelectV2Attributes attr;
attr.scalar_cond = NumElements(reader->GetInputTensor(0)) < 2;
Node* node = graph->NewNode();
node->operation.type = ToString(OperationType::SELECT_V2);
RETURN_IF_ERROR(reader->AddInput(node, 0));
{
const TfLiteTensor* tfl_tensor = reader->GetInputTensor(1);
attr.broadcast_true = NumElements(tfl_tensor) < 2;
if (IsConstantTensor(tfl_tensor)) {
Tensor<BHWC, DataType::FLOAT32> tensor;
if (attr.broadcast_true) {
Tensor<Scalar, DataType::FLOAT32> temp;
RETURN_IF_ERROR(reader->ReadTensor(1, &temp));
tensor.shape = BHWC(1, 1, 1, 1);
tensor.data.push_back(temp.data[0]);
} else {
RETURN_IF_ERROR(reader->ReadTensor(1, &tensor));
}
Value* value;
RETURN_IF_ERROR(NewConstNode(tensor, graph, &value));
RETURN_IF_ERROR(graph->AddConsumer(node->id, value->id));
} else {
RETURN_IF_ERROR(reader->AddInput(node, 1));
}
}
{
const TfLiteTensor* tfl_tensor = reader->GetInputTensor(2);
attr.broadcast_false = NumElements(tfl_tensor) < 2;
if (IsConstantTensor(tfl_tensor)) {
Tensor<BHWC, DataType::FLOAT32> tensor;
if (attr.broadcast_false) {
Tensor<Scalar, DataType::FLOAT32> temp;
RETURN_IF_ERROR(reader->ReadTensor(2, &temp));
tensor.shape = BHWC(1, 1, 1, 1);
tensor.data.push_back(temp.data[0]);
} else if (absl::IsInvalidArgument(reader->ReadTensor(2, &tensor))) {
Tensor<HWC, DataType::FLOAT32> temp;
RETURN_IF_ERROR(reader->ReadTensor(2, &temp));
tensor.shape = BHWC(1, temp.shape.h, temp.shape.w, temp.shape.c);
tensor.id = temp.id;
tensor.data.reserve(temp.data.size());
for (float data : temp.data) tensor.data.push_back(data);
}
Value* value;
RETURN_IF_ERROR(NewConstNode(tensor, graph, &value));
RETURN_IF_ERROR(graph->AddConsumer(node->id, value->id));
} else {
RETURN_IF_ERROR(reader->AddInput(node, 2));
}
}
RETURN_IF_ERROR(reader->AddOutputs(node));
node->operation.attributes = std::move(attr);
return absl::OkStatus();
}
};
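// Parses TFLite SLICE for 3D/4D tensors. A size of -1 extends to the end of
// the dimension, and the computed ends must match the output shape.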
class SliceOperationParser : public TFLiteOperationParser {
public:
absl::Status IsSupported(const TfLiteContext* context,
const TfLiteNode* tflite_node,
const TfLiteRegistration* registration) final {
RETURN_IF_ERROR(CheckMaxSupportedOpVersion(registration, 2));
return CheckGpuDelegateCompatibility(context, tflite_node, registration);
}
absl::Status Parse(const TfLiteNode* tflite_node,
const TfLiteRegistration* registration,
GraphFloat32* graph, ObjectReader* reader) final {
Node* node = graph->NewNode();
node->operation.type = ToString(OperationType::SLICE);
RETURN_IF_ERROR(reader->AddOutputs(node));
Value* input;
RETURN_IF_ERROR(reader->ReadValue(0, &input));
RETURN_IF_ERROR(graph->AddConsumer(node->id, input->id));
const TfLiteTensor* tfl_input = reader->GetInputTensor(0);
const int input_dims = tfl_input->dims->size;
SliceAttributes attr;
attr.strides = BHWC(1, 1, 1, 1);
Tensor<Linear, DataType::INT32> starts, sizes;
RETURN_IF_ERROR(reader->ReadTensor(1, &starts));
RETURN_IF_ERROR(reader->ReadTensor(2, &sizes));
if (starts.data.size() != sizes.data.size()) {
return absl::InvalidArgumentError("Number of starts must match number of sizes.");
}
BHWC bhwc_starts(0, 0, 0, 0);
BHWC bhwc_sizes = input->tensor.shape;
if (input_dims == 4) {
if (starts.data.size() == 4) {
bhwc_starts.b = starts.data[0];
bhwc_starts.h = starts.data[1];
bhwc_starts.w = starts.data[2];
bhwc_starts.c = starts.data[3];
bhwc_sizes.b = sizes.data[0];
bhwc_sizes.h = sizes.data[1];
bhwc_sizes.w = sizes.data[2];
bhwc_sizes.c = sizes.data[3];
} else if (starts.data.size() == 3) {
bhwc_starts.h = starts.data[0];
bhwc_starts.w = starts.data[1];
bhwc_starts.c = starts.data[2];
bhwc_sizes.h = sizes.data[0];
bhwc_sizes.w = sizes.data[1];
bhwc_sizes.c = sizes.data[2];
} else {
return absl::UnimplementedError(
"Slicing is supported for 3 or 4 dimensional tensors only.");
}
} else if (input_dims == 3) {
if (starts.data.size() == 3) {
bhwc_starts.b = starts.data[0];
bhwc_starts.w = starts.data[1];
bhwc_starts.c = starts.data[2];
bhwc_sizes.b = sizes.data[0];
bhwc_sizes.w = sizes.data[1];
bhwc_sizes.c = sizes.data[2];
} else {
return absl::UnimplementedError(
"Slicing is supported for 3 or 4 dimensional tensors only.");
}
} else {
return absl::UnimplementedError(
"Slicing is supported for 3 or 4 dimensional tensors only.");
}
const auto& in_shape = input->tensor.shape;
if (bhwc_sizes.b == -1) {
bhwc_sizes.b = in_shape.b - bhwc_starts.b;
}
if (bhwc_sizes.h == -1) {
bhwc_sizes.h = in_shape.h - bhwc_starts.h;
}
if (bhwc_sizes.w == -1) {
bhwc_sizes.w = in_shape.w - bhwc_starts.w;
}
if (bhwc_sizes.c == -1) {
bhwc_sizes.c = in_shape.c - bhwc_starts.c;
}
attr.starts = bhwc_starts;
attr.ends =
BHWC(bhwc_starts.b + bhwc_sizes.b, bhwc_starts.h + bhwc_sizes.h,
bhwc_starts.w + bhwc_sizes.w, bhwc_starts.c + bhwc_sizes.c);
RETURN_IF_ERROR(UpdateIfNegative(in_shape, &attr));
auto out_shape = graph->FindOutputs(node->id)[0]->tensor.shape;
if ((attr.ends.b - attr.starts.b) != out_shape.b) {
return absl::UnimplementedError("Output batch doesn't match");
}
if ((attr.ends.h - attr.starts.h) != out_shape.h) {
return absl::UnimplementedError("Output height doesn't match");
}
if ((attr.ends.w - attr.starts.w) != out_shape.w) {
return absl::UnimplementedError("Output width doesn't match");
}
if ((attr.ends.c - attr.starts.c) != out_shape.c) {
return absl::UnimplementedError("Output channels don't match");
}
node->operation.attributes = attr;
return absl::OkStatus();
}
private:
absl::Status UpdateIfNegative(const BHWC& input_shape,
SliceAttributes* attr) {
if (attr->ends.h < 0) {
attr->ends.h = input_shape.h + attr->ends.h;
}
if (attr->ends.w < 0) {
attr->ends.w = input_shape.w + attr->ends.w;
}
if (attr->ends.c < 0) {
attr->ends.c = input_shape.c + attr->ends.c;
}
if (attr->ends.b < 0) {
attr->ends.b = input_shape.b + attr->ends.b;
}
return absl::OkStatus();
}
};
class SoftmaxOperationParser : public TFLiteOperationParser {
public:
absl::Status IsSupported(const TfLiteContext* context,
const TfLiteNode* tflite_node,
const TfLiteRegistration* registration) final {
RETURN_IF_ERROR(CheckMaxSupportedOpVersion(registration, 2));
return CheckGpuDelegateCompatibility(context, tflite_node, registration);
}
absl::Status Parse(const TfLiteNode* tflite_node,
const TfLiteRegistration* registration,
GraphFloat32* graph, ObjectReader* reader) final {
Node* node = graph->NewNode();
node->operation.type = ToString(OperationType::SOFTMAX);
RETURN_IF_ERROR(reader->AddInput(node, 0));
RETURN_IF_ERROR(reader->AddOutputs(node));
const TfLiteSoftmaxParams* tf_options;
RETURN_IF_ERROR(RetrieveBuiltinData(tflite_node, &tf_options));
if (tf_options->beta != 1) {
return absl::UnimplementedError("Softmax.beta != 1 is not supported.");
}
SoftmaxAttributes attr;
attr.axis = Axis::CHANNELS;
node->operation.attributes = attr;
return absl::OkStatus();
}
};
class SpaceToDepthOperationParser : public TFLiteOperationParser {
public:
absl::Status IsSupported(const TfLiteContext* context,
const TfLiteNode* tflite_node,
const TfLiteRegistration* registration) final {
RETURN_IF_ERROR(CheckMaxSupportedOpVersion(registration, 2));
return CheckGpuDelegateCompatibility(context, tflite_node, registration);
}
absl::Status Parse(const TfLiteNode* tflite_node,
const TfLiteRegistration* registration,
GraphFloat32* graph, ObjectReader* reader) final {
Node* node = graph->NewNode();
node->operation.type = ToString(OperationType::SPACE_TO_DEPTH);
RETURN_IF_ERROR(reader->AddInput(node, 0));
RETURN_IF_ERROR(reader->AddOutputs(node));
const TfLiteSpaceToDepthParams* tf_options;
RETURN_IF_ERROR(RetrieveBuiltinData(tflite_node, &tf_options));
SpaceToDepthAttributes attr;
attr.block_size = tf_options->block_size;
node->operation.attributes = attr;
return absl::OkStatus();
}
};
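// Parses TFLite SPLIT: input 0 is the split axis, input 1 is the data
// tensor. num_splits == 1 degenerates to RESHAPE.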
class SplitOperationParser : public TFLiteOperationParser {
public:
absl::Status IsSupported(const TfLiteContext* context,
const TfLiteNode* tflite_node,
const TfLiteRegistration* registration) final {
return CheckGpuDelegateCompatibility(context, tflite_node, registration);
}
absl::Status Parse(const TfLiteNode* tflite_node,
const TfLiteRegistration* registration,
GraphFloat32* graph, ObjectReader* reader) final {
const TfLiteSplitParams* split_params;
RETURN_IF_ERROR(RetrieveBuiltinData(tflite_node, &split_params));
if (split_params->num_splits == 1) {
Node* node = graph->NewNode();
node->operation.type = ToString(OperationType::RESHAPE);
RETURN_IF_ERROR(reader->AddInput(node, 1));
RETURN_IF_ERROR(reader->AddOutputs(node));
ReshapeAttributes attr;
attr.new_shape = graph->FindOutputs(node->id)[0]->tensor.shape;
node->operation.attributes = attr;
return absl::OkStatus();
}
const TfLiteTensor* input = reader->GetInputTensor(1);
const TfLiteTensor* axis_tensor = reader->GetInputTensor(0);
SplitAttributes attr;
RETURN_IF_ERROR(
ExtractAxisFromIndex(*input, axis_tensor->data.i32[0], &attr.axis));
Node* node = graph->NewNode();
node->operation.type = ToString(OperationType::SPLIT);
node->operation.attributes = attr;
RETURN_IF_ERROR(reader->AddInput(node, 1));
for (int i = 0; i < tflite_node->outputs->size; ++i) {
RETURN_IF_ERROR(reader->AddOutput(node, i));
}
return absl::OkStatus();
}
};
class SplitVOperationParser : public TFLiteOperationParser {
public:
absl::Status IsSupported(const TfLiteContext* context,
const TfLiteNode* tflite_node,
const TfLiteRegistration* registration) final {
return CheckGpuDelegateCompatibility(context, tflite_node, registration);
}
absl::Status Parse(const TfLiteNode* tflite_node,
const TfLiteRegistration* registration,
GraphFloat32* graph, ObjectReader* reader) final {
const TfLiteSplitVParams* split_params;
RETURN_IF_ERROR(RetrieveBuiltinData(tflite_node, &split_params));
if (split_params->num_splits == 1) {
Node* node = graph->NewNode();
node->operation.type = ToString(OperationType::RESHAPE);
RETURN_IF_ERROR(reader->AddInput(node, 0));
RETURN_IF_ERROR(reader->AddOutputs(node));
ReshapeAttributes attr;
attr.new_shape = graph->FindOutputs(node->id)[0]->tensor.shape;
node->operation.attributes = attr;
return absl::OkStatus();
}
const TfLiteTensor* input = reader->GetInputTensor(0);
const TfLiteTensor* axis_tensor = reader->GetInputTensor(2);
SplitAttributes attr;
RETURN_IF_ERROR(
ExtractAxisFromIndex(*input, axis_tensor->data.i32[0], &attr.axis));
Node* node = graph->NewNode();
node->operation.type = ToString(OperationType::SPLIT);
node->operation.attributes = attr;
RETURN_IF_ERROR(reader->AddInput(node, 0));
for (int i = 0; i < tflite_node->outputs->size; ++i) {
RETURN_IF_ERROR(reader->AddOutput(node, i));
}
return absl::OkStatus();
}
};
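// Parses TFLite STRIDED_SLICE. begin/end masks are applied per axis and
// negative indices are wrapped; ellipsis, new_axis and shrink_axis masks are
// rejected.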
class StridedSliceOperationParser : public TFLiteOperationParser {
public:
absl::Status IsSupported(const TfLiteContext* context,
const TfLiteNode* tflite_node,
const TfLiteRegistration* registration) final {
RETURN_IF_ERROR(CheckMaxSupportedOpVersion(registration, 4));
return CheckGpuDelegateCompatibility(context, tflite_node, registration);
}
absl::Status Parse(const TfLiteNode* tflite_node,
const TfLiteRegistration* registration,
GraphFloat32* graph, ObjectReader* reader) final {
Node* node = graph->NewNode();
node->operation.type = ToString(OperationType::SLICE);
RETURN_IF_ERROR(reader->AddOutputs(node));
Value* input;
RETURN_IF_ERROR(reader->ReadValue(0, &input));
RETURN_IF_ERROR(graph->AddConsumer(node->id, input->id));
Tensor<Linear, DataType::INT32> tmp;
RETURN_IF_ERROR(reader->ReadTensor(1, &tmp));
bool read_without_batch = tmp.data.size() == 3;
bool read_with_batch = tmp.data.size() == 4;
if (!read_without_batch && !read_with_batch) {
return absl::UnimplementedError(
"Slicing is supported for 3 or 4 dimensional tensors only.");
}
const TfLiteStridedSliceParams* tf_options;
RETURN_IF_ERROR(RetrieveBuiltinData(tflite_node, &tf_options));
RETURN_IF_ERROR(CheckOptionsSupport(tf_options));
auto out_shape = graph->FindOutputs(node->id)[0]->tensor.shape;
SliceAttributes attr;
if (read_without_batch) {
RETURN_IF_ERROR(ReadAttribsWithoutBatch(reader, tf_options,
input->tensor.shape, &attr));
}
if (read_with_batch) {
RETURN_IF_ERROR(
ReadAttribsWithBatch(reader, tf_options, input->tensor.shape, &attr));
}
if (attr.strides.b == 0 || attr.strides.h == 0 || attr.strides.w == 0 ||
attr.strides.c == 0) {
return absl::InvalidArgumentError("Stride values must be non-zero");
}
if (attr.strides.b < 0 || attr.strides.h < 0 || attr.strides.w < 0 ||
attr.strides.c < 0) {
return absl::UnimplementedError("Reverse slices are not supported.");
}
if ((attr.ends.b - attr.starts.b + attr.strides.b - 1) / attr.strides.b !=
out_shape.b) {
return absl::UnimplementedError("Output batch doesn't match");
}
if ((attr.ends.h - attr.starts.h + attr.strides.h - 1) / attr.strides.h !=
out_shape.h) {
return absl::UnimplementedError("Output height doesn't match");
}
if ((attr.ends.w - attr.starts.w + attr.strides.w - 1) / attr.strides.w !=
out_shape.w) {
return absl::UnimplementedError("Output width doesn't match");
}
if ((attr.ends.c - attr.starts.c + attr.strides.c - 1) / attr.strides.c !=
out_shape.c) {
return absl::UnimplementedError("Output channels don't match");
}
node->operation.attributes = attr;
return absl::OkStatus();
}
private:
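// ignore_b/h/w/c are the per-axis bit positions within begin_mask/end_mask:
// 4D inputs use bits 1, 2, 4, 8 for B, H, W, C; 3D inputs use bits 1, 2, 4
// for H, W, C and pass 0 for batch so the mask never applies to it.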
absl::Status UpdateWithMask(const TfLiteStridedSliceParams* tf_options,
const BHWC& input_shape, int ignore_b,
int ignore_h, int ignore_w, int ignore_c,
SliceAttributes* attr) {
if (tf_options->begin_mask & ignore_h) {
attr->starts.h = 0;
}
if (tf_options->begin_mask & ignore_w) {
attr->starts.w = 0;
}
if (tf_options->begin_mask & ignore_c) {
attr->starts.c = 0;
}
if (tf_options->begin_mask & ignore_b) {
attr->starts.b = 0;
}
if (tf_options->end_mask & ignore_h) {
attr->ends.h = input_shape.h;
}
if (tf_options->end_mask & ignore_w) {
attr->ends.w = input_shape.w;
}
if (tf_options->end_mask & ignore_c) {
attr->ends.c = input_shape.c;
}
if (tf_options->end_mask & ignore_b) {
attr->ends.b = input_shape.b;
}
return absl::OkStatus();
}
absl::Status UpdateIfNegative(const BHWC& input_shape,
SliceAttributes* attr) {
if (attr->ends.h < 0) {
attr->ends.h = input_shape.h + attr->ends.h;
}
if (attr->ends.w < 0) {
attr->ends.w = input_shape.w + attr->ends.w;
}
if (attr->ends.c < 0) {
attr->ends.c = input_shape.c + attr->ends.c;
}
if (attr->ends.b < 0) {
attr->ends.b = input_shape.b + attr->ends.b;
}
if (attr->starts.h < 0) {
attr->starts.h = input_shape.h + attr->starts.h;
}
if (attr->starts.w < 0) {
attr->starts.w = input_shape.w + attr->starts.w;
}
if (attr->starts.c < 0) {
attr->starts.c = input_shape.c + attr->starts.c;
}
if (attr->starts.b < 0) {
attr->starts.b = input_shape.b + attr->starts.b;
}
return absl::OkStatus();
}
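  // 4-D case: the begin/end/strides tensors (inputs 1..3) are read directly
  // as BHWC. Mask bits are B=1, H=2, W=4, C=8.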
absl::Status ReadAttribsWithBatch(const ObjectReader* reader,
const TfLiteStridedSliceParams* tf_options,
const BHWC& input_shape,
SliceAttributes* attr) {
auto read_bhwc = [&](int tensor_index, BHWC* bhwc) -> absl::Status {
Tensor<Linear, DataType::INT32> t;
RETURN_IF_ERROR(reader->ReadTensor(tensor_index, &t));
*bhwc = BHWC(t.data[0], t.data[1], t.data[2], t.data[3]);
return absl::OkStatus();
};
RETURN_IF_ERROR(read_bhwc(1, &attr->starts));
RETURN_IF_ERROR(read_bhwc(2, &attr->ends));
RETURN_IF_ERROR(read_bhwc(3, &attr->strides));
RETURN_IF_ERROR(UpdateIfNegative(input_shape, attr));
RETURN_IF_ERROR(UpdateWithMask(tf_options, input_shape, 1, 2, 4, 8, attr));
return absl::OkStatus();
}
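  // 3-D case: begin/end/strides describe H, W and C only. The batch axis is
  // synthesized as a full pass-through (start 0, end = input batch,
  // stride 1), and the mask bits shift to H=1, W=2, C=4.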
absl::Status ReadAttribsWithoutBatch(
const ObjectReader* reader, const TfLiteStridedSliceParams* tf_options,
const BHWC& input_shape, SliceAttributes* attr) {
auto read_hwc = [&](int tensor_index, BHWC* bhwc) -> absl::Status {
Tensor<Linear, DataType::INT32> t;
RETURN_IF_ERROR(reader->ReadTensor(tensor_index, &t));
*bhwc = BHWC(0, t.data[0], t.data[1], t.data[2]);
return absl::OkStatus();
};
RETURN_IF_ERROR(read_hwc(1, &attr->starts));
RETURN_IF_ERROR(read_hwc(2, &attr->ends));
RETURN_IF_ERROR(read_hwc(3, &attr->strides));
RETURN_IF_ERROR(UpdateIfNegative(input_shape, attr));
RETURN_IF_ERROR(UpdateWithMask(tf_options, input_shape, 0, 1, 2, 4, attr));
attr->starts.b = 0;
attr->ends.b = input_shape.b;
attr->strides.b = 1;
return absl::OkStatus();
}
absl::Status CheckOptionsSupport(const TfLiteStridedSliceParams* tf_options) {
if (tf_options->ellipsis_mask) {
return absl::UnimplementedError("Slice does not support ellipsis_mask.");
}
if (tf_options->new_axis_mask) {
return absl::UnimplementedError("Slice does not support new_axis_mask.");
}
if (tf_options->shrink_axis_mask) {
return absl::UnimplementedError(
"Slice does not support shrink_axis_mask parameter. ");
}
return absl::OkStatus();
}
};
class TileOperationParser : public TFLiteOperationParser {
public:
absl::Status IsSupported(const TfLiteContext* context,
const TfLiteNode* tflite_node,
const TfLiteRegistration* registration) final {
return CheckGpuDelegateCompatibility(context, tflite_node, registration);
}
absl::Status Parse(const TfLiteNode* tflite_node,
const TfLiteRegistration* registration,
GraphFloat32* graph, ObjectReader* reader) final {
Node* node = graph->NewNode();
node->operation.type = ToString(OperationType::TILE);
RETURN_IF_ERROR(reader->AddInput(node, 0));
RETURN_IF_ERROR(reader->AddOutputs(node));
return absl::OkStatus();
}
};
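// Builtin TRANSPOSE_CONV: input 0 is the output shape, input 1 the weights,
// input 2 the activation and input 3 an optional bias. The weights may be a
// second runtime input instead of a constant tensor.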
class TransposeConvBuiltinOperationParser : public TFLiteOperationParser {
public:
absl::Status IsSupported(const TfLiteContext* context,
const TfLiteNode* tflite_node,
const TfLiteRegistration* registration) final {
RETURN_IF_ERROR(CheckMaxSupportedOpVersion(registration, 3));
return CheckGpuDelegateCompatibility(context, tflite_node, registration);
}
absl::Status Parse(const TfLiteNode* tflite_node,
const TfLiteRegistration* registration,
GraphFloat32* graph, ObjectReader* reader) final {
auto* node = graph->NewNode();
node->operation.type = ToString(OperationType::CONVOLUTION_TRANSPOSED);
Value* input;
RETURN_IF_ERROR(reader->ReadValue(2, &input));
RETURN_IF_ERROR(graph->AddConsumer(node->id, input->id));
RETURN_IF_ERROR(reader->AddOutputs(node));
const TfLiteTransposeConvParams* tf_options;
RETURN_IF_ERROR(RetrieveBuiltinData(tflite_node, &tf_options));
ConvolutionTransposedAttributes attr;
attr.stride = tf_options
? HW(tf_options->stride_height, tf_options->stride_width)
: HW(1, 1);
const int runtime_inputs = reader->GetNumberOfRuntimeInputs();
if (runtime_inputs == 2) {
RETURN_IF_ERROR(reader->AddInput(node, 1));
auto weights_shape = graph->FindInputs(node->id)[1]->tensor.shape;
attr.weights.shape = OHWI(weights_shape.b, weights_shape.h,
weights_shape.w, weights_shape.c);
} else {
RETURN_IF_ERROR(reader->ReadTensor(1, &attr.weights));
}
reader->ReadTensor(3, &attr.bias).IgnoreError();
UpdatePadding(tf_options->padding,
graph->FindInputs(node->id)[0]->tensor.shape, &attr);
node->operation.attributes = std::move(attr);
return absl::OkStatus();
}
};
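// Custom "Convolution2DTransposeBias": input 0 is the activation, input 1
// constant weights and input 2 an optional bias. Stride and padding come from
// the custom initial data; if absent, stride 1x1 and unknown padding are
// assumed.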
class TransposeConvCustomOperationParser : public TFLiteOperationParser {
public:
absl::Status IsSupported(const TfLiteContext* context,
const TfLiteNode* tflite_node,
const TfLiteRegistration* registration) final {
return CheckGpuDelegateCompatibility(context, tflite_node, registration);
}
absl::Status Parse(const TfLiteNode* tflite_node,
const TfLiteRegistration* registration,
GraphFloat32* graph, ObjectReader* reader) final {
auto* node = graph->NewNode();
node->operation.type = ToString(OperationType::CONVOLUTION_TRANSPOSED);
RETURN_IF_ERROR(reader->AddInput(node, 0));
RETURN_IF_ERROR(reader->AddOutputs(node));
const TfLiteTransposeConvParams* tf_options;
auto status = RetrieveCustomInitialData(tflite_node, &tf_options);
ConvolutionTransposedAttributes attr;
attr.stride = status.ok()
? HW(tf_options->stride_height, tf_options->stride_width)
: HW(1, 1);
RETURN_IF_ERROR(reader->ReadTensor(1, &attr.weights));
reader->ReadTensor(2, &attr.bias).IgnoreError();
UpdatePadding(status.ok() ? tf_options->padding : kTfLitePaddingUnknown,
graph->FindInputs(node->id)[0]->tensor.shape, &attr);
node->operation.attributes = std::move(attr);
return absl::OkStatus();
}
};
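// TFLite permutations of rank < 4 are widened to full BHWC permutations:
// 3-element perms are interpreted as (B, W, C) with H kept in place, and
// 2-element perms as (B, C) with H and W kept in place.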
class TransposeOperationParser : public TFLiteOperationParser {
public:
absl::Status IsSupported(const TfLiteContext* context,
const TfLiteNode* tflite_node,
const TfLiteRegistration* registration) final {
RETURN_IF_ERROR(CheckMaxSupportedOpVersion(registration, 4));
return CheckGpuDelegateCompatibility(context, tflite_node, registration);
}
absl::Status Parse(const TfLiteNode* tflite_node,
const TfLiteRegistration* registration,
GraphFloat32* graph, ObjectReader* reader) final {
Node* node = graph->NewNode();
node->operation.type = ToString(OperationType::TRANSPOSE);
RETURN_IF_ERROR(reader->AddInput(node, 0));
RETURN_IF_ERROR(reader->AddOutputs(node));
TransposeAttributes attr;
Tensor<Linear, DataType::INT32> perm;
RETURN_IF_ERROR(reader->ReadTensor(1, &perm));
std::map<Axis, int> axis_to_index = {{Axis::BATCH, 0},
{Axis::HEIGHT, 1},
{Axis::WIDTH, 2},
{Axis::CHANNELS, 3}};
if (perm.data.size() == 4) {
attr.perm = BHWC(perm.data[0], perm.data[1], perm.data[2], perm.data[3]);
} else if (perm.data.size() == 3) {
std::vector<Axis> index_to_axis = {Axis::BATCH, Axis::WIDTH,
Axis::CHANNELS};
attr.perm.b = axis_to_index[index_to_axis[perm.data[0]]];
attr.perm.h = 1;
attr.perm.w = axis_to_index[index_to_axis[perm.data[1]]];
attr.perm.c = axis_to_index[index_to_axis[perm.data[2]]];
} else if (perm.data.size() == 2) {
std::vector<Axis> index_to_axis = {Axis::BATCH, Axis::CHANNELS};
attr.perm.b = axis_to_index[index_to_axis[perm.data[0]]];
attr.perm.h = 1;
attr.perm.w = 2;
attr.perm.c = axis_to_index[index_to_axis[perm.data[1]]];
} else {
return absl::InvalidArgumentError(
"Permutation for transpose is invalid.");
}
node->operation.attributes = attr;
return absl::OkStatus();
}
};
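// UNPACK with num == 1 degenerates to a RESHAPE. Otherwise it becomes a SPLIT
// along the unpack axis; for every output whose declared shape differs from
// the split slice (which keeps the axis with size 1), an extra RESHAPE node
// is inserted behind an intermediate value.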
class UnpackOperationParser : public TFLiteOperationParser {
public:
absl::Status IsSupported(const TfLiteContext* context,
const TfLiteNode* tflite_node,
const TfLiteRegistration* registration) final {
return absl::OkStatus();
}
absl::Status Parse(const TfLiteNode* tflite_node,
const TfLiteRegistration* registration,
GraphFloat32* graph, ObjectReader* reader) final {
const TfLiteUnpackParams* unpack_params;
RETURN_IF_ERROR(RetrieveBuiltinData(tflite_node, &unpack_params));
if (unpack_params->num == 1) {
Node* node = graph->NewNode();
node->operation.type = ToString(OperationType::RESHAPE);
RETURN_IF_ERROR(reader->AddInput(node, 0));
RETURN_IF_ERROR(reader->AddOutputs(node));
ReshapeAttributes attr;
attr.new_shape = graph->FindOutputs(node->id)[0]->tensor.shape;
node->operation.attributes = attr;
return absl::OkStatus();
}
const TfLiteTensor* input = reader->GetInputTensor(0);
BHWC input_shape;
RETURN_IF_ERROR(ExtractTensorShape(*input, &input_shape));
SplitAttributes attr;
RETURN_IF_ERROR(
ExtractAxisFromIndex(*input, unpack_params->axis, &attr.axis));
BHWC output_required_shape = input_shape;
output_required_shape.set(attr.axis, 1);
Node* node = graph->NewNode();
node->operation.type = ToString(OperationType::SPLIT);
node->operation.attributes = attr;
RETURN_IF_ERROR(reader->AddInput(node, 0));
auto input_value = graph->FindInputs(node->id)[0];
for (int i = 0; i < tflite_node->outputs->size; ++i) {
const TfLiteTensor* output = reader->GetOutputTensor(i);
BHWC output_shape;
RETURN_IF_ERROR(ExtractTensorShape(*output, &output_shape));
if (output_shape != output_required_shape) {
Value* copy_value = graph->NewValue();
copy_value->tensor.type = input_value->tensor.type;
copy_value->tensor.shape = output_required_shape;
RETURN_IF_ERROR(graph->SetProducer(node->id, copy_value->id));
Node* node_reshape = graph->NewNode();
node_reshape->operation.type = ToString(OperationType::RESHAPE);
ReshapeAttributes reshape_attr;
reshape_attr.new_shape = output_shape;
node_reshape->operation.attributes = reshape_attr;
RETURN_IF_ERROR(graph->AddConsumer(node_reshape->id, copy_value->id));
RETURN_IF_ERROR(reader->AddOutput(node_reshape, i));
} else {
RETURN_IF_ERROR(reader->AddOutput(node, i));
}
}
return absl::OkStatus();
}
};
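// Custom "MaxUnpooling2D": input 0 is the data tensor and input 1 the argmax
// indices. Kernel, stride and padding come from the custom initial data, and
// the output shape is recomputed from the input shape and attributes.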
class Unpooling2DOperationParser : public TFLiteOperationParser {
public:
absl::Status IsSupported(const TfLiteContext* context,
const TfLiteNode* tflite_node,
const TfLiteRegistration* registration) final {
return CheckGpuDelegateCompatibility(context, tflite_node, registration);
}
absl::Status Parse(const TfLiteNode* tflite_node,
const TfLiteRegistration* registration,
GraphFloat32* graph, ObjectReader* reader) final {
Node* node = graph->NewNode();
node->operation.type = ToString(OperationType::MAX_UNPOOLING_2D);
RETURN_IF_ERROR(reader->AddInput(node, 0));
RETURN_IF_ERROR(reader->AddInput(node, 1));
RETURN_IF_ERROR(reader->AddOutputs(node));
auto input_shape = graph->FindInputs(node->id)[0]->tensor.shape;
MaxUnpooling2DAttributes attr;
const TfLitePoolParams* tf_options;
RETURN_IF_ERROR(RetrieveCustomInitialData(tflite_node, &tf_options));
attr.kernel = ToHW(tf_options->filter_height, tf_options->filter_width);
attr.strides = ToHW(tf_options->stride_height, tf_options->stride_width);
UpdatePadding(tf_options->padding, input_shape, &attr);
node->operation.attributes = attr;
auto output_value = graph->FindOutputs(node->id)[0];
output_value->tensor.shape = CalculateOutputShape(input_shape, attr);
return absl::OkStatus();
}
};
class BatchToSpaceOperationParser : public TFLiteOperationParser {
public:
absl::Status IsSupported(const TfLiteContext* context,
const TfLiteNode* tflite_node,
const TfLiteRegistration* registration) final {
return absl::OkStatus();
}
absl::Status Parse(const TfLiteNode* tflite_node,
const TfLiteRegistration* registration,
GraphFloat32* graph, ObjectReader* reader) final {
auto* node = graph->NewNode();
node->operation.type = ToString(OperationType::BATCH_TO_SPACE);
RETURN_IF_ERROR(reader->AddInput(node, 0));
RETURN_IF_ERROR(reader->AddOutputs(node));
BatchToSpaceAttributes bs_attr;
Tensor<Linear, DataType::INT32> block;
RETURN_IF_ERROR(reader->ReadTensor(1, &block));
if (block.shape.v != 2) {
return absl::InternalError("Space has to be HxW.");
}
bs_attr.block.h = block.data[0];
bs_attr.block.w = block.data[1];
Tensor<HW, DataType::INT32> crop;
RETURN_IF_ERROR(reader->ReadTensor(2, &crop));
auto crop_shape = crop.shape;
    if (crop_shape.h != 2 || crop_shape.w != 2) {
      return absl::InternalError("Crop has to be a 2x2 tensor.");
}
bs_attr.crop.prepended.h = crop.data[0];
bs_attr.crop.prepended.w = crop.data[2];
bs_attr.crop.appended.h = crop.data[1];
bs_attr.crop.appended.w = crop.data[3];
node->operation.attributes = std::move(bs_attr);
return absl::OkStatus();
}
};
class SpaceToBatchOperationParser : public TFLiteOperationParser {
public:
absl::Status IsSupported(const TfLiteContext* context,
const TfLiteNode* tflite_node,
const TfLiteRegistration* registration) final {
return CheckGpuDelegateCompatibility(context, tflite_node, registration);
}
absl::Status Parse(const TfLiteNode* tflite_node,
const TfLiteRegistration* registration,
GraphFloat32* graph, ObjectReader* reader) final {
auto* node = graph->NewNode();
node->operation.type = ToString(OperationType::SPACE_TO_BATCH);
RETURN_IF_ERROR(reader->AddInput(node, 0));
RETURN_IF_ERROR(reader->AddOutputs(node));
SpaceToBatchAttributes sb_attr;
Tensor<Linear, DataType::INT32> block;
RETURN_IF_ERROR(reader->ReadTensor(1, &block));
if (block.shape.v != 2) {
return absl::InternalError("Space has to be HxW.");
}
sb_attr.block.h = block.data[0];
sb_attr.block.w = block.data[1];
Tensor<HW, DataType::INT32> padding;
RETURN_IF_ERROR(reader->ReadTensor(2, &padding));
auto padding_shape = padding.shape;
    if (padding_shape.h != 2 || padding_shape.w != 2) {
      return absl::InternalError("Padding has to be a 2x2 tensor.");
}
sb_attr.padding.prepended.h = padding.data[0];
sb_attr.padding.prepended.w = padding.data[2];
sb_attr.padding.appended.h = padding.data[1];
sb_attr.padding.appended.w = padding.data[3];
node->operation.attributes = std::move(sb_attr);
return absl::OkStatus();
}
};
class MeanOperationParser : public TFLiteOperationParser {
public:
absl::Status IsSupported(const TfLiteContext* context,
const TfLiteNode* tflite_node,
const TfLiteRegistration* registration) final {
return CheckGpuDelegateCompatibility(context, tflite_node, registration);
}
absl::Status Parse(const TfLiteNode* tflite_node,
const TfLiteRegistration* registration,
GraphFloat32* graph, ObjectReader* reader) final {
auto* node = graph->NewNode();
node->operation.type = ToString(OperationType::MEAN);
RETURN_IF_ERROR(reader->AddInput(node, 0));
RETURN_IF_ERROR(reader->AddOutputs(node));
MeanAttributes attr;
const TfLiteTensor* input = reader->GetInputTensor(0);
const TfLiteTensor* axes = reader->GetInputTensor(1);
for (int i = 0; i < NumElements(axes->dims); i++) {
Axis axis;
RETURN_IF_ERROR(ExtractAxisFromIndex(*input, axes->data.i32[i], &axis));
attr.dims.insert(axis);
}
node->operation.attributes = attr;
return absl::OkStatus();
}
};
class UnsupportedOperationParser : public TFLiteOperationParser {
public:
absl::Status IsSupported(const TfLiteContext* context,
const TfLiteNode* tflite_node,
const TfLiteRegistration* registration) final {
return absl::UnimplementedError("Operation is not supported.");
}
absl::Status Parse(const TfLiteNode* tflite_node,
const TfLiteRegistration* registration,
GraphFloat32* graph, ObjectReader* reader) final {
return absl::UnimplementedError("Operation is not supported.");
}
};
absl::Status IsSupported(
const TfLiteContext* context, TfLiteNode* node,
const TfLiteRegistration* registration, bool allow_quant_ops = false,
const absl::flat_hash_set<TfLiteBuiltinOperator>* excluded_ops = nullptr) {
return NewOperationParser(registration, allow_quant_ops, excluded_ops)
->IsSupported(context, node, registration);
}
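// Returns true if every listed tensor has rank < 5 and, for runtime
// (kTfLiteArenaRw) tensors, a type from allowed_types. Constant tensors of
// other types are not rejected by this check.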
bool IsAllAllowedTensors(TfLiteContext* context,
const TfLiteIntArray* tensor_indices,
const std::vector<TfLiteType>& allowed_types) {
for (int i = 0; i < tensor_indices->size; ++i) {
int tensor_idx = tensor_indices->data[i];
if (tensor_idx == kTfLiteOptionalTensor) continue;
const TfLiteTensor* t = &context->tensors[tensor_idx];
if (t->dims && t->dims->size >= 5) {
return false;
}
bool type_supported = false;
for (auto allowed_type : allowed_types) {
if (t->type == allowed_type) {
type_supported = true;
break;
}
}
if (t->allocation_type == kTfLiteArenaRw && !type_supported) {
return false;
}
}
return true;
}
}  // namespace
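// Maps a TFLite builtin (or selected custom) op to its parser. Excluded ops,
// quantization ops without allow_quant_ops, and anything unrecognized fall
// back to UnsupportedOperationParser, so such nodes are simply not delegated.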
std::unique_ptr<TFLiteOperationParser> NewOperationParser(
const TfLiteRegistration* registration, bool allow_quant_ops,
const absl::flat_hash_set<TfLiteBuiltinOperator>* excluded_ops) {
const auto builtin_code = registration->builtin_code;
if (excluded_ops != nullptr &&
excluded_ops->contains(
static_cast<TfLiteBuiltinOperator>(builtin_code))) {
return std::make_unique<UnsupportedOperationParser>();
}
switch (builtin_code) {
case kTfLiteBuiltinAbs:
return std::make_unique<ElementwiseOperationParser>(OperationType::ABS);
case kTfLiteBuiltinAdd:
case kTfLiteBuiltinAddN:
return std::make_unique<ElementwiseOperationParser>(OperationType::ADD);
case kTfLiteBuiltinAveragePool2d:
return std::make_unique<Pooling2DOperationParser>(PoolingType::AVERAGE);
case kTfLiteBuiltinBatchMatmul:
return std::make_unique<BatchedMatMulOperationParser>();
case kTfLiteBuiltinCast:
return std::make_unique<CastOperationParser>();
case kTfLiteBuiltinConcatenation:
return std::make_unique<ConcatenationOperationParser>();
case kTfLiteBuiltinConv2d:
return std::make_unique<Conv2DOperationParser>();
case kTfLiteBuiltinCos:
return std::make_unique<ElementwiseOperationParser>(OperationType::COS);
case kTfLiteBuiltinCumsum:
return std::make_unique<CumsumOperationParser>();
case kTfLiteBuiltinDensify:
return std::make_unique<DensifyOperationParser>();
case kTfLiteBuiltinDepthwiseConv2d:
return std::make_unique<DepthwiseConvolutionOperationParser>();
case kTfLiteBuiltinDepthToSpace:
return std::make_unique<DepthToSpaceOperationParser>();
case kTfLiteBuiltinDequantize:
if (allow_quant_ops) {
return std::make_unique<DequantizeOperationParser>();
}
break;
case kTfLiteBuiltinDiv:
return std::make_unique<ElementwiseOperationParser>(OperationType::DIV);
case kTfLiteBuiltinEqual:
return std::make_unique<ElementwiseOperationParser>(OperationType::EQUAL);
case kTfLiteBuiltinElu:
return std::make_unique<ElementwiseOperationParser>(OperationType::ELU);
case kTfLiteBuiltinExp:
return std::make_unique<ElementwiseOperationParser>(OperationType::EXP);
case kTfLiteBuiltinFloor:
return std::make_unique<ElementwiseOperationParser>(OperationType::FLOOR);
case kTfLiteBuiltinFloorDiv:
return std::make_unique<ElementwiseOperationParser>(
OperationType::FLOOR_DIV);
case kTfLiteBuiltinFloorMod:
return std::make_unique<ElementwiseOperationParser>(
OperationType::FLOOR_MOD);
case kTfLiteBuiltinFullyConnected:
return std::make_unique<FullyConnectedOperationParser>();
case kTfLiteBuiltinGather:
return std::make_unique<GatherOperationParser>();
case kTfLiteBuiltinGelu:
return std::make_unique<ElementwiseOperationParser>(OperationType::GELU);
case kTfLiteBuiltinGreater:
return std::make_unique<ElementwiseOperationParser>(
OperationType::GREATER);
case kTfLiteBuiltinGreaterEqual:
return std::make_unique<ElementwiseOperationParser>(
OperationType::GREATER_EQUAL);
case kTfLiteBuiltinHardSwish:
return std::make_unique<HardSwishOperationParser>();
case kTfLiteBuiltinLess:
return std::make_unique<ElementwiseOperationParser>(OperationType::LESS);
case kTfLiteBuiltinLessEqual:
return std::make_unique<ElementwiseOperationParser>(
OperationType::LESS_EQUAL);
case kTfLiteBuiltinLogistic:
return std::make_unique<ElementwiseOperationParser>(
OperationType::SIGMOID);
case kTfLiteBuiltinLog:
return std::make_unique<ElementwiseOperationParser>(OperationType::LOG);
case kTfLiteBuiltinLogicalAnd:
return std::make_unique<ElementwiseOperationParser>(
OperationType::LOGICAL_AND);
case kTfLiteBuiltinLstm:
return std::make_unique<LSTMOperationParser>();
case kTfLiteBuiltinMaximum:
return std::make_unique<ElementwiseOperationParser>(
OperationType::MAXIMUM);
case kTfLiteBuiltinMaxPool2d:
return std::make_unique<Pooling2DOperationParser>(PoolingType::MAX);
case kTfLiteBuiltinMean:
return std::make_unique<MeanOperationParser>();
case kTfLiteBuiltinMinimum:
return std::make_unique<ElementwiseOperationParser>(
OperationType::MINIMUM);
case kTfLiteBuiltinMirrorPad:
return std::make_unique<PadOperationParser>(true);
case kTfLiteBuiltinMul:
return std::make_unique<ElementwiseOperationParser>(OperationType::MUL);
case kTfLiteBuiltinNeg:
return std::make_unique<ElementwiseOperationParser>(OperationType::NEG);
case kTfLiteBuiltinNotEqual:
return std::make_unique<ElementwiseOperationParser>(
OperationType::NOT_EQUAL);
case kTfLiteBuiltinOneHot:
return std::make_unique<OneHotOperationParser>();
case kTfLiteBuiltinPack:
return std::make_unique<PackOperationParser>();
case kTfLiteBuiltinPad:
return std::make_unique<PadOperationParser>(false);
case kTfLiteBuiltinPadv2:
return std::make_unique<PadOperationParser>(false);
case kTfLiteBuiltinPow:
return std::make_unique<ElementwiseOperationParser>(OperationType::POW);
case kTfLiteBuiltinReduceMax:
return std::make_unique<ReduceOperationParser>(
OperationType::REDUCE_MAXIMUM);
case kTfLiteBuiltinReduceMin:
return std::make_unique<ReduceOperationParser>(
OperationType::REDUCE_MINIMUM);
case kTfLiteBuiltinReduceProd:
return std::make_unique<ReduceOperationParser>(
OperationType::REDUCE_PRODUCT);
case kTfLiteBuiltinQuantize:
if (allow_quant_ops) {
return std::make_unique<QuantizeOperationParser>();
}
break;
case kTfLiteBuiltinRelu:
return std::make_unique<ReLUOperationParser>(0, 0);
case kTfLiteBuiltinRelu6:
return std::make_unique<ReLUOperationParser>(0, 6);
case kTfLiteBuiltinReluN1To1:
return std::make_unique<ReLUOperationParser>(-1.0, 1.0);
case kTfLiteBuiltinLeakyRelu:
return std::make_unique<ReLUOperationParser>(0, 0);
case kTfLiteBuiltinPrelu:
return std::make_unique<PReLUOperationParser>();
case kTfLiteBuiltinReshape:
return std::make_unique<ReshapeOperationParser>();
case kTfLiteBuiltinResizeBilinear:
return std::make_unique<Resize2DOperationParser>(SamplingType::BILINEAR);
case kTfLiteBuiltinResizeNearestNeighbor:
return std::make_unique<Resize2DOperationParser>(SamplingType::NEAREST);
case kTfLiteBuiltinRsqrt:
return std::make_unique<ElementwiseOperationParser>(OperationType::RSQRT);
case kTfLiteBuiltinSelect:
case kTfLiteBuiltinSelectV2:
return std::make_unique<SelectV2OperationParser>();
case kTfLiteBuiltinSign:
return std::make_unique<ElementwiseOperationParser>(OperationType::SIGN);
case kTfLiteBuiltinSin:
return std::make_unique<ElementwiseOperationParser>(OperationType::SIN);
case kTfLiteBuiltinSlice:
return std::make_unique<SliceOperationParser>();
case kTfLiteBuiltinSoftmax:
return std::make_unique<SoftmaxOperationParser>();
case kTfLiteBuiltinSpaceToDepth:
return std::make_unique<SpaceToDepthOperationParser>();
case kTfLiteBuiltinSplit:
return std::make_unique<SplitOperationParser>();
case kTfLiteBuiltinSplitV:
return std::make_unique<SplitVOperationParser>();
case kTfLiteBuiltinSqrt:
return std::make_unique<ElementwiseOperationParser>(OperationType::SQRT);
case kTfLiteBuiltinSquare:
return std::make_unique<ElementwiseOperationParser>(
OperationType::SQUARE);
case kTfLiteBuiltinSquaredDifference:
return std::make_unique<ElementwiseOperationParser>(
OperationType::SQUARED_DIFF);
case kTfLiteBuiltinStridedSlice:
return std::make_unique<StridedSliceOperationParser>();
case kTfLiteBuiltinSub:
return std::make_unique<ElementwiseOperationParser>(OperationType::SUB);
case kTfLiteBuiltinSum:
return std::make_unique<ReduceOperationParser>(OperationType::REDUCE_SUM);
case kTfLiteBuiltinTanh:
return std::make_unique<ElementwiseOperationParser>(OperationType::TANH);
case kTfLiteBuiltinTile:
return std::make_unique<TileOperationParser>();
case kTfLiteBuiltinTranspose:
return std::make_unique<TransposeOperationParser>();
case kTfLiteBuiltinTransposeConv:
return std::make_unique<TransposeConvBuiltinOperationParser>();
case kTfLiteBuiltinUnpack:
return std::make_unique<UnpackOperationParser>();
case kTfLiteBuiltinCustom: {
const absl::string_view custom_name = registration->custom_name;
if (custom_name == "Convolution2DTransposeBias") {
return std::make_unique<TransposeConvCustomOperationParser>();
}
if (custom_name == "MaxPoolingWithArgmax2D") {
return std::make_unique<Pooling2DOperationParser>(PoolingType::MAX);
}
if (custom_name == "MaxUnpooling2D") {
return std::make_unique<Unpooling2DOperationParser>();
}
if (custom_name == "Resampler") {
return std::make_unique<ResamplerOperationParser>();
}
return NewCustomOperationParser(registration->custom_name);
}
}
return std::make_unique<UnsupportedOperationParser>();
}
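// Builds the list of nodes to hand to the GPU delegate. A node qualifies if
// its parser reports support and its runtime tensors use allowed types
// (float32/float16, plus quantized types when allow_quant_ops, and bool/int32
// for specific ops such as CAST, SELECT, LOGICAL_AND, ONE_HOT and GATHER).
// FP16GraphPartitionHelper lets constant fp16 DEQUANTIZE nodes be folded into
// their consumers, and only the N largest partitions are kept.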
TfLiteIntArray* GetOpsToReplace(
TfLiteContext* context, bool allow_quant_ops, int max_delegated_partitions,
const absl::flat_hash_set<TfLiteBuiltinOperator>* excluded_ops,
int start_node_index, int end_node_index) {
delegates::IsNodeSupportedFn node_supported_fn =
[=](TfLiteContext* context, TfLiteNode* node,
TfLiteRegistration* registration,
std::string* unsupported_details) -> bool {
const auto status =
IsSupported(context, node, registration, allow_quant_ops, excluded_ops);
if (!status.ok()) {
if (unsupported_details) {
*unsupported_details = std::string(status.message());
}
return false;
}
std::vector<TfLiteType> allowed_in_types = {kTfLiteFloat32, kTfLiteFloat16};
std::vector<TfLiteType> allowed_out_types = {kTfLiteFloat32,
kTfLiteFloat16};
if (allow_quant_ops) {
allowed_in_types.push_back(kTfLiteInt8);
allowed_in_types.push_back(kTfLiteUInt8);
allowed_out_types.push_back(kTfLiteInt8);
allowed_out_types.push_back(kTfLiteUInt8);
}
if (IsLogicalCode(registration->builtin_code)) {
allowed_out_types.push_back(kTfLiteBool);
}
if (registration->builtin_code == kTfLiteBuiltinCast) {
allowed_in_types.push_back(kTfLiteBool);
allowed_in_types.push_back(kTfLiteFloat32);
allowed_in_types.push_back(kTfLiteInt32);
allowed_out_types.push_back(kTfLiteFloat32);
allowed_out_types.push_back(kTfLiteInt32);
allowed_out_types.push_back(kTfLiteBool);
}
if (registration->builtin_code == kTfLiteBuiltinOneHot) {
allowed_in_types.push_back(kTfLiteInt32);
}
if (registration->builtin_code == kTfLiteBuiltinSelect ||
registration->builtin_code == kTfLiteBuiltinSelectV2) {
allowed_in_types.push_back(kTfLiteBool);
}
if (registration->builtin_code == kTfLiteBuiltinLogicalAnd) {
allowed_in_types.push_back(kTfLiteBool);
allowed_out_types.push_back(kTfLiteBool);
}
if (registration->builtin_code == kTfLiteBuiltinGather) {
allowed_in_types.push_back(kTfLiteInt32);
}
if (!IsAllAllowedTensors(context, node->inputs, allowed_in_types) ||
!IsAllAllowedTensors(context, node->outputs, allowed_out_types)) {
if (unsupported_details) {
*unsupported_details =
"OP is supported, but tensor type/shape isn't compatible.";
}
return false;
}
return true;
};
delegates::FP16GraphPartitionHelper partition_helper(context,
node_supported_fn);
std::set<std::string> unsupported_nodes_info;
#ifndef TFLITE_DEBUG_DELEGATE
auto res = partition_helper.Partition(&unsupported_nodes_info);
#else
auto res = partition_helper.Partition(&unsupported_nodes_info,
start_node_index, end_node_index);
#endif
if (res != kTfLiteOk) {
return TfLiteIntArrayCreate(0);
}
std::vector<int> ops_to_replace =
partition_helper.GetNodesOfFirstNLargestPartitions(
max_delegated_partitions);
if (!unsupported_nodes_info.empty() &&
partition_helper.num_total_nodes() > ops_to_replace.size()) {
std::string unsupported = absl::StrJoin(unsupported_nodes_info, "\n");
std::string error_message = absl::StrCat(
"Following operations are not supported by GPU delegate:\n",
unsupported, "\n");
if (!ops_to_replace.empty()) {
absl::StrAppend(
&error_message, ops_to_replace.size(),
" operations will run on the GPU, and the remaining ",
partition_helper.num_total_nodes() - ops_to_replace.size());
} else {
absl::StrAppend(&error_message,
"No operations will run on the GPU, and all ",
partition_helper.num_total_nodes());
}
absl::StrAppend(&error_message, " operations will run on the CPU.");
TF_LITE_KERNEL_LOG(context, error_message.c_str());
}
return ConvertVectorToTfLiteIntArray(ops_to_replace);
}
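// Pre-create graph values for the delegated subgraph's (non-constant) input
// and output tensors before any op is parsed; outputs are additionally
// registered as known graph outputs.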
absl::Status PrecreateInputTensors(
TfLiteContext* context, GraphFloat32* graph,
const std::vector<int>& input_ids,
absl::flat_hash_map<int, int>* quant_conversion_map,
absl::flat_hash_map<int, Value*>* tensor_to_value) {
for (const auto& id : input_ids) {
const TfLiteTensor& tflite_tensor = context->tensors[id];
if (tflite::IsConstantTensor(&tflite_tensor)) continue;
RETURN_IF_ERROR(ObjectReader::ReadNonConstantTensor(
context, tensor_to_value, quant_conversion_map, graph, id));
}
return absl::OkStatus();
}
absl::Status PrecreateOutputTensors(
TfLiteContext* context, GraphFloat32* graph,
const std::vector<int>& output_ids,
absl::flat_hash_map<int, int>* quant_conversion_map,
absl::flat_hash_map<int, Value*>* tensor_to_value) {
for (const auto& id : output_ids) {
const TfLiteTensor& tflite_tensor = context->tensors[id];
if (tflite::IsConstantTensor(&tflite_tensor)) continue;
Value* value;
RETURN_IF_ERROR(ObjectReader::ReadNonConstantTensor(
context, tensor_to_value, quant_conversion_map, graph, id, &value));
graph->AddKnownGraphOutput(value);
}
return absl::OkStatus();
}
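// For every variable input tensor consumed by the node, a COPY node writes
// the newly produced value back to it. Missing or leftover entries in
// new_variable_tensor_values are reported as errors.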
absl::Status CopyVariableTensorOutputs(
TfLiteNode* tflite_node, TfLiteRegistration* registration,
GraphFloat32* graph, ObjectReader& reader,
const absl::flat_hash_map<int, ValueId>& new_variable_tensor_values) {
absl::flat_hash_map<int, ValueId> new_variable_tensor_values_copy(
new_variable_tensor_values);
for (int i = 0; i < tflite_node->inputs->size; i++) {
int tensor_idx = tflite_node->inputs->data[i];
Value* value;
if (!reader.ReadValueByTensorIdx(tensor_idx, &value).ok()) continue;
if (value->tensor.is_variable_input) {
if (new_variable_tensor_values_copy.find(i) ==
new_variable_tensor_values_copy.end()) {
return absl::InvalidArgumentError(
absl::StrCat(GetOpNameByRegistration(*registration),
" did not provide a new value for the variable input "
"tensor with index ",
tensor_idx));
} else {
Node* node = graph->NewNode();
node->operation.type = ToString(OperationType::COPY);
RETURN_IF_ERROR(graph->AddConsumer(
node->id, new_variable_tensor_values_copy.at(i)));
RETURN_IF_ERROR(reader.AddUpdate(node, i));
new_variable_tensor_values_copy.erase(
new_variable_tensor_values_copy.find(i));
}
}
}
if (!new_variable_tensor_values_copy.empty()) {
return absl::InvalidArgumentError(
"More input variable tensors asked to be copied than present on the "
"node");
}
return absl::OkStatus();
}
absl::Status BuildModel(TfLiteContext* context,
const TfLiteDelegateParams* delegate_params,
GraphFloat32* graph,
absl::flat_hash_map<int, int>* quant_conversion_map) {
std::vector<int> inputs(delegate_params->input_tensors->size);
std::vector<int> outputs(delegate_params->output_tensors->size);
for (int i = 0; i < delegate_params->input_tensors->size; i++) {
inputs[i] = delegate_params->input_tensors->data[i];
}
for (int i = 0; i < delegate_params->output_tensors->size; i++) {
outputs[i] = delegate_params->output_tensors->data[i];
}
return BuildModelEnforceIO(context, delegate_params, inputs, outputs, graph,
quant_conversion_map);
}
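// Converts the nodes selected for delegation into a GraphFloat32. DEQUANTIZE
// nodes whose input is a constant (kTfLiteMmapRo) fp16 tensor are skipped:
// the fp16 partitioning step (see GetOpsToReplace) remaps their consumers to
// read the fp16 constants directly.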
absl::Status BuildModelEnforceIO(
TfLiteContext* context, const TfLiteDelegateParams* delegate_params,
const std::vector<int>& input_ids, const std::vector<int>& output_ids,
GraphFloat32* graph, absl::flat_hash_map<int, int>* quant_conversion_map) {
std::vector<std::unique_ptr<TFLiteOperationParser>> operations;
std::vector<int> tflite_nodes;
for (int i = 0; i < delegate_params->nodes_to_replace->size; ++i) {
TfLiteNode* tflite_node = nullptr;
TfLiteRegistration* registration = nullptr;
RETURN_IF_ERROR(GetNodeAndRegistration(
context, delegate_params->nodes_to_replace->data[i], &tflite_node,
®istration));
if (registration->builtin_code == kTfLiteBuiltinDequantize &&
context->tensors[tflite_node->inputs->data[0]].type ==
TfLiteType::kTfLiteFloat16 &&
context->tensors[tflite_node->inputs->data[0]].allocation_type ==
TfLiteAllocationType::kTfLiteMmapRo) {
continue;
}
auto op_parser = NewOperationParser(
registration, quant_conversion_map != nullptr);
if (!op_parser) {
return absl::UnimplementedError(
absl::StrCat("Operation ", registration->builtin_code, "(",
registration->custom_name,
") is not supported by TFLite GPU Delegate."));
}
operations.push_back(std::move(op_parser));
tflite_nodes.push_back(i);
}
absl::flat_hash_map<int, Value*> tensor_to_value;
std::vector<ValueId> variable_inputs_to_value_id;
RETURN_IF_ERROR(PrecreateInputTensors(
context, graph, input_ids, quant_conversion_map, &tensor_to_value));
RETURN_IF_ERROR(PrecreateOutputTensors(
context, graph, output_ids, quant_conversion_map, &tensor_to_value));
for (int i = 0; i < operations.size(); ++i) {
TfLiteNode* tflite_node;
TfLiteRegistration* registration;
RETURN_IF_ERROR(GetNodeAndRegistration(
context, delegate_params->nodes_to_replace->data[tflite_nodes[i]],
&tflite_node, ®istration));
ObjectReader reader(graph, context, tflite_node, &tensor_to_value,
quant_conversion_map);
const auto status =
operations[i]->Parse(tflite_node, registration, graph, &reader);
if (!status.ok()) {
return absl::InternalError(absl::StrCat(
GetOpNameByRegistration(*registration), ": ", status.message()));
}
absl::flat_hash_map<int, ValueId> new_value_for_variable_input_tensors =
operations[i]->GetNewValueIdsForVariableInputNodes();
RETURN_IF_ERROR(
CopyVariableTensorOutputs(tflite_node, registration, graph, reader,
new_value_for_variable_input_tensors));
}
for (auto value_id : variable_inputs_to_value_id) {
if (!graph->IsGraphOutput(value_id)) {
return absl::InvalidArgumentError(
absl::StrCat("Variable input tensors must be a graph output. Value ",
value_id, " is not a graph output"));
}
}
return absl::OkStatus();
}
absl::Status BuildFinalModel(
TfLiteContext* context, const TfLiteDelegateParams* delegate_params,
GraphFloat32* graph, absl::flat_hash_map<int, int>* quant_conversion_map) {
RETURN_IF_ERROR(
BuildModel(context, delegate_params, graph, quant_conversion_map));
ModelTransformer transformer(graph);
if (!ApplyModelTransformations(&transformer)) {
return absl::InternalError("Graph transformations failed");
}
return absl::OkStatus();
}
namespace {
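// Minimal delegate machinery used only to drive graph construction through
// the regular delegation path when importing a model from a FlatBuffer.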
class DelegateContext {
public:
struct DelegateData {
std::vector<int> input_ids;
std::vector<int> output_ids;
GraphFloat32* graph;
std::unique_ptr<absl::flat_hash_map<int, int>> quant_conversion_map;
};
bool Init(TfLiteContext* context,
const TfLiteDelegateParams* delegate_params) {
const auto* delegate_data =
reinterpret_cast<DelegateData*>(delegate_params->delegate->data_);
return delegate_data->graph &&
BuildModelEnforceIO(context, delegate_params,
delegate_data->input_ids,
delegate_data->output_ids, delegate_data->graph,
delegate_data->quant_conversion_map.get())
.ok();
}
};
TfLiteStatus DelegatePrepare(TfLiteContext* context, TfLiteDelegate* delegate) {
TfLiteRegistration registration{};
registration.init = [](TfLiteContext* context, const char* buffer,
size_t) -> void* {
auto* delegate_context = new DelegateContext();
if (!delegate_context->Init(
context, reinterpret_cast<const TfLiteDelegateParams*>(buffer))) {
delete delegate_context;
return nullptr;
}
return delegate_context;
};
registration.free = [](TfLiteContext* context, void* buffer) -> void {
delete reinterpret_cast<DelegateContext*>(buffer);
};
registration.prepare = [](TfLiteContext* context,
TfLiteNode* node) -> TfLiteStatus {
return node->user_data ? kTfLiteOk : kTfLiteError;
};
const auto* delegate_data =
reinterpret_cast<const DelegateContext::DelegateData*>(delegate->data_);
TfLiteIntArray* ops_to_replace = GetOpsToReplace(
context, static_cast<bool>(delegate_data->quant_conversion_map));
const auto status = context->ReplaceNodeSubsetsWithDelegateKernels(
context, registration, ops_to_replace, delegate);
TfLiteIntArrayFree(ops_to_replace);
return status;
}
}  // namespace
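// Builds an interpreter for the FlatBuffer, installs the graph-building
// delegate defined above, and finally applies the standard model
// transformations to the resulting GraphFloat32.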
absl::Status BuildFromFlatBuffer(const tflite::FlatBufferModel& flatbuffer,
const tflite::OpResolver& op_resolver,
GraphFloat32* graph, bool allow_quant_ops) {
std::unique_ptr<tflite::Interpreter> interpreter;
tflite::InterpreterBuilder interpreter_builder(flatbuffer, op_resolver);
if (interpreter_builder(&interpreter) != kTfLiteOk || !interpreter) {
return absl::InternalError("Unable to prepare TfLite interpreter.");
}
TfLiteDelegate delegate;
DelegateContext::DelegateData delegate_data{interpreter->inputs(),
interpreter->outputs(), graph};
if (allow_quant_ops) {
delegate_data.quant_conversion_map =
std::make_unique<absl::flat_hash_map<int, int>>();
}
delegate.data_ = &delegate_data;
delegate.flags = kTfLiteDelegateFlagsNone;
delegate.Prepare = DelegatePrepare;
delegate.CopyFromBufferHandle = nullptr;
delegate.CopyToBufferHandle = nullptr;
delegate.FreeBufferHandle = nullptr;
if (interpreter->ModifyGraphWithDelegate(&delegate) != kTfLiteOk) {
return absl::InternalError("Conversion from TfLite model failed.");
}
ModelTransformer transformer(graph);
if (!ApplyModelTransformations(&transformer)) {
return absl::InternalError("Graph transformations failed");
}
return absl::OkStatus();
}
}
} | #include "tensorflow/lite/delegates/gpu/common/model_builder.h"
#include <stddef.h>
#include <stdint.h>
#include <cstdlib>
#include <cstring>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/container/flat_hash_set.h"
#include "absl/status/status.h"
#include "absl/types/span.h"
#include "tensorflow/lite/builtin_ops.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/core/subgraph.h"
#include "tensorflow/lite/delegates/gpu/common/data_type.h"
#include "tensorflow/lite/delegates/gpu/common/model_builder_internal.h"
#include "tensorflow/lite/delegates/gpu/common/shape.h"
#include "tensorflow/lite/delegates/gpu/common/tensor.h"
#include "tensorflow/lite/interpreter.h"
namespace tflite {
namespace gpu {
namespace {
TEST(ModelBuilderTest, ConvertTfLiteTensorToTensorRefSucceedsForRank0) {
TfLiteTensor tflite_tensor;
tflite_tensor.name = "tensor_name";
tflite_tensor.type = TfLiteType::kTfLiteFloat32;
tflite_tensor.dims = TfLiteIntArrayCreate(1);
tflite_tensor.dims->data[0] = 4;
TensorRef<BHWC> tensor_ref;
const auto status =
ConvertTfLiteTensorToTensorRef(tflite_tensor, &tensor_ref);
TfLiteIntArrayFree(tflite_tensor.dims);
ASSERT_TRUE(status.ok());
EXPECT_EQ(tensor_ref.type, DataType::FLOAT32);
EXPECT_EQ(tensor_ref.shape, BHWC(4, 1, 1, 1));
}
TEST(ModelBuilderTest, ConvertTfLiteTensorToTensorRefSucceedsForRank1) {
TfLiteTensor tflite_tensor;
tflite_tensor.name = "tensor_name";
tflite_tensor.type = TfLiteType::kTfLiteInt32;
tflite_tensor.dims = TfLiteIntArrayCreate(2);
tflite_tensor.dims->data[0] = 4;
tflite_tensor.dims->data[1] = 5;
TensorRef<BHWC> tensor_ref;
const auto status =
ConvertTfLiteTensorToTensorRef(tflite_tensor, &tensor_ref);
TfLiteIntArrayFree(tflite_tensor.dims);
ASSERT_TRUE(status.ok());
EXPECT_EQ(tensor_ref.type, DataType::INT32);
EXPECT_EQ(tensor_ref.shape, BHWC(4, 1, 1, 5));
}
TEST(ModelBuilderTest, ConvertTfLiteTensorToTensorRefSucceedsForRank2) {
TfLiteTensor tflite_tensor;
tflite_tensor.name = "tensor_name";
tflite_tensor.type = TfLiteType::kTfLiteInt64;
tflite_tensor.dims = TfLiteIntArrayCreate(3);
tflite_tensor.dims->data[0] = 4;
tflite_tensor.dims->data[1] = 5;
tflite_tensor.dims->data[2] = 6;
TensorRef<BHWC> tensor_ref;
const auto status =
ConvertTfLiteTensorToTensorRef(tflite_tensor, &tensor_ref);
TfLiteIntArrayFree(tflite_tensor.dims);
ASSERT_TRUE(status.ok());
EXPECT_EQ(tensor_ref.type, DataType::INT64);
EXPECT_EQ(tensor_ref.shape, BHWC(4, 1, 5, 6));
}
TEST(ModelBuilderTest, ConvertTfLiteTensorToTensorRefSucceedsForRank3) {
TfLiteTensor tflite_tensor;
tflite_tensor.name = "tensor_name";
tflite_tensor.type = TfLiteType::kTfLiteUInt8;
tflite_tensor.dims = TfLiteIntArrayCreate(4);
tflite_tensor.dims->data[0] = 4;
tflite_tensor.dims->data[1] = 5;
tflite_tensor.dims->data[2] = 6;
tflite_tensor.dims->data[3] = 7;
TensorRef<BHWC> tensor_ref;
const auto status =
ConvertTfLiteTensorToTensorRef(tflite_tensor, &tensor_ref);
TfLiteIntArrayFree(tflite_tensor.dims);
ASSERT_TRUE(status.ok());
EXPECT_EQ(tensor_ref.type, DataType::UINT8);
EXPECT_EQ(tensor_ref.shape, BHWC(4, 5, 6, 7));
}
TEST(ModelBuilderTest, ConvertTfLiteTensorToTensorRefFailsForRankLT0) {
TfLiteTensor tflite_tensor;
tflite_tensor.name = "tensor_name";
tflite_tensor.type = TfLiteType::kTfLiteFloat32;
tflite_tensor.dims = TfLiteIntArrayCreate(0);
TensorRef<BHWC> tensor_ref;
const auto status =
ConvertTfLiteTensorToTensorRef(tflite_tensor, &tensor_ref);
TfLiteIntArrayFree(tflite_tensor.dims);
ASSERT_FALSE(status.ok());
}
TEST(ModelBuilderTest, ConvertTfLiteTensorToTensorRefFailsForRankGT3) {
TfLiteTensor tflite_tensor;
tflite_tensor.name = "tensor_name";
tflite_tensor.type = TfLiteType::kTfLiteFloat32;
tflite_tensor.dims = TfLiteIntArrayCreate(5);
TensorRef<BHWC> tensor_ref;
const auto status =
ConvertTfLiteTensorToTensorRef(tflite_tensor, &tensor_ref);
TfLiteIntArrayFree(tflite_tensor.dims);
ASSERT_FALSE(status.ok());
}
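// Test helper that owns a real Interpreter together with an execution-plan
// array and TfLiteDelegateParams storage that the TfLiteContext callbacks in
// the tests below hand back to GetOpsToReplace. exec_plan() recreates the
// array on each call, so callers must not cache the previous pointer.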
class DelegatedInterpreter {
public:
explicit DelegatedInterpreter(int num_nodes) {
exec_plan_ = TfLiteIntArrayCreate(num_nodes);
}
virtual ~DelegatedInterpreter() {
TfLiteIntArrayFree(exec_plan_);
for (auto params : delegate_params_) {
TfLiteIntArrayFree(params.nodes_to_replace);
TfLiteIntArrayFree(params.input_tensors);
TfLiteIntArrayFree(params.output_tensors);
}
}
TfLiteContext* context() { return interpreter_.primary_subgraph().context(); }
TfLiteNode* node(int index) {
const std::pair<TfLiteNode, TfLiteRegistration>* node_and_registration =
interpreter_.primary_subgraph().node_and_registration(index);
return const_cast<TfLiteNode*>(&node_and_registration->first);
}
TfLiteRegistration* registration(int index) {
const std::pair<TfLiteNode, TfLiteRegistration>* node_and_registration =
interpreter_.primary_subgraph().node_and_registration(index);
return const_cast<TfLiteRegistration*>(&node_and_registration->second);
}
TfLiteIntArray* exec_plan() {
const int num_nodes = exec_plan_->size;
TfLiteIntArray* new_array = TfLiteIntArrayCreate(num_nodes);
std::memcpy(new_array->data, exec_plan_->data, num_nodes * sizeof(int32_t));
TfLiteIntArrayFree(exec_plan_);
exec_plan_ = new_array;
return exec_plan_;
}
TfLiteDelegateParams* add_delegate_params() {
delegate_params_.push_back(TfLiteDelegateParams());
return &delegate_params_.back();
}
TfLiteDelegateParams* delegate_params() { return &delegate_params_.front(); }
int num_delegate_params() { return delegate_params_.size(); }
protected:
Interpreter interpreter_;
private:
TfLiteIntArray* exec_plan_ = nullptr;
std::vector<TfLiteDelegateParams> delegate_params_;
};
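// Builds a three-node graph: two DEQUANTIZE nodes (fp16 -> fp32) feeding one
// binary op of the given builtin code. When const_dequantize_inputs is true,
// the fp16 inputs are marked kTfLiteMmapRo so the fp16 partitioner may fold
// the DEQUANTIZE nodes into the consumer.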
class InterpreterFp16 : public DelegatedInterpreter {
public:
explicit InterpreterFp16(TfLiteBuiltinOperator op,
bool const_dequantize_inputs = true)
: DelegatedInterpreter(3) {
void* builtin_data = malloc(sizeof(int));
EXPECT_EQ(interpreter_.AddTensors(5), kTfLiteOk);
EXPECT_EQ(interpreter_.SetInputs({0, 1}), kTfLiteOk);
EXPECT_EQ(interpreter_.SetOutputs({4}), kTfLiteOk);
const TfLiteRegistration reg_dequant0 = {
nullptr, nullptr, nullptr, nullptr, nullptr, kTfLiteBuiltinDequantize};
EXPECT_EQ(interpreter_.AddNodeWithParameters(
{0}, {1}, nullptr,
0, nullptr,
®_dequant0),
kTfLiteOk);
const TfLiteRegistration reg_dequant1 = {
nullptr, nullptr, nullptr, nullptr, nullptr, kTfLiteBuiltinDequantize};
EXPECT_EQ(interpreter_.AddNodeWithParameters(
{2}, {3}, nullptr,
0, nullptr,
®_dequant1),
kTfLiteOk);
const TfLiteRegistration reg_op0 = {
[](TfLiteContext* context, const char* buffer, size_t length) {
return reinterpret_cast<void*>(new int(1));
},
[](TfLiteContext* context, void* buffer) {
delete reinterpret_cast<int*>(buffer);
},
nullptr,
nullptr,
nullptr,
op};
EXPECT_EQ(interpreter_.AddNodeWithParameters(
{1, 3}, {4}, nullptr,
0,
builtin_data,
®_op0),
kTfLiteOk);
const std::vector<int> dims = {1};
TfLiteQuantization quantization;
quantization.type = kTfLiteNoQuantization;
EXPECT_EQ(
interpreter_.SetTensorParametersReadWrite(
0, TfLiteType::kTfLiteFloat16, "t0", dims, quantization, false),
kTfLiteOk);
EXPECT_EQ(
interpreter_.SetTensorParametersReadWrite(
2, TfLiteType::kTfLiteFloat16, "t2", dims, quantization, false),
kTfLiteOk);
if (const_dequantize_inputs) {
auto* tensor0 = interpreter_.tensor(0);
auto* tensor2 = interpreter_.tensor(2);
tensor0->allocation_type = kTfLiteMmapRo;
tensor2->allocation_type = kTfLiteMmapRo;
}
EXPECT_EQ(
interpreter_.SetTensorParametersReadWrite(
1, TfLiteType::kTfLiteFloat32, "t1", dims, quantization, false),
kTfLiteOk);
EXPECT_EQ(
interpreter_.SetTensorParametersReadWrite(
3, TfLiteType::kTfLiteFloat32, "t3", dims, quantization, false),
kTfLiteOk);
exec_plan()->data[0] = 0;
exec_plan()->data[1] = 1;
exec_plan()->data[2] = 2;
}
};
InterpreterFp16* interpreter_fp16_add_op =
new InterpreterFp16(kTfLiteBuiltinAdd);
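// With constant fp16 inputs and a supported consumer (ADD), all three nodes
// are offered to the delegate and the ADD's inputs are remapped to the fp16
// constant tensors.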
TEST(ModelBuilderTest, GetOpsToReplaceAcceptsFp16DequantizeNodes) {
TfLiteContext* context = interpreter_fp16_add_op->context();
context->GetExecutionPlan = [](struct TfLiteContext* context,
TfLiteIntArray** execution_plan) {
*execution_plan = interpreter_fp16_add_op->exec_plan();
return kTfLiteOk;
};
context->GetNodeAndRegistration = [](struct TfLiteContext*, int node_index,
TfLiteNode** node,
TfLiteRegistration** registration) {
*node = interpreter_fp16_add_op->node(node_index);
*registration = interpreter_fp16_add_op->registration(node_index);
return kTfLiteOk;
};
context->PreviewDelegatePartitioning =
[](struct TfLiteContext* context, const TfLiteIntArray* nodes_to_replace,
TfLiteDelegateParams** partition_params_array, int* num_partitions) {
EXPECT_EQ(nodes_to_replace->size, 1);
auto params = interpreter_fp16_add_op->add_delegate_params();
params->nodes_to_replace = TfLiteIntArrayCreate(1);
params->nodes_to_replace->data[0] = 2;
params->input_tensors = TfLiteIntArrayCreate(2);
params->input_tensors->data[0] = 1;
params->input_tensors->data[1] = 3;
params->output_tensors = TfLiteIntArrayCreate(1);
params->output_tensors->data[0] = 4;
*partition_params_array = interpreter_fp16_add_op->delegate_params();
*num_partitions = interpreter_fp16_add_op->num_delegate_params();
return kTfLiteOk;
};
TfLiteIntArray* ops_to_replace = GetOpsToReplace(context);
EXPECT_EQ(ops_to_replace->size, 3);
TfLiteNode* node = nullptr;
TfLiteRegistration* registration = nullptr;
context->GetNodeAndRegistration(context, 2, &node,
®istration);
EXPECT_EQ(context->tensors[node->inputs->data[0]].type,
TfLiteType::kTfLiteFloat16);
EXPECT_EQ(context->tensors[node->inputs->data[1]].type,
TfLiteType::kTfLiteFloat16);
TfLiteIntArrayFree(ops_to_replace);
}
InterpreterFp16* interpreter_fp16_non_constant =
new InterpreterFp16(kTfLiteBuiltinAdd, false);
TEST(ModelBuilderTest, GetOpsToReplaceRejectsNonConstantFp16DequantizeNodes) {
TfLiteContext* context = interpreter_fp16_non_constant->context();
context->GetExecutionPlan = [](struct TfLiteContext* context,
TfLiteIntArray** execution_plan) {
*execution_plan = interpreter_fp16_non_constant->exec_plan();
return kTfLiteOk;
};
context->GetNodeAndRegistration = [](struct TfLiteContext*, int node_index,
TfLiteNode** node,
TfLiteRegistration** registration) {
*node = interpreter_fp16_non_constant->node(node_index);
*registration = interpreter_fp16_non_constant->registration(node_index);
return kTfLiteOk;
};
context->PreviewDelegatePartitioning =
[](struct TfLiteContext* context, const TfLiteIntArray* nodes_to_replace,
TfLiteDelegateParams** partition_params_array, int* num_partitions) {
EXPECT_EQ(nodes_to_replace->size, 1);
auto params = interpreter_fp16_non_constant->add_delegate_params();
params->nodes_to_replace = TfLiteIntArrayCreate(1);
params->nodes_to_replace->data[0] = 2;
params->input_tensors = TfLiteIntArrayCreate(2);
params->input_tensors->data[0] = 1;
params->input_tensors->data[1] = 3;
params->output_tensors = TfLiteIntArrayCreate(1);
params->output_tensors->data[0] = 4;
*partition_params_array =
interpreter_fp16_non_constant->delegate_params();
*num_partitions = interpreter_fp16_non_constant->num_delegate_params();
return kTfLiteOk;
};
TfLiteIntArray* ops_to_replace = GetOpsToReplace(context);
EXPECT_EQ(ops_to_replace->size, 1);
TfLiteNode* node = nullptr;
TfLiteRegistration* registration = nullptr;
context->GetNodeAndRegistration(context, ops_to_replace->data[0], &node,
®istration);
EXPECT_EQ(context->tensors[node->inputs->data[0]].type,
TfLiteType::kTfLiteFloat32);
EXPECT_EQ(context->tensors[node->inputs->data[1]].type,
TfLiteType::kTfLiteFloat32);
TfLiteIntArrayFree(ops_to_replace);
}
InterpreterFp16* interpreter_fp16_gt_op =
new InterpreterFp16(kTfLiteBuiltinGreater);
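// The consumer here is rejected by the support check, so no nodes are
// replaced and the DEQUANTIZE nodes keep producing fp32 inputs for it.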
TEST(ModelBuilderTest, GetOpsToReplaceRejectsFp16DequantizeNodes) {
TfLiteContext* context = interpreter_fp16_gt_op->context();
context->GetExecutionPlan = [](struct TfLiteContext* context,
TfLiteIntArray** execution_plan) {
*execution_plan = interpreter_fp16_gt_op->exec_plan();
return kTfLiteOk;
};
context->GetNodeAndRegistration = [](struct TfLiteContext*, int node_index,
TfLiteNode** node,
TfLiteRegistration** registration) {
*node = interpreter_fp16_gt_op->node(node_index);
*registration = interpreter_fp16_gt_op->registration(node_index);
return kTfLiteOk;
};
context->PreviewDelegatePartitioning =
[](struct TfLiteContext* context, const TfLiteIntArray* nodes_to_replace,
TfLiteDelegateParams** partition_params_array, int* num_partitions) {
EXPECT_EQ(nodes_to_replace->size, 0);
*partition_params_array = nullptr;
*num_partitions = 0;
return kTfLiteOk;
};
TfLiteIntArray* ops_to_replace = GetOpsToReplace(context);
EXPECT_EQ(ops_to_replace->size, 0);
TfLiteNode* node = nullptr;
TfLiteRegistration* registration = nullptr;
const int kGreaterOpIndex = 2;
context->GetNodeAndRegistration(context, kGreaterOpIndex, &node,
®istration);
EXPECT_EQ(context->tensors[node->inputs->data[0]].type,
TfLiteType::kTfLiteFloat32);
EXPECT_EQ(context->tensors[node->inputs->data[1]].type,
TfLiteType::kTfLiteFloat32);
TfLiteIntArrayFree(ops_to_replace);
}
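// Two-node graph: DEQUANTIZE (uint8 -> float) followed by ADD. With quantized
// ops disabled (the default), the DEQUANTIZE itself is not delegated, so only
// the ADD is offered for delegation.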
class InterpreterFp32 : public DelegatedInterpreter {
public:
InterpreterFp32() : DelegatedInterpreter(2) {
void* builtin_data = malloc(sizeof(int));
EXPECT_EQ(interpreter_.AddTensors(4), kTfLiteOk);
EXPECT_EQ(interpreter_.SetInputs({0, 2}), kTfLiteOk);
EXPECT_EQ(interpreter_.SetOutputs({3}), kTfLiteOk);
const TfLiteRegistration reg_dequant0 = {nullptr,
nullptr,
nullptr,
nullptr,
nullptr,
kTfLiteBuiltinDequantize};
EXPECT_EQ(interpreter_.AddNodeWithParameters(
{0}, {1}, nullptr,
0, nullptr,
®_dequant0),
kTfLiteOk);
const TfLiteRegistration reg_add0 = {
[](TfLiteContext* context, const char* buffer, size_t length) {
return reinterpret_cast<void*>(new int(1));
},
[](TfLiteContext* context, void* buffer) {
delete reinterpret_cast<int*>(buffer);
},
nullptr,
nullptr,
nullptr,
kTfLiteBuiltinAdd};
EXPECT_EQ(interpreter_.AddNodeWithParameters(
{1, 2}, {3}, nullptr,
0,
builtin_data,
®_add0),
kTfLiteOk);
const std::vector<int> dims = {1};
TfLiteQuantization quantization;
quantization.type = kTfLiteNoQuantization;
EXPECT_EQ(interpreter_.SetTensorParametersReadWrite(
0, TfLiteType::kTfLiteUInt8, "t0", dims, quantization, false),
kTfLiteOk);
EXPECT_EQ(
interpreter_.SetTensorParametersReadWrite(
1, TfLiteType::kTfLiteFloat32, "t1", dims, quantization, false),
kTfLiteOk);
EXPECT_EQ(
interpreter_.SetTensorParametersReadWrite(
2, TfLiteType::kTfLiteFloat32, "t2", dims, quantization, false),
kTfLiteOk);
exec_plan()->data[0] = 0;
exec_plan()->data[1] = 1;
}
};
InterpreterFp32* interpreter_fp32 = new InterpreterFp32();
TEST(ModelBuilderTest, GetOpsToReplaceDoesNotPruneUint8) {
TfLiteContext* context = interpreter_fp32->context();
context->GetExecutionPlan = [](struct TfLiteContext* context,
TfLiteIntArray** execution_plan) {
*execution_plan = interpreter_fp32->exec_plan();
return kTfLiteOk;
};
context->GetNodeAndRegistration = [](struct TfLiteContext*, int node_index,
TfLiteNode** node,
TfLiteRegistration** registration) {
*node = interpreter_fp32->node(node_index);
*registration = interpreter_fp32->registration(node_index);
return kTfLiteOk;
};
context->PreviewDelegatePartitioning =
[](struct TfLiteContext* context, const TfLiteIntArray* nodes_to_replace,
TfLiteDelegateParams** partition_params_array, int* num_partitions) {
auto params = interpreter_fp32->add_delegate_params();
params->nodes_to_replace = TfLiteIntArrayCreate(1);
params->nodes_to_replace->data[0] = 1;
params->input_tensors = TfLiteIntArrayCreate(2);
params->input_tensors->data[0] = 1;
params->input_tensors->data[1] = 2;
params->output_tensors = TfLiteIntArrayCreate(1);
params->output_tensors->data[0] = 3;
*partition_params_array = interpreter_fp32->delegate_params();
*num_partitions = interpreter_fp32->num_delegate_params();
return kTfLiteOk;
};
TfLiteIntArray* ops_to_replace = GetOpsToReplace(context);
EXPECT_EQ(ops_to_replace->size, 1);
EXPECT_EQ(1, ops_to_replace->data[0]);
TfLiteIntArrayFree(ops_to_replace);
}
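// Four-node graph: DEQUANTIZE, ADD, PACK, ADD. The PACK node is not accepted
// here, so the two ADDs end up in separate single-node partitions and both
// are kept when two delegated partitions are allowed.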
class Interpreter2Fp32 : public DelegatedInterpreter {
public:
Interpreter2Fp32() : DelegatedInterpreter(4) {
void* builtin_data = malloc(sizeof(int));
EXPECT_EQ(interpreter_.AddTensors(8), kTfLiteOk);
EXPECT_EQ(interpreter_.SetInputs({0, 2, 4, 6}), kTfLiteOk);
EXPECT_EQ(interpreter_.SetOutputs({7}), kTfLiteOk);
const TfLiteRegistration reg_dequant = {nullptr,
nullptr,
nullptr,
nullptr,
nullptr,
kTfLiteBuiltinDequantize};
EXPECT_EQ(interpreter_.AddNodeWithParameters(
{0}, {1}, nullptr,
0, nullptr,
®_dequant),
kTfLiteOk);
const TfLiteRegistration reg_add0 = {
[](TfLiteContext* context, const char* buffer, size_t length) {
return reinterpret_cast<void*>(new int(1));
},
[](TfLiteContext* context, void* buffer) {
delete reinterpret_cast<int*>(buffer);
},
nullptr,
nullptr,
nullptr,
kTfLiteBuiltinAdd};
EXPECT_EQ(interpreter_.AddNodeWithParameters(
{1, 2}, {3}, nullptr,
0,
builtin_data,
®_add0),
kTfLiteOk);
const TfLiteRegistration reg_pack = {nullptr,
nullptr,
nullptr,
nullptr,
nullptr,
kTfLiteBuiltinPack};
EXPECT_EQ(interpreter_.AddNodeWithParameters(
{3, 4}, {5}, nullptr,
0, nullptr,
®_pack),
kTfLiteOk);
const TfLiteRegistration reg_add1 = {
[](TfLiteContext* context, const char* buffer, size_t length) {
return reinterpret_cast<void*>(new int[2]);
},
[](TfLiteContext* context, void* buffer) {
delete reinterpret_cast<int*>(buffer);
},
nullptr,
nullptr,
nullptr,
kTfLiteBuiltinAdd};
EXPECT_EQ(interpreter_.AddNodeWithParameters(
{5, 6}, {7}, nullptr,
0,
builtin_data,
®_add1),
kTfLiteOk);
std::vector<int> dims = {1};
TfLiteQuantization quantization;
quantization.type = kTfLiteNoQuantization;
EXPECT_EQ(interpreter_.SetTensorParametersReadWrite(
0, TfLiteType::kTfLiteUInt8, "t0", dims, quantization, false),
kTfLiteOk);
EXPECT_EQ(
interpreter_.SetTensorParametersReadWrite(
1, TfLiteType::kTfLiteFloat32, "t1", dims, quantization, false),
kTfLiteOk);
EXPECT_EQ(
interpreter_.SetTensorParametersReadWrite(
2, TfLiteType::kTfLiteFloat32, "t2", dims, quantization, false),
kTfLiteOk);
EXPECT_EQ(
interpreter_.SetTensorParametersReadWrite(
3, TfLiteType::kTfLiteFloat32, "t3", dims, quantization, false),
kTfLiteOk);
EXPECT_EQ(
interpreter_.SetTensorParametersReadWrite(
4, TfLiteType::kTfLiteFloat32, "t4", dims, quantization, false),
kTfLiteOk);
dims.push_back(2);
EXPECT_EQ(
interpreter_.SetTensorParametersReadWrite(
5, TfLiteType::kTfLiteFloat32, "t5", dims, quantization, false),
kTfLiteOk);
EXPECT_EQ(
interpreter_.SetTensorParametersReadWrite(
6, TfLiteType::kTfLiteFloat32, "t6", dims, quantization, false),
kTfLiteOk);
exec_plan()->data[0] = 0;
exec_plan()->data[1] = 1;
exec_plan()->data[2] = 2;
exec_plan()->data[3] = 3;
}
};
Interpreter2Fp32* interpreter2_fp32 = new Interpreter2Fp32();
TEST(ModelBuilderTest, GetOpsToReplaceMultiplePartitions) {
TfLiteContext* context = interpreter2_fp32->context();
context->GetExecutionPlan = [](struct TfLiteContext* context,
TfLiteIntArray** execution_plan) {
*execution_plan = interpreter2_fp32->exec_plan();
return kTfLiteOk;
};
context->GetNodeAndRegistration = [](struct TfLiteContext*, int node_index,
TfLiteNode** node,
TfLiteRegistration** registration) {
*node = interpreter2_fp32->node(node_index);
*registration = interpreter2_fp32->registration(node_index);
return kTfLiteOk;
};
context->PreviewDelegatePartitioning =
[](struct TfLiteContext* context, const TfLiteIntArray* nodes_to_replace,
TfLiteDelegateParams** partition_params_array, int* num_partitions) {
auto params = interpreter2_fp32->add_delegate_params();
params->nodes_to_replace = TfLiteIntArrayCreate(1);
params->nodes_to_replace->data[0] = 1;
params->input_tensors = TfLiteIntArrayCreate(2);
params->input_tensors->data[0] = 1;
params->input_tensors->data[1] = 2;
params->output_tensors = TfLiteIntArrayCreate(1);
params->output_tensors->data[0] = 3;
params = interpreter2_fp32->add_delegate_params();
params->nodes_to_replace = TfLiteIntArrayCreate(1);
params->nodes_to_replace->data[0] = 3;
params->input_tensors = TfLiteIntArrayCreate(2);
params->input_tensors->data[0] = 5;
params->input_tensors->data[1] = 6;
params->output_tensors = TfLiteIntArrayCreate(1);
params->output_tensors->data[0] = 7;
*partition_params_array = interpreter2_fp32->delegate_params();
*num_partitions = interpreter2_fp32->num_delegate_params();
return kTfLiteOk;
};
TfLiteIntArray* ops_to_replace = GetOpsToReplace(
context, false, 2);
ASSERT_EQ(ops_to_replace->size, 2);
EXPECT_THAT(absl::MakeConstSpan(ops_to_replace->data, 2),
testing::UnorderedElementsAre(1, 3));
TfLiteIntArrayFree(ops_to_replace);
}
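// Five-node graph: three constant fp16 DEQUANTIZE nodes (inputs 0..2 ->
// tensors 3..5) feed two consumers. With both_ops_supported == false the
// consumers are GREATER({3,4}) and ADD({4,5}); otherwise they are two ADDs.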
class InterpreterMultiNode : public DelegatedInterpreter {
public:
explicit InterpreterMultiNode(bool both_ops_supported = true)
: DelegatedInterpreter(5) {
void* builtin_data = malloc(sizeof(int));
EXPECT_EQ(interpreter_.AddTensors(8), kTfLiteOk);
EXPECT_EQ(interpreter_.SetInputs({0, 1, 2}), kTfLiteOk);
EXPECT_EQ(interpreter_.SetOutputs({6, 7}), kTfLiteOk);
for (int i = 0; i < 3; ++i) {
const TfLiteRegistration reg_dequant = {nullptr,
nullptr,
nullptr,
nullptr,
nullptr,
kTfLiteBuiltinDequantize};
EXPECT_EQ(interpreter_.AddNodeWithParameters(
{i}, {i + 3}, nullptr,
0, nullptr,
                  &reg_dequant),
kTfLiteOk);
}
if (both_ops_supported) {
const TfLiteRegistration reg_add0 = {
[](TfLiteContext* context, const char* buffer, size_t length) {
return reinterpret_cast<void*>(new int(1));
},
[](TfLiteContext* context, void* buffer) {
delete reinterpret_cast<int*>(buffer);
},
nullptr,
nullptr,
nullptr,
kTfLiteBuiltinAdd};
EXPECT_EQ(interpreter_.AddNodeWithParameters(
{4, 5}, {7}, nullptr,
0,
builtin_data,
                    &reg_add0),
kTfLiteOk);
const TfLiteRegistration reg_add1 = {
[](TfLiteContext* context, const char* buffer, size_t length) {
return reinterpret_cast<void*>(new int(1));
},
[](TfLiteContext* context, void* buffer) {
delete reinterpret_cast<int*>(buffer);
},
nullptr,
nullptr,
nullptr,
kTfLiteBuiltinAdd};
EXPECT_EQ(interpreter_.AddNodeWithParameters(
{3, 4}, {6}, nullptr,
0,
builtin_data,
                    &reg_add1),
kTfLiteOk);
} else {
const TfLiteRegistration reg_greater = {
[](TfLiteContext* context, const char* buffer, size_t length) {
return reinterpret_cast<void*>(new int(1));
},
[](TfLiteContext* context, void* buffer) {
delete reinterpret_cast<int*>(buffer);
},
nullptr,
nullptr,
nullptr,
kTfLiteBuiltinGreater};
EXPECT_EQ(interpreter_.AddNodeWithParameters(
{3, 4}, {6}, nullptr,
0,
builtin_data,
                    &reg_greater),
kTfLiteOk);
const TfLiteRegistration reg_add0 = {
[](TfLiteContext* context, const char* buffer, size_t length) {
return reinterpret_cast<void*>(new int(1));
},
[](TfLiteContext* context, void* buffer) {
delete reinterpret_cast<int*>(buffer);
},
nullptr,
nullptr,
nullptr,
kTfLiteBuiltinAdd};
EXPECT_EQ(interpreter_.AddNodeWithParameters(
{4, 5}, {7}, nullptr,
0,
builtin_data,
                    &reg_add0),
kTfLiteOk);
}
const std::vector<int> dims = {1};
TfLiteQuantization quantization;
quantization.type = kTfLiteNoQuantization;
EXPECT_EQ(
interpreter_.SetTensorParametersReadWrite(
0, TfLiteType::kTfLiteFloat16, "t0", dims, quantization, false),
kTfLiteOk);
EXPECT_EQ(
interpreter_.SetTensorParametersReadWrite(
1, TfLiteType::kTfLiteFloat16, "t1", dims, quantization, false),
kTfLiteOk);
EXPECT_EQ(
interpreter_.SetTensorParametersReadWrite(
2, TfLiteType::kTfLiteFloat16, "t2", dims, quantization, false),
kTfLiteOk);
auto* tensor0 = interpreter_.tensor(0);
auto* tensor1 = interpreter_.tensor(1);
auto* tensor2 = interpreter_.tensor(2);
tensor0->allocation_type = kTfLiteMmapRo;
tensor1->allocation_type = kTfLiteMmapRo;
tensor2->allocation_type = kTfLiteMmapRo;
EXPECT_EQ(
interpreter_.SetTensorParametersReadWrite(
3, TfLiteType::kTfLiteFloat32, "t3", dims, quantization, false),
kTfLiteOk);
EXPECT_EQ(
interpreter_.SetTensorParametersReadWrite(
4, TfLiteType::kTfLiteFloat32, "t4", dims, quantization, false),
kTfLiteOk);
EXPECT_EQ(
interpreter_.SetTensorParametersReadWrite(
5, TfLiteType::kTfLiteFloat32, "t5", dims, quantization, false),
kTfLiteOk);
EXPECT_EQ(
interpreter_.SetTensorParametersReadWrite(
            6, TfLiteType::kTfLiteFloat32, "t6", dims, quantization, false),
kTfLiteOk);
EXPECT_EQ(
interpreter_.SetTensorParametersReadWrite(
            7, TfLiteType::kTfLiteFloat32, "t7", dims, quantization, false),
kTfLiteOk);
exec_plan()->data[0] = 0;
exec_plan()->data[1] = 1;
exec_plan()->data[2] = 2;
exec_plan()->data[3] = 3;
exec_plan()->data[4] = 4;
}
};
InterpreterMultiNode* interpreter_mn =
new InterpreterMultiNode( false);
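// Only the ADD (node 4) is supported, so exactly one node is delegated and its
// inputs are remapped to the original fp16 tensors.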
TEST(ModelBuilderTest,
GetOpsToReplaceSelectsCorrectFp16Nodes_SingleDelegatedPartition) {
TfLiteContext* context = interpreter_mn->context();
context->GetExecutionPlan = [](struct TfLiteContext* context,
TfLiteIntArray** execution_plan) {
*execution_plan = interpreter_mn->exec_plan();
return kTfLiteOk;
};
context->GetNodeAndRegistration = [](struct TfLiteContext*, int node_index,
TfLiteNode** node,
TfLiteRegistration** registration) {
*node = interpreter_mn->node(node_index);
*registration = interpreter_mn->registration(node_index);
return kTfLiteOk;
};
context->PreviewDelegatePartitioning =
[](struct TfLiteContext* context, const TfLiteIntArray* nodes_to_replace,
TfLiteDelegateParams** partition_params_array, int* num_partitions) {
EXPECT_EQ(nodes_to_replace->size, 1);
EXPECT_EQ(nodes_to_replace->data[0], 4);
auto params = interpreter_mn->add_delegate_params();
params->nodes_to_replace = TfLiteIntArrayCreate(1);
params->nodes_to_replace->data[0] = 4;
params->input_tensors = TfLiteIntArrayCreate(2);
params->input_tensors->data[0] = 1;
params->input_tensors->data[1] = 3;
params->output_tensors = TfLiteIntArrayCreate(1);
params->output_tensors->data[0] = 7;
*partition_params_array = interpreter_mn->delegate_params();
*num_partitions = interpreter_mn->num_delegate_params();
return kTfLiteOk;
};
TfLiteIntArray* ops_to_replace = GetOpsToReplace(context);
EXPECT_EQ(ops_to_replace->size, 1);
EXPECT_EQ(ops_to_replace->data[0], 4);
TfLiteNode* node = nullptr;
TfLiteRegistration* registration = nullptr;
context->GetNodeAndRegistration(context, ops_to_replace->data[0], &node,
                                  &registration);
EXPECT_EQ(context->tensors[node->inputs->data[0]].type,
TfLiteType::kTfLiteFloat16);
EXPECT_EQ(context->tensors[node->inputs->data[1]].type,
TfLiteType::kTfLiteFloat16);
TfLiteIntArrayFree(ops_to_replace);
}
InterpreterMultiNode* interpreter_mn2 =
new InterpreterMultiNode( true);
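// Both ADDs are supported and two partitions are allowed, so the DEQUANTIZE
// nodes are absorbed as well: all five nodes are replaced and the ADD inputs
// read fp16 tensors directly.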
TEST(ModelBuilderTest,
GetOpsToReplaceSelectsCorrectFp16Nodes_MultipleDelegatedPartitions) {
TfLiteContext* context = interpreter_mn2->context();
context->GetExecutionPlan = [](struct TfLiteContext* context,
TfLiteIntArray** execution_plan) {
*execution_plan = interpreter_mn2->exec_plan();
return kTfLiteOk;
};
context->GetNodeAndRegistration = [](struct TfLiteContext*, int node_index,
TfLiteNode** node,
TfLiteRegistration** registration) {
*node = interpreter_mn2->node(node_index);
*registration = interpreter_mn2->registration(node_index);
return kTfLiteOk;
};
context->PreviewDelegatePartitioning =
[](struct TfLiteContext* context, const TfLiteIntArray* nodes_to_replace,
TfLiteDelegateParams** partition_params_array, int* num_partitions) {
EXPECT_EQ(nodes_to_replace->size, 2);
EXPECT_EQ(nodes_to_replace->data[0], 3);
EXPECT_EQ(nodes_to_replace->data[1], 4);
auto params = interpreter_mn2->add_delegate_params();
params->nodes_to_replace = TfLiteIntArrayCreate(1);
params->nodes_to_replace->data[0] = 3;
params->input_tensors = TfLiteIntArrayCreate(2);
params->input_tensors->data[0] = 3;
params->input_tensors->data[1] = 4;
params->output_tensors = TfLiteIntArrayCreate(1);
params->output_tensors->data[0] = 6;
params = interpreter_mn2->add_delegate_params();
params->nodes_to_replace = TfLiteIntArrayCreate(1);
params->nodes_to_replace->data[0] = 4;
params->input_tensors = TfLiteIntArrayCreate(2);
params->input_tensors->data[0] = 4;
params->input_tensors->data[1] = 5;
params->output_tensors = TfLiteIntArrayCreate(1);
params->output_tensors->data[0] = 7;
*partition_params_array = interpreter_mn2->delegate_params();
*num_partitions = interpreter_mn2->num_delegate_params();
return kTfLiteOk;
};
  TfLiteIntArray* ops_to_replace = GetOpsToReplace(
      context, /*allow_quant_ops=*/false, /*max_delegated_partitions=*/2);
EXPECT_EQ(ops_to_replace->size, 5);
TfLiteNode* node = nullptr;
TfLiteRegistration* registration = nullptr;
context->GetNodeAndRegistration(context, 3, &node,
                                  &registration);
EXPECT_EQ(context->tensors[node->inputs->data[0]].type,
TfLiteType::kTfLiteFloat16);
EXPECT_EQ(context->tensors[node->inputs->data[1]].type,
TfLiteType::kTfLiteFloat16);
context->GetNodeAndRegistration(context, 4, &node,
                                  &registration);
EXPECT_EQ(context->tensors[node->inputs->data[0]].type,
TfLiteType::kTfLiteFloat16);
EXPECT_EQ(context->tensors[node->inputs->data[1]].type,
TfLiteType::kTfLiteFloat16);
TfLiteIntArrayFree(ops_to_replace);
}
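// Quantized graph: two QUANTIZE nodes feed an int8 ADD whose result is
// DEQUANTIZEd back to fp32.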
class InterpreterQuantized : public DelegatedInterpreter {
public:
InterpreterQuantized() : DelegatedInterpreter(4) {
void* builtin_data = malloc(sizeof(int));
EXPECT_EQ(interpreter_.AddTensors(6), kTfLiteOk);
EXPECT_EQ(interpreter_.SetInputs({0, 3}), kTfLiteOk);
EXPECT_EQ(interpreter_.SetOutputs({5}), kTfLiteOk);
const TfLiteRegistration reg_quant0 = {nullptr,
nullptr,
nullptr,
nullptr,
nullptr,
kTfLiteBuiltinQuantize};
EXPECT_EQ(interpreter_.AddNodeWithParameters(
{0}, {1}, nullptr,
0, nullptr,
                  &reg_quant0),
kTfLiteOk);
const TfLiteRegistration reg_quant1 = {nullptr,
nullptr,
nullptr,
nullptr,
nullptr,
kTfLiteBuiltinQuantize};
EXPECT_EQ(interpreter_.AddNodeWithParameters(
{3}, {2}, nullptr,
0, nullptr,
                  &reg_quant1),
kTfLiteOk);
const TfLiteRegistration reg_add0 = {
[](TfLiteContext* context, const char* buffer, size_t length) {
return reinterpret_cast<void*>(new int(1));
},
[](TfLiteContext* context, void* buffer) {
delete reinterpret_cast<int*>(buffer);
},
nullptr,
nullptr,
nullptr,
kTfLiteBuiltinAdd};
EXPECT_EQ(interpreter_.AddNodeWithParameters(
{1, 2}, {4}, nullptr,
0,
builtin_data,
                  &reg_add0),
kTfLiteOk);
const TfLiteRegistration reg_dequant0 = {nullptr,
nullptr,
nullptr,
nullptr,
nullptr,
kTfLiteBuiltinDequantize};
EXPECT_EQ(interpreter_.AddNodeWithParameters(
{4}, {5}, nullptr,
0, nullptr,
                  &reg_dequant0),
kTfLiteOk);
const std::vector<int> dims = {1, 3, 3, 2};
TfLiteQuantization no_quantization;
no_quantization.type = kTfLiteNoQuantization;
EXPECT_EQ(
interpreter_.SetTensorParametersReadWrite(
0, TfLiteType::kTfLiteFloat32, "t0", dims, no_quantization, false),
kTfLiteOk);
EXPECT_EQ(
interpreter_.SetTensorParametersReadWrite(
3, TfLiteType::kTfLiteFloat32, "t3", dims, no_quantization, false),
kTfLiteOk);
EXPECT_EQ(
interpreter_.SetTensorParametersReadWrite(
5, TfLiteType::kTfLiteFloat32, "t5", dims, no_quantization, false),
kTfLiteOk);
float scale = 0.5f;
int32_t zero_point = 12;
TfLiteQuantization rw_quantization;
rw_quantization.type = kTfLiteAffineQuantization;
auto* rw_affine_quantization = static_cast<TfLiteAffineQuantization*>(
malloc(sizeof(TfLiteAffineQuantization)));
rw_affine_quantization->scale = TfLiteFloatArrayCreate(1);
rw_affine_quantization->zero_point = TfLiteIntArrayCreate(1);
rw_affine_quantization->scale->data[0] = scale;
rw_affine_quantization->zero_point->data[0] = zero_point;
rw_quantization.params = rw_affine_quantization;
EXPECT_EQ(
interpreter_.SetTensorParametersReadWrite(
1, TfLiteType::kTfLiteInt8, "t1", dims, rw_quantization, false),
kTfLiteOk);
EXPECT_EQ(
interpreter_.SetTensorParametersReadWrite(
2, TfLiteType::kTfLiteInt8, "t2", dims, rw_quantization, false),
kTfLiteOk);
EXPECT_EQ(
interpreter_.SetTensorParametersReadWrite(
4, TfLiteType::kTfLiteInt8, "t4", dims, rw_quantization, false),
kTfLiteOk);
exec_plan()->data[0] = 0;
exec_plan()->data[1] = 1;
exec_plan()->data[2] = 2;
exec_plan()->data[3] = 3;
}
};
InterpreterQuantized* interpreter_quant = new InterpreterQuantized();
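// With allow_quant_ops the whole quantized chain (4 nodes) is delegated;
// without it, nothing is.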
TEST(ModelBuilderTest, GetOpsToReplace_AllowQuantOps) {
TfLiteContext* context = interpreter_quant->context();
context->GetExecutionPlan = [](struct TfLiteContext* context,
TfLiteIntArray** execution_plan) {
*execution_plan = interpreter_quant->exec_plan();
return kTfLiteOk;
};
context->GetNodeAndRegistration = [](struct TfLiteContext*, int node_index,
TfLiteNode** node,
TfLiteRegistration** registration) {
*node = interpreter_quant->node(node_index);
*registration = interpreter_quant->registration(node_index);
return kTfLiteOk;
};
context->PreviewDelegatePartitioning =
[](struct TfLiteContext* context, const TfLiteIntArray* nodes_to_replace,
TfLiteDelegateParams** partition_params_array, int* num_partitions) {
if (nodes_to_replace->size == 0) {
*num_partitions = 0;
return kTfLiteOk;
} else if (nodes_to_replace->size == 4) {
auto params = interpreter_quant->add_delegate_params();
params->nodes_to_replace = TfLiteIntArrayCreate(4);
params->nodes_to_replace->data[0] = 0;
params->nodes_to_replace->data[1] = 1;
params->nodes_to_replace->data[2] = 2;
          params->nodes_to_replace->data[3] = 3;
params->input_tensors = TfLiteIntArrayCreate(2);
params->input_tensors->data[0] = 0;
params->input_tensors->data[1] = 3;
params->output_tensors = TfLiteIntArrayCreate(1);
params->output_tensors->data[0] = 5;
*partition_params_array = interpreter_quant->delegate_params();
*num_partitions = interpreter_quant->num_delegate_params();
return kTfLiteOk;
} else {
return kTfLiteError;
}
};
  TfLiteIntArray* ops_to_replace =
      GetOpsToReplace(context, /*allow_quant_ops=*/true);
EXPECT_EQ(ops_to_replace->size, 4);
  TfLiteIntArray* ops_to_replace_without_quant =
      GetOpsToReplace(context, /*allow_quant_ops=*/false);
EXPECT_EQ(ops_to_replace_without_quant->size, 0);
TfLiteIntArrayFree(ops_to_replace);
TfLiteIntArrayFree(ops_to_replace_without_quant);
}
InterpreterFp16* interpreter_fp16_split_op =
new InterpreterFp16(kTfLiteBuiltinSplit);
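// SPLIT following fp16 dequantization is accepted when no ops are excluded
// (the CL backend case); the DEQUANTIZE nodes are folded in as well.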
TEST(ModelBuilderTest, GetOpsToReplaceAcceptsSplitOpCl) {
TfLiteContext* context = interpreter_fp16_split_op->context();
context->GetExecutionPlan = [](struct TfLiteContext* context,
TfLiteIntArray** execution_plan) {
*execution_plan = interpreter_fp16_split_op->exec_plan();
return kTfLiteOk;
};
context->GetNodeAndRegistration = [](struct TfLiteContext*, int node_index,
TfLiteNode** node,
TfLiteRegistration** registration) {
*node = interpreter_fp16_split_op->node(node_index);
*registration = interpreter_fp16_split_op->registration(node_index);
return kTfLiteOk;
};
context->PreviewDelegatePartitioning =
[](struct TfLiteContext* context, const TfLiteIntArray* nodes_to_replace,
TfLiteDelegateParams** partition_params_array, int* num_partitions) {
EXPECT_EQ(nodes_to_replace->size, 1);
auto params = interpreter_fp16_split_op->add_delegate_params();
params->nodes_to_replace = TfLiteIntArrayCreate(1);
params->nodes_to_replace->data[0] = 2;
params->input_tensors = TfLiteIntArrayCreate(2);
params->input_tensors->data[0] = 1;
params->input_tensors->data[1] = 3;
params->output_tensors = TfLiteIntArrayCreate(1);
params->output_tensors->data[0] = 4;
*partition_params_array = interpreter_fp16_split_op->delegate_params();
*num_partitions = interpreter_fp16_split_op->num_delegate_params();
return kTfLiteOk;
};
TfLiteIntArray* ops_to_replace = GetOpsToReplace(context);
EXPECT_EQ(ops_to_replace->size, 3);
TfLiteNode* node = nullptr;
TfLiteRegistration* registration = nullptr;
context->GetNodeAndRegistration(context, 2, &node,
                                  &registration);
EXPECT_EQ(context->tensors[node->inputs->data[0]].type,
TfLiteType::kTfLiteFloat16);
EXPECT_EQ(context->tensors[node->inputs->data[1]].type,
TfLiteType::kTfLiteFloat16);
TfLiteIntArrayFree(ops_to_replace);
}
InterpreterFp16* interpreter_fp16_split_op2 =
new InterpreterFp16(kTfLiteBuiltinSplit);
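// The same graph with SPLIT in excluded_ops (the GL backend case) yields no
// delegated nodes, and the node keeps its fp32 inputs.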
TEST(ModelBuilderTest, GetOpsToReplaceRejectsSplitOpGl) {
TfLiteContext* context = interpreter_fp16_split_op2->context();
context->GetExecutionPlan = [](struct TfLiteContext* context,
TfLiteIntArray** execution_plan) {
*execution_plan = interpreter_fp16_split_op2->exec_plan();
return kTfLiteOk;
};
context->GetNodeAndRegistration = [](struct TfLiteContext*, int node_index,
TfLiteNode** node,
TfLiteRegistration** registration) {
*node = interpreter_fp16_split_op2->node(node_index);
*registration = interpreter_fp16_split_op2->registration(node_index);
return kTfLiteOk;
};
context->PreviewDelegatePartitioning =
[](struct TfLiteContext* context, const TfLiteIntArray* nodes_to_replace,
TfLiteDelegateParams** partition_params_array, int* num_partitions) {
EXPECT_EQ(nodes_to_replace->size, 0);
*partition_params_array = nullptr;
*num_partitions = 0;
return kTfLiteOk;
};
absl::flat_hash_set<TfLiteBuiltinOperator> excluded_ops = {
kTfLiteBuiltinSplit};
  TfLiteIntArray* ops_to_replace =
      GetOpsToReplace(context, /*allow_quant_ops=*/false,
                      /*max_delegated_partitions=*/1, &excluded_ops);
EXPECT_EQ(ops_to_replace->size, 0);
TfLiteNode* node = nullptr;
TfLiteRegistration* registration = nullptr;
context->GetNodeAndRegistration(context, 2, &node,
                                  &registration);
EXPECT_EQ(context->tensors[node->inputs->data[0]].type,
TfLiteType::kTfLiteFloat32);
EXPECT_EQ(context->tensors[node->inputs->data[1]].type,
TfLiteType::kTfLiteFloat32);
TfLiteIntArrayFree(ops_to_replace);
}
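// Minimal TfLiteContext stub with a three-node execution plan
// (ADD -> op under test -> ADD) so that individual operation parsers can be
// probed through IsSupported().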
class StubTfLiteContext : public TfLiteContext {
public:
StubTfLiteContext(const int builtin_code, const int op_version,
const int num_inputs)
: TfLiteContext({0}) {
exec_plan_ = TfLiteIntArrayCreate(3);
for (int i = 0; i < 3; ++i) exec_plan_->data[i] = i;
int tensor_no = 0;
std::memset(nodes_, 0, sizeof(nodes_));
std::memset(registrations_, 0, sizeof(registrations_));
nodes_[0].inputs = TfLiteIntArrayCreate(1);
nodes_[0].inputs->data[0] = tensor_no++;
nodes_[0].outputs = TfLiteIntArrayCreate(1);
nodes_[0].outputs->data[0] = tensor_no;
nodes_[0].builtin_data = nullptr;
nodes_[1].inputs = TfLiteIntArrayCreate(num_inputs);
for (int i = 0; i < num_inputs; i++) {
nodes_[1].inputs->data[i] = tensor_no++;
}
nodes_[1].outputs = TfLiteIntArrayCreate(1);
nodes_[1].outputs->data[0] = tensor_no;
nodes_[1].builtin_data = malloc(1024);
std::memset(nodes_[1].builtin_data, 0, 1024);
nodes_[2].inputs = TfLiteIntArrayCreate(1);
nodes_[2].inputs->data[0] = tensor_no++;
nodes_[2].outputs = TfLiteIntArrayCreate(1);
nodes_[2].outputs->data[0] = tensor_no++;
nodes_[2].builtin_data = nullptr;
tensors_.resize(tensor_no);
for (size_t i = 0; i < tensors_.size(); i++) {
std::memset(&tensors_[i], 0, sizeof(tensors_[i]));
tensors_[i].buffer_handle = kTfLiteNullBufferHandle;
tensors_[i].type = kTfLiteFloat32;
tensors_[i].dims = TfLiteIntArrayCreate(4);
for (int d = 0; d < 4; d++) {
tensors_[i].dims->data[d] = 1;
}
}
tensors = tensors_.data();
tensors_size = tensors_.size();
registrations_[0].builtin_code = kTfLiteBuiltinAdd;
registrations_[1].builtin_code = builtin_code;
registrations_[1].version = op_version;
registrations_[2].builtin_code = kTfLiteBuiltinAdd;
this->GetExecutionPlan = StubGetExecutionPlan;
this->GetNodeAndRegistration = StubGetNodeAndRegistration;
}
~StubTfLiteContext() {
for (auto& node : nodes_) {
TfLiteIntArrayFree(node.inputs);
TfLiteIntArrayFree(node.outputs);
if (node.builtin_data) {
free(node.builtin_data);
}
}
for (auto& tensor : tensors_) {
TfLiteIntArrayFree(tensor.dims);
}
TfLiteIntArrayFree(exec_plan_);
}
TfLiteIntArray* exec_plan() const { return exec_plan_; }
TfLiteNode* node() { return &nodes_[1]; }
  TfLiteRegistration* registration() { return &registrations_[1]; }
TfLiteNode* node(int node_index) { return &nodes_[node_index]; }
TfLiteRegistration* registration(int reg_index) {
    return &registrations_[reg_index];
}
TfLiteTensor* tensor(int tensor_index) { return &tensors_[tensor_index]; }
private:
static TfLiteStatus StubGetExecutionPlan(TfLiteContext* context,
TfLiteIntArray** execution_plan) {
StubTfLiteContext* stub = reinterpret_cast<StubTfLiteContext*>(context);
*execution_plan = stub->exec_plan();
return kTfLiteOk;
}
static TfLiteStatus StubGetNodeAndRegistration(
TfLiteContext* context, int node_index, TfLiteNode** node,
TfLiteRegistration** registration) {
StubTfLiteContext* stub = reinterpret_cast<StubTfLiteContext*>(context);
*node = stub->node(node_index);
*registration = stub->registration(node_index);
return kTfLiteOk;
}
TfLiteIntArray* exec_plan_;
TfLiteNode nodes_[3];
TfLiteRegistration registrations_[3];
std::vector<TfLiteTensor> tensors_;
};
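// The per-op parser tests below feed NewOperationParser(...)->IsSupported()
// unsupported op versions, input counts, and builtin params first, then a
// valid configuration.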
TEST(AddOperationParserTest, TestIsSupported) {
auto context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinAdd,
3,
2);
auto parser = NewOperationParser(context->registration());
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinAdd,
2,
2);
ASSERT_TRUE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
}
TEST(BatchMatMulOperationParserTest, TestIsSupported) {
auto context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinBatchMatmul,
1,
3);
auto parser = NewOperationParser(context->registration());
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinBatchMatmul,
1,
2);
ASSERT_TRUE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
}
TEST(CastOperationParserTest, TestIsSupported) {
auto context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinCast,
1,
2);
auto parser = NewOperationParser(context->registration());
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinCast,
1,
1);
context->tensor(1)->type = kTfLiteFloat32;
context->tensor(2)->type = kTfLiteInt32;
ASSERT_TRUE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context->tensor(1)->type = kTfLiteInt32;
context->tensor(2)->type = kTfLiteFloat32;
ASSERT_TRUE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context->tensor(1)->type = kTfLiteInt8;
context->tensor(2)->type = kTfLiteFloat32;
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context->tensor(1)->type = kTfLiteBool;
ASSERT_TRUE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context->registration(0)->builtin_code = kTfLiteBuiltinGreater;
ASSERT_TRUE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
ASSERT_TRUE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
}
TEST(ClampOperationsParserTest, TestIsSupported) {
auto context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinReluN1To1,
1,
2);
auto parser = NewOperationParser(context->registration());
ASSERT_TRUE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
}
TEST(ConcatenationOperationParserTest, TestIsSupported) {
auto context =
std::make_unique<StubTfLiteContext>(kTfLiteBuiltinConcatenation,
3,
2);
auto parser = NewOperationParser(context->registration());
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinConcatenation,
2,
2);
ASSERT_TRUE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
}
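// CONV_2D: unsupported versions, zero strides or dilations, and sign-bit fused
// activations are rejected; unit strides/dilations with RELU pass.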
TEST(Conv2DOperationParserTest, TestIsSupported) {
auto context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinConv2d,
6,
2);
auto parser = NewOperationParser(context->registration());
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinConv2d,
5,
2);
TfLiteConvParams* tf_options =
static_cast<TfLiteConvParams*>(context->node()->builtin_data);
tf_options->stride_width = 0;
tf_options->stride_height = 0;
tf_options->dilation_width_factor = 0;
tf_options->dilation_height_factor = 0;
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
tf_options->stride_width = 1;
tf_options->stride_height = 1;
tf_options->dilation_width_factor = 0;
tf_options->dilation_height_factor = 0;
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
tf_options->stride_width = 1;
tf_options->stride_height = 1;
tf_options->dilation_width_factor = 1;
tf_options->dilation_height_factor = 1;
tf_options->activation = kTfLiteActSignBit;
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
tf_options->stride_width = 1;
tf_options->stride_height = 1;
tf_options->dilation_width_factor = 1;
tf_options->dilation_height_factor = 1;
tf_options->activation = kTfLiteActRelu;
ASSERT_TRUE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
}
TEST(DensifyOperationParserTest, TestIsSupported) {
auto context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinDensify,
2,
0);
auto parser = NewOperationParser(context->registration());
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinDensify,
1,
0);
ASSERT_TRUE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
}
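// DEPTHWISE_CONV_2D: same checks as CONV_2D plus a nonzero depth_multiplier.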
TEST(DepthwiseConvolutionOperationParserTest, TestIsSupported) {
auto context =
std::make_unique<StubTfLiteContext>(kTfLiteBuiltinDepthwiseConv2d,
7,
2);
auto parser = NewOperationParser(context->registration());
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinDepthwiseConv2d,
6,
2);
TfLiteDepthwiseConvParams* tf_options =
static_cast<TfLiteDepthwiseConvParams*>(context->node()->builtin_data);
tf_options->stride_width = 0;
tf_options->stride_height = 0;
tf_options->dilation_width_factor = 0;
tf_options->dilation_height_factor = 0;
tf_options->depth_multiplier = 1;
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
tf_options->stride_width = 1;
tf_options->stride_height = 1;
tf_options->dilation_width_factor = 0;
tf_options->dilation_height_factor = 0;
tf_options->depth_multiplier = 1;
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
tf_options->stride_width = 1;
tf_options->stride_height = 1;
tf_options->dilation_width_factor = 1;
tf_options->dilation_height_factor = 1;
tf_options->depth_multiplier = 1;
tf_options->activation = kTfLiteActSignBit;
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
tf_options->stride_width = 1;
tf_options->stride_height = 1;
tf_options->dilation_width_factor = 1;
tf_options->dilation_height_factor = 1;
tf_options->depth_multiplier = 0;
tf_options->activation = kTfLiteActRelu;
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
tf_options->stride_width = 1;
tf_options->stride_height = 1;
tf_options->dilation_width_factor = 1;
tf_options->dilation_height_factor = 1;
tf_options->depth_multiplier = 1;
tf_options->activation = kTfLiteActRelu;
ASSERT_TRUE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
}
TEST(DepthToSpaceOperationParserTest, TestIsSupported) {
auto context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinDepthToSpace,
1,
0);
auto parser = NewOperationParser(context->registration());
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinDepthToSpace,
1,
1);
TfLiteDepthToSpaceParams* d2s_params =
static_cast<TfLiteDepthToSpaceParams*>(context->node()->builtin_data);
d2s_params->block_size = 1;
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
d2s_params->block_size = 2;
ASSERT_TRUE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
}
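// DEQUANTIZE with quantized ops allowed: too-new versions, extra inputs, and
// int16 inputs are rejected; int8 is accepted.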
TEST(DequantizeOperationParserTest, TestIsSupported) {
auto context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinDequantize,
4,
1);
  auto parser =
      NewOperationParser(context->registration(), /*allow_quant_ops=*/true);
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinDequantize,
1,
2);
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinDequantize,
1,
1);
context->tensor(1)->type = kTfLiteInt16;
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context->tensor(1)->type = kTfLiteInt8;
ASSERT_TRUE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
}
TEST(LogicalElementwiseOperationParserTest, TestIsSupported) {
auto context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinEqual,
3,
1);
auto parser = NewOperationParser(context->registration());
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinEqual,
2,
1);
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context->registration(2)->builtin_code = kTfLiteBuiltinCast;
ASSERT_TRUE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context->registration(2)->builtin_code = kTfLiteBuiltinSelect;
ASSERT_TRUE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context->registration(2)->builtin_code = kTfLiteBuiltinSelectV2;
ASSERT_TRUE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
}
TEST(ArithmeticUnaryElementwiseOperationParserTest, TestIsSupported) {
auto context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinAbs,
3,
2);
auto parser = NewOperationParser(context->registration());
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinAbs,
2,
1);
ASSERT_TRUE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
}
TEST(ArithmeticBinaryElementwiseOperationParserTest, TestIsSupported) {
auto context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinDiv,
3,
1);
auto parser = NewOperationParser(context->registration());
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinDiv,
2,
2);
ASSERT_TRUE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
}
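// FULLY_CONNECTED: unsupported versions, the three-input form built by this
// stub, shuffled 4x16 int8 weights, and rank-3 inputs are rejected.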
TEST(FullyConnectedOperationParserTest, TestIsSupported) {
auto context =
std::make_unique<StubTfLiteContext>(kTfLiteBuiltinFullyConnected,
10,
2);
auto parser = NewOperationParser(context->registration());
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinFullyConnected,
9,
3);
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinFullyConnected,
9,
2);
TfLiteFullyConnectedParams* tf_options =
static_cast<TfLiteFullyConnectedParams*>(context->node()->builtin_data);
tf_options->weights_format =
kTfLiteFullyConnectedWeightsFormatShuffled4x16Int8;
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
tf_options->weights_format = kTfLiteFullyConnectedWeightsFormatDefault;
tf_options->keep_num_dims = true;
ASSERT_TRUE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context->tensor(1)->dims->size = 3;
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
}
TEST(GatherOperationParserTest, TestIsSupported) {
auto context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinGather,
1,
1);
auto parser = NewOperationParser(context->registration());
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinGather,
1,
3);
context->tensor(2)->dims->size = 1;
context->tensor(2)->type = kTfLiteInt32;
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinGather,
1,
2);
context->tensor(2)->dims->size = 2;
context->tensor(2)->type = kTfLiteInt32;
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinGather,
1,
2);
context->tensor(2)->dims->size = 1;
context->tensor(2)->type = kTfLiteFloat32;
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinGather,
1,
2);
context->tensor(2)->dims->size = 1;
context->tensor(2)->type = kTfLiteInt32;
ASSERT_TRUE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinGather,
1,
2);
context->tensor(2)->dims->size = 1;
context->tensor(2)->type = kTfLiteInt32;
context->tensor(2)->allocation_type = kTfLiteMmapRo;
ASSERT_TRUE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
}
TEST(HardSwishOperationParserTest, TestIsSupported) {
auto context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinHardSwish,
1,
2);
auto parser = NewOperationParser(context->registration());
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinHardSwish,
1,
1);
ASSERT_TRUE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
}
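// LSTM full kernel: unsupported versions, missing inputs, and non-sigmoid
// activations are rejected.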
TEST(LSTMOperationParserTest, TestIsSupported) {
auto context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinLstm,
5,
24);
auto parser = NewOperationParser(context->registration());
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinLstm,
1,
1);
TfLiteLSTMParams* tf_options =
static_cast<TfLiteLSTMParams*>(context->node()->builtin_data);
tf_options->kernel_type = kTfLiteLSTMFullKernel;
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinLstm,
1,
24);
tf_options = static_cast<TfLiteLSTMParams*>(context->node()->builtin_data);
tf_options->kernel_type = kTfLiteLSTMFullKernel;
tf_options->activation = kTfLiteActRelu;
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
tf_options->activation = kTfLiteActSigmoid;
ASSERT_TRUE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
}
TEST(MulOperationParserTest, TestIsSupported) {
auto context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinMul,
4,
2);
auto parser = NewOperationParser(context->registration());
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinMul,
3,
2);
TfLiteMulParams* tf_options =
static_cast<TfLiteMulParams*>(context->node()->builtin_data);
tf_options->activation = kTfLiteActSignBit;
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
tf_options->activation = kTfLiteActSignBit;
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
tf_options->activation = kTfLiteActSigmoid;
ASSERT_TRUE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context->tensor(1)->dims->data[0] = 256;
context->tensor(2)->dims->data[1] = 256;
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
}
TEST(PackOperationParserTest, TestIsSupported) {
auto context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinPack,
1,
1);
auto parser = NewOperationParser(context->registration());
ASSERT_TRUE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
}
TEST(PReLUOperationParserTest, TestIsSupported) {
auto context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinPrelu,
2,
1);
auto parser = NewOperationParser(context->registration());
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinPrelu,
1,
1);
ASSERT_TRUE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
}
TEST(PadOperationParserTest, TestIsSupported) {
auto context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinPad,
3,
1);
auto parser = NewOperationParser(context->registration());
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinPad,
2,
2);
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinPad,
2,
2);
context->tensor(2)->allocation_type = kTfLiteMmapRo;
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context->tensor(2)->dims->size = 2;
context->tensor(2)->dims->data[0] = 4;
context->tensor(2)->dims->data[1] = 2;
ASSERT_TRUE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context->tensor(2)->dims->size = 2;
context->tensor(2)->dims->data[0] = 4;
context->tensor(2)->dims->data[1] = 1;
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
}
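// MIRROR_PAD: SYMMETRIC mode, unsupported versions, and non-constant or
// mis-shaped (not 4x2) padding tensors are rejected.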
TEST(MirrorPadOperationParserTest, TestIsSupported) {
auto context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinMirrorPad,
3,
1);
TfLiteMirrorPaddingParams* tf_options =
static_cast<TfLiteMirrorPaddingParams*>(context->node()->builtin_data);
tf_options->mode = kTfLiteMirrorPaddingSymmetric;
auto parser = NewOperationParser(context->registration());
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinMirrorPad,
3,
1);
tf_options =
static_cast<TfLiteMirrorPaddingParams*>(context->node()->builtin_data);
tf_options->mode = kTfLiteMirrorPaddingReflect;
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinMirrorPad,
2,
2);
tf_options =
static_cast<TfLiteMirrorPaddingParams*>(context->node()->builtin_data);
tf_options->mode = kTfLiteMirrorPaddingReflect;
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinMirrorPad,
2,
2);
tf_options =
static_cast<TfLiteMirrorPaddingParams*>(context->node()->builtin_data);
tf_options->mode = kTfLiteMirrorPaddingReflect;
context->tensor(2)->allocation_type = kTfLiteMmapRo;
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context->tensor(2)->dims->size = 2;
context->tensor(2)->dims->data[0] = 4;
context->tensor(2)->dims->data[1] = 2;
ASSERT_TRUE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context->tensor(2)->dims->size = 2;
context->tensor(2)->dims->data[0] = 4;
context->tensor(2)->dims->data[1] = 1;
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
}
TEST(AveragePooling2DOperationParserTest, TestIsSupported) {
auto context =
std::make_unique<StubTfLiteContext>(kTfLiteBuiltinAveragePool2d,
3,
2);
auto parser = NewOperationParser(context->registration());
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinAveragePool2d,
2,
1);
TfLitePoolParams* tf_options =
static_cast<TfLitePoolParams*>(context->node()->builtin_data);
tf_options->filter_height = 0;
tf_options->filter_width = 0;
tf_options->stride_width = 0;
tf_options->stride_height = 0;
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
tf_options->filter_height = 0;
tf_options->filter_width = 0;
tf_options->stride_width = 1;
tf_options->stride_height = 1;
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
tf_options->filter_height = 1;
tf_options->filter_width = 1;
tf_options->stride_width = 1;
tf_options->stride_height = 1;
tf_options->activation = kTfLiteActSignBit;
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
tf_options->filter_height = 1;
tf_options->filter_width = 1;
tf_options->stride_width = 1;
tf_options->stride_height = 1;
tf_options->activation = kTfLiteActTanh;
ASSERT_TRUE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
}
TEST(MaxPooling2DOperationParserTest, TestIsSupported) {
auto context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinMaxPool2d,
3,
2);
auto parser = NewOperationParser(context->registration());
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinMaxPool2d,
2,
1);
TfLitePoolParams* tf_options =
static_cast<TfLitePoolParams*>(context->node()->builtin_data);
tf_options->filter_height = 0;
tf_options->filter_width = 0;
tf_options->stride_width = 0;
tf_options->stride_height = 0;
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
tf_options->filter_height = 0;
tf_options->filter_width = 0;
tf_options->stride_width = 1;
tf_options->stride_height = 1;
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
tf_options->filter_height = 1;
tf_options->filter_width = 1;
tf_options->stride_width = 1;
tf_options->stride_height = 1;
tf_options->activation = kTfLiteActSignBit;
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
tf_options->filter_height = 1;
tf_options->filter_width = 1;
tf_options->stride_width = 1;
tf_options->stride_height = 1;
tf_options->activation = kTfLiteActTanh;
ASSERT_TRUE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
}
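// Custom MaxPoolingWithArgmax2D: pooling params arrive via custom_initial_data
// and the node carries a second (argmax) output; the stride/filter/activation
// checks mirror the builtin pooling ops.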
TEST(CustomMaxPooling2DOperationParserTest, TestIsSupported) {
auto context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinCustom,
2,
1);
context->registration()->custom_name = "MaxPoolingWithArgmax2D";
TfLitePoolParams tf_options;
context->node()->custom_initial_data = &tf_options;
TfLiteIntArrayFree(context->node()->outputs);
context->node()->outputs = TfLiteIntArrayCreate(2);
context->node()->outputs->data[0] = 2;
context->node()->outputs->data[1] = 3;
auto parser = NewOperationParser(context->registration());
tf_options.filter_height = 0;
tf_options.filter_width = 0;
tf_options.stride_width = 0;
tf_options.stride_height = 0;
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
tf_options.filter_height = 0;
tf_options.filter_width = 0;
tf_options.stride_width = 1;
tf_options.stride_height = 1;
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
tf_options.filter_height = 1;
tf_options.filter_width = 1;
tf_options.stride_width = 1;
tf_options.stride_height = 1;
tf_options.activation = kTfLiteActSignBit;
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
tf_options.filter_height = 1;
tf_options.filter_width = 1;
tf_options.stride_width = 1;
tf_options.stride_height = 1;
tf_options.activation = kTfLiteActTanh;
ASSERT_TRUE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
}
TEST(ReduceMaxOperationParserTest, TestIsSupported) {
auto context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinReduceMax,
1,
2);
auto parser = NewOperationParser(context->registration());
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context->tensor(2)->allocation_type = kTfLiteMmapRo;
context->tensor(2)->type = kTfLiteInt32;
ASSERT_TRUE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context->tensor(2)->allocation_type = kTfLiteMmapRo;
context->tensor(2)->type = kTfLiteInt32;
ASSERT_TRUE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
}
TEST(ReduceMinOperationParserTest, TestIsSupported) {
auto context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinReduceMin,
1,
2);
auto parser = NewOperationParser(context->registration());
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context->tensor(2)->allocation_type = kTfLiteMmapRo;
context->tensor(2)->type = kTfLiteInt32;
ASSERT_TRUE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context->tensor(2)->allocation_type = kTfLiteMmapRo;
context->tensor(2)->type = kTfLiteInt32;
ASSERT_TRUE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
}
TEST(ReduceProductOperationParserTest, TestIsSupported) {
auto context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinReduceProd,
1,
2);
auto parser = NewOperationParser(context->registration());
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context->tensor(2)->allocation_type = kTfLiteMmapRo;
context->tensor(2)->type = kTfLiteInt32;
ASSERT_TRUE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context->tensor(2)->allocation_type = kTfLiteMmapRo;
context->tensor(2)->type = kTfLiteInt32;
ASSERT_TRUE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
}
TEST(QuantizeOperationParserTest, TestIsSupported) {
auto context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinQuantize,
3,
1);
  auto parser =
      NewOperationParser(context->registration(), /*allow_quant_ops=*/true);
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinQuantize,
2,
2);
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinQuantize,
2,
1);
ASSERT_TRUE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
}
TEST(ReLUOperationParserTest, TestIsSupported) {
auto context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinRelu,
3,
1);
auto parser = NewOperationParser(context->registration());
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinRelu,
2,
1);
ASSERT_TRUE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
}
TEST(ReLU6OperationParserTest, TestIsSupported) {
auto context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinRelu6,
3,
1);
auto parser = NewOperationParser(context->registration());
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinRelu6,
2,
1);
ASSERT_TRUE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
}
TEST(LeakyReLUOperationParserTest, TestIsSupported) {
auto context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinLeakyRelu,
3,
1);
auto parser = NewOperationParser(context->registration());
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinLeakyRelu,
2,
1);
ASSERT_TRUE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
}
TEST(ResamplerOperationParserTest, TestIsSupported) {
auto context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinCustom,
1,
1);
context->registration()->custom_name = "Resampler";
auto parser = NewOperationParser(context->registration());
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinCustom,
1,
2);
context->registration()->custom_name = "Resampler";
ASSERT_TRUE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
}
TEST(ReshapeOperationParserTest, TestIsSupported) {
auto context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinReshape,
2,
1);
auto parser = NewOperationParser(context->registration());
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinReshape,
1,
2);
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinReshape,
1,
1);
ASSERT_TRUE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
}
TEST(Resize2DBilinearOperationParserTest, TestIsSupported) {
auto context =
std::make_unique<StubTfLiteContext>(kTfLiteBuiltinResizeBilinear,
4,
1);
auto parser = NewOperationParser(context->registration());
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinResizeBilinear,
3,
2);
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinResizeBilinear,
3,
1);
TfLiteResizeBilinearParams* tf_options =
static_cast<TfLiteResizeBilinearParams*>(context->node()->builtin_data);
tf_options->half_pixel_centers = true;
tf_options->align_corners = true;
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
tf_options->half_pixel_centers = true;
tf_options->align_corners = false;
ASSERT_TRUE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
tf_options->half_pixel_centers = false;
tf_options->align_corners = true;
ASSERT_TRUE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
tf_options->half_pixel_centers = false;
tf_options->align_corners = false;
ASSERT_TRUE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
}
TEST(Resize2DNearestNeighborOperationParserTest, TestIsSupported) {
auto context =
std::make_unique<StubTfLiteContext>(kTfLiteBuiltinResizeNearestNeighbor,
4,
1);
auto parser = NewOperationParser(context->registration());
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context =
std::make_unique<StubTfLiteContext>(kTfLiteBuiltinResizeNearestNeighbor,
3,
2);
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context =
std::make_unique<StubTfLiteContext>(kTfLiteBuiltinResizeNearestNeighbor,
3,
1);
TfLiteResizeNearestNeighborParams* tf_options =
static_cast<TfLiteResizeNearestNeighborParams*>(
context->node()->builtin_data);
tf_options->half_pixel_centers = true;
tf_options->align_corners = true;
ASSERT_TRUE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
tf_options->half_pixel_centers = true;
tf_options->align_corners = false;
ASSERT_TRUE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
tf_options->half_pixel_centers = false;
tf_options->align_corners = true;
ASSERT_TRUE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
tf_options->half_pixel_centers = false;
tf_options->align_corners = false;
ASSERT_TRUE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
}
TEST(SliceOperationParserTest, TestIsSupported) {
auto context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinSlice,
3,
3);
auto parser = NewOperationParser(context->registration());
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinSlice,
2,
2);
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinSlice,
2,
3);
context->tensor(1)->dims->size = 2;
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinSlice,
2,
3);
ASSERT_TRUE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
}
TEST(SoftmaxOperationParserTest, TestIsSupported) {
auto context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinSoftmax,
3,
1);
auto parser = NewOperationParser(context->registration());
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinSoftmax,
2,
2);
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinSoftmax,
2,
1);
TfLiteSoftmaxParams* tf_options =
static_cast<TfLiteSoftmaxParams*>(context->node()->builtin_data);
  tf_options->beta = 2;
  // beta != 1 is expected to be rejected by the parser.
  ASSERT_FALSE(
      parser
          ->IsSupported(context.get(), context->node(), context->registration())
          .ok());
  tf_options->beta = 1;
ASSERT_TRUE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
}
TEST(SplitOperationParserTest, TestIsSupported) {
auto context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinSplit,
1,
1);
auto parser = NewOperationParser(context->registration());
ASSERT_TRUE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
}
TEST(SplitVOperationParserTest, TestIsSupported) {
auto context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinSplitV,
1,
1);
auto parser = NewOperationParser(context->registration());
ASSERT_TRUE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
}
TEST(StridedSliceOperationParserTest, TestIsSupported) {
auto context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinStridedSlice,
5,
4);
auto parser = NewOperationParser(context->registration());
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinStridedSlice,
4,
3);
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinStridedSlice,
4,
4);
context->tensor(1)->dims->size = 2;
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinStridedSlice,
4,
5);
ASSERT_TRUE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
}
TEST(TileOperationParserTest, TestIsSupported) {
auto context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinTile,
1,
2);
auto parser = NewOperationParser(context->registration());
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinTile,
1,
1);
ASSERT_TRUE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
}
TEST(TransposeConvBuiltinOperationParserTest, TestIsSupported) {
auto context =
std::make_unique<StubTfLiteContext>(kTfLiteBuiltinTransposeConv,
4,
2);
auto parser = NewOperationParser(context->registration());
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinTransposeConv,
3,
3);
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinTransposeConv,
3,
2);
TfLiteTransposeConvParams* tf_options =
static_cast<TfLiteTransposeConvParams*>(context->node()->builtin_data);
tf_options->stride_width = 0;
tf_options->stride_height = 1;
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
tf_options->stride_width = 1;
tf_options->stride_height = 1;
ASSERT_TRUE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
}
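// Custom Convolution2DTransposeBias: stride params come from
// custom_initial_data; zero strides are rejected.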
TEST(TransposeConvCustomOperationParserTest, TestIsSupported) {
auto context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinCustom,
1,
1);
context->registration()->custom_name = "Convolution2DTransposeBias";
auto parser = NewOperationParser(context->registration());
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinCustom,
1,
2);
context->registration()->custom_name = "Convolution2DTransposeBias";
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinCustom,
1,
2);
context->registration()->custom_name = "Convolution2DTransposeBias";
TfLiteTransposeConvParams tf_options;
context->node()->custom_initial_data = &tf_options;
tf_options.stride_width = 0;
tf_options.stride_height = 1;
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
tf_options.stride_width = 1;
tf_options.stride_height = 1;
ASSERT_TRUE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
}
TEST(TransposeOperationParserTest, TestIsSupported) {
auto context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinTranspose,
5,
1);
auto parser = NewOperationParser(context->registration());
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinTranspose,
4,
2);
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinTranspose,
4,
1);
ASSERT_TRUE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
}
TEST(Unpooling2DOperationParserTest, TestIsSupported) {
auto context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinCustom,
1,
1);
context->registration()->custom_name = "MaxUnpooling2D";
auto parser = NewOperationParser(context->registration());
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinCustom,
1,
2);
context->registration()->custom_name = "MaxUnpooling2D";
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinCustom,
1,
2);
context->registration()->custom_name = "MaxUnpooling2D";
TfLitePoolParams tf_options;
context->node()->custom_initial_data = &tf_options;
tf_options.filter_height = 0;
tf_options.filter_width = 0;
tf_options.stride_width = 0;
tf_options.stride_height = 0;
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
tf_options.filter_height = 0;
tf_options.filter_width = 1;
tf_options.stride_width = 1;
tf_options.stride_height = 1;
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
tf_options.filter_height = 1;
tf_options.filter_width = 1;
tf_options.stride_width = 0;
tf_options.stride_height = 1;
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
tf_options.filter_height = 1;
tf_options.filter_width = 1;
tf_options.stride_width = 1;
tf_options.stride_height = 1;
ASSERT_TRUE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
}
TEST(MeanOperationParserTest, TestIsSupported) {
auto context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinMean,
1,
3);
auto parser = NewOperationParser(context->registration());
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinMean,
1,
2);
context->tensor(2)->allocation_type = kTfLiteArenaRw;
context->tensor(2)->type = kTfLiteInt32;
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinMean,
1,
2);
context->tensor(2)->allocation_type = kTfLiteMmapRo;
context->tensor(2)->type = kTfLiteFloat32;
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinMean,
1,
2);
context->tensor(2)->allocation_type = kTfLiteMmapRo;
context->tensor(2)->type = kTfLiteInt32;
ASSERT_TRUE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
}
TEST(CumsumOperationParserTest, TestIsSupported) {
auto context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinCumsum,
1,
3);
context->tensor(2)->type = kTfLiteFloat32;
auto parser = NewOperationParser(context->registration());
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinCumsum,
1,
2);
context->tensor(2)->type = kTfLiteFloat32;
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context->tensor(1)->type = kTfLiteInt32;
context->tensor(2)->type = kTfLiteInt32;
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context->tensor(1)->type = kTfLiteFloat32;
context->tensor(2)->type = kTfLiteInt32;
context->tensor(2)->allocation_type = kTfLiteMmapRo;
ASSERT_TRUE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
}
TEST(OneHotOperationParserTest, TestIsSupported) {
auto context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinOneHot,
1,
4);
auto parser = NewOperationParser(context->registration());
auto status = parser->IsSupported(context.get(), context->node(),
context->registration());
context->tensor(1)->dims->data[1] = 2;
context->tensor(1)->dims->data[2] = 2;
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context->tensor(1)->type = kTfLiteInt32;
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
auto* params =
reinterpret_cast<TfLiteOneHotParams*>(malloc(sizeof(TfLiteOneHotParams)));
params->axis = -1;
if (context->node(1)->builtin_data) {
free(context->node(1)->builtin_data);
}
context->node(1)->builtin_data = params;
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context->tensor(1)->dims->data[1] = 1;
context->tensor(1)->dims->data[2] = 1;
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
TfLiteIntArrayFree(context->tensor(3)->dims);
context->tensor(3)->dims = TfLiteIntArrayCreate(1);
context->tensor(3)->dims->data[0] = 1;
context->tensor(3)->allocation_type = kTfLiteMmapRo;
TfLiteIntArrayFree(context->tensor(4)->dims);
context->tensor(4)->dims = TfLiteIntArrayCreate(1);
context->tensor(4)->dims->data[0] = 1;
context->tensor(4)->allocation_type = kTfLiteMmapRo;
params->axis =
context->tensor(1)->dims->data[context->tensor(1)->dims->size - 1];
context->node(1)->builtin_data = params;
ASSERT_TRUE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context->tensor(1)->dims->data[0] = 2;
ASSERT_TRUE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
}
TEST(SelectV2OperationParserTest, TestIsSupported) {
auto context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinSelectV2,
1,
3);
auto parser = NewOperationParser(context->registration());
auto status = parser->IsSupported(context.get(), context->node(),
context->registration());
context->tensor(1)->dims->data[0] = 1;
context->tensor(1)->dims->data[1] = 2;
context->tensor(1)->dims->data[2] = 1;
context->tensor(1)->dims->data[3] = 4;
context->tensor(4)->dims->data[0] = 1;
context->tensor(4)->dims->data[1] = 2;
context->tensor(4)->dims->data[2] = 3;
context->tensor(4)->dims->data[3] = 4;
context->tensor(1)->type = kTfLiteInt32;
context->tensor(2)->type = kTfLiteInt32;
context->tensor(3)->type = kTfLiteInt32;
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context->tensor(1)->type = kTfLiteFloat32;
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context->tensor(2)->type = kTfLiteFloat32;
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
context->tensor(3)->type = kTfLiteFloat32;
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
TfLiteIntArrayFree(context->tensor(2)->dims);
context->tensor(2)->dims = TfLiteIntArrayCreate(2);
context->tensor(2)->dims->data[0] = 2;
context->tensor(2)->dims->data[1] = 2;
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
TfLiteIntArrayFree(context->tensor(2)->dims);
context->tensor(2)->dims = TfLiteIntArrayCreate(1);
context->tensor(2)->dims->data[0] = 1;
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
TfLiteIntArrayFree(context->tensor(3)->dims);
context->tensor(3)->dims = TfLiteIntArrayCreate(2);
context->tensor(3)->dims->data[0] = 2;
context->tensor(3)->dims->data[1] = 2;
ASSERT_FALSE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
TfLiteIntArrayFree(context->tensor(3)->dims);
context->tensor(3)->dims = TfLiteIntArrayCreate(4);
for (int i = 0; i < context->tensor(4)->dims->size; ++i) {
context->tensor(3)->dims->data[i] = context->tensor(4)->dims->data[i];
}
ASSERT_TRUE(
parser
->IsSupported(context.get(), context->node(), context->registration())
.ok());
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/common/model_builder.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/common/model_builder_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
740270e8-8ef5-4337-9da2-121238a9b2cd | cpp | google/quiche | hpack_encoder | quiche/http2/hpack/hpack_encoder.cc | quiche/http2/hpack/hpack_encoder_test.cc | #include "quiche/http2/hpack/hpack_encoder.h"
#include <algorithm>
#include <cstddef>
#include <limits>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/strings/str_split.h"
#include "absl/strings/string_view.h"
#include "quiche/http2/hpack/hpack_constants.h"
#include "quiche/http2/hpack/hpack_header_table.h"
#include "quiche/http2/hpack/hpack_output_stream.h"
#include "quiche/http2/hpack/huffman/hpack_huffman_encoder.h"
#include "quiche/common/http/http_header_block.h"
#include "quiche/common/platform/api/quiche_bug_tracker.h"
#include "quiche/common/platform/api/quiche_logging.h"
namespace spdy {
class HpackEncoder::RepresentationIterator {
public:
RepresentationIterator(const Representations& pseudo_headers,
const Representations& regular_headers)
: pseudo_begin_(pseudo_headers.begin()),
pseudo_end_(pseudo_headers.end()),
regular_begin_(regular_headers.begin()),
regular_end_(regular_headers.end()) {}
explicit RepresentationIterator(const Representations& headers)
: pseudo_begin_(headers.begin()),
pseudo_end_(headers.end()),
regular_begin_(headers.end()),
regular_end_(headers.end()) {}
bool HasNext() {
return pseudo_begin_ != pseudo_end_ || regular_begin_ != regular_end_;
}
const Representation Next() {
if (pseudo_begin_ != pseudo_end_) {
return *pseudo_begin_++;
} else {
return *regular_begin_++;
}
}
private:
Representations::const_iterator pseudo_begin_;
Representations::const_iterator pseudo_end_;
Representations::const_iterator regular_begin_;
Representations::const_iterator regular_end_;
};
namespace {
void NoOpListener(absl::string_view /* name */, absl::string_view /* value */) {}
bool DefaultPolicy(absl::string_view name, absl::string_view /* value */) {
if (name.empty()) {
return false;
}
if (name[0] == kPseudoHeaderPrefix) {
return name == ":authority";
}
return true;
}
}
HpackEncoder::HpackEncoder()
: output_stream_(),
min_table_size_setting_received_(std::numeric_limits<size_t>::max()),
listener_(NoOpListener),
should_index_(DefaultPolicy),
enable_compression_(true),
should_emit_table_size_(false),
crumble_cookies_(true) {}
HpackEncoder::~HpackEncoder() = default;
std::string HpackEncoder::EncodeHeaderBlock(
const quiche::HttpHeaderBlock& header_set) {
Representations pseudo_headers;
Representations regular_headers;
bool found_cookie = false;
for (const auto& header : header_set) {
if (!found_cookie && header.first == "cookie") {
found_cookie = true;
if (crumble_cookies_) {
CookieToCrumbs(header, ®ular_headers);
} else {
DecomposeRepresentation(header, ®ular_headers);
}
} else if (!header.first.empty() &&
header.first[0] == kPseudoHeaderPrefix) {
DecomposeRepresentation(header, &pseudo_headers);
} else {
DecomposeRepresentation(header, ®ular_headers);
}
}
RepresentationIterator iter(pseudo_headers, regular_headers);
return EncodeRepresentations(&iter);
}
void HpackEncoder::ApplyHeaderTableSizeSetting(size_t size_setting) {
if (size_setting == header_table_.settings_size_bound()) {
return;
}
if (size_setting < header_table_.settings_size_bound()) {
min_table_size_setting_received_ =
std::min(size_setting, min_table_size_setting_received_);
}
header_table_.SetSettingsHeaderTableSize(size_setting);
should_emit_table_size_ = true;
}
std::string HpackEncoder::EncodeRepresentations(RepresentationIterator* iter) {
MaybeEmitTableSize();
while (iter->HasNext()) {
const auto header = iter->Next();
listener_(header.first, header.second);
if (enable_compression_) {
size_t index =
header_table_.GetByNameAndValue(header.first, header.second);
if (index != kHpackEntryNotFound) {
EmitIndex(index);
} else if (should_index_(header.first, header.second)) {
EmitIndexedLiteral(header);
} else {
EmitNonIndexedLiteral(header, enable_compression_);
}
} else {
EmitNonIndexedLiteral(header, enable_compression_);
}
}
return output_stream_.TakeString();
}
void HpackEncoder::EmitIndex(size_t index) {
QUICHE_DVLOG(2) << "Emitting index " << index;
output_stream_.AppendPrefix(kIndexedOpcode);
output_stream_.AppendUint32(index);
}
void HpackEncoder::EmitIndexedLiteral(const Representation& representation) {
QUICHE_DVLOG(2) << "Emitting indexed literal: (" << representation.first
<< ", " << representation.second << ")";
output_stream_.AppendPrefix(kLiteralIncrementalIndexOpcode);
EmitLiteral(representation);
header_table_.TryAddEntry(representation.first, representation.second);
}
void HpackEncoder::EmitNonIndexedLiteral(const Representation& representation,
bool enable_compression) {
QUICHE_DVLOG(2) << "Emitting nonindexed literal: (" << representation.first
<< ", " << representation.second << ")";
output_stream_.AppendPrefix(kLiteralNoIndexOpcode);
size_t name_index = header_table_.GetByName(representation.first);
if (enable_compression && name_index != kHpackEntryNotFound) {
output_stream_.AppendUint32(name_index);
} else {
output_stream_.AppendUint32(0);
EmitString(representation.first);
}
EmitString(representation.second);
}
void HpackEncoder::EmitLiteral(const Representation& representation) {
size_t name_index = header_table_.GetByName(representation.first);
if (name_index != kHpackEntryNotFound) {
output_stream_.AppendUint32(name_index);
} else {
output_stream_.AppendUint32(0);
EmitString(representation.first);
}
EmitString(representation.second);
}
void HpackEncoder::EmitString(absl::string_view str) {
size_t encoded_size =
enable_compression_ ? http2::HuffmanSize(str) : str.size();
if (encoded_size < str.size()) {
QUICHE_DVLOG(2) << "Emitted Huffman-encoded string of length "
<< encoded_size;
output_stream_.AppendPrefix(kStringLiteralHuffmanEncoded);
output_stream_.AppendUint32(encoded_size);
http2::HuffmanEncode(str, encoded_size, output_stream_.MutableString());
} else {
QUICHE_DVLOG(2) << "Emitted literal string of length " << str.size();
output_stream_.AppendPrefix(kStringLiteralIdentityEncoded);
output_stream_.AppendUint32(str.size());
output_stream_.AppendBytes(str);
}
}
void HpackEncoder::MaybeEmitTableSize() {
if (!should_emit_table_size_) {
return;
}
const size_t current_size = CurrentHeaderTableSizeSetting();
QUICHE_DVLOG(1) << "MaybeEmitTableSize current_size=" << current_size;
QUICHE_DVLOG(1) << "MaybeEmitTableSize min_table_size_setting_received_="
<< min_table_size_setting_received_;
if (min_table_size_setting_received_ < current_size) {
output_stream_.AppendPrefix(kHeaderTableSizeUpdateOpcode);
output_stream_.AppendUint32(min_table_size_setting_received_);
}
output_stream_.AppendPrefix(kHeaderTableSizeUpdateOpcode);
output_stream_.AppendUint32(current_size);
min_table_size_setting_received_ = std::numeric_limits<size_t>::max();
should_emit_table_size_ = false;
}
void HpackEncoder::CookieToCrumbs(const Representation& cookie,
Representations* out) {
absl::string_view cookie_value = cookie.second;
absl::string_view::size_type first = cookie_value.find_first_not_of(" \t");
absl::string_view::size_type last = cookie_value.find_last_not_of(" \t");
if (first == absl::string_view::npos) {
cookie_value = absl::string_view();
} else {
cookie_value = cookie_value.substr(first, (last - first) + 1);
}
for (size_t pos = 0;;) {
size_t end = cookie_value.find(';', pos);
if (end == absl::string_view::npos) {
out->push_back(std::make_pair(cookie.first, cookie_value.substr(pos)));
break;
}
out->push_back(
std::make_pair(cookie.first, cookie_value.substr(pos, end - pos)));
pos = end + 1;
if (pos != cookie_value.size() && cookie_value[pos] == ' ') {
pos++;
}
}
}
void HpackEncoder::DecomposeRepresentation(const Representation& header_field,
Representations* out) {
std::vector<absl::string_view> pieces =
absl::StrSplit(header_field.second, '\0');
out->reserve(pieces.size());
for (absl::string_view piece : pieces) {
out->push_back(std::make_pair(header_field.first, piece));
}
}
class HpackEncoder::Encoderator : public ProgressiveEncoder {
public:
Encoderator(const quiche::HttpHeaderBlock& header_set, HpackEncoder* encoder);
Encoderator(const Representations& representations, HpackEncoder* encoder);
Encoderator(const Encoderator&) = delete;
Encoderator& operator=(const Encoderator&) = delete;
bool HasNext() const override { return has_next_; }
std::string Next(size_t max_encoded_bytes) override;
private:
HpackEncoder* encoder_;
std::unique_ptr<RepresentationIterator> header_it_;
Representations pseudo_headers_;
Representations regular_headers_;
bool has_next_;
};
HpackEncoder::Encoderator::Encoderator(
const quiche::HttpHeaderBlock& header_set, HpackEncoder* encoder)
: encoder_(encoder), has_next_(true) {
bool found_cookie = false;
for (const auto& header : header_set) {
if (!found_cookie && header.first == "cookie") {
found_cookie = true;
if (encoder_->crumble_cookies_) {
CookieToCrumbs(header, ®ular_headers_);
} else {
DecomposeRepresentation(header, ®ular_headers_);
}
} else if (!header.first.empty() &&
header.first[0] == kPseudoHeaderPrefix) {
DecomposeRepresentation(header, &pseudo_headers_);
} else {
DecomposeRepresentation(header, ®ular_headers_);
}
}
header_it_ = std::make_unique<RepresentationIterator>(pseudo_headers_,
regular_headers_);
encoder_->MaybeEmitTableSize();
}
HpackEncoder::Encoderator::Encoderator(const Representations& representations,
HpackEncoder* encoder)
: encoder_(encoder), has_next_(true) {
for (const auto& header : representations) {
if (header.first == "cookie") {
if (encoder_->crumble_cookies_) {
CookieToCrumbs(header, ®ular_headers_);
} else {
DecomposeRepresentation(header, ®ular_headers_);
}
} else if (!header.first.empty() &&
header.first[0] == kPseudoHeaderPrefix) {
pseudo_headers_.push_back(header);
} else {
regular_headers_.push_back(header);
}
}
header_it_ = std::make_unique<RepresentationIterator>(pseudo_headers_,
regular_headers_);
encoder_->MaybeEmitTableSize();
}
std::string HpackEncoder::Encoderator::Next(size_t max_encoded_bytes) {
QUICHE_BUG_IF(spdy_bug_61_1, !has_next_)
<< "Encoderator::Next called with nothing left to encode.";
const bool enable_compression = encoder_->enable_compression_;
while (header_it_->HasNext() &&
encoder_->output_stream_.size() <= max_encoded_bytes) {
const Representation header = header_it_->Next();
encoder_->listener_(header.first, header.second);
if (enable_compression) {
size_t index = encoder_->header_table_.GetByNameAndValue(header.first,
header.second);
if (index != kHpackEntryNotFound) {
encoder_->EmitIndex(index);
} else if (encoder_->should_index_(header.first, header.second)) {
encoder_->EmitIndexedLiteral(header);
} else {
encoder_->EmitNonIndexedLiteral(header, enable_compression);
}
} else {
encoder_->EmitNonIndexedLiteral(header, enable_compression);
}
}
has_next_ = encoder_->output_stream_.size() > max_encoded_bytes;
return encoder_->output_stream_.BoundedTakeString(max_encoded_bytes);
}
std::unique_ptr<HpackEncoder::ProgressiveEncoder> HpackEncoder::EncodeHeaderSet(
const quiche::HttpHeaderBlock& header_set) {
return std::make_unique<Encoderator>(header_set, this);
}
std::unique_ptr<HpackEncoder::ProgressiveEncoder>
HpackEncoder::EncodeRepresentations(const Representations& representations) {
return std::make_unique<Encoderator>(representations, this);
}
} | #include "quiche/http2/hpack/hpack_encoder.h"
#include <cstddef>
#include <cstdint>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/strings/string_view.h"
#include "quiche/http2/hpack/hpack_constants.h"
#include "quiche/http2/hpack/hpack_entry.h"
#include "quiche/http2/hpack/hpack_header_table.h"
#include "quiche/http2/hpack/hpack_output_stream.h"
#include "quiche/http2/hpack/hpack_static_table.h"
#include "quiche/http2/hpack/huffman/hpack_huffman_encoder.h"
#include "quiche/http2/test_tools/http2_random.h"
#include "quiche/common/http/http_header_block.h"
#include "quiche/common/platform/api/quiche_logging.h"
#include "quiche/common/platform/api/quiche_test.h"
#include "quiche/common/quiche_simple_arena.h"
namespace spdy {
namespace test {
class HpackHeaderTablePeer {
public:
explicit HpackHeaderTablePeer(HpackHeaderTable* table) : table_(table) {}
const HpackEntry* GetFirstStaticEntry() const {
return &table_->static_entries_.front();
}
HpackHeaderTable::DynamicEntryTable* dynamic_entries() {
return &table_->dynamic_entries_;
}
private:
HpackHeaderTable* table_;
};
class HpackEncoderPeer {
public:
typedef HpackEncoder::Representation Representation;
typedef HpackEncoder::Representations Representations;
explicit HpackEncoderPeer(HpackEncoder* encoder) : encoder_(encoder) {}
bool compression_enabled() const { return encoder_->enable_compression_; }
HpackHeaderTable* table() { return &encoder_->header_table_; }
HpackHeaderTablePeer table_peer() { return HpackHeaderTablePeer(table()); }
void EmitString(absl::string_view str) { encoder_->EmitString(str); }
void TakeString(std::string* out) {
*out = encoder_->output_stream_.TakeString();
}
static void CookieToCrumbs(absl::string_view cookie,
std::vector<absl::string_view>* out) {
Representations tmp;
HpackEncoder::CookieToCrumbs(std::make_pair("", cookie), &tmp);
out->clear();
for (size_t i = 0; i != tmp.size(); ++i) {
out->push_back(tmp[i].second);
}
}
static void DecomposeRepresentation(absl::string_view value,
std::vector<absl::string_view>* out) {
Representations tmp;
HpackEncoder::DecomposeRepresentation(std::make_pair("foobar", value),
&tmp);
out->clear();
for (size_t i = 0; i != tmp.size(); ++i) {
out->push_back(tmp[i].second);
}
}
static std::string EncodeHeaderBlock(
HpackEncoder* encoder, const quiche::HttpHeaderBlock& header_set) {
return encoder->EncodeHeaderBlock(header_set);
}
static bool EncodeIncremental(HpackEncoder* encoder,
const quiche::HttpHeaderBlock& header_set,
std::string* output) {
std::unique_ptr<HpackEncoder::ProgressiveEncoder> encoderator =
encoder->EncodeHeaderSet(header_set);
http2::test::Http2Random random;
std::string output_buffer = encoderator->Next(random.UniformInRange(0, 16));
while (encoderator->HasNext()) {
std::string second_buffer =
encoderator->Next(random.UniformInRange(0, 16));
output_buffer.append(second_buffer);
}
*output = std::move(output_buffer);
return true;
}
static bool EncodeRepresentations(HpackEncoder* encoder,
const Representations& representations,
std::string* output) {
std::unique_ptr<HpackEncoder::ProgressiveEncoder> encoderator =
encoder->EncodeRepresentations(representations);
http2::test::Http2Random random;
std::string output_buffer = encoderator->Next(random.UniformInRange(0, 16));
while (encoderator->HasNext()) {
std::string second_buffer =
encoderator->Next(random.UniformInRange(0, 16));
output_buffer.append(second_buffer);
}
*output = std::move(output_buffer);
return true;
}
private:
HpackEncoder* encoder_;
};
}
namespace {
using testing::ElementsAre;
using testing::Pair;
const size_t kStaticEntryIndex = 1;
enum EncodeStrategy {
kDefault,
kIncremental,
kRepresentations,
};
class HpackEncoderTest
: public quiche::test::QuicheTestWithParam<EncodeStrategy> {
protected:
typedef test::HpackEncoderPeer::Representations Representations;
HpackEncoderTest()
: peer_(&encoder_),
static_(peer_.table_peer().GetFirstStaticEntry()),
dynamic_table_insertions_(0),
headers_storage_(1024 /* block size */),
strategy_(GetParam()) {}
void SetUp() override {
key_1_ = peer_.table()->TryAddEntry("key1", "value1");
key_1_index_ = dynamic_table_insertions_++;
key_2_ = peer_.table()->TryAddEntry("key2", "value2");
key_2_index_ = dynamic_table_insertions_++;
cookie_a_ = peer_.table()->TryAddEntry("cookie", "a=bb");
cookie_a_index_ = dynamic_table_insertions_++;
cookie_c_ = peer_.table()->TryAddEntry("cookie", "c=dd");
cookie_c_index_ = dynamic_table_insertions_++;
peer_.table()->SetMaxSize(peer_.table()->size());
QUICHE_CHECK_EQ(kInitialDynamicTableSize, peer_.table()->size());
}
void SaveHeaders(absl::string_view name, absl::string_view value) {
absl::string_view n(headers_storage_.Memdup(name.data(), name.size()),
name.size());
absl::string_view v(headers_storage_.Memdup(value.data(), value.size()),
value.size());
headers_observed_.push_back(std::make_pair(n, v));
}
void ExpectIndex(size_t index) {
expected_.AppendPrefix(kIndexedOpcode);
expected_.AppendUint32(index);
}
void ExpectIndexedLiteral(size_t key_index, absl::string_view value) {
expected_.AppendPrefix(kLiteralIncrementalIndexOpcode);
expected_.AppendUint32(key_index);
ExpectString(&expected_, value);
}
void ExpectIndexedLiteral(absl::string_view name, absl::string_view value) {
expected_.AppendPrefix(kLiteralIncrementalIndexOpcode);
expected_.AppendUint32(0);
ExpectString(&expected_, name);
ExpectString(&expected_, value);
}
void ExpectNonIndexedLiteral(absl::string_view name,
absl::string_view value) {
expected_.AppendPrefix(kLiteralNoIndexOpcode);
expected_.AppendUint32(0);
ExpectString(&expected_, name);
ExpectString(&expected_, value);
}
void ExpectNonIndexedLiteralWithNameIndex(size_t key_index,
absl::string_view value) {
expected_.AppendPrefix(kLiteralNoIndexOpcode);
expected_.AppendUint32(key_index);
ExpectString(&expected_, value);
}
void ExpectString(HpackOutputStream* stream, absl::string_view str) {
size_t encoded_size =
peer_.compression_enabled() ? http2::HuffmanSize(str) : str.size();
if (encoded_size < str.size()) {
expected_.AppendPrefix(kStringLiteralHuffmanEncoded);
expected_.AppendUint32(encoded_size);
http2::HuffmanEncode(str, encoded_size, stream->MutableString());
} else {
expected_.AppendPrefix(kStringLiteralIdentityEncoded);
expected_.AppendUint32(str.size());
expected_.AppendBytes(str);
}
}
void ExpectHeaderTableSizeUpdate(uint32_t size) {
expected_.AppendPrefix(kHeaderTableSizeUpdateOpcode);
expected_.AppendUint32(size);
}
Representations MakeRepresentations(
const quiche::HttpHeaderBlock& header_set) {
Representations r;
for (const auto& header : header_set) {
r.push_back(header);
}
return r;
}
void CompareWithExpectedEncoding(const quiche::HttpHeaderBlock& header_set) {
std::string actual_out;
std::string expected_out = expected_.TakeString();
switch (strategy_) {
case kDefault:
actual_out =
test::HpackEncoderPeer::EncodeHeaderBlock(&encoder_, header_set);
break;
case kIncremental:
EXPECT_TRUE(test::HpackEncoderPeer::EncodeIncremental(
&encoder_, header_set, &actual_out));
break;
case kRepresentations:
EXPECT_TRUE(test::HpackEncoderPeer::EncodeRepresentations(
&encoder_, MakeRepresentations(header_set), &actual_out));
break;
}
EXPECT_EQ(expected_out, actual_out);
}
void CompareWithExpectedEncoding(const Representations& representations) {
std::string actual_out;
std::string expected_out = expected_.TakeString();
EXPECT_TRUE(test::HpackEncoderPeer::EncodeRepresentations(
&encoder_, representations, &actual_out));
EXPECT_EQ(expected_out, actual_out);
}
size_t DynamicIndexToWireIndex(size_t index) {
return dynamic_table_insertions_ - index + kStaticTableSize;
}
HpackEncoder encoder_;
test::HpackEncoderPeer peer_;
const size_t kInitialDynamicTableSize = 4 * (10 + 32);
const HpackEntry* static_;
const HpackEntry* key_1_;
const HpackEntry* key_2_;
const HpackEntry* cookie_a_;
const HpackEntry* cookie_c_;
size_t key_1_index_;
size_t key_2_index_;
size_t cookie_a_index_;
size_t cookie_c_index_;
size_t dynamic_table_insertions_;
quiche::QuicheSimpleArena headers_storage_;
std::vector<std::pair<absl::string_view, absl::string_view>>
headers_observed_;
HpackOutputStream expected_;
const EncodeStrategy strategy_;
};
using HpackEncoderTestWithDefaultStrategy = HpackEncoderTest;
INSTANTIATE_TEST_SUITE_P(HpackEncoderTests, HpackEncoderTestWithDefaultStrategy,
::testing::Values(kDefault));
TEST_P(HpackEncoderTestWithDefaultStrategy, EncodeRepresentations) {
EXPECT_EQ(kInitialDynamicTableSize, encoder_.GetDynamicTableSize());
encoder_.SetHeaderListener(
[this](absl::string_view name, absl::string_view value) {
this->SaveHeaders(name, value);
});
const std::vector<std::pair<absl::string_view, absl::string_view>>
header_list = {{"cookie", "val1; val2;val3"},
{":path", "/home"},
{"accept", "text/html, text/plain,application/xml"},
{"cookie", "val4"},
{"withnul", absl::string_view("one\0two", 7)}};
ExpectNonIndexedLiteralWithNameIndex(peer_.table()->GetByName(":path"),
"/home");
ExpectIndexedLiteral(peer_.table()->GetByName("cookie"), "val1");
ExpectIndexedLiteral(peer_.table()->GetByName("cookie"), "val2");
ExpectIndexedLiteral(peer_.table()->GetByName("cookie"), "val3");
ExpectIndexedLiteral(peer_.table()->GetByName("accept"),
"text/html, text/plain,application/xml");
ExpectIndexedLiteral(peer_.table()->GetByName("cookie"), "val4");
ExpectIndexedLiteral("withnul", absl::string_view("one\0two", 7));
CompareWithExpectedEncoding(header_list);
EXPECT_THAT(
headers_observed_,
ElementsAre(Pair(":path", "/home"), Pair("cookie", "val1"),
Pair("cookie", "val2"), Pair("cookie", "val3"),
Pair("accept", "text/html, text/plain,application/xml"),
Pair("cookie", "val4"),
Pair("withnul", absl::string_view("one\0two", 7))));
EXPECT_GE(kInitialDynamicTableSize, encoder_.GetDynamicTableSize());
}
TEST_P(HpackEncoderTestWithDefaultStrategy, WithoutCookieCrumbling) {
EXPECT_EQ(kInitialDynamicTableSize, encoder_.GetDynamicTableSize());
encoder_.SetHeaderListener(
[this](absl::string_view name, absl::string_view value) {
this->SaveHeaders(name, value);
});
encoder_.DisableCookieCrumbling();
const std::vector<std::pair<absl::string_view, absl::string_view>>
header_list = {{"cookie", "val1; val2;val3"},
{":path", "/home"},
{"accept", "text/html, text/plain,application/xml"},
{"cookie", "val4"},
{"withnul", absl::string_view("one\0two", 7)}};
ExpectNonIndexedLiteralWithNameIndex(peer_.table()->GetByName(":path"),
"/home");
ExpectIndexedLiteral(peer_.table()->GetByName("cookie"), "val1; val2;val3");
ExpectIndexedLiteral(peer_.table()->GetByName("accept"),
"text/html, text/plain,application/xml");
ExpectIndexedLiteral(peer_.table()->GetByName("cookie"), "val4");
ExpectIndexedLiteral("withnul", absl::string_view("one\0two", 7));
CompareWithExpectedEncoding(header_list);
EXPECT_THAT(
headers_observed_,
ElementsAre(Pair(":path", "/home"), Pair("cookie", "val1; val2;val3"),
Pair("accept", "text/html, text/plain,application/xml"),
Pair("cookie", "val4"),
Pair("withnul", absl::string_view("one\0two", 7))));
EXPECT_GE(kInitialDynamicTableSize, encoder_.GetDynamicTableSize());
}
TEST_P(HpackEncoderTestWithDefaultStrategy, DynamicTableGrows) {
EXPECT_EQ(kInitialDynamicTableSize, encoder_.GetDynamicTableSize());
peer_.table()->SetMaxSize(4096);
encoder_.SetHeaderListener(
[this](absl::string_view name, absl::string_view value) {
this->SaveHeaders(name, value);
});
const std::vector<std::pair<absl::string_view, absl::string_view>>
header_list = {{"cookie", "val1; val2;val3"},
{":path", "/home"},
{"accept", "text/html, text/plain,application/xml"},
{"cookie", "val4"},
{"withnul", absl::string_view("one\0two", 7)}};
std::string out;
EXPECT_TRUE(test::HpackEncoderPeer::EncodeRepresentations(&encoder_,
header_list, &out));
EXPECT_FALSE(out.empty());
EXPECT_GT(encoder_.GetDynamicTableSize(), kInitialDynamicTableSize);
}
INSTANTIATE_TEST_SUITE_P(HpackEncoderTests, HpackEncoderTest,
::testing::Values(kDefault, kIncremental,
kRepresentations));
TEST_P(HpackEncoderTest, SingleDynamicIndex) {
encoder_.SetHeaderListener(
[this](absl::string_view name, absl::string_view value) {
this->SaveHeaders(name, value);
});
ExpectIndex(DynamicIndexToWireIndex(key_2_index_));
quiche::HttpHeaderBlock headers;
headers[key_2_->name()] = key_2_->value();
CompareWithExpectedEncoding(headers);
EXPECT_THAT(headers_observed_,
ElementsAre(Pair(key_2_->name(), key_2_->value())));
}
TEST_P(HpackEncoderTest, SingleStaticIndex) {
ExpectIndex(kStaticEntryIndex);
quiche::HttpHeaderBlock headers;
headers[static_->name()] = static_->value();
CompareWithExpectedEncoding(headers);
}
TEST_P(HpackEncoderTest, SingleStaticIndexTooLarge) {
peer_.table()->SetMaxSize(1);
ExpectIndex(kStaticEntryIndex);
quiche::HttpHeaderBlock headers;
headers[static_->name()] = static_->value();
CompareWithExpectedEncoding(headers);
EXPECT_EQ(0u, peer_.table_peer().dynamic_entries()->size());
}
TEST_P(HpackEncoderTest, SingleLiteralWithIndexName) {
ExpectIndexedLiteral(DynamicIndexToWireIndex(key_2_index_), "value3");
quiche::HttpHeaderBlock headers;
headers[key_2_->name()] = "value3";
CompareWithExpectedEncoding(headers);
HpackEntry* new_entry = peer_.table_peer().dynamic_entries()->front().get();
EXPECT_EQ(new_entry->name(), key_2_->name());
EXPECT_EQ(new_entry->value(), "value3");
}
TEST_P(HpackEncoderTest, SingleLiteralWithLiteralName) {
ExpectIndexedLiteral("key3", "value3");
quiche::HttpHeaderBlock headers;
headers["key3"] = "value3";
CompareWithExpectedEncoding(headers);
HpackEntry* new_entry = peer_.table_peer().dynamic_entries()->front().get();
EXPECT_EQ(new_entry->name(), "key3");
EXPECT_EQ(new_entry->value(), "value3");
}
TEST_P(HpackEncoderTest, SingleLiteralTooLarge) {
peer_.table()->SetMaxSize(1);
ExpectIndexedLiteral("key3", "value3");
quiche::HttpHeaderBlock headers;
headers["key3"] = "value3";
CompareWithExpectedEncoding(headers);
EXPECT_EQ(0u, peer_.table_peer().dynamic_entries()->size());
}
TEST_P(HpackEncoderTest, EmitThanEvict) {
ExpectIndex(DynamicIndexToWireIndex(key_1_index_));
ExpectIndexedLiteral("key3", "value3");
quiche::HttpHeaderBlock headers;
headers[key_1_->name()] = key_1_->value();
headers["key3"] = "value3";
CompareWithExpectedEncoding(headers);
}
TEST_P(HpackEncoderTest, CookieHeaderIsCrumbled) {
ExpectIndex(DynamicIndexToWireIndex(cookie_a_index_));
ExpectIndex(DynamicIndexToWireIndex(cookie_c_index_));
ExpectIndexedLiteral(peer_.table()->GetByName("cookie"), "e=ff");
quiche::HttpHeaderBlock headers;
headers["cookie"] = "a=bb; c=dd; e=ff";
CompareWithExpectedEncoding(headers);
}
TEST_P(HpackEncoderTest, CookieHeaderIsNotCrumbled) {
encoder_.DisableCookieCrumbling();
ExpectIndexedLiteral(peer_.table()->GetByName("cookie"), "a=bb; c=dd; e=ff");
quiche::HttpHeaderBlock headers;
headers["cookie"] = "a=bb; c=dd; e=ff";
CompareWithExpectedEncoding(headers);
}
TEST_P(HpackEncoderTest, MultiValuedHeadersNotCrumbled) {
ExpectIndexedLiteral("foo", "bar, baz");
quiche::HttpHeaderBlock headers;
headers["foo"] = "bar, baz";
CompareWithExpectedEncoding(headers);
}
TEST_P(HpackEncoderTest, StringsDynamicallySelectHuffmanCoding) {
peer_.EmitString("feedbeef");
expected_.AppendPrefix(kStringLiteralHuffmanEncoded);
expected_.AppendUint32(6);
expected_.AppendBytes("\x94\xA5\x92\x32\x96_");
peer_.EmitString("@@@@@@");
expected_.AppendPrefix(kStringLiteralIdentityEncoded);
expected_.AppendUint32(6);
expected_.AppendBytes("@@@@@@");
std::string actual_out;
std::string expected_out = expected_.TakeString();
peer_.TakeString(&actual_out);
EXPECT_EQ(expected_out, actual_out);
}
TEST_P(HpackEncoderTest, EncodingWithoutCompression) {
encoder_.SetHeaderListener(
[this](absl::string_view name, absl::string_view value) {
this->SaveHeaders(name, value);
});
encoder_.DisableCompression();
ExpectNonIndexedLiteral(":path", "/index.html");
ExpectNonIndexedLiteral("cookie", "foo=bar");
ExpectNonIndexedLiteral("cookie", "baz=bing");
if (strategy_ == kRepresentations) {
ExpectNonIndexedLiteral("hello", std::string("goodbye\0aloha", 13));
} else {
ExpectNonIndexedLiteral("hello", "goodbye");
ExpectNonIndexedLiteral("hello", "aloha");
}
ExpectNonIndexedLiteral("multivalue", "value1, value2");
quiche::HttpHeaderBlock headers;
headers[":path"] = "/index.html";
headers["cookie"] = "foo=bar; baz=bing";
headers["hello"] = "goodbye";
headers.AppendValueOrAddHeader("hello", "aloha");
headers["multivalue"] = "value1, value2";
CompareWithExpectedEncoding(headers);
if (strategy_ == kRepresentations) {
EXPECT_THAT(
headers_observed_,
ElementsAre(Pair(":path", "/index.html"), Pair("cookie", "foo=bar"),
Pair("cookie", "baz=bing"),
Pair("hello", absl::string_view("goodbye\0aloha", 13)),
Pair("multivalue", "value1, value2")));
} else {
EXPECT_THAT(
headers_observed_,
ElementsAre(Pair(":path", "/index.html"), Pair("cookie", "foo=bar"),
Pair("cookie", "baz=bing"), Pair("hello", "goodbye"),
Pair("hello", "aloha"),
Pair("multivalue", "value1, value2")));
}
EXPECT_EQ(kInitialDynamicTableSize, encoder_.GetDynamicTableSize());
}
TEST_P(HpackEncoderTest, MultipleEncodingPasses) {
encoder_.SetHeaderListener(
[this](absl::string_view name, absl::string_view value) {
this->SaveHeaders(name, value);
});
{
quiche::HttpHeaderBlock headers;
headers["key1"] = "value1";
headers["cookie"] = "a=bb";
ExpectIndex(DynamicIndexToWireIndex(key_1_index_));
ExpectIndex(DynamicIndexToWireIndex(cookie_a_index_));
CompareWithExpectedEncoding(headers);
}
{
quiche::HttpHeaderBlock headers;
headers["key2"] = "value2";
headers["cookie"] = "c=dd; e=ff";
ExpectIndex(DynamicIndexToWireIndex(key_2_index_));
ExpectIndex(DynamicIndexToWireIndex(cookie_c_index_));
ExpectIndexedLiteral(peer_.table()->GetByName("cookie"), "e=ff");
dynamic_table_insertions_++;
CompareWithExpectedEncoding(headers);
}
{
quiche::HttpHeaderBlock headers;
headers["key2"] = "value2";
headers["cookie"] = "a=bb; b=cc; c=dd";
EXPECT_EQ(65u, DynamicIndexToWireIndex(key_2_index_));
ExpectIndex(DynamicIndexToWireIndex(key_2_index_));
EXPECT_EQ(64u, DynamicIndexToWireIndex(cookie_a_index_));
ExpectIndex(DynamicIndexToWireIndex(cookie_a_index_));
ExpectIndexedLiteral(peer_.table()->GetByName("cookie"), "b=cc");
dynamic_table_insertions_++;
ExpectIndex(DynamicIndexToWireIndex(cookie_c_index_));
CompareWithExpectedEncoding(headers);
}
EXPECT_THAT(headers_observed_,
ElementsAre(Pair("key1", "value1"),
Pair("cookie", "a=bb"),
Pair("key2", "value2"),
Pair("cookie", "c=dd"),
Pair("cookie", "e=ff"),
Pair("key2", "value2"),
Pair("cookie", "a=bb"),
Pair("cookie", "b=cc"),
Pair("cookie", "c=dd")));
}
TEST_P(HpackEncoderTest, PseudoHeadersFirst) {
quiche::HttpHeaderBlock headers;
headers[":path"] = "/spam/eggs.html";
headers[":authority"] = "www.example.com";
headers["-foo"] = "bar";
headers["foo"] = "bar";
headers["cookie"] = "c=dd";
ExpectNonIndexedLiteralWithNameIndex(peer_.table()->GetByName(":path"),
"/spam/eggs.html");
ExpectIndexedLiteral(peer_.table()->GetByName(":authority"),
"www.example.com");
ExpectIndexedLiteral("-foo", "bar");
ExpectIndexedLiteral("foo", "bar");
ExpectIndexedLiteral(peer_.table()->GetByName("cookie"), "c=dd");
CompareWithExpectedEncoding(headers);
}
TEST_P(HpackEncoderTest, CookieToCrumbs) {
test::HpackEncoderPeer peer(nullptr);
std::vector<absl::string_view> out;
peer.CookieToCrumbs(" foo=1;bar=2 ; bar=3; bing=4; ", &out);
EXPECT_THAT(out, ElementsAre("foo=1", "bar=2 ", "bar=3", " bing=4", ""));
peer.CookieToCrumbs(";;foo = bar ;; ;baz =bing", &out);
EXPECT_THAT(out, ElementsAre("", "", "foo = bar ", "", "", "baz =bing"));
peer.CookieToCrumbs("baz=bing; foo=bar; baz=bing", &out);
EXPECT_THAT(out, ElementsAre("baz=bing", "foo=bar", "baz=bing"));
peer.CookieToCrumbs("baz=bing", &out);
EXPECT_THAT(out, ElementsAre("baz=bing"));
peer.CookieToCrumbs("", &out);
EXPECT_THAT(out, ElementsAre(""));
peer.CookieToCrumbs("foo;bar; baz;baz;bing;", &out);
EXPECT_THAT(out, ElementsAre("foo", "bar", "baz", "baz", "bing", ""));
peer.CookieToCrumbs(" \t foo=1;bar=2 ; bar=3;\t ", &out);
EXPECT_THAT(out, ElementsAre("foo=1", "bar=2 ", "bar=3", ""));
peer.CookieToCrumbs(" \t foo=1;bar=2 ; bar=3 \t ", &out);
EXPECT_THAT(out, ElementsAre("foo=1", "bar=2 ", "bar=3"));
}
TEST_P(HpackEncoderTest, DecomposeRepresentation) {
test::HpackEncoderPeer peer(nullptr);
std::vector<absl::string_view> out;
peer.DecomposeRepresentation("", &out);
EXPECT_THAT(out, ElementsAre(""));
peer.DecomposeRepresentation("foobar", &out);
EXPECT_THAT(out, ElementsAre("foobar"));
peer.DecomposeRepresentation(absl::string_view("foo\0bar", 7), &out);
EXPECT_THAT(out, ElementsAre("foo", "bar"));
peer.DecomposeRepresentation(absl::string_view("\0foo\0bar", 8), &out);
EXPECT_THAT(out, ElementsAre("", "foo", "bar"));
peer.DecomposeRepresentation(absl::string_view("foo\0bar\0", 8), &out);
EXPECT_THAT(out, ElementsAre("foo", "bar", ""));
peer.DecomposeRepresentation(absl::string_view("\0foo\0bar\0", 9), &out);
EXPECT_THAT(out, ElementsAre("", "foo", "bar", ""));
}
TEST_P(HpackEncoderTest, CrumbleNullByteDelimitedValue) {
if (strategy_ == kRepresentations) {
return;
}
quiche::HttpHeaderBlock headers;
headers["spam"] = std::string("foo\0bar", 7);
ExpectIndexedLiteral("spam", "foo");
expected_.AppendPrefix(kLiteralIncrementalIndexOpcode);
expected_.AppendUint32(62);
expected_.AppendPrefix(kStringLiteralIdentityEncoded);
expected_.AppendUint32(3);
expected_.AppendBytes("bar");
CompareWithExpectedEncoding(headers);
}
TEST_P(HpackEncoderTest, HeaderTableSizeUpdate) {
encoder_.ApplyHeaderTableSizeSetting(1024);
ExpectHeaderTableSizeUpdate(1024);
ExpectIndexedLiteral("key3", "value3");
quiche::HttpHeaderBlock headers;
headers["key3"] = "value3";
CompareWithExpectedEncoding(headers);
HpackEntry* new_entry = peer_.table_peer().dynamic_entries()->front().get();
EXPECT_EQ(new_entry->name(), "key3");
EXPECT_EQ(new_entry->value(), "value3");
}
TEST_P(HpackEncoderTest, HeaderTableSizeUpdateWithMin) {
const size_t starting_size = peer_.table()->settings_size_bound();
encoder_.ApplyHeaderTableSizeSetting(starting_size - 2);
encoder_.ApplyHeaderTableSizeSetting(starting_size - 1);
ExpectHeaderTableSizeUpdate(starting_size - 2);
ExpectHeaderTableSizeUpdate(starting_size - 1);
ExpectIndexedLiteral("key3", "value3");
quiche::HttpHeaderBlock headers;
headers["key3"] = "value3";
CompareWithExpectedEncoding(headers);
HpackEntry* new_entry = peer_.table_peer().dynamic_entries()->front().get();
EXPECT_EQ(new_entry->name(), "key3");
EXPECT_EQ(new_entry->value(), "value3");
}
TEST_P(HpackEncoderTest, HeaderTableSizeUpdateWithExistingSize) {
encoder_.ApplyHeaderTableSizeSetting(peer_.table()->settings_size_bound());
ExpectIndexedLiteral("key3", "value3");
quiche::HttpHeaderBlock headers;
headers["key3"] = "value3";
CompareWithExpectedEncoding(headers);
HpackEntry* new_entry = peer_.table_peer().dynamic_entries()->front().get();
EXPECT_EQ(new_entry->name(), "key3");
EXPECT_EQ(new_entry->value(), "value3");
}
TEST_P(HpackEncoderTest, HeaderTableSizeUpdatesWithGreaterSize) {
const size_t starting_size = peer_.table()->settings_size_bound();
encoder_.ApplyHeaderTableSizeSetting(starting_size + 1);
encoder_.ApplyHeaderTableSizeSetting(starting_size + 2);
ExpectHeaderTableSizeUpdate(starting_size + 2);
ExpectIndexedLiteral("key3", "value3");
quiche::HttpHeaderBlock headers;
headers["key3"] = "value3";
CompareWithExpectedEncoding(headers);
HpackEntry* new_entry = peer_.table_peer().dynamic_entries()->front().get();
EXPECT_EQ(new_entry->name(), "key3");
EXPECT_EQ(new_entry->value(), "value3");
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/http2/hpack/hpack_encoder.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/http2/hpack/hpack_encoder_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
b31eb974-d584-438f-959b-88eafc6895b6 | cpp | tensorflow/tensorflow | profiler_factory | third_party/xla/third_party/tsl/tsl/profiler/lib/profiler_factory.cc | third_party/xla/third_party/tsl/tsl/profiler/lib/profiler_factory_test.cc | #include "tsl/profiler/lib/profiler_factory.h"
#include <memory>
#include <utility>
#include <vector>
#include "tsl/platform/mutex.h"
#include "tsl/profiler/lib/profiler_controller.h"
#include "tsl/profiler/lib/profiler_interface.h"
#include "tsl/profiler/protobuf/profiler_options.pb.h"
namespace tsl {
namespace profiler {
namespace {
mutex mu(LINKER_INITIALIZED);
std::vector<ProfilerFactory>* GetFactories() {
static auto factories = new std::vector<ProfilerFactory>();
return factories;
}
}
void RegisterProfilerFactory(ProfilerFactory factory) {
mutex_lock lock(mu);
GetFactories()->push_back(std::move(factory));
}
std::vector<std::unique_ptr<profiler::ProfilerInterface>> CreateProfilers(
const tensorflow::ProfileOptions& options) {
std::vector<std::unique_ptr<profiler::ProfilerInterface>> result;
mutex_lock lock(mu);
for (const auto& factory : *GetFactories()) {
auto profiler = factory(options);
if (profiler == nullptr) continue;
result.emplace_back(
std::make_unique<ProfilerController>(std::move(profiler)));
}
return result;
}
void ClearRegisteredProfilersForTest() {
mutex_lock lock(mu);
GetFactories()->clear();
}
}
} | #include "tsl/profiler/lib/profiler_factory.h"
#include <functional>
#include <utility>
#include "absl/memory/memory.h"
#include "absl/status/status.h"
#include "tsl/platform/macros.h"
#include "tsl/platform/test.h"
#include "tsl/profiler/lib/profiler_interface.h"
#include "tsl/profiler/protobuf/profiler_options.pb.h"
#include "tsl/profiler/protobuf/xplane.pb.h"
namespace tsl {
namespace profiler {
namespace {
class TestProfiler : public ProfilerInterface {
public:
absl::Status Start() override { return absl::OkStatus(); }
absl::Status Stop() override { return absl::OkStatus(); }
absl::Status CollectData(tensorflow::profiler::XSpace*) override {
return absl::OkStatus();
}
};
std::unique_ptr<ProfilerInterface> TestFactoryFunction(
const tensorflow::ProfileOptions& options) {
return absl::make_unique<TestProfiler>();
}
TEST(ProfilerFactoryTest, FactoryFunctionPointer) {
ClearRegisteredProfilersForTest();
RegisterProfilerFactory(&TestFactoryFunction);
auto profilers = CreateProfilers(tensorflow::ProfileOptions());
EXPECT_EQ(profilers.size(), 1);
}
TEST(ProfilerFactoryTest, FactoryLambda) {
ClearRegisteredProfilersForTest();
RegisterProfilerFactory([](const tensorflow::ProfileOptions& options) {
return absl::make_unique<TestProfiler>();
});
auto profilers = CreateProfilers(tensorflow::ProfileOptions());
EXPECT_EQ(profilers.size(), 1);
}
std::unique_ptr<ProfilerInterface> NullFactoryFunction(
const tensorflow::ProfileOptions& options) {
return nullptr;
}
TEST(ProfilerFactoryTest, FactoryReturnsNull) {
ClearRegisteredProfilersForTest();
RegisterProfilerFactory(&NullFactoryFunction);
auto profilers = CreateProfilers(tensorflow::ProfileOptions());
EXPECT_TRUE(profilers.empty());
}
class FactoryClass {
public:
explicit FactoryClass(void* ptr) : ptr_(ptr) {}
FactoryClass(const FactoryClass&) = default;
FactoryClass(FactoryClass&&) = default;
std::unique_ptr<ProfilerInterface> CreateProfiler(
const tensorflow::ProfileOptions& options) const {
return absl::make_unique<TestProfiler>();
}
private:
void* ptr_ TF_ATTRIBUTE_UNUSED = nullptr;
};
TEST(ProfilerFactoryTest, FactoryClassCapturedByLambda) {
ClearRegisteredProfilersForTest();
static int token = 42;
FactoryClass factory(&token);
RegisterProfilerFactory([factory = std::move(factory)](
const tensorflow::ProfileOptions& options) {
return factory.CreateProfiler(options);
});
auto profilers = CreateProfilers(tensorflow::ProfileOptions());
EXPECT_EQ(profilers.size(), 1);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/third_party/tsl/tsl/profiler/lib/profiler_factory.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/third_party/tsl/tsl/profiler/lib/profiler_factory_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
be5eb669-1f38-45f4-bdbc-69ad464cf683 | cpp | tensorflow/tensorflow | select_and_scatter_expander | third_party/xla/xla/service/select_and_scatter_expander.cc | third_party/xla/xla/service/select_and_scatter_expander_test.cc | #include "xla/service/select_and_scatter_expander.h"
#include <numeric>
#include <vector>
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/literal_util.h"
#include "xla/service/call_inliner.h"
namespace xla {
absl::StatusOr<HloInstruction*> SelectAndScatterExpander::ExpandInstruction(
HloInstruction* instruction) {
auto* computation = instruction->parent();
auto* sas = Cast<HloSelectAndScatterInstruction>(instruction);
auto* operand = sas->mutable_operand(0);
auto operand_shape = operand->shape();
auto* source = sas->mutable_operand(1);
auto* select = sas->select();
auto* init_value = sas->mutable_operand(2);
const auto iota_shape = ShapeUtil::ChangeElementType(operand_shape, S32);
const auto scalar_operand =
ShapeUtil::MakeScalarShape(operand->shape().element_type());
const auto scalar_iota =
ShapeUtil::MakeScalarShape(iota_shape.element_type());
const auto source_shape = source->shape();
const Shape iota_shape_reduced =
ShapeUtil::ChangeElementType(source_shape, S32);
std::vector<HloInstruction*> iotas;
iotas.reserve(operand_shape.rank());
for (int i = 0; i < operand_shape.rank(); ++i) {
iotas.push_back(
computation->AddInstruction(HloInstruction::CreateIota(iota_shape, i)));
}
HloComputation* new_comp = [&]() -> HloComputation* {
HloComputation::Builder builder(
absl::StrCat(select->name(), ".reduce_window"));
auto rhs_begin = static_cast<int64_t>(iotas.size() + 1);
auto first_iota_index = 1;
auto* neg_one = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32_t>(-1)));
auto* first_lhs_iota =
builder.AddInstruction(HloInstruction::CreateParameter(
first_iota_index, scalar_iota, "iota_lhs"));
auto* first_rhs_iota =
builder.AddInstruction(HloInstruction::CreateParameter(
first_iota_index + rhs_begin, scalar_iota, "iota_lhs"));
auto* lhs_first_in_window =
builder.AddInstruction(HloInstruction::CreateCompare(
sas->select()->root_instruction()->shape(), first_lhs_iota, neg_one,
Comparison::Direction::kNe, {}));
auto* rhs_first_in_window =
builder.AddInstruction(HloInstruction::CreateCompare(
sas->select()->root_instruction()->shape(), first_rhs_iota, neg_one,
Comparison::Direction::kNe, {}));
auto rhs_not_first_in_window = builder.AddInstruction(
HloInstruction::CreateUnary(sas->select()->root_instruction()->shape(),
HloOpcode::kNot, rhs_first_in_window));
auto* operand_lhs = builder.AddInstruction(
HloInstruction::CreateParameter(0, scalar_operand, "operand_lhs"));
auto* operand_rhs = builder.AddInstruction(HloInstruction::CreateParameter(
rhs_begin, scalar_operand, "operand_rhs"));
auto* call = builder.AddInstruction(
HloInstruction::CreateCall(sas->select()->root_instruction()->shape(),
{operand_lhs, operand_rhs}, sas->select()));
auto* pred = builder.AddInstruction(HloInstruction::CreateBinary(
call->shape(), HloOpcode::kAnd, call, lhs_first_in_window));
pred = builder.AddInstruction(HloInstruction::CreateBinary(
call->shape(), HloOpcode::kOr, pred, rhs_not_first_in_window));
std::vector<HloInstruction*> result_tuple;
result_tuple.push_back(builder.AddInstruction(HloInstruction::CreateTernary(
scalar_operand, HloOpcode::kSelect, pred, operand_lhs, operand_rhs)));
for (auto i = first_iota_index; i < rhs_begin; ++i) {
xla::HloInstruction *iota_lhs, *iota_rhs;
if (i == first_iota_index) {
iota_lhs = first_lhs_iota;
iota_rhs = first_rhs_iota;
} else {
iota_lhs = builder.AddInstruction(
HloInstruction::CreateParameter(i, scalar_iota, "iota_lhs"));
iota_rhs = builder.AddInstruction(HloInstruction::CreateParameter(
i + rhs_begin, scalar_iota, "iota_rhs"));
}
result_tuple.push_back(
builder.AddInstruction(HloInstruction::CreateTernary(
scalar_iota, HloOpcode::kSelect, pred, iota_lhs, iota_rhs)));
}
builder.AddInstruction(HloInstruction::CreateTuple(result_tuple));
auto* result = select->parent()->AddEmbeddedComputation(builder.Build());
if (!CallInliner::Inline(call).ok()) {
return nullptr;
}
return result;
}();
if (!new_comp) {
return nullptr;
}
auto num_reduce_values = iotas.size() + 1;
std::vector<HloInstruction*> ops;
ops.reserve(num_reduce_values);
ops.push_back(operand);
ops.insert(ops.end(), iotas.begin(), iotas.end());
auto* neg_one = computation->AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32_t>(-1)));
std::vector<HloInstruction*> reduce_init_values;
reduce_init_values.reserve(num_reduce_values);
reduce_init_values.push_back(init_value);
for (auto i = 0; i < iotas.size(); ++i) {
reduce_init_values.push_back(neg_one);
}
std::vector<xla::Shape> shapes;
shapes.reserve(num_reduce_values);
shapes.push_back(source->shape());
for (auto i = 0; i < iotas.size(); ++i) {
shapes.push_back(iota_shape_reduced);
}
auto* reduce_window =
computation->AddInstruction(HloInstruction::CreateReduceWindow(
ShapeUtil::MakeTupleShape(shapes), ops, reduce_init_values,
sas->window(), new_comp));
std::vector<HloInstruction*> iota_indices;
std::vector<int64_t> broadcasted_iota_dims;
broadcasted_iota_dims.reserve(iota_shape_reduced.rank() + 1);
broadcasted_iota_dims.insert(broadcasted_iota_dims.end(),
iota_shape_reduced.dimensions().begin(),
iota_shape_reduced.dimensions().end());
broadcasted_iota_dims.push_back(1);
auto broadcasted_iota_shape = ShapeUtil::MakeShape(
iota_shape_reduced.element_type(), broadcasted_iota_dims);
for (int i = 1; i < num_reduce_values; ++i) {
auto* element = computation->AddInstruction(
HloInstruction::CreateGetTupleElement(reduce_window, i));
iota_indices.push_back(computation->AddInstruction(
HloInstruction::CreateReshape(broadcasted_iota_shape, element)));
}
std::vector<int64_t> scatter_dims(operand->shape().rank());
std::iota(scatter_dims.begin(), scatter_dims.end(), 0);
auto* broadcasted_init_value = computation->AddInstruction(
HloInstruction::CreateBroadcast(instruction->shape(), init_value, {}));
std::vector<int64_t> concatenated_iotas_dims;
concatenated_iotas_dims.reserve(iota_indices.front()->shape().rank());
concatenated_iotas_dims.insert(concatenated_iotas_dims.end(),
broadcasted_iota_dims.begin(),
broadcasted_iota_dims.end());
concatenated_iotas_dims.back() = static_cast<int64_t>(iota_indices.size());
auto* indices = computation->AddInstruction(HloInstruction::CreateConcatenate(
ShapeUtil::MakeShape(iota_shape.element_type(), concatenated_iotas_dims),
iota_indices, iota_shape.rank()));
ScatterDimensionNumbers dim_nums =
HloScatterInstruction::MakeScatterDimNumbers(
          /*update_window_dims=*/{},
          /*inserted_window_dims=*/scatter_dims,
          /*scatter_dims_to_operand_dims=*/scatter_dims,
          /*index_vector_dim=*/source->shape().rank());
return computation->AddInstruction(HloInstruction::CreateScatter(
      sas->shape(), /*operand=*/broadcasted_init_value,
      /*scatter_indices=*/indices, /*updates=*/source,
      /*update_computation=*/sas->scatter(), /*scatter_dim_numbers=*/dim_nums,
      /*indices_are_sorted=*/false, /*unique_indices=*/false));
}
bool SelectAndScatterExpander::InstructionMatchesPattern(
HloInstruction* instruction) {
return instruction->opcode() == HloOpcode::kSelectAndScatter;
}
} | #include "xla/service/select_and_scatter_expander.h"
#include <memory>
#include <utility>
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/tests/hlo_test_base.h"
namespace xla {
namespace {
constexpr absl::string_view kModuleStr =
R"(HloModule R4F32OverlapSmall_module, entry_computation_layout={()->f32[4,5,1,1]{3,2,1,0}}
%ge_F32.v3 (lhs: f32[], rhs: f32[]) -> pred[] {
%lhs = f32[] parameter(0)
%rhs = f32[] parameter(1)
ROOT %greater-than-or-equal-to = pred[] compare(f32[] %lhs, f32[] %rhs), direction=GE, type=TOTALORDER
}
%add_F32.v3 (lhs.1: f32[], rhs.1: f32[]) -> f32[] {
%lhs.1 = f32[] parameter(0)
%rhs.1 = f32[] parameter(1)
ROOT %add = f32[] add(f32[] %lhs.1, f32[] %rhs.1)
}
ENTRY %R4F32OverlapSmall.v4 () -> f32[4,5,1,1] {
%constant = f32[4,5,1,1]{3,2,1,0} constant({ { { {7} }, { {2} }, { {5} }, { {3} }, { {8} } }, { { {3} }, { {8} }, { {9} }, { {3} }, { {4} } }, { { {1} }, { {5} }, { {7} }, { {5} }, { {6} } }, { { {0} }, { {6} }, { {2} }, { {10} }, { {2} } } })
%constant.1 = f32[2,2,1,1]{3,2,1,0} constant({ { { {2} }, { {6} } }, { { {3} }, { {1} } } })
%constant.2 = f32[] constant(0)
ROOT %select-and-scatter = f32[4,5,1,1]{3,2,1,0} select-and-scatter(f32[4,5,1,1]{3,2,1,0} %constant, f32[2,2,1,1]{3,2,1,0} %constant.1, f32[] %constant.2), window={size=2x3x1x1 stride=2x2x1x1}, select=%ge_F32.v3, scatter=%add_F32.v3
})";
class SelectAndScatterExpanderTest : public HloTestBase {
protected:
void ClearInstructionLayout(HloModule* module, absl::string_view inst_name) {
HloInstruction* inst = FindInstruction(module, inst_name);
inst->mutable_shape()->clear_layout();
}
};
TEST_F(SelectAndScatterExpanderTest, ReplacesSelectAndScatter) {
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kModuleStr));
RunAndFilecheckHloRewrite(kModuleStr, SelectAndScatterExpander(), R"(
CHECK-NOT: select-and-scatter
)");
}
TEST_F(SelectAndScatterExpanderTest, CreatesReduceAndScatter) {
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kModuleStr));
RunAndFilecheckHloRewrite(kModuleStr, SelectAndScatterExpander(), R"(
CHECK: reduce
CHECK: scatter
)");
}
}
} |
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/select_and_scatter_expander.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/select_and_scatter_expander_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
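A minimal usage sketch for the expander row above (illustrative only; it is not part of either repository file, and `module` stands for a VerifiedHloModule parsed elsewhere with the usual HloTestBase helpers):

// Hypothetical driver snippet, assuming a test fixture derived from HloTestBase.
SelectAndScatterExpander expander;
TF_ASSERT_OK_AND_ASSIGN(bool changed, expander.Run(module.get()));
EXPECT_TRUE(changed);  // The select-and-scatter op is rewritten into
                       // reduce-window + scatter, as the FileCheck tests verify.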
7652dcbf-c7f3-49b3-a1bf-6885ef98c58c | cpp | tensorflow/tensorflow | batchnorm_expander | third_party/xla/xla/service/batchnorm_expander.cc | third_party/xla/xla/service/batchnorm_expander_test.cc |
#include "xla/service/batchnorm_expander.h"
#include <cstdint>
#include <memory>
#include <optional>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "absl/functional/function_ref.h"
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/dfs_hlo_visitor_with_default.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/ir/hlo_sharding.h"
#include "xla/literal.h"
#include "xla/literal_util.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/status.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
using std::optional;
class BatchNormExpanderVisitor : public DfsHloRewriteVisitor {
public:
absl::Status HandleBatchNormTraining(HloInstruction* batch_norm) override;
absl::Status HandleBatchNormInference(HloInstruction* batch_norm) override;
absl::Status HandleBatchNormGrad(HloInstruction* batch_norm) override;
static bool Run(HloComputation* computation, bool rewrite_training_op,
bool rewrite_inference_op, bool rewrite_grad_op);
~BatchNormExpanderVisitor() override = default;
private:
explicit BatchNormExpanderVisitor(HloComputation* computation,
bool rewrite_training_op,
bool rewrite_inference_op,
bool rewrite_grad_op)
: computation_(computation),
rewrite_training_op_(rewrite_training_op),
rewrite_inference_op_(rewrite_inference_op),
rewrite_grad_op_(rewrite_grad_op) {}
HloComputation* GetOrCreateScalarAddComputation(
PrimitiveType primitive_type) {
HloComputation::Builder b("scalar_add_computation");
Shape shape = ShapeUtil::MakeShape(primitive_type, {});
auto scalar_lhs = b.AddInstruction(
HloInstruction::CreateParameter(0, shape, "scalar_lhs"));
auto scalar_rhs = b.AddInstruction(
HloInstruction::CreateParameter(1, shape, "scalar_rhs"));
auto scalar_op = b.AddInstruction(HloInstruction::CreateBinary(
shape, HloOpcode::kAdd, scalar_lhs, scalar_rhs));
return computation_->parent()->AddEmbeddedComputation(b.Build(scalar_op));
}
std::unique_ptr<HloInstruction> Rsqrt(HloInstruction* operand) {
return HloInstruction::CreateUnary(operand->shape(), HloOpcode::kRsqrt,
operand);
}
std::unique_ptr<HloInstruction> Mean(
HloInstruction* element_count, HloInstruction* operand,
absl::FunctionRef<HloInstruction*(std::unique_ptr<HloInstruction>)>
add_instruction) {
auto broadcast = add_instruction(HloInstruction::CreateBroadcast(
ShapeUtil::MakeStaticShape(operand->shape()), element_count, {}));
return HloInstruction::CreateBinary(operand->shape(), HloOpcode::kDivide,
operand, broadcast);
}
std::unique_ptr<HloInstruction> DynamicElementCountPerFeature(
HloInstruction* operand, int64_t feature_index,
absl::FunctionRef<HloInstruction*(std::unique_ptr<HloInstruction>)>
add_instruction) {
auto elements_per_feature_s32 = add_instruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32_t>(1)));
for (int64_t i = 0; i < operand->shape().rank(); ++i) {
if (i == feature_index) {
continue;
}
auto dynamic_dimension_size =
add_instruction(HloInstruction::CreateGetDimensionSize(
ShapeUtil::MakeShape(S32, {}), operand, i));
elements_per_feature_s32 = add_instruction(HloInstruction::CreateBinary(
ShapeUtil::MakeShape(S32, {}), HloOpcode::kMultiply,
dynamic_dimension_size, elements_per_feature_s32));
}
return HloInstruction::CreateConvert(
ShapeUtil::MakeShape(operand->shape().element_type(), {}),
elements_per_feature_s32);
}
HloComputation* computation_;
bool rewrite_training_op_;
bool rewrite_inference_op_;
bool rewrite_grad_op_;
};
}
bool BatchNormExpanderVisitor::Run(HloComputation* computation,
bool rewrite_training_op,
bool rewrite_inference_op,
bool rewrite_grad_op) {
BatchNormExpanderVisitor visitor(
computation,
rewrite_training_op,
rewrite_inference_op,
rewrite_grad_op);
TF_CHECK_OK(computation->Accept(&visitor));
return visitor.changed();
}
absl::Status BatchNormExpanderVisitor::HandleBatchNormTraining(
HloInstruction* batch_norm) {
if (!rewrite_training_op_) {
return absl::OkStatus();
}
std::vector<HloInstruction*> added_instructions;
auto add = [&](std::unique_ptr<HloInstruction> inst) {
HloInstruction* added_inst = computation_->AddInstruction(std::move(inst));
added_inst->set_metadata(batch_norm->metadata());
added_instructions.push_back(added_inst);
return added_inst;
};
auto add_binary = [&](const Shape& shape, const HloOpcode opcode,
HloInstruction* a, HloInstruction* b) {
return add(HloInstruction::CreateBinary(shape, opcode, a, b));
};
int64_t instruction_count_before = computation_->instruction_count();
HloInstruction* operand = batch_norm->mutable_operand(0);
const Shape operand_shape = operand->shape();
PrimitiveType ptype = operand_shape.element_type();
int64_t feature_index = batch_norm->feature_index();
HloInstruction* scale = batch_norm->mutable_operand(1);
HloInstruction* offset = batch_norm->mutable_operand(2);
const Shape feature_shape = scale->shape();
auto zero_literal = LiteralUtil::CreateR0(0.0f);
TF_ASSIGN_OR_RETURN(zero_literal, zero_literal.Convert(ptype));
auto zero = add(HloInstruction::CreateConstant(std::move(zero_literal)));
auto epsilon_literal = LiteralUtil::CreateR0(batch_norm->epsilon());
TF_ASSIGN_OR_RETURN(epsilon_literal, epsilon_literal.Convert(ptype));
Shape scalar_broadcast_shape = ShapeUtil::MakeStaticShape(operand_shape);
auto epsilon = add(HloInstruction::CreateBroadcast(
scalar_broadcast_shape,
add(HloInstruction::CreateConstant(std::move(epsilon_literal))), {}));
std::vector<int64_t> dimensions_without_feature;
const int64_t rank = operand_shape.rank();
dimensions_without_feature.reserve(rank - 1);
for (int64_t i = 0; i < rank; ++i) {
if (i != feature_index) {
dimensions_without_feature.push_back(i);
}
}
auto elements_per_feature =
add(DynamicElementCountPerFeature(operand, feature_index, add));
auto feature_broadcast = [&](HloInstruction* inst) -> HloInstruction* {
Shape feature_broadcast_shape = scalar_broadcast_shape;
feature_broadcast_shape.set_dynamic_dimension(
feature_index, inst->shape().is_dynamic_dimension(0));
return add(HloInstruction::CreateBroadcast(feature_broadcast_shape, inst,
{feature_index}));
};
auto scale_broadcasted = feature_broadcast(scale);
auto offset_broadcasted = feature_broadcast(offset);
HloComputation* add_reduce_computation =
GetOrCreateScalarAddComputation(ptype);
auto operand_squared =
add_binary(operand_shape, HloOpcode::kMultiply, operand, operand);
auto sum = add(HloInstruction::CreateReduce(feature_shape, operand, zero,
dimensions_without_feature,
add_reduce_computation));
auto squared_sum = add(HloInstruction::CreateReduce(
feature_shape, operand_squared, zero, dimensions_without_feature,
add_reduce_computation));
auto mean = add(Mean(elements_per_feature, sum, add));
auto mean_broadcasted = feature_broadcast(mean);
auto square_mean = add(Mean(elements_per_feature, squared_sum, add));
auto mean_square =
add_binary(feature_shape, HloOpcode::kMultiply, mean, mean);
auto var =
add_binary(feature_shape, HloOpcode::kSubtract, square_mean, mean_square);
auto var_broadcasted = feature_broadcast(var);
auto var_add_epsilon = add_binary(var_broadcasted->shape(), HloOpcode::kAdd,
var_broadcasted, epsilon);
auto rsqrt_var_add_epsilon = add(Rsqrt(var_add_epsilon));
auto operand_minus_mean = add_binary(operand_shape, HloOpcode::kSubtract,
operand, mean_broadcasted);
auto normalized = add_binary(operand_shape, HloOpcode::kMultiply,
operand_minus_mean, rsqrt_var_add_epsilon);
auto scaled_normalized = add_binary(operand_shape, HloOpcode::kMultiply,
normalized, scale_broadcasted);
auto shifted_normalized = add_binary(operand_shape, HloOpcode::kAdd,
scaled_normalized, offset_broadcasted);
auto tuple = HloInstruction::CreateTuple({shifted_normalized, mean, var});
if (batch_norm->has_sharding()) {
int64_t instruction_count_after = computation_->instruction_count();
CHECK_EQ(instruction_count_after,
instruction_count_before + added_instructions.size());
const HloSharding& sharding = batch_norm->sharding();
HloSharding operand_sharding =
sharding.GetAsShapeTree(batch_norm->shape()).element({0});
optional<int64_t> unique_device = batch_norm->sharding_unique_device();
HloSharding default_sharding =
unique_device.has_value()
? HloSharding::AssignDevice(unique_device.value())
: HloSharding::Replicate();
for (HloInstruction* inst : added_instructions) {
if (ShapeUtil::Equal(inst->shape(), operand_shape)) {
inst->set_sharding(operand_sharding);
} else {
inst->set_sharding(default_sharding);
}
}
tuple->set_sharding(sharding);
}
TF_CHECK_OK(ReplaceWithNewInstruction(batch_norm, std::move(tuple)));
return absl::OkStatus();
}
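// Added summary comment (not in the original source): per feature channel the
// graph built above computes
//   mean = sum(x) / N
//   var  = sum(x * x) / N - mean * mean
//   out  = (x - mean) * rsqrt(var + epsilon) * scale + offset
// where N is the dynamically computed element count per feature, and the
// returned tuple is (out, mean, var).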
absl::Status BatchNormExpanderVisitor::HandleBatchNormInference(
HloInstruction* batch_norm) {
if (!rewrite_inference_op_) {
return absl::OkStatus();
}
HloInstruction* operand = batch_norm->mutable_operand(0);
const Shape operand_shape = operand->shape();
int64_t feature_index = batch_norm->feature_index();
PrimitiveType ptype = operand_shape.element_type();
HloInstruction* scale = batch_norm->mutable_operand(1);
HloInstruction* offset = batch_norm->mutable_operand(2);
HloInstruction* mean = batch_norm->mutable_operand(3);
HloInstruction* var = batch_norm->mutable_operand(4);
const Shape feature_shape = scale->shape();
Shape scalar_broadcast_shape = ShapeUtil::MakeStaticShape(feature_shape);
auto epsilon_literal = LiteralUtil::CreateR0(batch_norm->epsilon());
TF_ASSIGN_OR_RETURN(epsilon_literal, epsilon_literal.Convert(ptype));
auto epsilon = computation_->AddInstruction(HloInstruction::CreateBroadcast(
scalar_broadcast_shape,
computation_->AddInstruction(
HloInstruction::CreateConstant(std::move(epsilon_literal))),
{}));
std::vector<int64_t> dimensions_without_feature;
const int64_t rank = operand_shape.rank();
dimensions_without_feature.reserve(rank - 1);
for (int64_t i = 0; i < rank; ++i) {
if (i != feature_index) {
dimensions_without_feature.push_back(i);
}
}
std::vector<HloInstruction*> added_instructions;
auto add = [&](std::unique_ptr<HloInstruction> inst) {
HloInstruction* added_inst = computation_->AddInstruction(std::move(inst));
added_inst->set_metadata(batch_norm->metadata());
added_instructions.push_back(added_inst);
return added_inst;
};
auto add_binary = [&](const Shape& shape, const HloOpcode opcode,
HloInstruction* a, HloInstruction* b) {
return add(HloInstruction::CreateBinary(shape, opcode, a, b));
};
auto feature_broadcast = [&](HloInstruction* a) {
Shape broadcast_shape = ShapeUtil::MakeStaticShape(operand_shape);
broadcast_shape.set_dynamic_dimension(feature_index,
a->shape().is_dynamic_dimension(0));
return add(
HloInstruction::CreateBroadcast(broadcast_shape, a, {feature_index}));
};
int64_t instruction_count_before = computation_->instruction_count();
auto true_scale = add_binary(
feature_shape, HloOpcode::kMultiply, scale,
add(Rsqrt(add_binary(feature_shape, HloOpcode::kAdd, var, epsilon))));
auto true_shift = add_binary(
feature_shape, HloOpcode::kSubtract, offset,
add_binary(feature_shape, HloOpcode::kMultiply, mean, true_scale));
auto shifted_normalized =
add_binary(operand_shape, HloOpcode::kAdd,
add_binary(operand_shape, HloOpcode::kMultiply, operand,
feature_broadcast(true_scale)),
feature_broadcast(true_shift));
int64_t instruction_count_after = computation_->instruction_count();
CHECK_EQ(instruction_count_after,
instruction_count_before + added_instructions.size());
if (batch_norm->has_sharding()) {
const HloSharding& sharding = batch_norm->sharding();
optional<int64_t> unique_device = batch_norm->sharding_unique_device();
HloSharding default_sharding =
unique_device.has_value()
? HloSharding::AssignDevice(unique_device.value())
: HloSharding::Replicate();
for (HloInstruction* inst : added_instructions) {
if (ShapeUtil::Equal(inst->shape(), operand_shape)) {
inst->set_sharding(sharding);
} else {
inst->set_sharding(default_sharding);
}
}
shifted_normalized->set_sharding(sharding);
}
TF_CHECK_OK(ReplaceInstruction(batch_norm, shifted_normalized));
return absl::OkStatus();
}
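// Added summary comment (not in the original source): the inference expansion
// folds the statistics into two per-channel constants,
//   true_scale = scale * rsqrt(var + epsilon)
//   true_shift = offset - mean * true_scale
// and emits out = x * true_scale + true_shift, so only two feature-shaped
// broadcasts are needed per element.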
absl::Status BatchNormExpanderVisitor::HandleBatchNormGrad(
HloInstruction* batch_norm) {
if (!rewrite_grad_op_) {
return absl::OkStatus();
}
std::vector<HloInstruction*> added_instructions;
auto add = [&](std::unique_ptr<HloInstruction> inst) {
HloInstruction* added_inst = computation_->AddInstruction(std::move(inst));
added_inst->set_metadata(batch_norm->metadata());
added_instructions.push_back(added_inst);
return added_inst;
};
auto add_binary = [&](const Shape& shape, const HloOpcode opcode,
HloInstruction* a, HloInstruction* b) {
return add(HloInstruction::CreateBinary(shape, opcode, a, b));
};
int64_t instruction_count_before = computation_->instruction_count();
HloInstruction* activation = batch_norm->mutable_operand(0);
const Shape activation_shape = activation->shape();
PrimitiveType ptype = activation_shape.element_type();
HloInstruction* scale = batch_norm->mutable_operand(1);
const Shape feature_shape = scale->shape();
HloInstruction* mean = batch_norm->mutable_operand(2);
HloInstruction* variance = batch_norm->mutable_operand(3);
HloInstruction* grad_output = batch_norm->mutable_operand(4);
int64_t feature_index = batch_norm->feature_index();
auto elements_per_feature =
add(DynamicElementCountPerFeature(activation, feature_index, add));
auto zero_literal = LiteralUtil::CreateR0(0.0f);
TF_ASSIGN_OR_RETURN(zero_literal, zero_literal.Convert(ptype));
auto zero = add(HloInstruction::CreateConstant(std::move(zero_literal)));
auto epsilon_literal = LiteralUtil::CreateR0(batch_norm->epsilon());
TF_ASSIGN_OR_RETURN(epsilon_literal, epsilon_literal.Convert(ptype));
auto epsilon_scalar =
add(HloInstruction::CreateConstant(std::move(epsilon_literal)));
auto epsilon_activation = add(HloInstruction::CreateBroadcast(
ShapeUtil::MakeStaticShape(activation_shape), epsilon_scalar, {}));
auto epsilon_feature = add(HloInstruction::CreateBroadcast(
ShapeUtil::MakeStaticShape(feature_shape), epsilon_scalar, {}));
std::vector<int64_t> dimensions_without_feature;
const int64_t rank = activation_shape.rank();
dimensions_without_feature.reserve(rank - 1);
for (int64_t i = 0; i < rank; ++i) {
if (i != feature_index) {
dimensions_without_feature.push_back(i);
}
}
auto activation_broadcast = [&](HloInstruction* hlo) -> HloInstruction* {
Shape broadcast_shape = ShapeUtil::MakeStaticShape(activation_shape);
broadcast_shape.set_dynamic_dimension(feature_index,
hlo->shape().is_dynamic_dimension(0));
return add(
HloInstruction::CreateBroadcast(broadcast_shape, hlo, {feature_index}));
};
auto scale_broadcasted = activation_broadcast(scale);
auto variance_broadcasted = activation_broadcast(variance);
auto mean_broadcasted = activation_broadcast(mean);
auto rsqrt_var_add_epsilon_broadcasted =
add(Rsqrt(add_binary(variance_broadcasted->shape(), HloOpcode::kAdd,
variance_broadcasted, epsilon_activation)));
auto rsqrt_var_add_epsilon = add(Rsqrt(
add_binary(feature_shape, HloOpcode::kAdd, variance, epsilon_feature)));
auto activation_minus_mean = add_binary(
activation_shape, HloOpcode::kSubtract, activation, mean_broadcasted);
auto grad_output_times_activation_minus_mean =
add_binary(activation_shape, HloOpcode::kMultiply, grad_output,
activation_minus_mean);
HloComputation* add_reduce_computation =
GetOrCreateScalarAddComputation(ptype);
auto sum_grad_output_times_activation_minus_mean =
add(HloInstruction::CreateReduce(
feature_shape, grad_output_times_activation_minus_mean, zero,
dimensions_without_feature, add_reduce_computation));
auto grad_beta = add(HloInstruction::CreateReduce(
feature_shape, grad_output, zero, dimensions_without_feature,
add_reduce_computation));
auto grad_scale = add_binary(feature_shape, HloOpcode::kMultiply,
sum_grad_output_times_activation_minus_mean,
rsqrt_var_add_epsilon);
auto i2 = activation_broadcast(grad_beta);
auto i3 = activation_broadcast(sum_grad_output_times_activation_minus_mean);
auto i4 = add_binary(activation_shape, HloOpcode::kMultiply, i3,
activation_minus_mean);
auto i5 =
add_binary(activation_shape, HloOpcode::kDivide, i4,
add_binary(variance_broadcasted->shape(), HloOpcode::kAdd,
variance_broadcasted, epsilon_activation));
Shape scale_times_rsqrt_var_add_epsilon_shape = scale_broadcasted->shape();
for (int64_t i = 0; i < rsqrt_var_add_epsilon_broadcasted->shape().rank();
++i) {
if (rsqrt_var_add_epsilon_broadcasted->shape().is_dynamic_dimension(i)) {
scale_times_rsqrt_var_add_epsilon_shape.set_dynamic_dimension(i, true);
}
}
auto scale_times_rsqrt_var_add_epsilon =
add_binary(scale_times_rsqrt_var_add_epsilon_shape, HloOpcode::kMultiply,
scale_broadcasted, rsqrt_var_add_epsilon_broadcasted);
scale_times_rsqrt_var_add_epsilon =
add(Mean(elements_per_feature, scale_times_rsqrt_var_add_epsilon, add));
auto i1 = add_binary(grad_output->shape(), HloOpcode::kMultiply, grad_output,
add(HloInstruction::CreateBroadcast(
ShapeUtil::MakeStaticShape(activation_shape),
elements_per_feature, {})));
auto i6 = add_binary(
activation_shape, HloOpcode::kSubtract,
add_binary(activation_shape, HloOpcode::kSubtract, i1, i2), i5);
auto grad_activation = add_binary(activation_shape, HloOpcode::kMultiply,
scale_times_rsqrt_var_add_epsilon, i6);
auto tuple =
HloInstruction::CreateTuple({grad_activation, grad_scale, grad_beta});
if (batch_norm->has_sharding()) {
const HloSharding& sharding = batch_norm->sharding();
int64_t instruction_count_after = computation_->instruction_count();
CHECK_EQ(instruction_count_after,
instruction_count_before + added_instructions.size());
HloSharding activation_sharding =
sharding.GetAsShapeTree(batch_norm->shape()).element({0});
auto unique_device = batch_norm->sharding_unique_device();
HloSharding default_sharding =
unique_device.has_value()
? HloSharding::AssignDevice(unique_device.value())
: HloSharding::Replicate();
for (HloInstruction* inst : added_instructions) {
if (ShapeUtil::Equal(inst->shape(), activation_shape)) {
inst->set_sharding(activation_sharding);
} else {
inst->set_sharding(default_sharding);
}
}
tuple->set_sharding(sharding);
}
TF_CHECK_OK(ReplaceWithNewInstruction(batch_norm, std::move(tuple)));
return absl::OkStatus();
}
absl::StatusOr<bool> BatchNormExpander::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
XLA_VLOG_LINES(2, "BatchNormExpander::Run(), before:\n" + module->ToString());
bool changed = false;
for (HloComputation* computation :
module->MakeNonfusionComputations(execution_threads)) {
if (BatchNormExpanderVisitor::Run(computation, rewrite_training_op_,
rewrite_inference_op_,
rewrite_grad_op_)) {
changed = true;
}
}
XLA_VLOG_LINES(2, "BatchNormExpander::Run(), after:\n" + module->ToString());
return changed;
}
} |
#include "xla/service/batchnorm_expander.h"
#include <memory>
#include <utility>
#include "xla/error_spec.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/hlo_parser.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/test.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
class BatchNormExpanderTest : public HloTestBase {
protected:
int64_t CountGetDimensionSize(const HloModule& module) {
int64_t count = 0;
for (HloComputation* comp : module.computations()) {
for (HloInstruction* inst : comp->instructions()) {
if (inst->opcode() == HloOpcode::kGetDimensionSize) {
count++;
}
}
}
return count;
}
};
TEST_F(BatchNormExpanderTest, BatchNormTraining) {
Shape input_shape = ShapeUtil::MakeShape(F32, {2, 2, 2, 2});
Shape scale_shape = ShapeUtil::MakeShape(F32, {2});
Shape offset_shape = ShapeUtil::MakeShape(F32, {2});
HloComputation::Builder builder(TestName());
HloInstruction* param0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, input_shape, "activation"));
HloInstruction* param1 = builder.AddInstruction(
HloInstruction::CreateParameter(1, scale_shape, "scale"));
HloInstruction* param2 = builder.AddInstruction(
HloInstruction::CreateParameter(2, offset_shape, "offset"));
builder.AddInstruction(HloInstruction::CreateBatchNormTraining(
ShapeUtil::MakeTupleShape({input_shape, scale_shape, offset_shape}),
param0, param1, param2,
      /*epsilon=*/0.001, /*feature_index=*/3));
auto module = CreateNewVerifiedModule();
auto computation = module->AddEntryComputation(builder.Build());
HloInstruction* root = computation->root_instruction();
EXPECT_EQ(root->opcode(), HloOpcode::kBatchNormTraining);
  BatchNormExpander rewriter(/*rewrite_training_op=*/true,
                             /*rewrite_inference_op=*/true,
                             /*rewrite_grad_op=*/true);
ASSERT_TRUE(rewriter.Run(module.get()).value());
root = computation->root_instruction();
EXPECT_EQ(CountGetDimensionSize(*module), 3);
EXPECT_EQ(root->opcode(), HloOpcode::kTuple);
}
TEST_F(BatchNormExpanderTest, BatchNormGrad) {
Shape input_shape = ShapeUtil::MakeShape(F32, {2, 2, 2, 2});
Shape scale_shape = ShapeUtil::MakeShape(F32, {2});
Shape mean_shape = ShapeUtil::MakeShape(F32, {2});
Shape var_shape = ShapeUtil::MakeShape(F32, {2});
Shape grad_output_shape = ShapeUtil::MakeShape(F32, {2, 2, 2, 2});
HloComputation::Builder builder(TestName());
HloInstruction* param0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, input_shape, "activation"));
HloInstruction* param1 = builder.AddInstruction(
HloInstruction::CreateParameter(1, scale_shape, "scale"));
HloInstruction* param2 = builder.AddInstruction(
HloInstruction::CreateParameter(2, mean_shape, "mean"));
HloInstruction* param3 = builder.AddInstruction(
HloInstruction::CreateParameter(3, var_shape, "var"));
HloInstruction* param4 = builder.AddInstruction(
HloInstruction::CreateParameter(4, grad_output_shape, "grad_output"));
builder.AddInstruction(HloInstruction::CreateBatchNormGrad(
ShapeUtil::MakeTupleShape({input_shape, scale_shape, mean_shape}), param0,
param1, param2, param3, param4,
      /*epsilon=*/0.001, /*feature_index=*/3));
auto module = CreateNewVerifiedModule();
auto computation = module->AddEntryComputation(builder.Build());
HloInstruction* root = computation->root_instruction();
EXPECT_EQ(root->opcode(), HloOpcode::kBatchNormGrad);
  BatchNormExpander rewriter(/*rewrite_training_op=*/true,
                             /*rewrite_inference_op=*/true,
                             /*rewrite_grad_op=*/true);
ASSERT_TRUE(rewriter.Run(module.get()).value());
root = computation->root_instruction();
EXPECT_EQ(CountGetDimensionSize(*module), 3);
EXPECT_EQ(root->opcode(), HloOpcode::kTuple);
}
TEST_F(BatchNormExpanderTest, BatchNormTrainingSharding) {
const char* module_str = R"(
HloModule module
ENTRY entry {
%param.0 = f32[8,4] parameter(0)
%param.1 = f32[4] parameter(1)
%param.2 = f32[4] parameter(2)
ROOT %batch-norm-training = (f32[8,4], f32[4], f32[4])
batch-norm-training(f32[8,4] %param.0, f32[4] %param.1, f32[4] %param.2),
epsilon=0.001, feature_index=1, sharding={{maximal device=1},{maximal device=1},{maximal device=1}}
})";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(module_str));
  BatchNormExpander rewriter(/*rewrite_training_op=*/true,
                             /*rewrite_inference_op=*/true,
                             /*rewrite_grad_op=*/true);
ASSERT_TRUE(rewriter.Run(m.get()).value());
for (auto* instruction : m->entry_computation()->instructions()) {
if (instruction->opcode() == HloOpcode::kParameter) {
continue;
}
auto device = instruction->sharding_unique_device();
ASSERT_TRUE(device);
EXPECT_EQ(*device, 1);
}
}
TEST_F(BatchNormExpanderTest, Execution) {
const char* module_str = R"(
HloModule module
ENTRY entry {
%param.0 = f32[8,4] parameter(0)
%param.1 = f32[4] parameter(1)
%param.2 = f32[4] parameter(2)
ROOT %batch-norm-training = (f32[8,4], f32[4], f32[4])
batch-norm-training(f32[8,4] %param.0, f32[4] %param.1, f32[4] %param.2),
epsilon=0.001, feature_index=1, sharding={{maximal device=1},{maximal device=1},{maximal device=1}}
})";
EXPECT_TRUE(RunAndCompare(module_str, ErrorSpec{1e-4, 1e-4}));
}
}
} |
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/batchnorm_expander.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/batchnorm_expander_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
3d8edbda-9c2e-45e4-8660-fd15f9471059 | cpp | google/tensorstore | regular_grid | tensorstore/internal/regular_grid.h | tensorstore/internal/regular_grid_test.cc |
#ifndef TENSORSTORE_INTERNAL_REGULAR_GRID_H_
#define TENSORSTORE_INTERNAL_REGULAR_GRID_H_
#include <cassert>
#include "tensorstore/index.h"
#include "tensorstore/index_interval.h"
#include "tensorstore/util/division.h"
#include "tensorstore/util/span.h"
namespace tensorstore {
namespace internal_grid_partition {
struct RegularGridRef {
tensorstore::span<const Index> grid_cell_shape;
DimensionIndex rank() const { return grid_cell_shape.size(); }
IndexInterval GetCellOutputInterval(DimensionIndex dim,
Index cell_index) const {
assert(dim >= 0 && dim < rank());
return IndexInterval::UncheckedSized(cell_index * grid_cell_shape[dim],
grid_cell_shape[dim]);
}
Index operator()(DimensionIndex dim, Index output_index,
IndexInterval* cell_bounds) const {
assert(dim >= 0 && dim < rank());
Index cell_index = FloorOfRatio(output_index, grid_cell_shape[dim]);
if (cell_bounds) {
*cell_bounds = GetCellOutputInterval(dim, cell_index);
}
return cell_index;
}
};
}
}
#endif |
#include "tensorstore/internal/regular_grid.h"
#include <array>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorstore/index.h"
#include "tensorstore/index_interval.h"
namespace {
using ::tensorstore::DimensionIndex;
using ::tensorstore::Index;
using ::tensorstore::IndexInterval;
using ::tensorstore::internal_grid_partition::RegularGridRef;
using ::testing::Eq;
TEST(RegularGridTest, Basic) {
std::array<Index, 3> grid_cell_shape = {10, 20, 30};
RegularGridRef regular_grid{grid_cell_shape};
IndexInterval cell_bounds;
for (Index i = 0; i < 10; i++) {
EXPECT_THAT(regular_grid(0, i, &cell_bounds), Eq(0));
EXPECT_THAT(cell_bounds, Eq(IndexInterval::UncheckedSized(0, 10)));
EXPECT_THAT(regular_grid(1, i, &cell_bounds), Eq(0));
EXPECT_THAT(cell_bounds, Eq(IndexInterval::UncheckedSized(0, 20)));
EXPECT_THAT(regular_grid(2, i, &cell_bounds), Eq(0));
EXPECT_THAT(cell_bounds, Eq(IndexInterval::UncheckedSized(0, 30)));
}
for (DimensionIndex i = 0; i < 3; i++) {
Index j = (i + 1) * 10;
EXPECT_THAT(regular_grid(i, j - 1, &cell_bounds), Eq(0));
EXPECT_THAT(cell_bounds, Eq(IndexInterval::UncheckedSized(0, j)));
EXPECT_THAT(regular_grid(i, j, &cell_bounds), Eq(1));
EXPECT_THAT(cell_bounds, Eq(IndexInterval::UncheckedSized(j, j)));
}
}
} |
https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/regular_grid.h | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/regular_grid_test.cc | 4f887a6430414cd6088e1743555015b10f116d50
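A small standalone sketch of how the RegularGridRef above maps an output coordinate to its grid cell (illustrative only; the `main` wrapper and variable names are not part of the tensorstore sources):

#include <iostream>

#include "tensorstore/index.h"
#include "tensorstore/index_interval.h"
#include "tensorstore/internal/regular_grid.h"

int main() {
  using ::tensorstore::Index;
  using ::tensorstore::IndexInterval;
  using ::tensorstore::internal_grid_partition::RegularGridRef;

  const Index cell_shape[] = {10, 20, 30};
  RegularGridRef grid{cell_shape};

  IndexInterval bounds;
  // Output index 25 along dimension 0 lands in cell 2, which spans [20, 30).
  Index cell = grid(/*dim=*/0, /*output_index=*/25, &bounds);
  std::cout << cell << " [" << bounds.inclusive_min() << ", "
            << bounds.exclusive_max() << ")\n";  // prints: 2 [20, 30)
  return 0;
}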
5befe34d-a742-4dd8-8eb2-5fbb362c43ee | cpp | tensorflow/tensorflow | gpu_fusible | third_party/xla/xla/service/gpu/gpu_fusible.cc | third_party/xla/xla/service/gpu/gpu_fusible_test.cc |
#include "xla/service/gpu/gpu_fusible.h"
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <optional>
#include <stack>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_set.h"
#include "absl/container/inlined_vector.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/synchronization/mutex.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/permutation_util.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/service/gpu/hlo_fusion_analysis.h"
#include "xla/service/gpu/hlo_traversal.h"
#include "xla/service/gpu/ir_emission_utils.h"
#include "xla/service/gpu/reduction_utils.h"
#include "xla/service/hlo_dataflow_analysis.h"
#include "xla/service/instruction_fusion.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/device_description.h"
#include "xla/util.h"
namespace xla {
namespace gpu {
namespace {
bool HasAnyTiledTransposeRoot(const HloComputation& computation) {
return absl::c_any_of(GetFusionRoots(computation),
[&](const HloInstruction* instr) {
return GetDescriptionForTiledTransposeEmitter(
FindNonTrivialHero(*instr))
.has_value();
});
}
const Shape& GetElementShape(const HloFusionAnalysis& analysis) {
const Shape* shape = &analysis.fusion_root(0).shape();
while (shape->IsTuple()) {
shape = &shape->tuple_shapes(0);
}
return *shape;
}
int ComputeMaxUnrollFactor(int64_t num_elements) {
constexpr int kMaxUnrollFactor = 4;
for (int i = kMaxUnrollFactor; i > 1; i /= 2) {
if (num_elements % i == 0) {
return i;
}
}
return 1;
}
}
bool IfFusedReadsElementsMultipleTimes(const HloInstruction& instr) {
CHECK_NE(instr.opcode(), HloOpcode::kFusion) << "`instr` has to be unfused.";
if (instr.opcode() == HloOpcode::kGather ||
instr.opcode() == HloOpcode::kBroadcast) {
return ShapeUtil::ElementsIn(instr.shape()) >
ShapeUtil::ElementsIn(instr.operand(0)->shape());
}
if (instr.opcode() == HloOpcode::kReduceWindow) {
for (const auto& dim : instr.window().dimensions()) {
if (dim.size() > dim.stride()) {
return true;
}
}
}
return false;
}
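// Added illustrative comment (not in the original source): a broadcast from
// f32[16] to f32[16,16] has 256 output elements backed by 16 inputs, so the
// check above reports that fusing it would re-read elements; the same holds
// for gathers that enlarge the data and for reduce-windows whose window size
// exceeds the stride (overlapping windows).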
bool IsExpensiveToRepeat(const HloInstruction& instr) {
CHECK_NE(instr.opcode(), HloOpcode::kFusion) << "`instr` has to be unfused.";
constexpr int kMaxInputsPerOutput = 10;
if (instr.opcode() == HloOpcode::kReduce &&
!IsReductionFromOrToContiguousDimensions(instr)) {
int64_t reduction_ratio = ShapeUtil::ElementsIn(instr.operand(0)->shape()) /
ShapeUtil::ElementsIn(instr.shape());
if (reduction_ratio > kMaxInputsPerOutput) return true;
}
if (instr.opcode() == HloOpcode::kReduceWindow) {
int64_t reduction_ratio = 1;
for (const auto& dim : instr.window().dimensions())
reduction_ratio *= dim.size();
if (reduction_ratio > kMaxInputsPerOutput) return true;
}
return false;
}
bool IsPhysicallyTransposing(const HloInstruction& instr) {
if (instr.opcode() == HloOpcode::kFusion) {
for (const HloInstruction* fused_instr : instr.fused_instructions()) {
if (IsPhysicallyTransposing(*fused_instr)) {
return true;
}
}
}
return instr.opcode() == HloOpcode::kCopy ||
(instr.opcode() == HloOpcode::kTranspose &&
!ShapeUtil::TransposeIsBitcast(instr.operand(0)->shape(),
instr.shape(), instr.dimensions()));
}
namespace {
std::pair<int64_t, int64_t> MostMinorNonTrivialDimension(const Shape& shape) {
int64_t position_of_first_non_trivial_dim = 0;
for (int64_t dim : shape.layout().minor_to_major()) {
if (shape.dimensions()[dim] > 1) {
return {dim, position_of_first_non_trivial_dim};
}
++position_of_first_non_trivial_dim;
}
return {-1, position_of_first_non_trivial_dim};
}
}
bool TransposesMinorDimension(const HloInstruction* instr) {
switch (instr->opcode()) {
case HloOpcode::kFusion:
return absl::c_any_of(instr->fused_instructions(),
TransposesMinorDimension);
case HloOpcode::kCopy: {
int64_t first_non_trivial_operand_dim =
MostMinorNonTrivialDimension(instr->operand(0)->shape()).first;
int64_t first_non_trivial_output_dim =
MostMinorNonTrivialDimension(instr->shape()).first;
return first_non_trivial_operand_dim != first_non_trivial_output_dim;
}
case HloOpcode::kTranspose: {
auto position_in_minor_to_major = InversePermutation(
instr->operand(0)->shape().layout().minor_to_major());
int64_t position_of_first_non_trivial_dim =
MostMinorNonTrivialDimension(instr->operand(0)->shape()).second;
for (int64_t output_dim : instr->shape().layout().minor_to_major()) {
if (instr->shape().dimensions()[output_dim] == 1) {
continue;
}
int64_t operand_dim = instr->dimensions().at(output_dim);
return position_in_minor_to_major[operand_dim] >
position_of_first_non_trivial_dim;
}
return false;
}
default:
return false;
}
}
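// Added illustrative comment (not in the original source): transposing
// f32[8,16]{1,0} into f32[16,8]{1,0} with dimensions={1,0} places a different
// logical dimension in the minor-most physical position, so this returns
// true; a transpose whose output layout makes it a pure layout bitcast keeps
// the same physical minor dimension and returns false.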
bool IsReduceInputFusion(const HloInstruction& instr) {
return instr.opcode() == HloOpcode::kFusion &&
absl::c_any_of(GetFusionRoots(*instr.called_computations()[0]),
[](const HloInstruction* root) {
return IsRealReductionHero(*root,
FindNonTrivialHero(*root));
});
}
bool IsInputFusibleReduction(const HloInstruction& instr) {
return IsReduceInputFusion(instr) ||
IsReductionFromOrToContiguousDimensions(instr);
}
bool IsNestableVariadicReduction(const HloInstruction& instr) {
return instr.shape().IsTuple() &&
((instr.opcode() == HloOpcode::kReduce &&
!IsReductionFromOrToContiguousDimensions(instr)) ||
(instr.opcode() == HloOpcode::kFusion &&
instr.fusion_kind() == HloInstruction::FusionKind::kLoop &&
instr.fused_expression_root()->opcode() == HloOpcode::kReduce));
}
bool IsInputFusibleTranspose(const HloInstruction& instr) {
if (instr.opcode() == HloOpcode::kBitcast || instr.IsCustomFusion()) {
return false;
}
if (instr.opcode() == HloOpcode::kFusion) {
return HasAnyTiledTransposeRoot(*instr.fused_instructions_computation());
}
return GetDescriptionForTiledTransposeEmitter(instr).has_value();
}
const HloInstruction* GetRealHeroForMultiOutputFusion(
const HloInstruction& instr) {
if (instr.opcode() != HloOpcode::kFusion) {
return &instr;
}
auto fused_expression_root = instr.fused_expression_root();
if (!instr.IsMultiOutputFusion()) {
const auto& hero = FindNonTrivialHero(*fused_expression_root);
if (IsRealReductionHero(*fused_expression_root, hero) ||
GetDescriptionForTiledTransposeEmitter(hero).has_value()) {
return &hero;
}
return fused_expression_root;
}
for (auto* inst : fused_expression_root->mutable_operands()) {
const auto& hero = FindNonTrivialHero(*inst);
if (IsRealReductionHero(*inst, hero) ||
GetDescriptionForTiledTransposeEmitter(hero).has_value()) {
return &hero;
}
}
return fused_expression_root->operands()[0];
}
FusionDecision FusionHeroesAreCompatible(const HloInstruction* hero1,
const HloInstruction* hero2) {
auto hero1_is_unnested_reduce =
IsReductionFromOrToContiguousDimensions(*hero1);
auto tiled_transpose_hero1 = GetDescriptionForTiledTransposeEmitter(*hero1);
bool hero1_is_unnested_transpose = tiled_transpose_hero1.has_value();
bool hero2_is_unnested_reduce =
IsReductionFromOrToContiguousDimensions(*hero2);
auto tiled_transpose_hero2 = GetDescriptionForTiledTransposeEmitter(*hero2);
bool hero2_is_unnested_transpose = tiled_transpose_hero2.has_value();
if (hero1_is_unnested_reduce && hero2_is_unnested_reduce &&
!AreReductionsMultiOutputFusionCompatible(hero2, hero1)) {
return FusionDecision::Forbid("tiled reductions with different shapes");
} else if (hero1_is_unnested_transpose && hero2_is_unnested_transpose &&
!tiled_transpose_hero1->IsEquivalent(*tiled_transpose_hero2)) {
return FusionDecision::Forbid("tiled transposes with different shapes");
} else if ((hero1_is_unnested_transpose && hero2_is_unnested_reduce) ||
(hero1_is_unnested_reduce && hero2_is_unnested_transpose)) {
return FusionDecision::Forbid("MOF-fusion of a transpose and a reduction");
}
if (hero1_is_unnested_transpose || hero2_is_unnested_transpose) {
auto check_path_of_intermediate_ops = [](HloInstruction* param) {
if (param->user_count() != 1) {
return false;
}
HloInstruction* hlo = param->users()[0];
while (hlo->user_count() > 0) {
if (!IsIntermediate(hlo)) {
return false;
}
hlo = hlo->users()[0];
}
return true;
};
HloInstruction* fusion1 = hero1->parent()->FusionInstruction();
HloInstruction* fusion2 = hero2->parent()->FusionInstruction();
if (fusion1 != nullptr && fusion2 != nullptr) {
if (hero1_is_unnested_transpose && fusion2->IsUserOf(fusion1)) {
int64_t operand_idx = fusion2->operand_index(fusion1);
auto hlo = fusion2->fused_parameter(operand_idx);
if (!check_path_of_intermediate_ops(hlo)) {
return FusionDecision::Forbid("tiled transpose would become untiled");
}
} else if (hero2_is_unnested_transpose && fusion1->IsUserOf(fusion2)) {
int64_t operand_idx = fusion1->operand_index(fusion2);
auto hlo = fusion1->fused_parameter(operand_idx);
if (!check_path_of_intermediate_ops(hlo)) {
return FusionDecision::Forbid("tiled transpose would become untiled");
}
}
}
}
return FusionDecision::Allow();
}
FusionDecision ShapesCompatibleForMultiOutputFusion(
const HloInstruction& instr1, const HloInstruction& instr2) {
auto get_loop_shape = [&](const HloInstruction* element_instr) {
const auto& hero = element_instr->parent()->IsFusionComputation()
? FindNonTrivialHero(*element_instr)
: *element_instr;
if (IsReductionFromOrToContiguousDimensions(*element_instr) ||
GetDescriptionForTiledTransposeEmitter(hero).has_value()) {
return hero.operand(0)->shape();
}
return element_instr->shape();
};
const HloInstruction* hero1 = GetRealHeroForMultiOutputFusion(instr1);
const HloInstruction* hero2 = GetRealHeroForMultiOutputFusion(instr2);
if (auto compatible = FusionHeroesAreCompatible(hero1, hero2); !compatible) {
return compatible;
}
const Shape& l1 = get_loop_shape(hero1);
const Shape& l2 = get_loop_shape(hero2);
bool accept_unequal_shape = !l1.IsTuple() && !l2.IsTuple();
if (!ShapeUtil::EqualIgnoringElementType(l1, l2) &&
(!accept_unequal_shape ||
!ShapeUtil::IsReshapeOrTransposeBitcast(l1, l2,
                                               /*ignore_element_type=*/true))) {
return FusionDecision::Forbid("different loop shapes");
}
return FusionDecision::Allow();
}
bool IsInputFusibleScatter(const HloInstruction& instr) {
if (instr.opcode() == HloOpcode::kScatter ||
(instr.opcode() == HloOpcode::kFusion &&
instr.fusion_kind() == HloInstruction::FusionKind::kInput &&
instr.fused_expression_root()->opcode() == HloOpcode::kScatter)) {
return true;
}
return false;
}
bool IsInputFusible(const HloInstruction& instr) {
return instr.IsFusible() &&
(IsInputFusibleReduction(instr) || IsInputFusibleScatter(instr) ||
IsInputFusibleTranspose(instr));
}
bool IsUniversallyLoopFusible(const HloInstruction& instr) {
if (instr.IsElementwise() && instr.operand_count() > 0 &&
instr.opcode() != HloOpcode::kCopy) {
return true;
}
switch (instr.opcode()) {
case HloOpcode::kCopy:
return !GetDescriptionForTiledTransposeEmitter(instr).has_value();
case HloOpcode::kFusion:
return instr.fusion_kind() == HloInstruction::FusionKind::kLoop;
case HloOpcode::kBitcast:
case HloOpcode::kBroadcast:
case HloOpcode::kConcatenate:
case HloOpcode::kDynamicSlice:
case HloOpcode::kDynamicUpdateSlice:
case HloOpcode::kGather:
case HloOpcode::kPad:
case HloOpcode::kReduceWindow:
case HloOpcode::kReshape:
case HloOpcode::kReverse:
case HloOpcode::kSlice:
case HloOpcode::kTranspose:
return true;
default:
return false;
}
}
bool IsLoopFusibleAsConsumer(const HloInstruction& instr) {
if (!instr.IsFusible()) return false;
if (instr.opcode() == HloOpcode::kBitcast) return false;
if (instr.opcode() == HloOpcode::kReduce) return true;
if (!IsInputFusible(instr) && instr.opcode() == HloOpcode::kFusion &&
instr.fusion_kind() == HloInstruction::FusionKind::kInput) {
return true;
}
return IsUniversallyLoopFusible(instr);
}
bool IsLoopFusibleAsProducer(const HloInstruction& instr) {
if (!instr.IsFusible()) return false;
switch (instr.opcode()) {
case HloOpcode::kIota:
case HloOpcode::kConstant:
return true;
case HloOpcode::kReduce:
return !instr.shape().IsTuple();
default:
return IsUniversallyLoopFusible(instr);
}
}
static bool AllSatisfy(const HloInstruction& instr,
const HloPredicate& predicate) {
if (instr.opcode() != HloOpcode::kFusion) {
return predicate(&instr);
}
return absl::c_all_of(
instr.fused_instructions(), [&](const HloInstruction* i) {
return i->opcode() == HloOpcode::kParameter || predicate(i);
});
}
FusionDecision CanEmitInputFusedScatter(const HloInstruction& producer,
const HloInstruction& consumer) {
if (IsInputFusibleScatter(producer)) {
return FusionDecision::Forbid("do not fuse into the output of scatter");
}
if (!IsInputFusibleScatter(consumer)) {
return FusionDecision::Allow();
}
const HloInstruction* inplace_operand;
if (consumer.opcode() == HloOpcode::kFusion) {
const HloInstruction* scatter = consumer.fused_expression_root();
CHECK_EQ(scatter->opcode(), HloOpcode::kScatter);
CHECK_EQ(scatter->operand(0)->opcode(), HloOpcode::kParameter);
inplace_operand = consumer.operand(scatter->operand(0)->parameter_number());
} else {
inplace_operand = consumer.operand(0);
}
if (inplace_operand == &producer) {
return FusionDecision::Forbid(
"do not fuse into the in-place operand of scatter");
}
if (absl::c_linear_search(producer.operands(), inplace_operand)) {
return FusionDecision::Forbid(
"Producer uses the in-place operand of a scatter");
}
return FusionDecision::Allow();
}
FusionDecision IsProducerConsumerFusible(const HloInstruction& producer,
const HloInstruction& consumer) {
if (!IsLoopFusibleAsProducer(producer) &&
!IsInputFusibleTranspose(producer)) {
return FusionDecision::Forbid("the producer is not loop-fusible");
}
if (IsInputFusibleReduction(producer)) {
if (!producer.GetModule()
->config()
.debug_options()
.xla_gpu_enable_reduction_epilogue_fusion()) {
return FusionDecision::Forbid(
"Reduction epilogue fusion is not enabled.");
}
const HloInstruction& reduce_hero =
producer.opcode() == HloOpcode::kFusion
? FindNonTrivialHero(*producer.fused_expression_root())
: producer;
if (!ReductionIsRaceFree(
reduce_hero.GetModule()->config(),
GetReductionKindAndContiguousComponents(reduce_hero))) {
return FusionDecision::Forbid(
"Reduction output fusion only works for race free reductions");
}
if (!AllSatisfy(consumer, [](const HloInstruction* hlo) {
        return IsIntermediate(hlo, /*allowed_operand_count=*/1);
})) {
return FusionDecision::Forbid(
"Reductions from/to continuous dims epilogue not fusible");
}
if (producer.user_count() > 1) {
return FusionDecision::Forbid(
"reduction output fusion only works for single user");
}
}
if (auto can_fuse = CanEmitInputFusedScatter(producer, consumer); !can_fuse) {
return can_fuse;
}
if (!IsInputFusible(consumer) && !IsLoopFusibleAsConsumer(consumer)) {
return FusionDecision::Forbid(
"the consumer is not input-fusible and not loop-fusible");
}
if (producer.IsMultiOutputFusion()) {
return FusionDecision::Forbid(
"the producer is not fusible as it is a multi-output fusion");
}
if (producer.opcode() == HloOpcode::kConstant &&
(!ShapeUtil::IsEffectiveScalar(producer.shape()) ||
consumer.opcode() != HloOpcode::kFusion)) {
return FusionDecision::Forbid("not fusing constant");
}
return InstructionFusion::ShouldFuseInPlaceOp(&producer, &consumer);
}
FusionDecision IsProducerMultiOutputFusible(const HloInstruction& producer) {
if (producer.IsMultiOutputFusion()) {
return FusionDecision::Forbid("Producer is a multi-output fusion");
}
if (!HloDataflowAnalysis::GetInPlaceInputOutputPairs(&producer).empty()) {
return FusionDecision::Forbid("In-place operations are present");
}
if (!IsLoopFusibleAsProducer(producer)) {
return FusionDecision::Forbid("producer is not loop-fusible");
}
if (IsPhysicallyTransposing(producer)) {
return FusionDecision::Forbid("producer is physically transposing");
}
return FusionDecision::Allow();
}
static int64_t SharedMemoryUsageNoCache(const HloInstruction& instr) {
if (instr.opcode() == HloOpcode::kFusion) {
int64_t sum = 0;
for (const HloInstruction* hlo :
instr.fused_instructions_computation()->instructions()) {
sum += SharedMemoryUsageNoCache(*hlo);
}
return sum;
} else if (instr.opcode() == HloOpcode::kReduce &&
IsReductionFromOrToContiguousDimensions(instr)) {
ReductionDimensions reduction_info =
GetReductionKindAndContiguousComponents(instr);
int64_t primitive_size = ShapeUtil::ByteSizeOfPrimitiveType(
instr.operand(0)->shape().element_type());
int num_variadic =
instr.shape().IsTuple() ? instr.shape().tuple_shapes_size() : 1;
if (reduction_info.is_row_reduction) {
return 32 * primitive_size * num_variadic;
} else {
return 4 * 32 * 33 * primitive_size * num_variadic;
}
} else if (auto tr = GetDescriptionForTiledTransposeEmitter(instr)) {
int64_t primitive_size =
ShapeUtil::ByteSizeOfPrimitiveType(instr.shape().element_type());
int64_t bytes_required = 32 * 33 * primitive_size;
if (tr->permutation.back() == tr->permutation.size() - 1) {
bytes_required *= tr->dimensions.back();
}
return bytes_required;
}
return 0;
}
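// Added worked example (not in the original source): for a single f32 row
// reduction the estimate above is 32 * 4 = 128 bytes, while an f32 column
// reduction reserves 4 * 32 * 33 * 4 = 16896 bytes for its padded
// shared-memory tiles; variadic reductions scale this by the number of tuple
// elements.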
int64_t FusionInfoCache::GetSharedMemoryUsage(const HloInstruction& instr) {
{
absl::MutexLock lock(&mutex_);
auto it = shared_memory_usage_.find(&instr);
if (it != shared_memory_usage_.end()) {
return it->second;
}
}
int64_t shared_memory_usage = SharedMemoryUsageNoCache(instr);
absl::MutexLock lock(&mutex_);
shared_memory_usage_.emplace(&instr, shared_memory_usage);
return shared_memory_usage;
}
int64_t SharedMemoryUsage(const HloInstruction& instr, FusionInfoCache* cache) {
if (!cache) {
return SharedMemoryUsageNoCache(instr);
}
return cache->GetSharedMemoryUsage(instr);
}
constexpr int64_t kMaxUnnestedReductionOutputsPerFusion = 8;
static int64_t NumUnnestedReductionsNoCache(const HloInstruction& instr) {
if (instr.opcode() == HloOpcode::kReduce &&
IsReductionFromOrToContiguousDimensions(instr)) {
return 1;
}
if (instr.opcode() == HloOpcode::kFusion) {
int64_t sum = 0;
for (const HloInstruction* hlo :
instr.fused_instructions_computation()->instructions()) {
sum += NumUnnestedReductionsNoCache(*hlo);
}
return sum;
}
return 0;
}
int64_t FusionInfoCache::GetNumUnnestedReductions(const HloInstruction& instr) {
{
absl::MutexLock lock(&mutex_);
auto it = num_unnested_reductions_.find(&instr);
if (it != num_unnested_reductions_.end()) {
return it->second;
}
}
int64_t num_unnested_reductions = NumUnnestedReductionsNoCache(instr);
absl::MutexLock lock(&mutex_);
num_unnested_reductions_.emplace(&instr, num_unnested_reductions);
return num_unnested_reductions;
}
static int64_t NumUnnestedReductions(const HloInstruction& instr,
FusionInfoCache* cache) {
if (!cache) {
return NumUnnestedReductionsNoCache(instr);
}
return cache->GetNumUnnestedReductions(instr);
}
FusionDecision FusionFitsInBudget(const HloInstruction& instr1,
const HloInstruction& instr2,
const se::DeviceDescription& device_info,
bool is_consumer_producer_fusion,
                                  FusionInfoCache* cache /*=nullptr*/) {
if (SharedMemoryUsage(instr1, cache) + SharedMemoryUsage(instr2, cache) >
device_info.shared_memory_per_block()) {
return FusionDecision::Forbid(
"shared memory usage would be over the budget of ")
<< device_info.shared_memory_per_block() << "B";
}
if (NumUnnestedReductions(instr1, cache) +
NumUnnestedReductions(instr2, cache) >
kMaxUnnestedReductionOutputsPerFusion) {
return FusionDecision::Forbid("over ")
<< kMaxUnnestedReductionOutputsPerFusion
<< " unnested reductions in fusion";
}
int64_t num_output_buffers = ShapeUtil::SubshapeCount(instr1.shape()) +
ShapeUtil::SubshapeCount(instr2.shape());
if (instr1.operand_count() + instr2.operand_count() - 1 +
num_output_buffers <=
MaxOperandsAndOutputsPerFusion()) {
return FusionDecision::Allow();
} else {
VLOG(5) << "Operand count of " << "(" << instr1.ToString()
<< " ) = " << instr1.operand_count() << " and ( "
<< instr2.ToString() << " ) = " << instr2.operand_count()
<< " and num_output_buffers = " << num_output_buffers
<< " is bigger than the bound of "
<< MaxOperandsAndOutputsPerFusion();
}
absl::flat_hash_set<const HloInstruction*> operands(instr1.operands().begin(),
instr1.operands().end());
operands.insert(instr2.operands().begin(), instr2.operands().end());
operands.erase(&instr1);
operands.erase(&instr2);
if (is_consumer_producer_fusion &&
operands.size() <= instr1.operands().size()) {
return FusionDecision::Allow();
}
if (operands.size() + num_output_buffers > MaxOperandsAndOutputsPerFusion()) {
return FusionDecision::Forbid(
"Number of operands and output buffers is larger than allowed budget "
"per fusion");
}
return FusionDecision::Allow();
}
bool CreatesHeavyComputation(const HloInstruction& producer,
const HloInstruction& consumer) {
auto producer_is_heavy = [&](const HloInstruction& instr) {
if (producer.opcode() != HloOpcode::kFusion) {
return IsExpensiveToRepeat(producer);
}
for (const auto& instr : producer.fused_instructions()) {
if (IsExpensiveToRepeat(*instr)) {
return true;
}
}
return false;
};
if (!producer_is_heavy(producer)) {
return false;
}
if (consumer.opcode() != HloOpcode::kFusion) {
return IfFusedReadsElementsMultipleTimes(consumer);
}
for (const HloInstruction* operand : consumer.operands()) {
if (operand != &producer) {
continue;
}
const HloInstruction* root =
consumer.fused_instructions_computation()->parameter_instruction(
consumer.operand_index(operand));
std::stack<const HloInstruction*> dfs;
dfs.push(root);
absl::flat_hash_set<const HloInstruction*> visited;
while (!dfs.empty()) {
const HloInstruction* cur = dfs.top();
dfs.pop();
if (!visited.insert(cur).second) {
continue;
}
if (IfFusedReadsElementsMultipleTimes(*cur)) {
return true;
}
for (const auto& user : cur->users()) {
if (visited.contains(user)) {
continue;
}
dfs.push(user);
}
}
}
return false;
}
bool IsFusibleAsMultiOutputFusionRoot(const HloInstruction& instr) {
return instr.IsFusible() && !instr.IsCustomFusion() &&
(IsInputFusibleReduction(instr) || IsInputFusibleTranspose(instr) ||
instr.IsLoopFusion() ||
instr.IsElementwise());
}
HloInstruction::FusionKind ChooseFusionKind(const HloInstruction& producer,
const HloInstruction& consumer) {
return (IsInputFusible(consumer) || IsInputFusible(producer))
? HloInstruction::FusionKind::kInput
: HloInstruction::FusionKind::kLoop;
}
bool IsConsumerTheOnlyNonRootUser(const HloInstruction& instr,
const HloInstruction& consumer) {
return absl::c_all_of(instr.users(), [&](const HloInstruction* user) {
if (user->opcode() == HloOpcode::kGetTupleElement) {
return IsConsumerTheOnlyNonRootUser(*user, consumer);
}
return user == &consumer || user == user->parent()->root_instruction();
});
}
size_t GetInstrCountOfFusible(const HloInstruction& instr) {
return instr.opcode() == HloOpcode::kFusion ? instr.fused_instruction_count()
: 1;
}
absl::InlinedVector<const HloInstruction*, 2> GetOutputsOfFusible(
const HloInstruction& instr) {
if (instr.opcode() != HloOpcode::kFusion) {
return {&instr};
}
HloInstruction* root = instr.fused_expression_root();
if (root->opcode() != HloOpcode::kTuple) {
return {root};
} else {
auto v = root->operands();
return absl::InlinedVector<const HloInstruction*, 2>(v.begin(), v.end());
}
}
size_t GetOutputSizeOfFusible(const HloInstruction& instr) {
if (!instr.IsMultiOutputFusion()) {
return 1;
}
const HloInstruction* root = instr.fused_expression_root();
return ShapeUtil::TupleElementCount(root->shape());
}
static void GetFusionRootsRec(const HloInstruction* root,
std::vector<const HloInstruction*>& out) {
if (root->opcode() == HloOpcode::kGetTupleElement &&
root->operand(0)->opcode() == HloOpcode::kTuple) {
return GetFusionRootsRec(root->operand(0)->operand(root->tuple_index()),
out);
} else if (root->opcode() == HloOpcode::kGetTupleElement) {
out.push_back(root->operand(0));
} else if (root->opcode() == HloOpcode::kTuple) {
for (int i = 0; i < root->operand_count(); i++) {
GetFusionRootsRec(root->operand(i), out);
}
} else {
out.push_back(root);
}
}
std::vector<const HloInstruction*> GetFusionRoots(
const HloComputation& computation) {
std::vector<const HloInstruction*> out;
GetFusionRootsRec(computation.root_instruction(), out);
return out;
}
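// Added illustrative comment (not in the original source): for a fusion
// computation whose root is tuple(gte(variadic_reduce, 0), exp), this returns
// {variadic_reduce, exp} -- get-tuple-element wrappers and nested tuples are
// peeled off so callers see the true root operations.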
bool IsGenericTritonFusion(const HloInstruction& instr) {
return instr.opcode() == HloOpcode::kFusion &&
instr.fusion_kind() == HloInstruction::FusionKind::kCustom &&
instr.backend_config<GpuBackendConfig>().ok() &&
instr.backend_config<GpuBackendConfig>()
->fusion_backend_config()
.kind() == kTritonFusionKind;
}
bool MayPreventVectorization(const HloFusionAdaptor& fusion) {
static constexpr int kMaxConcatArgumentsForUnrolling = 10;
return HloAnyOf(fusion, [&](auto node) {
switch (node.opcode()) {
case HloOpcode::kReduceWindow:
case HloOpcode::kSort:
case HloOpcode::kDot:
case HloOpcode::kSin:
case HloOpcode::kCos:
case HloOpcode::kTan:
case HloOpcode::kPower:
case HloOpcode::kAtan2:
return true;
case HloOpcode::kConcatenate:
return node.instruction().operand_count() >
kMaxConcatArgumentsForUnrolling;
case HloOpcode::kReduce:
return node.instruction().shape().tuple_shapes_size() > 1;
default:
return false;
}
});
}
std::vector<HloComputation*> GetFusibleComputations(
const HloModule& module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
auto result = module.MakeComputationPostOrder(execution_threads);
absl::flat_hash_set<const HloComputation*> computations_not_to_fuse;
for (const auto* computation : result) {
for (const auto* instr : computation->instructions()) {
if (HloInstruction::MightHaveCalledComputations(instr->opcode()) &&
instr->opcode() != HloOpcode::kWhile &&
instr->opcode() != HloOpcode::kConditional &&
instr->opcode() != HloOpcode::kFusion) {
for (auto* called : instr->called_computations()) {
computations_not_to_fuse.insert(called);
}
}
}
}
result.erase(
std::remove_if(result.begin(), result.end(),
[&](HloComputation* computation) {
return computation->IsFusionComputation() ||
computations_not_to_fuse.contains(computation);
}),
result.end());
return result;
}
LaunchDimensionsConfig ComputeLoopFusionConfig(
const HloFusionAnalysis& analysis) {
return ComputeLoopFusionConfig(analysis, GetElementShape(analysis));
}
LaunchDimensionsConfig ComputeLoopFusionConfig(
const HloFusionAnalysis& analysis, const Shape& element_shape) {
int unroll_factor = 1;
int64_t num_elements = ShapeUtil::ElementsIn(element_shape);
int64_t n_threads_max = analysis.device_info().threads_per_core_limit() *
analysis.device_info().core_count();
if (num_elements >= n_threads_max &&
!MayPreventVectorization(analysis.fusion())) {
unroll_factor = ComputeMaxUnrollFactor(num_elements);
}
CHECK(absl::has_single_bit(static_cast<uint64_t>(unroll_factor)));
unroll_factor = std::max(
unroll_factor,
CeilOfRatio(8, analysis.input_output_info().smallest_output_dtype_bits));
CHECK(absl::has_single_bit(static_cast<uint64_t>(unroll_factor)));
VLOG(2) << "Unroll factor: " << unroll_factor;
LaunchDimensionsConfig launch_config{unroll_factor};
return launch_config;
}
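// Added worked example (not in the original source): with an f32 output of
// 2^20 elements on a device where that count exceeds
// threads_per_core_limit * core_count, ComputeMaxUnrollFactor returns 4 and
// CeilOfRatio(8, 32) == 1, so the loop fusion is unrolled by a factor of 4
// unless MayPreventVectorization finds one of the blocking ops.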
}
} |
#include "xla/service/gpu/gpu_fusible.h"
#include <memory>
#include <vector>
#include <gtest/gtest.h>
#include "absl/strings/str_cat.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/hlo_parser.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
using ::testing::ElementsAre;
using GpuFusibleTest = HloTestBase;
const char kModulePrefix[] = R"(
HloModule test_module
scalar_add {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT add = f32[] add(lhs, rhs)
})";
TEST_F(GpuFusibleTest, IsPhysicallyTransposing_ElementwiseProducer) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
ENTRY entry {
p0 = f32[2,2,2]{2,1,0} parameter(0)
c0 = f32[] constant(0)
exp = f32[2,2,2]{2,1,0} exponential(p0)
ROOT reduce = f32[2,2]{1,0} reduce(exp, c0), dimensions={2}, to_apply=scalar_add
})"))
.value();
SCOPED_TRACE(module->ToString());
const HloInstruction* exp =
module->entry_computation()->root_instruction()->operand(0);
ASSERT_EQ(exp->opcode(), HloOpcode::kExp);
EXPECT_FALSE(IsPhysicallyTransposing(*exp));
}
TEST_F(GpuFusibleTest, IsPhysicallyTransposing_MixedLayoutProducer) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
mixed_input_layouts_computation {
p0.1 = f16[128,1024,32,32]{1,3,2,0} parameter(0)
p1.1 = f16[128,1024,32,32]{3,2,1,0} parameter(1)
copy = f16[128,1024,32,32]{1,3,2,0} copy(p1.1)
c0 = f16[] constant(0)
broadcast = f16[128,1024,32,32]{1,3,2,0} broadcast(c0), dimensions={}
greater-than = pred[128,1024,32,32]{1,3,2,0} compare(copy, broadcast), direction=GT
ROOT root = f16[128,1024,32,32]{1,3,2,0} select(greater-than, p0.1, broadcast)
}
fused_reduce {
p0.2 = f16[128,1024,32,32]{1,3,2,0} parameter(0)
convert = f32[128,1024,32,32]{1,3,2,0} convert(p0.2)
c0.2 = f32[] constant(0)
ROOT reduce = f32[1024]{0} reduce(convert, c0.2), dimensions={0,2,3}, to_apply=scalar_add
}
ENTRY entry {
p0 = f16[128,1024,32,32]{1,3,2,0} parameter(0)
p1 = f16[128,1024,32,32]{3,2,1,0} parameter(1)
loop_fusion = f16[128,1024,32,32]{1,3,2,0} fusion(p0, p1), kind=kLoop, calls=mixed_input_layouts_computation
reduce_fusion = f32[1024]{0} fusion(loop_fusion), kind=kInput, calls=fused_reduce
ROOT root = (f32[1024]{0}, f16[128,1024,32,32]{1,3,2,0}) tuple(reduce_fusion, loop_fusion)
})"))
.value();
SCOPED_TRACE(module->ToString());
const HloInstruction* loop_fusion =
module->entry_computation()->root_instruction()->operand(1);
ASSERT_EQ(loop_fusion->fused_expression_root()->opcode(), HloOpcode::kSelect);
EXPECT_TRUE(IsPhysicallyTransposing(*loop_fusion));
}
TEST_F(GpuFusibleTest,
IsPhysicallyTransposing_MixedLayoutProducerWithTrivialDim) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
mixed_input_layouts_computation {
p0.1 = f16[128,1,32,32]{1,3,2,0} parameter(0)
p1.1 = f16[128,1,32,32]{3,2,1,0} parameter(1)
bitcast = f16[128,1,32,32]{1,3,2,0} bitcast(p1.1)
c0 = f16[] constant(0)
broadcast = f16[128,1,32,32]{1,3,2,0} broadcast(c0), dimensions={}
greater-than = pred[128,1,32,32]{1,3,2,0} compare(bitcast, broadcast), direction=GT
ROOT root = f16[128,1,32,32]{1,3,2,0} select(greater-than, p0.1, broadcast)
}
fused_reduce {
p0.2 = f16[128,1,32,32]{1,3,2,0} parameter(0)
convert = f32[128,1,32,32]{1,3,2,0} convert(p0.2)
c0.2 = f32[] constant(0)
ROOT reduce = f32[1]{0} reduce(convert, c0.2), dimensions={0,2,3}, to_apply=scalar_add
}
ENTRY entry {
p0 = f16[128,1,32,32]{1,3,2,0} parameter(0)
p1 = f16[128,1,32,32]{3,2,1,0} parameter(1)
loop_fusion = f16[128,1,32,32]{1,3,2,0} fusion(p0, p1), kind=kLoop, calls=mixed_input_layouts_computation
reduce_fusion = f32[1]{0} fusion(loop_fusion), kind=kInput, calls=fused_reduce
ROOT root = (f32[1]{0}, f16[128,1,32,32]{1,3,2,0}) tuple(reduce_fusion, loop_fusion)
})"))
.value();
SCOPED_TRACE(module->ToString());
const HloInstruction* loop_fusion =
module->entry_computation()->root_instruction()->operand(1);
ASSERT_EQ(loop_fusion->fused_expression_root()->opcode(), HloOpcode::kSelect);
EXPECT_FALSE(IsPhysicallyTransposing(*loop_fusion));
}
TEST_F(GpuFusibleTest, IsPhysicallyTransposing_CopyProducer) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_reduce {
p0.1 = f32[128,1024,32,32]{1,3,2,0} parameter(0)
c0.1 = f32[] constant(0)
ROOT reduce = f32[1024]{0} reduce(p0.1, c0.1), dimensions={0,2,3}, to_apply=scalar_add
}
ENTRY entry {
p0 = f16[128,1024,32,32]{3,2,1,0} parameter(0)
copy = f32[128,1024,32,32]{1,3,2,0} copy(p0)
ROOT reduce_fusion = f32[1024]{0} fusion(copy), kind=kInput, calls=fused_reduce
})"))
.value();
SCOPED_TRACE(module->ToString());
const HloInstruction* copy =
module->entry_computation()->root_instruction()->operand(0);
ASSERT_EQ(copy->opcode(), HloOpcode::kCopy);
EXPECT_TRUE(IsPhysicallyTransposing(*copy));
}
TEST_F(GpuFusibleTest, IsPhysicallyTransposing_PhysicalTranspose) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_reduce {
p0.1 = f32[1024,128,32,32]{3,2,1,0} parameter(0)
c0.1 = f32[] constant(0)
ROOT reduce = f32[1024]{0} reduce(p0.1, c0.1), dimensions={1,2,3}, to_apply=scalar_add
}
ENTRY entry {
p0 = f16[128,1024,32,32]{3,2,1,0} parameter(0)
copy = f32[1024,128,32,32]{3,2,1,0} transpose(p0), dimensions={1,0,2,3}
ROOT reduce_fusion = f32[1024]{0} fusion(copy), kind=kInput, calls=fused_reduce
})"))
.value();
SCOPED_TRACE(module->ToString());
const HloInstruction* transpose =
module->entry_computation()->root_instruction()->operand(0);
ASSERT_EQ(transpose->opcode(), HloOpcode::kTranspose);
EXPECT_TRUE(IsPhysicallyTransposing(*transpose));
}
TEST_F(GpuFusibleTest, IsPhysicallyTransposing_LayoutChangingFusionProducer) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
layout_changing_computation {
p0.1 = f16[128,1024,32,32]{3,2,1,0} parameter(0)
p1.1 = f16[128,1024,32,32]{3,2,1,0} parameter(1)
c0 = f16[] constant(0)
broadcast = f16[128,1024,32,32]{3,2,1,0} broadcast(c0), dimensions={}
greater-than = pred[128,1024,32,32]{3,2,1,0} compare(p1.1, broadcast), direction=GT
select = f16[128,1024,32,32]{3,2,1,0} select(greater-than, p0.1, broadcast)
ROOT root = f16[128,1024,32,32]{1,3,2,0} copy(select)
}
fused_reduce {
p0.2 = f16[128,1024,32,32]{1,3,2,0} parameter(0)
convert = f32[128,1024,32,32]{1,3,2,0} convert(p0.2)
c0.2 = f32[] constant(0)
ROOT reduce = f32[1024]{0} reduce(convert, c0.2), dimensions={0,2,3}, to_apply=scalar_add
}
ENTRY entry {
p0 = f16[128,1024,32,32]{3,2,1,0} parameter(0)
p1 = f16[128,1024,32,32]{3,2,1,0} parameter(1)
loop_fusion = f16[128,1024,32,32]{1,3,2,0} fusion(p0, p1), kind=kLoop, calls=layout_changing_computation
ROOT reduce_fusion = f32[1024]{0} fusion(loop_fusion), kind=kInput, calls=fused_reduce
})"))
.value();
SCOPED_TRACE(module->ToString());
const HloInstruction* loop_fusion =
module->entry_computation()->root_instruction()->operand(0);
ASSERT_EQ(loop_fusion->fused_expression_root()->opcode(), HloOpcode::kCopy);
EXPECT_TRUE(IsPhysicallyTransposing(*loop_fusion));
}
TEST_F(GpuFusibleTest,
IsPhysicallyTransposing_ConsiderMaximumTrueRanksParamsOnly) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
broadcasting_computation {
p0.1 = f32[128,1024,32,32]{1,3,2,0} parameter(0)
p1.1 = f32[1,128,1,1]{3,2,1,0} parameter(1)
reshape = f32[128]{0} reshape(p1.1)
broadcast = f32[128,1024,32,32]{1,3,2,0} broadcast(reshape), dimensions={0}
ROOT add = f32[128,1024,32,32]{1,3,2,0} add(p0.1, broadcast)
}
ENTRY entry {
p0 = f32[128,1024,32,32]{1,3,2,0} parameter(0)
p1 = f32[1,128,1,1]{3,2,1,0} parameter(1)
loop_fusion = f32[128,1024,32,32]{1,3,2,0} fusion(p0, p1), kind=kLoop, calls=broadcasting_computation
c0.2 = f32[] constant(0)
ROOT reduce = f32[1024]{0} reduce(loop_fusion, c0.2), dimensions={0,2,3}, to_apply=scalar_add
})"))
.value();
SCOPED_TRACE(module->ToString());
const HloInstruction* loop_fusion =
module->entry_computation()->root_instruction()->operand(0);
ASSERT_EQ(loop_fusion->fused_expression_root()->opcode(), HloOpcode::kAdd);
EXPECT_FALSE(IsPhysicallyTransposing(*loop_fusion));
}
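// TransposesMinorDimension, as exercised below, checks whether an instruction
// moves data out of the physically minor-most dimension of its operand; the
// SkipTrivialDimensions variants show that size-1 dimensions are ignored.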
TEST_F(GpuFusibleTest, TransposesMinorDimension) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
ENTRY entry {
default_layout = f32[10,20,30,40]{3,2,1,0} parameter(0)
non_default_layout = f32[10,20,30,40]{1,2,3,0} parameter(1)
transpose_minor_default = f32[10,20,40,30]{3,2,1,0} transpose(default_layout), dimensions={0,1,3,2}
no_transpose_minor_default = f32[10,20,40,30]{2,3,1,0} transpose(default_layout), dimensions={0,1,3,2}
transpose_major_default = f32[10,30,20,40]{3,2,1,0} transpose(default_layout), dimensions={0,2,1,3}
transpose_minor_non_default = f32[10,30,20,40]{1,2,3,0} transpose(non_default_layout), dimensions={0,2,1,3}
no_transpose_minor_non_default = f32[10,20,40,30]{1,2,0,3} transpose(non_default_layout), dimensions={0,1,3,2}
transpose_major_non_default = f32[10,20,40,30]{1,2,3,0} transpose(non_default_layout), dimensions={0,1,3,2}
ROOT r = tuple(transpose_minor_default, no_transpose_minor_default, transpose_major_default,
transpose_minor_non_default, no_transpose_minor_non_default, transpose_major_non_default)
})"));
auto* tuple = (*module)->entry_computation()->root_instruction();
EXPECT_TRUE(TransposesMinorDimension(tuple->operand(0)));
EXPECT_FALSE(TransposesMinorDimension(tuple->operand(1)));
EXPECT_FALSE(TransposesMinorDimension(tuple->operand(2)));
EXPECT_TRUE(TransposesMinorDimension(tuple->operand(3)));
EXPECT_FALSE(TransposesMinorDimension(tuple->operand(4)));
EXPECT_FALSE(TransposesMinorDimension(tuple->operand(5)));
}
TEST_F(GpuFusibleTest, TransposesMinorDimensionSkipTrivialDimensions) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
ENTRY entry {
default_layout = f32[10,20,1,1]{3,2,1,0} parameter(0)
non_default_layout = f32[10,20,1,1]{1,2,3,0} parameter(1)
transpose_minor_default = f32[10,20,1,1]{3,2,1,0} transpose(default_layout), dimensions={0,1,3,2}
transpose_nontrivial_minor_default = f32[10,1,20,1]{3,2,1,0} transpose(default_layout), dimensions={0,2,1,3}
no_transpose_minor_default = f32[10,20,1,1]{2,3,1,0} transpose(default_layout), dimensions={0,1,3,2}
transpose_one_major_default = f32[1,20,10,1]{3,2,1,0} transpose(default_layout), dimensions={2,1,0,3}
transpose_two_major_default = f32[20,10,1,1]{3,2,1,0} transpose(default_layout), dimensions={1,0,2,3}
transpose_minor_non_default = f32[10,1,20,1]{1,2,3,0} transpose(non_default_layout), dimensions={0,2,1,3}
no_transpose_minor_non_default = f32[10,20,1,1]{1,2,0,3} transpose(non_default_layout), dimensions={0,1,3,2}
transpose_major_non_default = f32[10,20,1,1]{1,2,3,0} transpose(non_default_layout), dimensions={0,1,3,2}
ROOT r = tuple(transpose_minor_default, transpose_nontrivial_minor_default, no_transpose_minor_default, transpose_one_major_default, transpose_two_major_default,
transpose_minor_non_default, no_transpose_minor_non_default, transpose_major_non_default)
})"));
auto* tuple = (*module)->entry_computation()->root_instruction();
EXPECT_FALSE(TransposesMinorDimension(tuple->operand(0)));
EXPECT_FALSE(TransposesMinorDimension(tuple->operand(1)));
EXPECT_FALSE(TransposesMinorDimension(tuple->operand(2)));
EXPECT_TRUE(TransposesMinorDimension(tuple->operand(3)));
EXPECT_TRUE(TransposesMinorDimension(tuple->operand(4)));
EXPECT_FALSE(TransposesMinorDimension(tuple->operand(5)));
EXPECT_FALSE(TransposesMinorDimension(tuple->operand(6)));
EXPECT_FALSE(TransposesMinorDimension(tuple->operand(7)));
}
TEST_F(GpuFusibleTest, CopyTransposesMinorDimension) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
ENTRY entry {
default_layout = f32[10,20,30,40]{3,2,1,0} parameter(0)
non_default_layout = f32[10,20,30,40]{1,2,3,0} parameter(1)
copy_transpose_minor_default = f32[10,20,30,40]{2,3,1,0} copy(default_layout)
copy_no_transpose_minor_default = f32[10,20,30,40]{3,2,1,0} copy(default_layout)
copy_transpose_minor_non_default = f32[10,20,30,40]{2,1,3,0} copy(non_default_layout)
copy_no_transpose_minor_non_default = f32[10,20,30,40]{1,2,3,0} copy(non_default_layout)
ROOT r = tuple(copy_transpose_minor_default, copy_no_transpose_minor_default,
copy_transpose_minor_non_default, copy_no_transpose_minor_non_default)
})"));
auto* tuple = (*module)->entry_computation()->root_instruction();
EXPECT_TRUE(TransposesMinorDimension(tuple->operand(0)));
EXPECT_FALSE(TransposesMinorDimension(tuple->operand(1)));
EXPECT_TRUE(TransposesMinorDimension(tuple->operand(2)));
EXPECT_FALSE(TransposesMinorDimension(tuple->operand(3)));
}
TEST_F(GpuFusibleTest, CopyTransposesMinorDimensionSkipTrivialDimensions) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
ENTRY entry {
default_layout = f32[10,20,1,1]{3,2,1,0} parameter(0)
non_default_layout = f32[10,20,1,1]{1,2,3,0} parameter(1)
copy_transpose_minor_default = f32[10,20,1,1]{2,3,1,0} copy(default_layout)
copy_no_transpose_minor_default = f32[10,20,1,1]{3,2,1,0} copy(default_layout)
copy_transpose_minor_non_default = f32[10,20,1,1]{2,0,3,1} copy(non_default_layout)
copy_no_transpose_minor_non_default = f32[10,20,1,1]{1,2,3,0} copy(non_default_layout)
ROOT r = tuple(copy_transpose_minor_default, copy_no_transpose_minor_default,
copy_transpose_minor_non_default, copy_no_transpose_minor_non_default)
})"));
auto* tuple = (*module)->entry_computation()->root_instruction();
EXPECT_FALSE(TransposesMinorDimension(tuple->operand(0)));
EXPECT_FALSE(TransposesMinorDimension(tuple->operand(1)));
EXPECT_TRUE(TransposesMinorDimension(tuple->operand(2)));
EXPECT_FALSE(TransposesMinorDimension(tuple->operand(3)));
}
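// IsReduceInputFusion holds only for kInput fusions rooted in a
// reduction-to-vector style reduce; IsInputFusibleReduction additionally
// accepts such reductions when they are not yet fused, as the cases below
// indicate.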
TEST_F(GpuFusibleTest, IsReduceInputFusion_ReductionToVector) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
ENTRY entry {
c0 = f32[] parameter(0)
p1 = f32[128,512,28,28]{3,2,1,0} parameter(1)
ROOT reduce = f32[512]{0} reduce(p1, c0), dimensions={0,2,3}, to_apply=scalar_add
})"))
.value();
SCOPED_TRACE(module->ToString());
const HloInstruction* reduce =
module->entry_computation()->root_instruction();
ASSERT_EQ(reduce->opcode(), HloOpcode::kReduce);
EXPECT_FALSE(IsReduceInputFusion(*reduce));
EXPECT_TRUE(IsInputFusibleReduction(*reduce));
}
TEST_F(GpuFusibleTest, IsReduceInputFusion_ElementalReduction) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
ENTRY entry {
c0 = f32[] parameter(0)
p1 = f32[8,512,5,16,1,1]{5,4,3,2,1,0} parameter(1)
ROOT reduce = f32[512,5,1,1]{3,2,1,0} reduce(p1, c0), dimensions={3,0},
to_apply=scalar_add
})"))
.value();
SCOPED_TRACE(module->ToString());
const HloInstruction* reduce =
module->entry_computation()->root_instruction();
ASSERT_EQ(reduce->opcode(), HloOpcode::kReduce);
EXPECT_FALSE(IsReduceInputFusion(*reduce));
EXPECT_FALSE(IsInputFusibleReduction(*reduce));
}
TEST_F(GpuFusibleTest, IsReduceInputFusion_SingleOutputInputReduceFusion) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_reduction {
c0 = f32[] constant(0)
p1 = f32[128,512,28,28]{3,2,1,0} parameter(0)
ROOT reduce = f32[128,512]{1,0} reduce(p1, c0), dimensions={2,3}, to_apply=scalar_add
}
ENTRY entry {
p0 = f32[128,512,28,28]{3,2,1,0} parameter(0)
ROOT fusion = f32[128,512]{1,0} fusion(p0), kind=kInput, calls=fused_reduction
})"))
.value();
const HloInstruction* reduce =
module->entry_computation()->root_instruction();
ASSERT_EQ(reduce->opcode(), HloOpcode::kFusion);
EXPECT_TRUE(IsReduceInputFusion(*reduce));
EXPECT_TRUE(IsInputFusibleReduction(*reduce));
}
TEST_F(GpuFusibleTest, IsReduceInputFusion_SingleOutputLoopReduceFusion) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_reduction {
c0 = f32[] constant(0)
p1 = f32[8,512,5,16,1,1]{5,4,3,2,1,0} parameter(0)
ROOT reduce = f32[8,5,1,1]{3,2,1,0} reduce(p1, c0), dimensions={1,3}, to_apply=scalar_add
}
ENTRY entry {
p0 = f32[8,512,5,16,1,1]{5,4,3,2,1,0} parameter(0)
ROOT fusion = f32[8,5,1,1]{3,2,1,0} fusion(p0), kind=kLoop, calls=fused_reduction
})"))
.value();
const HloInstruction* reduce =
module->entry_computation()->root_instruction();
ASSERT_EQ(reduce->opcode(), HloOpcode::kFusion);
EXPECT_FALSE(IsReduceInputFusion(*reduce));
EXPECT_FALSE(IsInputFusibleReduction(*reduce));
}
TEST_F(GpuFusibleTest, IsReduceInputFusion_MultiOutputInputReduceFusion) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_reduction {
c0 = f32[] constant(0)
p1 = f32[128,512,28,28]{3,2,1,0} parameter(0)
reduce.0 = f32[128,512]{1,0} reduce(p1, c0), dimensions={2,3}, to_apply=scalar_add
reduce.1 = f32[128,512]{1,0} reduce(p1, c0), dimensions={2,3}, to_apply=scalar_add
ROOT root = (f32[128,512]{1,0}, f32[128,512]{1,0}) tuple(reduce.0, reduce.1)
}
ENTRY entry {
p0 = f32[128,512,28,28]{3,2,1,0} parameter(0)
ROOT fusion = (f32[128,512]{1,0}, f32[128,512]{1,0}) fusion(p0), kind=kInput, calls=fused_reduction
})"))
.value();
const HloInstruction* reduce =
module->entry_computation()->root_instruction();
ASSERT_EQ(reduce->opcode(), HloOpcode::kFusion);
EXPECT_TRUE(IsReduceInputFusion(*reduce));
EXPECT_TRUE(IsInputFusibleReduction(*reduce));
}
TEST_F(GpuFusibleTest,
IsReduceInputFusion_MultiOutputInputReduceFusionWithExtraOutputs) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_reduction {
c0 = f32[] constant(0)
p1 = f32[128,512,28,28]{3,2,1,0} parameter(0)
reduce = f32[128,512]{1,0} reduce(p1, c0), dimensions={2,3}, to_apply=scalar_add
mul = f32[128,512,28,28]{3,2,1,0} multiply(p1, p1)
ROOT root = (f32[128,512]{1,0}, f32[128,512,28,28]{3,2,1,0}) tuple(reduce, mul)
}
ENTRY entry {
p0 = f32[128,512,28,28]{3,2,1,0} parameter(0)
ROOT fusion = (f32[128,512]{1,0}, f32[128,512,28,28]{3,2,1,0}) fusion(p0), kind=kInput, calls=fused_reduction
})"))
.value();
const HloInstruction* reduce =
module->entry_computation()->root_instruction();
ASSERT_EQ(reduce->opcode(), HloOpcode::kFusion);
EXPECT_TRUE(IsReduceInputFusion(*reduce));
EXPECT_TRUE(IsInputFusibleReduction(*reduce));
}
TEST_F(GpuFusibleTest, IsReduceInputFusion_MultiOutputLoopReduceFusion) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_reduction {
c0 = f32[] constant(0)
p1 = f32[128,512,28,28]{3,2,1,0} parameter(0)
reduce.0 = f32[512,28]{1,0} reduce(p1, c0), dimensions={0,2}, to_apply=scalar_add
reduce.1 = f32[512,28]{1,0} reduce(p1, c0), dimensions={0,2}, to_apply=scalar_add
ROOT root = (f32[512,28]{1,0}, f32[512,28]{1,0}) tuple(reduce.0, reduce.1)
}
ENTRY entry {
p0 = f32[128,512,28,28]{3,2,1,0} parameter(0)
ROOT fusion = (f32[512,28]{1,0}, f32[512,28]{1,0}) fusion(p0), kind=kLoop, calls=fused_reduction
})"))
.value();
const HloInstruction* reduce =
module->entry_computation()->root_instruction();
ASSERT_EQ(reduce->opcode(), HloOpcode::kFusion);
EXPECT_FALSE(IsReduceInputFusion(*reduce));
EXPECT_FALSE(IsInputFusibleReduction(*reduce));
}
TEST_F(GpuFusibleTest,
IsReduceInputFusion_MultiOutputLoopFusionReduceAndElementwiseOp) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_reduction {
c0 = f32[] constant(0)
p1 = f32[128,512,28,28]{3,2,1,0} parameter(0)
reduce = f32[512,28]{1,0} reduce(p1, c0), dimensions={0,2}, to_apply=scalar_add
mul = f32[128,512,28,28]{3,2,1,0} multiply(p1, p1)
ROOT root = (f32[512,28]{1,0}, f32[128,512,28,28]{3,2,1,0}) tuple(reduce, mul)
}
ENTRY entry {
p0 = f32[128,512,28,28]{3,2,1,0} parameter(0)
ROOT fusion = (f32[512,28]{1,0}, f32[128,512,28,28]{3,2,1,0}) fusion(p0), kind=kLoop, calls=fused_reduction
})"))
.value();
const HloInstruction* reduce =
module->entry_computation()->root_instruction();
ASSERT_EQ(reduce->opcode(), HloOpcode::kFusion);
EXPECT_FALSE(IsReduceInputFusion(*reduce));
EXPECT_FALSE(IsInputFusibleReduction(*reduce));
}
TEST_F(GpuFusibleTest, CustomFusionIsNotFusibleAsConsumer) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(R"(
triton_fusion {
p = s32[20,3] parameter(0)
ROOT neg = s32[20,3] negate(p)
}
ENTRY e {
p = s32[20,3] parameter(0)
ROOT r = s32[20,3] fusion(p), kind=kCustom, calls=triton_fusion
})"));
const HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_FALSE(IsFusibleAsMultiOutputFusionRoot(*root));
}
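// FusionHeroesAreCompatible compares the "hero" instructions (here,
// transposes) of two fusions; judging by these cases, mismatched transpose
// heroes make the fusions incompatible while matching or elementwise heroes
// are fine.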
TEST_F(GpuFusibleTest, FusionHeroesAreCompatible_TransposeFusionCompatible) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_computation_1 {
p0.1 = f32[64,32]{1,0} parameter(0)
neg = f32[64,32]{1,0} negate(p0.1)
ROOT transpose = f32[32,64]{1,0} transpose(neg), dimensions={1,0}
}
fused_computation_2 {
p0.2 = f32[32,64]{1,0} parameter(0)
neg = f32[32,64]{1,0} negate(p0.2)
ROOT add = f32[32,64]{1,0} add(neg, neg)
}
ENTRY entry {
p0 = f32[64,32]{1,0} parameter(0)
fusion.1 = f32[32,64]{1,0} fusion(p0), kind=kLoop, calls=fused_computation_1
ROOT fusion.2 = f32[32,64]{1,0} fusion(fusion.1), kind=kLoop, calls=fused_computation_2
})"))
.value();
const HloInstruction* fusion_1 =
module->entry_computation()->root_instruction();
const HloInstruction* fusion_2 = fusion_1->operand(0);
EXPECT_TRUE(FusionHeroesAreCompatible(fusion_1->fused_expression_root(),
fusion_2->fused_expression_root()));
EXPECT_TRUE(FusionHeroesAreCompatible(fusion_2->fused_expression_root(),
fusion_1->fused_expression_root()));
}
TEST_F(GpuFusibleTest, FusionHeroesAreCompatible_TransposeFusionNotCompatible) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_computation_1 {
p0.1 = f32[64,32]{1,0} parameter(0)
neg = f32[64,32]{1,0} negate(p0.1)
bc = f32[1,64,32]{2,1,0} bitcast(neg)
transpose = f32[1,32,64]{2,1,0} transpose(bc), dimensions={0,2,1}
ROOT bc2 = f32[32,64]{1,0} bitcast(transpose)
}
fused_computation_2 {
p0.2 = f32[32,64]{1,0} parameter(0)
broadcast = f32[32,64,4]{2,1,0} broadcast(p0.2), dimensions={0,1}
ROOT add = f32[32,64,4]{2,1,0} add(broadcast, broadcast)
}
ENTRY entry {
p0 = f32[64,32]{1,0} parameter(0)
fusion.1 = f32[32,64]{1,0} fusion(p0), kind=kLoop, calls=fused_computation_1
ROOT fusion.2 = f32[32,64,4]{2,1,0} fusion(fusion.1), kind=kLoop, calls=fused_computation_2
})"))
.value();
const HloInstruction* fusion_1 =
module->entry_computation()->root_instruction();
const HloInstruction* fusion_2 = fusion_1->operand(0);
EXPECT_FALSE(
FusionHeroesAreCompatible(fusion_1->fused_expression_root(),
fusion_2->fused_expression_root()->operand(0)));
EXPECT_FALSE(
FusionHeroesAreCompatible(fusion_2->fused_expression_root()->operand(0),
fusion_1->fused_expression_root()));
}
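// ShapesCompatibleForMultiOutputFusion requires the candidate roots to agree
// on the emitted loop shape: bitcasts, element-type and precision differences
// are ignored, but different layouts, different reduce dimensions, or
// incompatible transpose heroes are not, as the tests below show.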
TEST_F(GpuFusibleTest, ShapesCompatibleForMultiOutputFusion_LoopFusions) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_computation_1 {
p0.1 = f32[6400]{0} parameter(0)
ROOT mul = f32[6400]{0} multiply(p0.1, p0.1)
}
fused_computation_2 {
p0.2 = f32[6400]{0} parameter(0)
const.2 = f32[] constant(1)
broadcast = f32[6400]{0} broadcast(const.2), dimensions={}
ROOT div = f32[6400]{0} divide(p0.2, broadcast)
}
ENTRY entry {
p0 = f32[6400]{0} parameter(0)
fusion.1 = f32[6400]{0} fusion(p0), kind=kLoop, calls=fused_computation_1
fusion.2 = f32[6400]{0} fusion(p0), kind=kLoop, calls=fused_computation_2
ROOT root = (f32[6400]{0}, f32[6400]{0}) tuple(fusion.1, fusion.2)
})"))
.value();
const HloInstruction* fusion_1 =
module->entry_computation()->root_instruction()->operand(0);
const HloInstruction* fusion_2 =
module->entry_computation()->root_instruction()->operand(1);
EXPECT_TRUE(ShapesCompatibleForMultiOutputFusion(*fusion_1, *fusion_2));
}
TEST_F(GpuFusibleTest, ShapesCompatibleForMultiOutputFusion_IgnoreFpPrecision) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_computation_1 {
p0.1 = f32[6400]{0} parameter(0)
ROOT mul = f32[6400]{0} multiply(p0.1, p0.1)
}
fused_computation_2 {
p0.2 = f32[6400]{0} parameter(0)
ROOT convert = f16[6400]{0} convert(p0.2)
}
ENTRY entry {
p0 = f32[6400]{0} parameter(0)
fusion.1 = f32[6400]{0} fusion(p0), kind=kLoop, calls=fused_computation_1
fusion.2 = f16[6400]{0} fusion(p0), kind=kLoop, calls=fused_computation_2
ROOT root = (f32[6400]{0}, f16[6400]{0}) tuple(fusion.1, fusion.2)
})"))
.value();
const HloInstruction* fusion_1 =
module->entry_computation()->root_instruction()->operand(0);
const HloInstruction* fusion_2 =
module->entry_computation()->root_instruction()->operand(1);
EXPECT_TRUE(ShapesCompatibleForMultiOutputFusion(*fusion_1, *fusion_2));
}
TEST_F(GpuFusibleTest, ShapesCompatibleForMultiOutputFusion_BitcastCompatible) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_computation_1 {
p0.1 = f32[6400]{0} parameter(0)
ROOT mul = f32[6400]{0} multiply(p0.1, p0.1)
}
fused_computation_2 {
p0.2 = f32[6400]{0} parameter(0)
bitcast = f32[1,6400]{1,0} bitcast(p0.2)
ROOT convert = f16[1,6400]{1,0} convert(bitcast)
}
ENTRY entry {
p0 = f32[6400]{0} parameter(0)
fusion.1 = f32[6400]{0} fusion(p0), kind=kLoop, calls=fused_computation_1
fusion.2 = f16[1,6400]{1,0} fusion(p0), kind=kLoop, calls=fused_computation_2
ROOT root = (f32[6400]{0}, f16[1,6400]{1,0}) tuple(fusion.1, fusion.2)
})"))
.value();
const HloInstruction* fusion_1 =
module->entry_computation()->root_instruction()->operand(0);
const HloInstruction* fusion_2 =
module->entry_computation()->root_instruction()->operand(1);
EXPECT_TRUE(ShapesCompatibleForMultiOutputFusion(*fusion_1, *fusion_2));
}
TEST_F(GpuFusibleTest, ShapesCompatibleForMultiOutputFusion_Reduce) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_computation_1 {
p0.1 = f32[6400]{0} parameter(0)
ROOT mul = f32[6400]{0} multiply(p0.1, p0.1)
}
ENTRY entry {
p0 = f32[6400]{0} parameter(0)
fusion.1 = f32[6400]{0} fusion(p0), kind=kLoop, calls=fused_computation_1
const.2 = f32[] constant(0)
reduce = f32[] reduce(p0, const.2), dimensions={0}, to_apply=scalar_add
ROOT root = (f32[6400]{0}, f32[]) tuple(fusion.1, reduce)
})"))
.value();
const HloInstruction* fusion =
module->entry_computation()->root_instruction()->operand(0);
const HloInstruction* reduce =
module->entry_computation()->root_instruction()->operand(1);
EXPECT_TRUE(ShapesCompatibleForMultiOutputFusion(*fusion, *reduce));
}
TEST_F(GpuFusibleTest, ShapesCompatibleForMultiOutputFusion_Elementwise) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_computation_1 {
p0.1 = f32[6400]{0} parameter(0)
ROOT mul = f32[6400]{0} multiply(p0.1, p0.1)
}
ENTRY entry {
p0 = f32[6400]{0} parameter(0)
fusion.1 = f32[6400]{0} fusion(p0), kind=kLoop, calls=fused_computation_1
const.2 = f32[] constant(1)
broadcast = f32[6400]{0} broadcast(const.2), dimensions={}
div = f32[6400]{0} divide(p0, broadcast)
ROOT root = (f32[6400]{0}, f32[6400]{0}) tuple(fusion.1, div)
})"))
.value();
const HloInstruction* fusion =
module->entry_computation()->root_instruction()->operand(0);
const HloInstruction* div =
module->entry_computation()->root_instruction()->operand(1);
EXPECT_TRUE(ShapesCompatibleForMultiOutputFusion(*fusion, *div));
}
TEST_F(GpuFusibleTest,
ShapesCompatibleForMultiOutputFusion_MultiOutputLoopFusion) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_computation_1 {
p0.1 = f32[8,1,5,16,1,1]{5,4,3,2,1,0} parameter(0)
mul = f32[8,1,5,16,1,1]{5,4,3,2,1,0} multiply(p0.1, p0.1)
exp = f32[8,1,5,16,1,1]{5,4,3,2,1,0} exponential(p0.1)
ROOT tuple = (f32[8,1,5,16,1,1]{5,4,3,2,1,0}, f32[8,1,5,16,1,1]{5,4,3,2,1,0}) tuple(mul, exp)
}
fused_computation_2 {
p0.2 = f32[8,1,5,16,1,1]{5,4,3,2,1,0} parameter(0)
const.2 = f32[] constant(0)
broadcast = f32[8,1,5,16,1,1]{5,4,3,2,1,0} broadcast(const.2), dimensions={}
ROOT add = f32[8,1,5,16,1,1]{5,4,3,2,1,0} add(p0.2, broadcast)
}
ENTRY entry {
p0 = f32[8,1,5,16,1,1]{5,4,3,2,1,0} parameter(0)
fusion.1 = (f32[8,1,5,16,1,1]{5,4,3,2,1,0}, f32[8,1,5,16,1,1]{5,4,3,2,1,0}) fusion(p0), kind=kLoop, calls=fused_computation_1
fusion.2 = f32[8,1,5,16,1,1]{5,4,3,2,1,0} fusion(p0), kind=kLoop, calls=fused_computation_2
gte0 = f32[8,1,5,16,1,1]{5,4,3,2,1,0} get-tuple-element(fusion.1), index=0
gte1 = f32[8,1,5,16,1,1]{5,4,3,2,1,0} get-tuple-element(fusion.1), index=1
ROOT root = (f32[8,1,5,16,1,1]{5,4,3,2,1,0}, f32[8,1,5,16,1,1]{5,4,3,2,1,0}, f32[8,1,5,16,1,1]{5,4,3,2,1,0}) tuple(gte0, gte1, fusion.2)
})"))
.value();
const HloInstruction* fusion_1 =
module->entry_computation()->root_instruction()->operand(0)->operand(0);
const HloInstruction* fusion_2 =
module->entry_computation()->root_instruction()->operand(2);
EXPECT_NE(fusion_1, fusion_2);
EXPECT_TRUE(ShapesCompatibleForMultiOutputFusion(*fusion_1, *fusion_2));
}
TEST_F(GpuFusibleTest,
ShapesCompatibleForMultiOutputFusion_DifferentElementType) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_computation_1 {
p0.1 = f32[8,1,5,16,1,1]{5,4,3,2,1,0} parameter(0)
mul = f32[8,1,5,16,1,1]{5,4,3,2,1,0} multiply(p0.1, p0.1)
exp = f32[8,1,5,16,1,1]{5,4,3,2,1,0} exponential(p0.1)
ROOT tuple = (f32[8,1,5,16,1,1]{5,4,3,2,1,0}, f32[8,1,5,16,1,1]{5,4,3,2,1,0}) tuple(mul, exp)
}
fused_computation_2 {
p0.2 = f32[8,1,5,16,1,1]{5,4,3,2,1,0} parameter(0)
const.2 = f32[] constant(0)
broadcast = f32[8,1,5,16,1,1]{5,4,3,2,1,0} broadcast(const.2), dimensions={}
add = f32[8,1,5,16,1,1]{5,4,3,2,1,0} add(p0.2, broadcast)
ROOT convert = s32[8,1,5,16,1,1]{5,4,3,2,1,0} convert(add)
}
ENTRY entry {
p0 = f32[8,1,5,16,1,1]{5,4,3,2,1,0} parameter(0)
fusion.1 = (f32[8,1,5,16,1,1]{5,4,3,2,1,0}, f32[8,1,5,16,1,1]{5,4,3,2,1,0}) fusion(p0), kind=kLoop, calls=fused_computation_1
fusion.2 = s32[8,1,5,16,1,1]{5,4,3,2,1,0} fusion(p0), kind=kLoop, calls=fused_computation_2
gte0 = f32[8,1,5,16,1,1]{5,4,3,2,1,0} get-tuple-element(fusion.1), index=0
gte1 = f32[8,1,5,16,1,1]{5,4,3,2,1,0} get-tuple-element(fusion.1), index=1
ROOT root = (f32[8,1,5,16,1,1]{5,4,3,2,1,0}, f32[8,1,5,16,1,1]{5,4,3,2,1,0}, s32[8,1,5,16,1,1]{5,4,3,2,1,0}) tuple(gte0, gte1, fusion.2)
})"))
.value();
const HloInstruction* fusion_1 =
module->entry_computation()->root_instruction()->operand(0)->operand(0);
const HloInstruction* fusion_2 =
module->entry_computation()->root_instruction()->operand(2);
EXPECT_NE(fusion_1, fusion_2);
EXPECT_TRUE(ShapesCompatibleForMultiOutputFusion(*fusion_1, *fusion_2));
}
TEST_F(GpuFusibleTest, ShapesCompatibleForMultiOutputFusion_UnfusedOps) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
ENTRY reduce {
p0 = f32[32,32,32]{2,1,0} parameter(0)
c0 = f32[] constant(0)
exp = f32[32,32,32]{2,1,0} exponential(p0)
reduce = f32[32,32]{1,0} reduce(exp, c0), dimensions={2},
to_apply=scalar_add
ROOT root = (f32[32,32]{1,0}, f32[32,32,32]{2,1,0}) tuple(reduce, exp)
})"))
.value();
const HloInstruction* reduce =
module->entry_computation()->root_instruction()->operand(0);
const HloInstruction* exp =
module->entry_computation()->root_instruction()->operand(1);
EXPECT_TRUE(ShapesCompatibleForMultiOutputFusion(*reduce, *exp));
}
TEST_F(GpuFusibleTest, ShapesCompatibleForMultiOutputFusion_DifferentLayouts) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
ENTRY reduce {
p0 = f32[2,2,2]{2,1,0} parameter(0)
p1 = f32[2,2,2]{0,1,2} parameter(1)
c0 = f32[] constant(0)
exp = f32[2,2,2]{2,1,0} exponential(p0)
reduce = f32[2,2]{0,1} reduce(p1, c0), dimensions={2}, to_apply=scalar_add
ROOT root = (f32[2,2]{0,1}, f32[2,2,2]{2,1,0}) tuple(reduce, exp)
})"))
.value();
const HloInstruction* reduce =
module->entry_computation()->root_instruction()->operand(0);
const HloInstruction* exp =
module->entry_computation()->root_instruction()->operand(1);
EXPECT_FALSE(ShapesCompatibleForMultiOutputFusion(*reduce, *exp));
}
TEST_F(
GpuFusibleTest,
ShapesCompatibleForMultiOutputFusion_SiblingTransposeFusionsNotCompatible) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_021_transpose {
param_0 = f32[20,20,20]{2,1,0} parameter(0)
transpose = f32[20,20,20]{2,1,0} transpose(param_0), dimensions={0,2,1}
ROOT bitcast = f32[8000]{0} bitcast(transpose)
}
fused_220_transpose {
param_0 = f32[20,20,20]{2,1,0} parameter(0)
transpose = f32[20,20,20]{2,1,0} transpose(param_0), dimensions={2,1,0}
ROOT bitcast = f32[8000]{0} bitcast(transpose)
}
ENTRY reduce {
p0 = f32[20,20,20]{2,1,0} parameter(0)
fusion = f32[8000]{0} fusion(p0), kind=kInput, calls=fused_021_transpose
fusion.1 = f32[8000]{0} fusion(p0), kind=kInput, calls=fused_220_transpose
ROOT root = (f32[8000]{0}, f32[8000]{0}) tuple(fusion, fusion.1)
})"))
.value();
const HloInstruction* fusion_1 =
module->entry_computation()->root_instruction()->operand(0);
const HloInstruction* fusion_2 =
module->entry_computation()->root_instruction()->operand(1);
EXPECT_FALSE(
FusionHeroesAreCompatible(fusion_1->fused_expression_root()->operand(0),
fusion_2->fused_expression_root()->operand(0)));
EXPECT_FALSE(ShapesCompatibleForMultiOutputFusion(*fusion_1, *fusion_2));
}
TEST_F(GpuFusibleTest,
ShapesCompatibleForMultiOutputFusion_SiblingTransposeFusionsCompatible) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_1230_transpose {
param_0 = f32[1,20,20]{2,1,0} parameter(0)
bitcast.1 = f32[20,2,2,5]{3,2,1,0} bitcast(param_0)
transpose = f32[2,2,5,20]{3,2,1,0} transpose(bitcast.1), dimensions={1,2,3,0}
ROOT bitcast.2 = f32[400]{0} bitcast(transpose)
}
fused_021_transpose {
param_0 = f32[1,20,20]{2,1,0} parameter(0)
transpose = f32[1,20,20]{2,1,0} transpose(param_0), dimensions={0,2,1}
ROOT bitcast = f32[400]{0} bitcast(transpose)
}
ENTRY reduce {
p0 = f32[1,20,20]{2,1,0} parameter(0)
fusion = f32[400]{0} fusion(p0), kind=kInput, calls=fused_1230_transpose
fusion.1 = f32[400]{0} fusion(p0), kind=kInput, calls=fused_021_transpose
ROOT root = (f32[400]{0}, f32[400]{0}) tuple(fusion, fusion.1)
})"))
.value();
const HloInstruction* fusion_1 =
module->entry_computation()->root_instruction()->operand(0);
const HloInstruction* fusion_2 =
module->entry_computation()->root_instruction()->operand(1);
EXPECT_TRUE(ShapesCompatibleForMultiOutputFusion(*fusion_1, *fusion_2));
}
TEST_F(GpuFusibleTest,
ShapesCompatibleForMultiOutputFusion_MultiOutputReduceFusion) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_select {
p1.1 = f32[2,2,2]{2,1,0} parameter(1)
c0 = f32[] constant(0)
broadcast = f32[2,2,2]{2,1,0} broadcast(f32[] c0), dimensions={}
greater-than = pred[2,2,2]{2,1,0} compare(f32[2,2,2]{2,1,0} p1.1, f32[2,2,2]{2,1,0} broadcast), direction=GT
p0.1 = f32[2,2,2]{2,1,0} parameter(0)
ROOT select = f32[2,2,2]{2,1,0} select(pred[2,2,2]{2,1,0} greater-than, f32[2,2,2]{2,1,0} p0.1, f32[2,2,2]{2,1,0} broadcast)
}
fused_reduce {
p0.2 = f32[2,2,2]{2,1,0} parameter(0)
c1 = f32[] constant(0)
r1 = f32[2,2]{1,0} reduce(p0.2, c1), dimensions={2}, to_apply=scalar_add
mul = f32[2,2,2]{2,1,0} multiply(p0.2, p0.2)
r2 = f32[2,2]{1,0} reduce(mul, c1), dimensions={2}, to_apply=scalar_add
ROOT tuple = (f32[2,2]{1,0}, f32[2,2]{1,0}) tuple(r1, r2)
}
ENTRY reduce {
p0 = f32[2,2,2]{2,1,0} parameter(0)
p1 = f32[2,2,2]{2,1,0} parameter(1)
select = f32[2,2,2]{2,1,0} fusion(p0, p1), kind=kLoop, calls=fused_select
fusion = (f32[2,2]{1,0}, f32[2,2]{1,0}) fusion(select), kind=kInput, calls=fused_reduce
gte0 = f32[2,2]{1,0} get-tuple-element(fusion), index=0
gte1 = f32[2,2]{1,0} get-tuple-element(fusion), index=1
ROOT root = (f32[2,2]{1,0}, f32[2,2]{1,0}, f32[2,2,2]{2,1,0}) tuple(gte1, gte1, select)
})"))
.value();
const HloInstruction* fusion_1 =
module->entry_computation()->root_instruction()->operand(0)->operand(0);
const HloInstruction* fusion_2 =
module->entry_computation()->root_instruction()->operand(1)->operand(0);
EXPECT_TRUE(ShapesCompatibleForMultiOutputFusion(*fusion_1, *fusion_2));
}
TEST_F(GpuFusibleTest, ShapesCompatibleForMultiOutputFusion_ReduceFusions) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_reduce_1 {
p0.1 = f32[2,2,2]{2,1,0} parameter(0)
c0 = f32[] constant(0)
ROOT reduce = f32[2,2]{1,0} reduce(f32[2,2,2]{2,1,0} p0.1, f32[] c0), dimensions={0}, to_apply=scalar_add
}
fused_reduce_2 {
p0.2 = f32[2,2,2]{2,1,0} parameter(0)
mul = f32[2,2,2]{2,1,0} multiply(f32[2,2,2]{2,1,0} p0.2, f32[2,2,2]{2,1,0} p0.2)
c1 = f32[] constant(0)
ROOT reduce = f32[2,2]{1,0} reduce(f32[2,2,2]{2,1,0} mul, f32[] c1), dimensions={0}, to_apply=scalar_add
}
ENTRY reduce {
p0 = f32[2,2,2]{2,1,0} parameter(0)
p1 = f32[2,2,2]{2,1,0} parameter(1)
reduce_1 = f32[2,2]{1,0} fusion(p0), kind=kLoop, calls=fused_reduce_1
reduce_2 = f32[2,2]{1,0} fusion(p1), kind=kLoop, calls=fused_reduce_2
ROOT root = (f32[2,2]{1,0}, f32[2,2]{1,0}) tuple(reduce_1, reduce_2)
})"))
.value();
const HloInstruction* fusion_1 =
module->entry_computation()->root_instruction()->operand(0);
const HloInstruction* fusion_2 =
module->entry_computation()->root_instruction()->operand(1);
EXPECT_TRUE(ShapesCompatibleForMultiOutputFusion(*fusion_1, *fusion_2));
}
TEST_F(GpuFusibleTest,
ShapesCompatibleForMultiOutputFusion_DifferentReduceDimensions) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_reduce_1 {
p0.1 = f32[32,32,32]{2,1,0} parameter(0)
c0 = f32[] constant(0)
ROOT reduce = f32[32,32]{1,0} reduce(f32[32,32,32]{2,1,0} p0.1, f32[] c0),
dimensions={0}, to_apply=scalar_add
}
fused_reduce_2 {
p0.2 = f32[32,32,32]{2,1,0} parameter(0)
mul = f32[32,32,32]{2,1,0} multiply(f32[32,32,32]{2,1,0} p0.2,
f32[32,32,32]{2,1,0} p0.2)
c1 = f32[] constant(0)
ROOT reduce = f32[32,32]{1,0} reduce(f32[32,32,32]{2,1,0} mul, f32[] c1),
dimensions={2}, to_apply=scalar_add
}
ENTRY reduce {
p0 = f32[32,32,32]{2,1,0} parameter(0)
p1 = f32[32,32,32]{2,1,0} parameter(1)
reduce_1 = f32[32,32]{1,0} fusion(p0), kind=kLoop, calls=fused_reduce_1
reduce_2 = f32[32,32]{1,0} fusion(p1), kind=kLoop, calls=fused_reduce_2
ROOT root = (f32[32,32]{1,0}, f32[32,32]{1,0}) tuple(reduce_1, reduce_2)
})"))
.value();
const HloInstruction* fusion_1 =
module->entry_computation()->root_instruction()->operand(0);
const HloInstruction* fusion_2 =
module->entry_computation()->root_instruction()->operand(1);
EXPECT_FALSE(ShapesCompatibleForMultiOutputFusion(*fusion_1, *fusion_2));
}
TEST_F(GpuFusibleTest,
ShapesCompatibleForMultiOutputFusion_NoReductionToVector) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_element_wise {
p0.1 = f32[32,32,32]{2,1,0} parameter(0)
p1.1 = f32[32,32,32]{2,1,0} parameter(1)
ROOT add = f32[32,32,32]{2,1,0} add(p0.1, p1.1)
}
fused_reduce {
p0.2 = f32[32,32,32]{2,1,0} parameter(0)
mul = f32[32,32,32]{2,1,0} multiply(f32[32,32,32]{2,1,0} p0.2,
f32[32,32,32]{2,1,0} p0.2)
broadcast = f32[32,32,32,32]{3,2,1,0} broadcast(mul), dimensions={3,2,1}
c1 = f32[] constant(0)
ROOT reduce = f32[32,32]{1,0} reduce(f32[32,32,32,32]{3,2,1,0} broadcast,
f32[] c1), dimensions={1,3}, to_apply=scalar_add
}
ENTRY reduce {
p0 = f32[32,32,32]{2,1,0} parameter(0)
p1 = f32[32,32,32]{2,1,0} parameter(1)
element_wise = f32[32,32,32]{2,1,0} fusion(p0, p1), kind=kLoop,
calls=fused_element_wise
fusion = f32[32,32]{1,0} fusion(element_wise),
kind=kLoop, calls=fused_reduce
ROOT root = (f32[32,32]{1,0}, f32[32,32,32]{2,1,0})
tuple(fusion, element_wise)
})"))
.value();
const HloInstruction* fusion_1 =
module->entry_computation()->root_instruction()->operand(0);
const HloInstruction* fusion_2 =
module->entry_computation()->root_instruction()->operand(1);
EXPECT_FALSE(ShapesCompatibleForMultiOutputFusion(*fusion_1, *fusion_2));
}
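// IsFusibleAsMultiOutputFusionRoot accepts ordinary elementwise roots but, as
// shown below, rejects instructions that cannot terminate a multi-output
// fusion, such as an in-place scatter.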
TEST_F(GpuFusibleTest, IsFusibleAsMultiOutputFusionRoot) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule test_module
ENTRY add {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT add = f32[] add(lhs, rhs)
})")
.value();
const HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_TRUE(IsFusibleAsMultiOutputFusionRoot(*root));
}
TEST_F(GpuFusibleTest, ScatterIsNotFusibleAsMultiOutputFusionRoot) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule test_module
add {
lhs = s32[] parameter(0)
rhs = s32[] parameter(1)
ROOT add = s32[] add(lhs, rhs)
}
ENTRY Scatter {
p0 = s32[3,3] parameter(0)
operand = s32[3,3] add(p0, p0)
p1 = s32[2] parameter(1)
indices = s32[2] add(p1, p1)
p2 = s32[2,3] parameter(2)
updates = s32[2,3] add(p2, p2)
ROOT scatter = s32[3,3] scatter(operand, indices, updates),
to_apply=add,
update_window_dims={1},
inserted_window_dims={0},
scatter_dims_to_operand_dims={0},
index_vector_dim=1
})")
.value();
const HloInstruction* scatter_inst =
module->entry_computation()->root_instruction();
EXPECT_FALSE(IsFusibleAsMultiOutputFusionRoot(*scatter_inst));
}
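// The remaining producer/consumer tests combine the predicates above:
// IsProducerConsumerFusible and IsProducerMultiOutputFusible reject, for
// example, physically transposing producers, in-place updates, non-scalar
// constants, and reductions that would race if duplicated.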
TEST_F(GpuFusibleTest, ProducerConsumerFusionElementwiseAndReduce) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
ENTRY reduce {
p0 = f32[32,32,32]{2,1,0} parameter(0)
c0 = f32[] constant(0)
exp = f32[32,32,32]{2,1,0} exponential(p0)
reduce = f32[32,32]{1,0} reduce(exp, c0), dimensions={2},
to_apply=scalar_add
ROOT root = (f32[32,32]{1,0}, f32[32,32,32]{2,1,0}) tuple(reduce, exp)
})"))
.value();
const HloInstruction* root = module->entry_computation()->root_instruction();
const HloInstruction* consumer = root->operand(0);
const HloInstruction* producer = root->operand(1);
EXPECT_TRUE(IsProducerMultiOutputFusible(*producer));
EXPECT_TRUE(IsFusibleAsMultiOutputFusionRoot(*consumer));
EXPECT_TRUE(ShapesCompatibleForMultiOutputFusion(*producer, *consumer));
}
TEST_F(GpuFusibleTest, ProducerConsumerFusionTransposeAndLoopFusion) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_add {
p0.1 = f32[32,31,30]{2,1,0} parameter(0)
p1.1 = f32[32,31,30]{2,1,0} parameter(1)
neg = f32[32,31,30]{2,1,0} negate(p0.1)
ROOT add = f32[32,31,30]{2,1,0} add(neg, p1.1)
}
ENTRY reduce {
p0 = f32[32,31,30]{2,1,0} parameter(0)
p1 = f32[32,30,31]{2,1,0} parameter(1)
transpose = f32[32,31,30]{2,1,0} transpose(p1), dimensions={0,2,1}
ROOT add = f32[32,31,30]{2,1,0} fusion(p0, transpose), kind=kLoop, calls=fused_add
})"))
.value();
const HloInstruction* root = module->entry_computation()->root_instruction();
const HloInstruction* consumer = root;
const HloInstruction* producer = root->operand(1);
EXPECT_TRUE(IsProducerConsumerFusible(*producer, *consumer));
}
TEST_F(GpuFusibleTest, ProducerConsumerFusionReduceAndLoopFusion) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_add {
p0.1 = f32[32,31,30]{2,1,0} parameter(0)
p1.1 = f32[32,31,30]{2,1,0} parameter(1)
neg = f32[32,31,30]{2,1,0} negate(p0.1)
ROOT add = f32[32,31,30]{2,1,0} add(neg, p1.1)
}
ENTRY reduce {
p0 = f32[32,31,30]{2,1,0} parameter(0)
p1 = f32[32,31,30,29]{3,2,1,0} parameter(1)
c0 = f32[] constant(0.0)
reduce = f32[32,31,30]{2,1,0} reduce(p1, c0), dimensions={3}, to_apply=scalar_add
ROOT add = f32[32,31,30]{2,1,0} fusion(p0, reduce), kind=kLoop, calls=fused_add
})"))
.value();
const HloInstruction* root = module->entry_computation()->root_instruction();
const HloInstruction* consumer = root;
const HloInstruction* producer = root->operand(1);
EXPECT_TRUE(IsProducerConsumerFusible(*producer, *consumer));
}
TEST_F(GpuFusibleTest, ProducerConsumerFusionLoopFusionAndReduce) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_add {
p0.1 = f32[32,32,32]{2,1,0} parameter(0)
p1.1 = f32[32,32,32]{2,1,0} parameter(1)
ROOT add = f32[32,32,32]{2,1,0} add(p0.1, p1.1)
}
ENTRY reduce {
p0 = f32[32,32,32]{2,1,0} parameter(0)
p1 = f32[32,32,32]{2,1,0} parameter(1)
c0 = f32[] constant(0)
add = f32[32,32,32]{2,1,0} fusion(p0, p1), kind=kLoop, calls=fused_add
reduce = f32[32,32]{1,0} reduce(add, c0), dimensions={2},
to_apply=scalar_add
ROOT root = (f32[32,32]{1,0}, f32[32,32,32]{2,1,0}) tuple(reduce, add)
})"))
.value();
const HloInstruction* root = module->entry_computation()->root_instruction();
const HloInstruction* consumer = root->operand(0);
const HloInstruction* producer = root->operand(1);
EXPECT_TRUE(IsProducerMultiOutputFusible(*producer));
EXPECT_TRUE(IsFusibleAsMultiOutputFusionRoot(*consumer));
EXPECT_TRUE(ShapesCompatibleForMultiOutputFusion(*producer, *consumer));
}
TEST_F(GpuFusibleTest, ProducerConsumerFusionLoopFusionAndReduceFusion) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_select {
p1.1 = f32[32,32,32]{2,1,0} parameter(1)
c0 = f32[] constant(0)
broadcast = f32[32,32,32]{2,1,0} broadcast(f32[] c0), dimensions={}
greater-than = pred[32,32,32]{2,1,0} compare(f32[32,32,32]{2,1,0} p1.1,
f32[32,32,32]{2,1,0} broadcast), direction=GT
p0.1 = f32[32,32,32]{2,1,0} parameter(0)
ROOT select = f32[32,32,32]{2,1,0} select(pred[32,32,32]{2,1,0}
greater-than, f32[32,32,32]{2,1,0} p0.1, f32[32,32,32]{2,1,0} broadcast)
}
fused_reduce {
p0.2 = f32[32,32,32]{2,1,0} parameter(0)
c1 = f32[] constant(0)
r1 = f32[32,32]{1,0} reduce(p0.2, c1), dimensions={2},
to_apply=scalar_add
mul = f32[32,32,32]{2,1,0} multiply(p0.2, p0.2)
r2 = f32[32,32]{1,0} reduce(mul, c1), dimensions={2},
to_apply=scalar_add
ROOT tuple = (f32[32,32]{1,0}, f32[32,32]{1,0}) tuple(r1, r2)
}
ENTRY reduce {
p0 = f32[32,32,32]{2,1,0} parameter(0)
p1 = f32[32,32,32]{2,1,0} parameter(1)
select = f32[32,32,32]{2,1,0} fusion(p0, p1), kind=kLoop, calls=fused_select
fusion = (f32[32,32]{1,0}, f32[32,32]{1,0}) fusion(select), kind=kInput,
calls=fused_reduce
ROOT root = ((f32[32,32]{1,0}, f32[32,32]{1,0}), f32[32,32,32]{2,1,0}) tuple(fusion, select)
})"))
.value();
const HloInstruction* root = module->entry_computation()->root_instruction();
const HloInstruction* consumer = root->operand(0);
const HloInstruction* producer = root->operand(1);
EXPECT_TRUE(IsProducerMultiOutputFusible(*producer));
EXPECT_TRUE(IsFusibleAsMultiOutputFusionRoot(*consumer));
EXPECT_TRUE(ShapesCompatibleForMultiOutputFusion(*producer, *consumer));
}
TEST_F(GpuFusibleTest, ProducerConsumerFusionDoNotFuseLoopReduceFusion) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_element_wise {
p0.1 = f32[2,2,2]{2,1,0} parameter(0)
p1.1 = f32[2,2,2]{2,1,0} parameter(1)
ROOT root = f32[2,2,2]{2,1,0} add(p0.1, p1.1)
}
fused_reduce {
p0.2 = f32[2,2,2]{2,1,0} parameter(0)
mul = f32[2,2,2]{2,1,0} multiply(f32[2,2,2]{2,1,0} p0.2,
f32[2,2,2]{2,1,0} p0.2)
broadcast = f32[2,2,2,2]{3,2,1,0} broadcast(mul), dimensions={3,2,1}
c1 = f32[] constant(0)
ROOT reduce = f32[2,2]{1,0} reduce(f32[2,2,2,2]{3,2,1,0} broadcast,
f32[] c1), dimensions={1,3}, to_apply=scalar_add
}
ENTRY reduce {
p0 = f32[2,2,2]{2,1,0} parameter(0)
p1 = f32[2,2,2]{2,1,0} parameter(1)
element_wise = f32[2,2,2]{2,1,0} fusion(p0, p1), kind=kLoop, calls=fused_element_wise
fusion = f32[2,2]{1,0} fusion(element_wise), kind=kLoop, calls=fused_reduce
ROOT root = (f32[2,2]{1,0}, f32[2,2,2]{2,1,0}) tuple(fusion, element_wise)
})"))
.value();
const HloInstruction* root = module->entry_computation()->root_instruction();
const HloInstruction* consumer = root->operand(0);
const HloInstruction* producer = root->operand(1);
EXPECT_TRUE(IsProducerMultiOutputFusible(*producer));
EXPECT_TRUE(IsFusibleAsMultiOutputFusionRoot(*consumer));
EXPECT_FALSE(ShapesCompatibleForMultiOutputFusion(*producer, *consumer));
}
TEST_F(GpuFusibleTest, ProducerConsumerFusionReduceUnfriendlyLoopFusion) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
mixed_input_layouts_computation {
p0.1 = f16[128,1024,32,32]{1,3,2,0} parameter(0)
p1.1 = f16[128,1024,33,33]{3,2,1,0} parameter(1)
copy = f16[128,1024,33,33]{1,3,2,0} copy(p1.1)
slice = f16[128,1024,32,32]{1,3,2,0} slice(copy), slice={[0:128],[0:1024],[0:32],[0:32]}
c0 = f16[] constant(0)
broadcast = f16[128,1024,32,32]{1,3,2,0} broadcast(c0), dimensions={}
greater-than = pred[128,1024,32,32]{1,3,2,0} compare(slice, broadcast), direction=GT
ROOT root = f16[128,1024,32,32]{1,3,2,0} select(greater-than, p0.1, broadcast)
}
fused_reduce {
p0.2 = f16[128,1024,32,32]{1,3,2,0} parameter(0)
convert = f32[128,1024,32,32]{1,3,2,0} convert(p0.2)
c0.2 = f32[] constant(0)
ROOT reduce = f32[1024]{0} reduce(convert, c0.2), dimensions={0,2,3}, to_apply=scalar_add
}
ENTRY reduce {
p0 = f16[128,1024,32,32]{1,3,2,0} parameter(0)
p1 = f16[128,1024,33,33]{3,2,1,0} parameter(1)
loop_fusion = f16[128,1024,32,32]{1,3,2,0} fusion(p0, p1), kind=kLoop, calls=mixed_input_layouts_computation
reduce_fusion = f32[1024]{0} fusion(loop_fusion), kind=kInput, calls=fused_reduce
ROOT root = (f32[1024]{0}, f16[128,1024,32,32]{1,3,2,0}) tuple(reduce_fusion, loop_fusion)
})"))
.value();
const HloInstruction* root = module->entry_computation()->root_instruction();
const HloInstruction* consumer = root->operand(0);
const HloInstruction* producer = root->operand(1);
EXPECT_FALSE(IsProducerMultiOutputFusible(*producer));
EXPECT_TRUE(IsFusibleAsMultiOutputFusionRoot(*consumer));
EXPECT_TRUE(ShapesCompatibleForMultiOutputFusion(*producer, *consumer));
}
TEST_F(GpuFusibleTest, ProducerConsumerFusionInPlaceOperation) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
%fusion {
%param_0 = s32[4,4]{1,0} parameter(0)
%copy = s32[4,4]{0,1} copy(%param_0)
ROOT %transpose = s32[4,4]{1,0} transpose(%copy), dimensions={1,0}
}
ENTRY %main {
%param_0 = s32[4,4]{1,0} parameter(0)
%constant_0 = s32[] constant(0)
%constant_1 = s32[] constant(1)
%constant_1x1_1 = s32[1,1] constant({ {1} })
%updated = s32[4,4]{1,0} dynamic-update-slice(%param_0, %constant_1x1_1, %constant_1, %constant_0)
%transpose = s32[4,4]{0,1} fusion(%updated), kind=kLoop, calls=fusion
ROOT %tuple = tuple(%updated, %transpose)
})"))
.value();
const HloInstruction* tuple = module->entry_computation()->root_instruction();
EXPECT_EQ(tuple->opcode(), HloOpcode::kTuple);
const HloInstruction* dus = tuple->operand(0);
EXPECT_EQ(dus->opcode(), HloOpcode::kDynamicUpdateSlice);
const HloInstruction* transpose = tuple->operand(1);
EXPECT_EQ(transpose->opcode(), HloOpcode::kFusion);
EXPECT_FALSE(IsProducerMultiOutputFusible(*dus));
EXPECT_TRUE(IsFusibleAsMultiOutputFusionRoot(*transpose));
EXPECT_TRUE(ShapesCompatibleForMultiOutputFusion(*dus, *transpose));
}
TEST_F(GpuFusibleTest, NonscalarConstantsNotFused) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule test_module
add {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT add = f32[] add(lhs, rhs)
}
ENTRY BroadcastIntoReduce {
constant = f32[16] constant({0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15})
broadcast = f32[16,16,16,16]{3,2,1,0} broadcast(constant), dimensions={0}
constant.1 = f32[] constant(0)
reduce = f32[] reduce(broadcast, constant.1), dimensions={0,1,2,3},
to_apply=add
ROOT root = (f32[], f32[], f32[16,16,16,16], f32[16]) tuple(reduce, constant.1, broadcast, constant)
})")
.value();
const HloInstruction* root = module->entry_computation()->root_instruction();
const HloInstruction* consumer = root->operand(0);
const HloInstruction* producer = root->operand(1);
const HloInstruction* consumer2 = root->operand(2);
const HloInstruction* producer2 = root->operand(3);
EXPECT_FALSE(
static_cast<bool>(IsProducerConsumerFusible(*producer, *consumer)));
EXPECT_FALSE(
static_cast<bool>(IsProducerConsumerFusible(*producer2, *consumer2)));
}
TEST_F(GpuFusibleTest, FuseLayoutChangingOpWithElementwise) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule test_module
ENTRY entry {
p0 = f32[16,16,16,16]{3,2,1,0} parameter(0)
copy = f32[16,16,16,16]{0,1,2,3} copy(p0)
ROOT add = f32[16,16,16,16]{0,1,2,3} add(copy, copy)
})")
.value();
const HloInstruction* consumer =
module->entry_computation()->root_instruction();
const HloInstruction* producer = consumer->operand(0);
EXPECT_TRUE(
static_cast<bool>(IsProducerConsumerFusible(*producer, *consumer)));
}
TEST_F(GpuFusibleTest, FuseReduceWithUnaryElementwise) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
ENTRY main.12 {
Arg_0.1 = f32[2048]{0} parameter(0)
constant.4 = f32[] constant(0.0)
reduce.10 = f32[] reduce(Arg_0.1, constant.4), dimensions={0}, to_apply=scalar_add
ROOT exp = f32[] exponential(reduce.10)
})"))
.value();
const HloInstruction* consumer =
module->entry_computation()->root_instruction();
const HloInstruction* producer = consumer->operand(0);
EXPECT_TRUE(
static_cast<bool>(IsProducerConsumerFusible(*producer, *consumer)));
}
TEST_F(GpuFusibleTest, DoNotFuseReduceWithRacesWithUnaryElementwise) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
ENTRY main.12 {
Arg_0.1 = f32[196608]{0} parameter(0)
constant.4 = f32[] constant(0.0)
reduce.10 = f32[] reduce(Arg_0.1, constant.4), dimensions={0}, to_apply=scalar_add
ROOT exp = f32[] exponential(reduce.10)
})"))
.value();
const HloInstruction* consumer =
module->entry_computation()->root_instruction();
const HloInstruction* producer = consumer->operand(0);
EXPECT_FALSE(
static_cast<bool>(IsProducerConsumerFusible(*producer, *consumer)));
}
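// CreatesHeavyComputation flags producer/consumer pairs where fusing would
// re-execute expensive work, e.g. an overlapping reduce-window or a
// reduce-window whose result is read multiple times by a gather; cheap or
// non-overlapping cases below return false.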
TEST_F(GpuFusibleTest, CreatesHeavyComputation_NonfusionInstr) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
ENTRY entry {
p_0 = f32[20,50] parameter(0)
constant_1 = f32[] constant(1)
reduce-window_1 = f32[21,41] reduce-window(p_0, constant_1),
window={size=20x10 pad=0_20x0_0}, to_apply=scalar_add
constant_2 = f32[] constant(2)
reduce-window_2 = f32[21,41] reduce-window(p_0, constant_2),
window={size=20x10 pad=0_20x0_0}, to_apply=scalar_add
ROOT root = (f32[21,41], f32[21,41])
tuple(reduce-window_1, reduce-window_2)
})"))
.value();
const HloInstruction* root = module->entry_computation()->root_instruction();
const HloInstruction* producer = root->operand(0);
const HloInstruction* consumer = root->operand(1);
EXPECT_TRUE(CreatesHeavyComputation(*producer, *consumer));
}
TEST_F(GpuFusibleTest, DoesNotCreateHeavyComputation_NonfusionInstr) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
ENTRY entry {
p_0 = f32[3,5] parameter(0)
constant = f32[] constant(1)
broadcast = f32[3, 5] broadcast(f32[] constant), dimensions={}
scaled_p_0 = f32[3,5] multiply(f32[3, 5] broadcast, f32[3,5]{1, 0} p_0)
p_1 = f32[2,5] parameter(1)
reduce-window = f32[3,5] reduce-window(p_1, constant),
window={size=2x1 pad=0_2x0_0}, to_apply=scalar_add
ROOT root = (f32[3,5], f32[3,5]) tuple(reduce-window, scaled_p_0)
})"))
.value();
const HloInstruction* root = module->entry_computation()->root_instruction();
const HloInstruction* producer = root->operand(0);
const HloInstruction* consumer = root->operand(1);
EXPECT_FALSE(CreatesHeavyComputation(*producer, *consumer));
}
TEST_F(GpuFusibleTest,
DoesNotCreateHeavyComputation_NonoverlappingReduceWindows) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
ENTRY entry {
p_0 = f32[2,5] parameter(0)
constant_1 = f32[] constant(1)
reduce-window_1 = f32[3,5] reduce-window(p_0, constant_1),
window={size=2x1 pad=0_2x0_0}, to_apply=scalar_add
constant_2 = f32[] constant(2)
reduce-window_2 = f32[2,3] reduce-window(p_0, constant_2),
window={size=2x1 pad=0_2x0_0 stride=2x2}, to_apply=scalar_add
ROOT root = (f32[3,5], f32[2,3]) tuple(reduce-window_1, reduce-window_2)
})"))
.value();
const HloInstruction* root = module->entry_computation()->root_instruction();
const HloInstruction* producer = root->operand(0);
const HloInstruction* consumer = root->operand(1);
EXPECT_FALSE(CreatesHeavyComputation(*producer, *consumer));
}
TEST_F(GpuFusibleTest, CreatesHeavyComputation_ReduceWindowGather) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
ENTRY entry {
p0 = s32[512,512,2] parameter(0)
p1 = f32[1,1,512,512] parameter(1)
constant_1 = f32[] constant(0)
reduce-window.1 = reduce-window(p1, constant_1),
window={size=1x1x16x16 stride=1x1x16x16}, to_apply=scalar_add
ROOT ret = gather(reduce-window.1, p0), offset_dims={0,1,2,3},
collapsed_slice_dims={}, start_index_map={1,2},
index_vector_dim=2, slice_sizes={1,1,1,1}
})"))
.value();
auto gather = module->entry_computation()->root_instruction();
auto reduce_window = gather->operand(0);
EXPECT_EQ(gather->opcode(), HloOpcode::kGather);
EXPECT_EQ(reduce_window->opcode(), HloOpcode::kReduceWindow);
EXPECT_FALSE(IfFusedReadsElementsMultipleTimes(*reduce_window));
EXPECT_TRUE(IsExpensiveToRepeat(*reduce_window));
EXPECT_TRUE(IfFusedReadsElementsMultipleTimes(*gather));
EXPECT_TRUE(CreatesHeavyComputation(*reduce_window, *gather));
}
TEST_F(GpuFusibleTest, CreatesHeavyComputation_FusionInstr) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_producer {
operand = f32[20,20] parameter(0)
constant = f32[] constant(1)
ROOT reduce-window = f32[11,11] reduce-window(operand, constant),
window={size=20x20 pad=0_10x0_10}, to_apply=scalar_add
}
fused_consumer {
operand_0 = f32[11,11] parameter(0)
operand_1 = f32[11,11] parameter(1)
constant = f32[] constant(1)
reduce-window = f32[11,11] reduce-window(operand_1, constant),
window={size=2x2 pad=0_1x0_1}, to_apply=scalar_add
ROOT scaled_operand_1 =
f32[11,11] multiply(f32[11,11] operand_0, f32[11,11] reduce-window)
}
ENTRY entry {
p0 = f32[20,20] parameter(0)
p1 = f32[11,11] parameter(1)
producer = f32[11,11] fusion(p0), kind=kLoop, calls=fused_producer
consumer = f32[11,11] fusion(p1, producer), kind=kLoop, calls=fused_consumer
ROOT root = (f32[11,11], f32[11,11]) tuple(producer, consumer)
})"))
.value();
const HloInstruction* root = module->entry_computation()->root_instruction();
const HloInstruction* producer = root->operand(0);
const HloInstruction* consumer = root->operand(1);
EXPECT_TRUE(CreatesHeavyComputation(*producer, *consumer));
}
TEST_F(GpuFusibleTest, DoesNotCreateHeavyComputation_FusionInstr) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_producer {
p_0 = f32[2,2] parameter(0)
constant = f32[] constant(1)
ROOT reduce-window = f32[2,2] reduce-window(p_0, constant),
window={size=2x2 pad=0_1x0_1}, to_apply=scalar_add
}
fused_consumer {
p_0 = f32[2,2] parameter(0)
p_1 = f32[2,2] parameter(1)
constant = f32[] constant(1)
reduce-window = f32[2,2] reduce-window(p_1, constant),
window={size=2x2 pad=0_1x0_1}, to_apply=scalar_add
ROOT scaled_p_1 = f32[2,2] multiply(f32[2, 2] p_0, f32[2,2] reduce-window)
}
ENTRY entry {
p_0 = f32[2,2] parameter(0)
producer = f32[2,2] fusion(p_0), kind=kLoop, calls=fused_producer
consumer = f32[2,2] fusion(producer, p_0), kind=kLoop, calls=fused_consumer
ROOT root = (f32[2,2], f32[2,2]) tuple(producer, consumer)
})"))
.value();
const HloInstruction* root = module->entry_computation()->root_instruction();
const HloInstruction* producer = root->operand(0);
const HloInstruction* consumer = root->operand(1);
EXPECT_FALSE(CreatesHeavyComputation(*producer, *consumer));
}
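// ChooseFusionKind selects the fusion kind for a producer/consumer pair; in
// the single case below, a transposing producer yields kInput.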
TEST_F(GpuFusibleTest, ChooseFusionKind) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule module
ENTRY computation {
p = f32[1,5000,6000]{2,1,0} parameter(0)
c = f32[1,6000,5000]{2,1,0} transpose(p), dimensions={0,2,1}
ROOT r = f32[300,20,5000]{2,1,0} reshape(c)
}
)")
.value();
const HloInstruction* root = module->entry_computation()->root_instruction();
const HloInstruction* producer = root->operand(0);
EXPECT_EQ(ChooseFusionKind(*producer, *root),
HloInstruction::FusionKind::kInput);
}
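// GetFusionRoots flattens the fusion root tuple in order: get-tuple-elements
// of a multi-output custom call are traced back to the call itself, so the
// same instruction may appear more than once in the returned vector.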
TEST_F(GpuFusibleTest, GetFusionRoots1) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule test_module
fusion {
p0 = s32[] parameter(0)
custom-call = (bf16[], s32[]) custom-call(p0), custom_call_target="my_custom_call"
get-tuple-element.0 = bf16[] get-tuple-element(custom-call), index=0
get-tuple-element.1 = s32[] get-tuple-element(custom-call), index=1
ROOT tuple = (bf16[], s32[], s32[]) tuple(get-tuple-element.0, get-tuple-element.1, p0)
}
ENTRY entry{
p0 = s32[] parameter(0)
ROOT fusion = (bf16[], s32[], s32[]) fusion(p0), kind=kCustom, calls=fusion
}
)")
.value();
auto called_computations =
module->entry_computation()->root_instruction()->called_computations();
ASSERT_EQ(called_computations.size(), 1);
auto fusion = called_computations.front();
auto roots = GetFusionRoots(*fusion);
auto custom_call = fusion->root_instruction()->operand(0)->operand(0);
auto parameter = fusion->root_instruction()->operand(2);
std::vector<const HloInstruction*> expected_roots{custom_call, custom_call,
parameter};
EXPECT_EQ(roots, expected_roots);
}
TEST_F(GpuFusibleTest, GetFusionRoots2) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule test_module
fusion {
p0 = s32[] parameter(0)
custom-call.1 = bf16[] custom-call(p0), custom_call_target="my_custom_call1"
custom-call.2 = bf16[] custom-call(p0), custom_call_target="my_custom_call2"
ROOT tuple = (bf16[], bf16[], s32[]) tuple(custom-call.1, custom-call.2, p0)
}
ENTRY entry{
p0 = s32[] parameter(0)
ROOT fusion = (bf16[], bf16[], s32[]) fusion(p0), kind=kCustom, calls=fusion
}
)")
.value();
auto called_computations =
module->entry_computation()->root_instruction()->called_computations();
ASSERT_EQ(called_computations.size(), 1);
auto fusion = called_computations.front();
auto roots = GetFusionRoots(*fusion);
auto custom_call1 = fusion->root_instruction()->operand(0);
auto custom_call2 = fusion->root_instruction()->operand(1);
auto parameter = fusion->root_instruction()->operand(2);
std::vector<const HloInstruction*> expected_roots{custom_call1, custom_call2,
parameter};
EXPECT_EQ(roots, expected_roots);
}
TEST_F(GpuFusibleTest, GetFusionRoots3) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule test_module
fusion {
p0 = s32[] parameter(0)
custom-call = (bf16[], s32[]) custom-call(p0), custom_call_target="my_custom_call"
get-tuple-element.0 = bf16[] get-tuple-element(custom-call), index=0
custom-call.2 = bf16[] custom-call(p0), custom_call_target="my_custom_call2"
get-tuple-element.1 = s32[] get-tuple-element(custom-call), index=1
ROOT tuple = (bf16[], bf16[], s32[], s32[]) tuple(get-tuple-element.0, custom-call.2, get-tuple-element.1, p0)
}
ENTRY entry{
p0 = s32[] parameter(0)
ROOT fusion = (bf16[], bf16[], s32[], s32[]) fusion(p0), kind=kCustom, calls=fusion
}
)")
.value();
auto called_computations =
module->entry_computation()->root_instruction()->called_computations();
ASSERT_EQ(called_computations.size(), 1);
auto fusion = called_computations.front();
auto roots = GetFusionRoots(*fusion);
auto custom_call1 = fusion->root_instruction()->operand(0)->operand(0);
auto custom_call2 = fusion->root_instruction()->operand(1);
auto parameter = fusion->root_instruction()->operand(3);
std::vector<const HloInstruction*> expected_roots{custom_call1, custom_call2,
custom_call1, parameter};
EXPECT_EQ(roots, expected_roots);
}
TEST_F(GpuFusibleTest, GetFusionRootsWithGTEMakeTupleSequence) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule test_module
fusion {
p0 = s32[] parameter(0)
p1 = s32[32] parameter(1)
custom-call = (bf16[], s32[], u32[]) custom-call(p1), custom_call_target="my_custom_call"
get-tuple-element.0 = bf16[] get-tuple-element(custom-call), index=0
get-tuple-element.1 = s32[] get-tuple-element(custom-call), index=1
bitcast = s32[1] bitcast(get-tuple-element.1)
dynamic-update-slice = s32[32] dynamic-update-slice(p1, bitcast, p0)
get-tuple-element.2 = u32[] get-tuple-element(custom-call), index=2
ROOT tuple = (bf16[], s32[32], u32[]) tuple(get-tuple-element.0, dynamic-update-slice, get-tuple-element.2)
}
ENTRY entry{
p0 = s32[] parameter(0)
bitcast = s32[32] bitcast(p0)
ROOT fusion = (bf16[], s32[32], u32[]) fusion(p0, bitcast), kind=kCustom, calls=fusion
}
)")
.value();
auto called_computations =
module->entry_computation()->root_instruction()->called_computations();
ASSERT_EQ(called_computations.size(), 1);
auto fusion = called_computations.front();
auto roots = GetFusionRoots(*fusion);
auto custom_call = fusion->root_instruction()->operand(0)->operand(0);
auto dus = fusion->root_instruction()->operand(1);
std::vector<const HloInstruction*> expected_result{custom_call, dus,
custom_call};
EXPECT_EQ(roots, expected_result);
}
TEST_F(GpuFusibleTest, GetFusionRootsWithMakeTupleGTESequence) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule test_module
fusion {
p0 = s32[] parameter(0)
p1 = s32[32] parameter(1)
custom-call = (bf16[], s32[], u32[]) custom-call(p1), custom_call_target="my_custom_call"
get-tuple-element.0 = bf16[] get-tuple-element(custom-call), index=0
get-tuple-element.1 = s32[] get-tuple-element(custom-call), index=1
bitcast = s32[1] bitcast(get-tuple-element.1)
dynamic-update-slice = s32[32] dynamic-update-slice(p1, bitcast, p0)
get-tuple-element.2 = u32[] get-tuple-element(custom-call), index=2
tuple = (bf16[], s32[32], u32[]) tuple(get-tuple-element.0, dynamic-update-slice, get-tuple-element.2)
get-tuple-element.3 = bf16[] get-tuple-element(tuple), index=0
get-tuple-element.4 = u32[] get-tuple-element(tuple), index=2
ROOT tuple2 = (bf16[], s32[32], u32[]) tuple(get-tuple-element.3, dynamic-update-slice, get-tuple-element.4)
}
ENTRY entry{
p0 = s32[] parameter(0)
bitcast = s32[32] bitcast(p0)
ROOT fusion = (bf16[], s32[32], u32[]) fusion(p0, bitcast), kind=kCustom, calls=fusion
}
)")
.value();
auto called_computations =
module->entry_computation()->root_instruction()->called_computations();
ASSERT_EQ(called_computations.size(), 1);
auto fusion = called_computations.front();
auto roots = GetFusionRoots(*fusion);
auto tuple_inst = fusion->root_instruction()->operand(0)->operand(0);
auto custom_call = tuple_inst->operand(0)->operand(0);
auto dus = fusion->root_instruction()->operand(1);
std::vector<const HloInstruction*> expected_result{custom_call, dus,
custom_call};
EXPECT_EQ(roots, expected_result);
}
TEST_F(GpuFusibleTest, GetFusionRootsWithTupleMultipleSameOperands) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule test_module
fusion {
p1 = s32[32] parameter(0)
add0 = s32[32] add(p1, p1)
ROOT _ = (s32[32], s32[32]) tuple(add0, add0)
}
ENTRY entry {
p0 = s32[32] parameter(0)
ROOT fusion = (s32[32], s32[32]) fusion(p0), kind=kCustom, calls=fusion
}
)")
.value();
auto called_computations =
module->entry_computation()->root_instruction()->called_computations();
ASSERT_EQ(called_computations.size(), 1);
auto fusion = called_computations.front();
auto roots = GetFusionRoots(*fusion);
auto add0 = fusion->root_instruction()->operand(0);
EXPECT_THAT(GetFusionRoots(*fusion), ElementsAre(add0, add0));
}
TEST_F(GpuFusibleTest, GetFusibleComputations) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_reduce {
p0 = f32[128,1024] parameter(0)
c0 = f32[] constant(0)
ROOT reduce = f32[128]{0} reduce(p0, c0), dimensions={1}, to_apply=scalar_add
}
body_a {
p0 = f32[128,1024] parameter(0)
ROOT reduce_fusion = f32[128] fusion(p0), kind=kInput, calls=fused_reduce
}
body_b {
p0 = f32[128,1024] parameter(0)
c0 = f32[] constant(0)
ROOT bc = f32[128] broadcast(c0), dimensions={}
}
ENTRY main {
p0 = s32[] parameter(0)
p1 = f32[128,1024] parameter(1)
ROOT conditional = f32[128] conditional(p0, p1, p1),
branch_computations={body_a, body_b}
})"))
.value();
auto fusible = GetFusibleComputations(*module, {});
EXPECT_THAT(fusible, ElementsAre(module->GetComputationWithName("body_a"),
module->GetComputationWithName("body_b"),
module->entry_computation()));
}
TEST_F(GpuFusibleTest, GetSharedMemoryUsage) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
wrapped_transpose {
p0 = f32[128,1024,2]{2,1,0} parameter(0)
ROOT transpose = f32[1024,128,2]{2,1,0} transpose(p0), dimensions={1,0,2}
}
ENTRY main {
p = f32[128,1024,2] parameter(0)
ROOT res = f32[1024,128,2]{2,1,0} fusion(p), kind=kInput, calls=wrapped_transpose
})"))
.value();
auto& debug_options = module->mutable_config().mutable_debug_options();
debug_options.set_xla_gpu_mlir_emitter_level(3);
FusionInfoCache cache;
auto fusion = module->entry_computation()->root_instruction();
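  // Expected footprint: a 32x33 f32 tile (the extra column presumably pads the
  // stride to avoid shared-memory bank conflicts) for each of the 2
  // minor-dimension elements, i.e. 32 * 33 * 2 * 4 bytes.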
EXPECT_EQ(cache.GetSharedMemoryUsage(*fusion), 32 * 33 * 2 * 4);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/gpu_fusible.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/gpu_fusible_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
d52c20ab-b940-4de5-843a-acbaa8160622 | cpp | tensorflow/tensorflow | model_utils | tensorflow/compiler/mlir/lite/quantization/lite/toco_legacy/model_utils.cc | tensorflow/compiler/mlir/lite/quantization/lite/toco_legacy/model_utils_test.cc | #include "tensorflow/compiler/mlir/lite/quantization/lite/toco_legacy/model_utils.h"
#include <cstddef>
#include <cstdint>
#include <memory>
#include <string>
#include <vector>
#include "tensorflow/compiler/mlir/lite/schema/schema_conversion_utils.h"
#include "tensorflow/compiler/mlir/lite/schema/schema_generated.h"
#include "tensorflow/compiler/mlir/lite/schema/schema_utils.h"
namespace mlir {
namespace lite {
namespace toco_legacy {
using std::string;
using tflite::BuiltinOperator;
using tflite::BuiltinOperator_DEQUANTIZE;
using tflite::ModelT;
using tflite::OperatorCodeT;
using tflite::OperatorT;
using tflite::TensorT;
using tflite::TensorType;
int32_t GetOrInsertOpCodeIndex(ModelT* model, const BuiltinOperator& op_code,
int32_t version) {
for (size_t i = 0; i < model->operator_codes.size(); ++i) {
if (tflite::GetBuiltinCode(model->operator_codes[i].get()) == op_code) {
return i;
}
}
model->operator_codes.push_back(std::make_unique<OperatorCodeT>());
int op_code_idx = model->operator_codes.size() - 1;
model->operator_codes[op_code_idx]->builtin_code = op_code;
model->operator_codes[op_code_idx]->deprecated_builtin_code =
tflite::ConvertBuiltinCodeToDeprecatedBuiltinCode(op_code);
model->operator_codes[op_code_idx]->version = version;
return op_code_idx;
}
void MakeDequantizeOperator(ModelT* model, std::unique_ptr<OperatorT>* op,
int32_t input, int32_t output) {
OperatorT* op_raw = new OperatorT;
op_raw->opcode_index =
GetOrInsertOpCodeIndex(model, BuiltinOperator_DEQUANTIZE, 2);
op_raw->inputs = {input};
op_raw->outputs = {output};
op->reset(op_raw);
}
void MakeTensor(const string& name, const std::vector<int32_t>& shape,
const std::vector<int32_t>& shape_signature,
const TensorType& type, std::unique_ptr<TensorT>* tensor) {
TensorT* tensor_raw = new TensorT;
tensor_raw->name = name;
tensor_raw->shape = shape;
if (!shape_signature.empty()) {
tensor_raw->shape_signature = shape_signature;
}
tensor_raw->type = type;
tensor->reset(tensor_raw);
}
bool HasMinMax(const TensorT* tensor) {
return tensor->quantization && !tensor->quantization->min.empty() &&
!tensor->quantization->max.empty();
}
}
}
} | #include "tensorflow/compiler/mlir/lite/quantization/lite/toco_legacy/model_utils.h"
#include <memory>
#include <string>
#include <gtest/gtest.h>
#include "tensorflow/compiler/mlir/lite/schema/schema_generated.h"
namespace mlir {
namespace lite {
namespace toco_legacy {
namespace {
using std::string;
TEST(ModelUtilsTest, HasMinMax) {
tflite::TensorT tensor;
tensor.quantization = std::make_unique<tflite::QuantizationParametersT>();
tensor.quantization->min.push_back(0.5);
EXPECT_FALSE(mlir::lite::toco_legacy::HasMinMax(&tensor));
tensor.quantization->max.push_back(1.5);
EXPECT_TRUE(mlir::lite::toco_legacy::HasMinMax(&tensor));
}
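// Illustrative sketch, not part of the original test file: exercises MakeTensor
// and MakeDequantizeOperator as defined in model_utils.cc, assuming both
// helpers are declared in model_utils.h alongside HasMinMax.
TEST(ModelUtilsTest, MakeTensorAndDequantizeOperatorSketch) {
  std::unique_ptr<tflite::TensorT> tensor;
  mlir::lite::toco_legacy::MakeTensor("t", {1, 2}, {},
                                      tflite::TensorType_FLOAT32, &tensor);
  ASSERT_NE(tensor, nullptr);
  EXPECT_EQ(tensor->name, "t");
  EXPECT_EQ(tensor->type, tflite::TensorType_FLOAT32);
  ASSERT_EQ(tensor->shape.size(), 2);
  EXPECT_EQ(tensor->shape[0], 1);
  EXPECT_EQ(tensor->shape[1], 2);
  tflite::ModelT model;
  std::unique_ptr<tflite::OperatorT> op;
  mlir::lite::toco_legacy::MakeDequantizeOperator(&model, &op, 0, 1);
  ASSERT_NE(op, nullptr);
  ASSERT_EQ(model.operator_codes.size(), 1);
  EXPECT_EQ(model.operator_codes[0]->builtin_code,
            tflite::BuiltinOperator_DEQUANTIZE);
  EXPECT_EQ(model.operator_codes[0]->version, 2);
  ASSERT_EQ(op->inputs.size(), 1);
  EXPECT_EQ(op->inputs[0], 0);
  ASSERT_EQ(op->outputs.size(), 1);
  EXPECT_EQ(op->outputs[0], 1);
}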
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/lite/quantization/lite/toco_legacy/model_utils.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/lite/quantization/lite/toco_legacy/model_utils_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
1411a6c0-ec82-45e2-a39b-2b8df454cb18 | cpp | google/arolla | wildcard_input_loader | arolla/io/wildcard_input_loader.cc | arolla/io/wildcard_input_loader_test.cc | #include "arolla/io/wildcard_input_loader.h"
#include <cstddef>
#include <functional>
#include <optional>
#include <string>
#include <utility>
#include "absl/log/check.h"
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
#include "absl/strings/strip.h"
namespace arolla::input_loader_impl {
std::function<std::optional<std::string>(absl::string_view)> MakeNameToKeyFn(
const absl::ParsedFormat<'s'>& format) {
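  // Format a sentinel string once to recover the literal prefix and suffix
  // around %s; the returned matcher strips that prefix and suffix from a name
  // to obtain the key, or returns std::nullopt when they do not match.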
constexpr absl::string_view kUniqueString =
"unique_string_5a7cf4c5ed2d49068302b641bad242aa";
auto formatted = absl::StrFormat(format, kUniqueString);
size_t prefix_end = formatted.find(kUniqueString);
DCHECK(prefix_end != absl::string_view::npos);
std::string prefix = formatted.substr(0, prefix_end);
size_t suffix_begin = prefix_end + kUniqueString.size();
DCHECK(suffix_begin <= formatted.size());
std::string suffix = formatted.substr(suffix_begin);
return [prefix = std::move(prefix), suffix = std::move(suffix)](
absl::string_view name) -> std::optional<std::string> {
if (!absl::ConsumePrefix(&name, prefix)) {
return std::nullopt;
}
if (!absl::ConsumeSuffix(&name, suffix)) {
return std::nullopt;
}
return std::string(name);
};
}
} | #include "arolla/io/wildcard_input_loader.h"
#include <cstddef>
#include <cstdint>
#include <functional>
#include <optional>
#include <string>
#include <type_traits>
#include <utility>
#include <vector>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/status/status.h"
#include "absl/status/status_matchers.h"
#include "absl/status/statusor.h"
#include "absl/strings/numbers.h"
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
#include "absl/strings/strip.h"
#include "absl/types/optional.h"
#include "arolla/io/input_loader.h"
#include "arolla/io/testing/matchers.h"
#include "arolla/memory/frame.h"
#include "arolla/memory/memory_allocation.h"
#include "arolla/memory/optional_value.h"
#include "arolla/memory/raw_buffer_factory.h"
#include "arolla/qtype/optional_qtype.h"
#include "arolla/qtype/qtype.h"
#include "arolla/qtype/qtype_traits.h"
#include "arolla/qtype/typed_ref.h"
#include "arolla/qtype/typed_value.h"
namespace arolla {
namespace {
using ::absl_testing::StatusIs;
using ::arolla::testing::InputLoaderSupports;
using ::testing::ElementsAre;
using ::testing::Eq;
using ::testing::HasSubstr;
using ::testing::IsNull;
using ::testing::MatchesRegex;
struct DummyInput {};
TEST(WildcardInputLoaderCombinationTest, MakeNameToKeyFn) {
{
auto fn = input_loader_impl::MakeNameToKeyFn(absl::ParsedFormat<'s'>("%s"));
EXPECT_THAT(fn(""), Eq(""));
EXPECT_THAT(fn("foo"), Eq("foo"));
EXPECT_THAT(fn("foobarbaz\\[.*\"]"), Eq("foobarbaz\\[.*\"]"));
}
{
auto fn = input_loader_impl::MakeNameToKeyFn(
absl::ParsedFormat<'s'>("%s_only_suffix"));
EXPECT_THAT(fn(""), Eq(std::nullopt));
EXPECT_THAT(fn("_only_suffix"), Eq(""));
EXPECT_THAT(fn("foo_only_suffix"), Eq("foo"));
}
{
auto fn = input_loader_impl::MakeNameToKeyFn(
absl::ParsedFormat<'s'>("only_prefix_%s"));
EXPECT_THAT(fn(""), Eq(std::nullopt));
EXPECT_THAT(fn("only_prefix_"), Eq(""));
EXPECT_THAT(fn("only_prefix_foo"), Eq("foo"));
}
{
auto fn = input_loader_impl::MakeNameToKeyFn(
absl::ParsedFormat<'s'>("prefix_%s_and_suffix"));
EXPECT_THAT(fn(""), Eq(std::nullopt));
EXPECT_THAT(fn("prefix_"), Eq(std::nullopt));
EXPECT_THAT(fn("_and_suffix"), Eq(std::nullopt));
EXPECT_THAT(fn("prefix__and_suffix"), Eq(""));
EXPECT_THAT(fn("prefix_foo_and_suffix"), Eq("foo"));
}
}
TEST(InputLoaderTest, InputLoaderAccessorResultType) {
using Input = absl::flat_hash_map<std::string, int>;
{
auto accessor = [](const Input& input, const std::string& key) {
return 1;
};
static_assert(
std::is_same_v<
WildcardAccessorResultType<decltype(accessor), Input, std::string>,
int>);
}
{
auto accessor = [](const Input& input,
const std::string& key) -> absl::StatusOr<int> {
return 1;
};
static_assert(
std::is_same_v<
WildcardAccessorResultType<decltype(accessor), Input, std::string>,
int>);
}
{
auto accessor = [](const Input& input, const std::string& key,
RawBufferFactory*) { return 1; };
static_assert(
std::is_same_v<
WildcardAccessorResultType<decltype(accessor), Input, std::string>,
int>);
}
{
auto accessor = [](const Input& input, const std::string& key,
RawBufferFactory*) -> absl::StatusOr<int> { return 1; };
static_assert(
std::is_same_v<
WildcardAccessorResultType<decltype(accessor), Input, std::string>,
int>);
}
{
auto accessor = [](const Input& input, const std::string& key, int* res) {
*res = 1;
};
static_assert(
std::is_same_v<
WildcardAccessorResultType<decltype(accessor), Input, std::string>,
int>);
}
{
auto accessor = [](const Input& input, const std::string& key,
RawBufferFactory*, int* res) { *res = 1; };
static_assert(
std::is_same_v<
WildcardAccessorResultType<decltype(accessor), Input, std::string>,
int>);
}
}
TEST(WildcardInputLoaderTest, FromMapNoError) {
using OInt = OptionalValue<int>;
auto oi32 = GetQType<OInt>();
using Input = absl::flat_hash_map<std::string, int>;
auto accessor = [](const Input& input, const std::string& key) -> OInt {
if (auto it = input.find(key); it != input.end()) {
return it->second;
} else {
return std::nullopt;
}
};
ASSERT_OK_AND_ASSIGN(auto input_loader,
WildcardInputLoader<Input>::Build(
accessor, absl::ParsedFormat<'s'>("from_map_%s")));
EXPECT_THAT(input_loader, InputLoaderSupports(
{{"from_map_a", oi32}, {"from_map_b", oi32}}));
EXPECT_THAT(input_loader->SuggestAvailableNames(), ElementsAre("from_map_*"));
FrameLayout::Builder layout_builder;
auto a_slot = layout_builder.AddSlot<OInt>();
auto b_slot = layout_builder.AddSlot<OInt>();
ASSERT_OK_AND_ASSIGN(BoundInputLoader<Input> bound_input_loader,
input_loader->Bind({
{"from_map_a", TypedSlot::FromSlot(a_slot)},
{"from_map_b", TypedSlot::FromSlot(b_slot)},
}));
FrameLayout memory_layout = std::move(layout_builder).Build();
MemoryAllocation alloc(&memory_layout);
ASSERT_OK(bound_input_loader({{"a", 5}, {"b", 7}}, alloc.frame()));
EXPECT_EQ(alloc.frame().Get(a_slot), 5);
EXPECT_EQ(alloc.frame().Get(b_slot), 7);
ASSERT_OK(bound_input_loader({{"a", 7}}, alloc.frame()));
EXPECT_EQ(alloc.frame().Get(a_slot), 7);
EXPECT_EQ(alloc.frame().Get(b_slot), std::nullopt);
}
TEST(WildcardInputLoaderTest, AccessorExecutionOrderIsDetemenistic) {
std::vector<std::string> accessor_calls_order;
auto accessor = [&](const DummyInput& input, const std::string& key) -> int {
accessor_calls_order.push_back(key);
return 1;
};
ASSERT_OK_AND_ASSIGN(auto input_loader,
WildcardInputLoader<DummyInput>::Build(accessor));
EXPECT_THAT(input_loader, InputLoaderSupports({{"a", GetQType<int>()},
{"b", GetQType<int>()},
{"c", GetQType<int>()}}));
FrameLayout::Builder layout_builder;
auto a_slot = layout_builder.AddSlot<int>();
auto b_slot = layout_builder.AddSlot<int>();
auto c_slot = layout_builder.AddSlot<int>();
ASSERT_OK_AND_ASSIGN(BoundInputLoader<DummyInput> bound_input_loader,
input_loader->Bind({
{"a", TypedSlot::FromSlot(a_slot)},
{"b", TypedSlot::FromSlot(b_slot)},
{"c", TypedSlot::FromSlot(c_slot)},
}));
FrameLayout memory_layout = std::move(layout_builder).Build();
MemoryAllocation alloc(&memory_layout);
ASSERT_OK(bound_input_loader(DummyInput{}, alloc.frame()));
EXPECT_THAT(accessor_calls_order, ElementsAre("a", "b", "c"));
}
TEST(WildcardInputLoaderTest, FromMapNoErrorName2KeyFn) {
using OInt = OptionalValue<int>;
auto oi32 = GetQType<OInt>();
using Input = absl::flat_hash_map<std::string, int>;
auto accessor = [](const Input& input, const std::string& key) -> OInt {
if (auto it = input.find(key); it != input.end()) {
return it->second;
} else {
return std::nullopt;
}
};
auto name2key = [](absl::string_view name) -> std::optional<std::string> {
if (!absl::ConsumePrefix(&name, "from_map_")) {
return std::nullopt;
}
if (name != "a" && name != "b") {
return std::nullopt;
}
return std::string(name);
};
ASSERT_OK_AND_ASSIGN(auto input_loader,
WildcardInputLoader<Input>::Build(accessor, name2key));
EXPECT_THAT(input_loader, InputLoaderSupports(
{{"from_map_a", oi32}, {"from_map_b", oi32}}));
EXPECT_THAT(input_loader->GetQTypeOf("from_map_x"), IsNull());
FrameLayout::Builder layout_builder;
auto a_slot = layout_builder.AddSlot<OInt>();
auto b_slot = layout_builder.AddSlot<OInt>();
ASSERT_OK_AND_ASSIGN(BoundInputLoader<Input> bound_input_loader,
input_loader->Bind({
{"from_map_a", TypedSlot::FromSlot(a_slot)},
{"from_map_b", TypedSlot::FromSlot(b_slot)},
}));
FrameLayout memory_layout = std::move(layout_builder).Build();
MemoryAllocation alloc(&memory_layout);
ASSERT_OK(bound_input_loader({{"a", 5}, {"b", 7}}, alloc.frame()));
EXPECT_EQ(alloc.frame().Get(a_slot), 5);
EXPECT_EQ(alloc.frame().Get(b_slot), 7);
ASSERT_OK(bound_input_loader({{"a", 7}}, alloc.frame()));
EXPECT_EQ(alloc.frame().Get(a_slot), 7);
EXPECT_EQ(alloc.frame().Get(b_slot), std::nullopt);
}
TEST(WildcardInputLoaderTest, FromMapOutputArg) {
using OInt = OptionalValue<int>;
auto oi32 = GetQType<OInt>();
using Input = absl::flat_hash_map<std::string, int>;
auto accessor = [](const Input& input, const std::string& key, OInt* output) {
if (auto it = input.find(key); it != input.end()) {
*output = it->second;
} else {
*output = std::nullopt;
}
};
ASSERT_OK_AND_ASSIGN(auto input_loader,
WildcardInputLoader<Input>::Build(
accessor, absl::ParsedFormat<'s'>("from_map_%s")));
EXPECT_THAT(input_loader, InputLoaderSupports(
{{"from_map_a", oi32}, {"from_map_b", oi32}}));
EXPECT_THAT(input_loader->SuggestAvailableNames(), ElementsAre("from_map_*"));
FrameLayout::Builder layout_builder;
auto a_slot = layout_builder.AddSlot<OInt>();
auto b_slot = layout_builder.AddSlot<OInt>();
ASSERT_OK_AND_ASSIGN(BoundInputLoader<Input> bound_input_loader,
input_loader->Bind({
{"from_map_a", TypedSlot::FromSlot(a_slot)},
{"from_map_b", TypedSlot::FromSlot(b_slot)},
}));
FrameLayout memory_layout = std::move(layout_builder).Build();
MemoryAllocation alloc(&memory_layout);
ASSERT_OK(bound_input_loader({{"a", 5}, {"b", 7}}, alloc.frame()));
EXPECT_EQ(alloc.frame().Get(a_slot), 5);
EXPECT_EQ(alloc.frame().Get(b_slot), 7);
ASSERT_OK(bound_input_loader({{"a", 7}}, alloc.frame()));
EXPECT_EQ(alloc.frame().Get(a_slot), 7);
EXPECT_EQ(alloc.frame().Get(b_slot), std::nullopt);
}
TEST(WildcardInputLoaderTest, FromMapError) {
auto i32 = GetQType<int32_t>();
using Input = absl::flat_hash_map<std::string, int>;
auto accessor = [](const Input& input,
const std::string& key) -> absl::StatusOr<int> {
if (auto it = input.find(key); it != input.end()) {
return it->second;
}
return absl::FailedPreconditionError(
absl::StrFormat("key `%s` is not found", key));
};
ASSERT_OK_AND_ASSIGN(auto input_loader,
WildcardInputLoader<Input>::Build(
accessor, absl::ParsedFormat<'s'>("from_map_%s")));
EXPECT_THAT(input_loader,
InputLoaderSupports({{"from_map_a", i32}, {"from_map_b", i32}}));
EXPECT_THAT(input_loader->SuggestAvailableNames(), ElementsAre("from_map_*"));
FrameLayout::Builder layout_builder;
auto a_slot = layout_builder.AddSlot<int>();
auto b_slot = layout_builder.AddSlot<int>();
ASSERT_OK_AND_ASSIGN(BoundInputLoader<Input> bound_input_loader,
input_loader->Bind({
{"from_map_a", TypedSlot::FromSlot(a_slot)},
{"from_map_b", TypedSlot::FromSlot(b_slot)},
}));
FrameLayout memory_layout = std::move(layout_builder).Build();
MemoryAllocation alloc(&memory_layout);
ASSERT_OK(bound_input_loader({{"a", 5}, {"b", 7}}, alloc.frame()));
EXPECT_EQ(alloc.frame().Get(a_slot), 5);
EXPECT_EQ(alloc.frame().Get(b_slot), 7);
EXPECT_THAT(bound_input_loader({{"a", 7}}, alloc.frame()),
StatusIs(absl::StatusCode::kFailedPrecondition,
HasSubstr("key `b` is not found")));
}
TEST(InputLoaderTest, BufferFactoryPropagated) {
UnsafeArenaBufferFactory global_factory(1000);
auto accessor = [&](int input, const std::string& key,
RawBufferFactory* factory) -> bool {
return &global_factory == factory;
};
ASSERT_OK_AND_ASSIGN(auto input_loader,
WildcardInputLoader<int>::Build(accessor));
FrameLayout::Builder layout_builder;
auto a_slot = layout_builder.AddSlot<bool>();
ASSERT_OK_AND_ASSIGN(BoundInputLoader<int> bound_input_loader,
input_loader->Bind({
{"a", TypedSlot::FromSlot(a_slot)},
}));
FrameLayout memory_layout = std::move(layout_builder).Build();
MemoryAllocation alloc(&memory_layout);
ASSERT_OK(bound_input_loader(0, alloc.frame(), &global_factory));
EXPECT_TRUE(alloc.frame().Get(a_slot));
UnsafeArenaBufferFactory global_factory2(1000);
ASSERT_OK(bound_input_loader(0, alloc.frame(), &global_factory2));
EXPECT_FALSE(alloc.frame().Get(a_slot));
}
TEST(InputLoaderTest, BufferFactoryPropagatedOutputArg) {
UnsafeArenaBufferFactory global_factory(1000);
auto accessor = [&](int input, const std::string& key,
RawBufferFactory* factory,
bool* output) { *output = (&global_factory == factory); };
ASSERT_OK_AND_ASSIGN(auto input_loader,
WildcardInputLoader<int>::Build(accessor));
FrameLayout::Builder layout_builder;
auto a_slot = layout_builder.AddSlot<bool>();
ASSERT_OK_AND_ASSIGN(BoundInputLoader<int> bound_input_loader,
input_loader->Bind({
{"a", TypedSlot::FromSlot(a_slot)},
}));
FrameLayout memory_layout = std::move(layout_builder).Build();
MemoryAllocation alloc(&memory_layout);
ASSERT_OK(bound_input_loader(0, alloc.frame(), &global_factory));
EXPECT_TRUE(alloc.frame().Get(a_slot));
UnsafeArenaBufferFactory global_factory2(1000);
ASSERT_OK(bound_input_loader(0, alloc.frame(), &global_factory2));
EXPECT_FALSE(alloc.frame().Get(a_slot));
}
TEST(WildcardInputLoaderTest, BuildFromCallbackAccessorFnFromStruct) {
using Input = std::pair<int, float>;
auto accessor = [](const Input& input, const std::string& key,
WildcardInputLoaderCallback callback) -> absl::Status {
if (key == "x") {
return callback(input.first);
}
if (key == "y") {
return callback(TypedRef::FromValue(input.second));
}
return absl::FailedPreconditionError(
absl::StrFormat("key `%s` is not found", key));
};
ASSERT_OK_AND_ASSIGN(
auto input_loader,
WildcardInputLoader<Input>::BuildFromCallbackAccessorFn(
accessor, {{"x", GetQType<int32_t>()}, {"y", GetQType<float>()}}));
EXPECT_THAT(input_loader, InputLoaderSupports({{"x", GetQType<int32_t>()},
{"y", GetQType<float>()}}));
EXPECT_THAT(input_loader->GetQTypeOf("z"), IsNull());
EXPECT_THAT(input_loader->SuggestAvailableNames(), ElementsAre("x", "y"));
FrameLayout::Builder layout_builder;
auto a_slot = layout_builder.AddSlot<int>();
auto b_slot = layout_builder.AddSlot<float>();
ASSERT_OK_AND_ASSIGN(BoundInputLoader<Input> bound_input_loader,
input_loader->Bind({
{"x", TypedSlot::FromSlot(a_slot)},
{"y", TypedSlot::FromSlot(b_slot)},
}));
FrameLayout memory_layout = std::move(layout_builder).Build();
MemoryAllocation alloc(&memory_layout);
ASSERT_OK(bound_input_loader({5, 7.f}, alloc.frame()));
EXPECT_EQ(alloc.frame().Get(a_slot), 5);
EXPECT_EQ(alloc.frame().Get(b_slot), 7.f);
}
TEST(WildcardInputLoaderTest, BuildFromCallbackAccessorFnFromMap) {
auto i32 = GetQType<int32_t>();
auto f32 = GetQType<float>();
using Input = absl::flat_hash_map<std::string, TypedValue>;
auto accessor = [](const Input& input, const std::string& key,
WildcardInputLoaderCallback callback) -> absl::Status {
if (auto it = input.find(key); it != input.end()) {
return callback(it->second.AsRef());
}
return absl::FailedPreconditionError(
absl::StrFormat("key `%s` is not found", key));
};
ASSERT_OK_AND_ASSIGN(
auto input_loader,
WildcardInputLoader<Input>::BuildFromCallbackAccessorFn(
accessor, {{"a", GetQType<int32_t>()}, {"b", GetQType<float>()}},
absl::ParsedFormat<'s'>("from_map_%s")));
EXPECT_THAT(*input_loader,
InputLoaderSupports({{"from_map_a", i32}, {"from_map_b", f32}}));
EXPECT_THAT(input_loader->GetQTypeOf("from_map_c"), IsNull());
EXPECT_THAT(input_loader->SuggestAvailableNames(),
ElementsAre("from_map_a", "from_map_b"));
FrameLayout::Builder layout_builder;
auto a_slot = layout_builder.AddSlot<int>();
auto b_slot = layout_builder.AddSlot<float>();
ASSERT_OK_AND_ASSIGN(BoundInputLoader<Input> bound_input_loader,
input_loader->Bind({
{"from_map_a", TypedSlot::FromSlot(a_slot)},
{"from_map_b", TypedSlot::FromSlot(b_slot)},
}));
FrameLayout memory_layout = std::move(layout_builder).Build();
MemoryAllocation alloc(&memory_layout);
ASSERT_OK(bound_input_loader(
{{"a", TypedValue::FromValue(5)}, {"b", TypedValue::FromValue(7.f)}},
alloc.frame()));
EXPECT_EQ(alloc.frame().Get(a_slot), 5);
EXPECT_EQ(alloc.frame().Get(b_slot), 7.f);
EXPECT_THAT(
bound_input_loader({{"a", TypedValue::FromValue(5)}}, alloc.frame()),
StatusIs(absl::StatusCode::kFailedPrecondition,
HasSubstr("key `b` is not found")));
EXPECT_THAT(bound_input_loader({{"a", TypedValue::FromValue(5.)},
{"b", TypedValue::FromValue(5.f)}},
alloc.frame()),
StatusIs(absl::StatusCode::kInvalidArgument,
MatchesRegex(".*type does not match.*expected "
"FLOAT64, got INT32.*key: `a`")));
}
TEST(WildcardInputLoaderTest, FromVector) {
auto i32 = GetQType<int32_t>();
using Input = std::vector<int>;
auto accessor = [](const Input& input,
const size_t& key) -> absl::StatusOr<int> {
return key < input.size() ? input[key] : -1;
};
auto name2key = [](absl::string_view key) -> std::optional<int64_t> {
if (!absl::ConsumePrefix(&key, "from_vector_")) {
return std::nullopt;
}
int64_t id;
if (!absl::SimpleAtoi(key, &id)) {
return std::nullopt;
}
return id;
};
ASSERT_OK_AND_ASSIGN(auto input_loader,
WildcardInputLoader<Input>::Build(accessor, name2key));
EXPECT_THAT(input_loader, InputLoaderSupports({{"from_vector_0", i32},
{"from_vector_1", i32}}));
FrameLayout::Builder layout_builder;
auto a_slot = layout_builder.AddSlot<int>();
auto b_slot = layout_builder.AddSlot<int>();
ASSERT_OK_AND_ASSIGN(BoundInputLoader<Input> bound_input_loader,
input_loader->Bind({
{"from_vector_0", TypedSlot::FromSlot(a_slot)},
{"from_vector_1", TypedSlot::FromSlot(b_slot)},
}));
FrameLayout memory_layout = std::move(layout_builder).Build();
MemoryAllocation alloc(&memory_layout);
ASSERT_OK(bound_input_loader({5, 7}, alloc.frame()));
EXPECT_EQ(alloc.frame().Get(a_slot), 5);
EXPECT_EQ(alloc.frame().Get(b_slot), 7);
ASSERT_OK(bound_input_loader({7}, alloc.frame()));
EXPECT_EQ(alloc.frame().Get(a_slot), 7);
EXPECT_EQ(alloc.frame().Get(b_slot), -1);
}
struct SeparateSparsityInput {
absl::flat_hash_set<std::string> presents;
absl::flat_hash_map<std::string, float> values;
};
TEST(WildcardInputLoaderTest, FromTwoMapsSeparateSparsity) {
auto of32 = GetOptionalQType<float>();
using Input = SeparateSparsityInput;
auto accessor =
[](const Input& input,
const std::string& key) -> absl::StatusOr<OptionalValue<float>> {
if (!input.presents.contains(key)) {
return std::nullopt;
}
if (auto it = input.values.find(key); it != input.values.end()) {
return {it->second};
}
return std::nullopt;
};
ASSERT_OK_AND_ASSIGN(auto input_loader,
WildcardInputLoader<Input>::Build(
accessor, absl::ParsedFormat<'s'>("from_map_%s")));
EXPECT_THAT(input_loader, InputLoaderSupports(
{{"from_map_a", of32}, {"from_map_b", of32}}));
EXPECT_THAT(input_loader->SuggestAvailableNames(), ElementsAre("from_map_*"));
FrameLayout::Builder layout_builder;
auto a_slot = layout_builder.AddSlot<OptionalValue<float>>();
auto b_slot = layout_builder.AddSlot<OptionalValue<float>>();
ASSERT_OK_AND_ASSIGN(BoundInputLoader<Input> bound_input_loader,
input_loader->Bind({
{"from_map_a", TypedSlot::FromSlot(a_slot)},
{"from_map_b", TypedSlot::FromSlot(b_slot)},
}));
FrameLayout memory_layout = std::move(layout_builder).Build();
MemoryAllocation alloc(&memory_layout);
ASSERT_OK(bound_input_loader({{"a", "b"}, {{"a", 5.f}, {"b", 7.f}}},
alloc.frame()));
EXPECT_EQ(alloc.frame().Get(a_slot), 5.f);
EXPECT_EQ(alloc.frame().Get(b_slot), 7.f);
ASSERT_OK(
bound_input_loader({{"a"}, {{"a", 5.f}, {"b", 7.f}}}, alloc.frame()));
EXPECT_EQ(alloc.frame().Get(a_slot), 5.f);
EXPECT_EQ(alloc.frame().Get(b_slot), std::nullopt);
}
}
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/io/wildcard_input_loader.cc | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/io/wildcard_input_loader_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
780c0485-dad1-448e-b5ed-a3e5f8d5dfb2 | cpp | google/arolla | forest_evaluator | arolla/decision_forest/pointwise_evaluation/forest_evaluator.cc | arolla/decision_forest/pointwise_evaluation/forest_evaluator_test.cc | #include "arolla/decision_forest/pointwise_evaluation/forest_evaluator.h"
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <map>
#include <memory>
#include <optional>
#include <utility>
#include <vector>
#include "absl/container/inlined_vector.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_format.h"
#include "absl/types/span.h"
#include "arolla/decision_forest/decision_forest.h"
#include "arolla/decision_forest/pointwise_evaluation/bitmask_builder.h"
#include "arolla/decision_forest/pointwise_evaluation/bound_split_conditions.h"
#include "arolla/decision_forest/pointwise_evaluation/oblivious.h"
#include "arolla/decision_forest/pointwise_evaluation/single_input_eval.h"
#include "arolla/decision_forest/split_condition.h"
#include "arolla/decision_forest/split_conditions/interval_split_condition.h"
#include "arolla/memory/frame.h"
#include "arolla/qtype/typed_slot.h"
#include "arolla/util/fast_dynamic_downcast_final.h"
#include "arolla/util/status_macros_backport.h"
namespace arolla {
namespace {
bool HasOnlyIntervalSplitConditions(const DecisionTree& tree) {
for (const auto& split_node : tree.split_nodes) {
if (!fast_dynamic_downcast_final<const IntervalSplitCondition*>(
split_node.condition.get()))
return false;
}
return true;
}
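// Maps every tree to the index of the output whose filter accepts it, or -1 if
// no output uses the tree; fails if two output filters claim the same tree.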
absl::StatusOr<std::vector<int>> SplitTreesByGroups(
absl::Span<const DecisionTree> trees,
absl::Span<const ForestEvaluator::Output> outputs) {
if (outputs.empty()) {
return absl::InvalidArgumentError("at least one output is expected");
}
std::vector<int> tree2group(trees.size(), -1);
for (int group_id = 0; group_id < outputs.size(); ++group_id) {
for (int tree_id = 0; tree_id < trees.size(); ++tree_id) {
if (!outputs[group_id].filter(trees[tree_id].tag)) continue;
if (tree2group[tree_id] != -1) {
return absl::InvalidArgumentError(absl::StrFormat(
"intersection of groups for outputs #%d and #%d is not empty",
tree2group[tree_id], group_id));
}
tree2group[tree_id] = group_id;
}
}
return tree2group;
}
std::optional<SplitCondition::InputSignature> GetSingleInputSignature(
const DecisionTree& tree) {
std::optional<SplitCondition::InputSignature> input_signature;
for (const auto& node : tree.split_nodes) {
auto signatures = node.condition->GetInputSignatures();
if (signatures.size() != 1 ||
(input_signature && input_signature->id != signatures[0].id)) {
return std::nullopt;
}
input_signature = signatures[0];
}
return input_signature;
}
}
class ForestEvaluator::RegularPredictorsBuilder {
public:
RegularPredictorsBuilder(int group_count,
absl::Span<const TypedSlot> input_slots)
: group_count_(group_count),
input_slots_(input_slots.begin(), input_slots.end()),
universal_compilers_(group_count),
interval_splits_compilers_(group_count) {}
absl::Status AddTree(const DecisionTree& tree, int group_id) {
if (HasOnlyIntervalSplitConditions(tree)) {
return AddTreeToRegularForestCompiler(
tree,
[this](const std::shared_ptr<const SplitCondition>& cond) {
auto interval_cond =
std::static_pointer_cast<const IntervalSplitCondition>(cond);
return IntervalBoundCondition::Create(interval_cond, input_slots_);
},
&interval_splits_compilers_[group_id]);
} else {
return AddTreeToRegularForestCompiler(
tree,
[this](const std::shared_ptr<const SplitCondition>& cond) {
return UniversalBoundCondition::Create(cond, input_slots_);
},
&universal_compilers_[group_id]);
}
}
absl::StatusOr<RegularPredictorsList> Build() && {
RegularPredictorsList res;
res.reserve(group_count_);
for (int i = 0; i < group_count_; ++i) {
ASSIGN_OR_RETURN(auto universal_predictor,
universal_compilers_[i].Compile());
ASSIGN_OR_RETURN(auto interval_splits_predictor,
interval_splits_compilers_[i].Compile());
res.push_back({std::move(universal_predictor),
std::move(interval_splits_predictor)});
}
return res;
}
private:
template <typename ForestCompiler, typename CreateConditionFunc>
absl::Status AddTreeToRegularForestCompiler(const DecisionTree& tree,
CreateConditionFunc create_cond,
ForestCompiler* forest_compiler) {
auto tree_compiler = forest_compiler->AddTree(
tree.split_nodes.size() + tree.adjustments.size(),
tree.tag.submodel_id);
for (int64_t id = 0; id < tree.split_nodes.size(); ++id) {
const auto& split_node = tree.split_nodes[id];
auto child_if_false = split_node.child_if_false.is_leaf()
? split_node.child_if_false.adjustment_index() +
tree.split_nodes.size()
: split_node.child_if_false.split_node_index();
auto child_if_true = split_node.child_if_true.is_leaf()
? split_node.child_if_true.adjustment_index() +
tree.split_nodes.size()
: split_node.child_if_true.split_node_index();
ASSIGN_OR_RETURN(auto cond, create_cond(split_node.condition));
RETURN_IF_ERROR(
tree_compiler.SetNode(id, child_if_true, child_if_false, cond));
}
for (int64_t i = 0; i < tree.adjustments.size(); ++i) {
RETURN_IF_ERROR(tree_compiler.SetLeaf(i + tree.split_nodes.size(),
tree.adjustments[i] * tree.weight));
}
return absl::OkStatus();
}
int group_count_;
std::vector<TypedSlot> input_slots_;
std::vector<PredictorCompiler<UniversalBoundCondition>> universal_compilers_;
std::vector<PredictorCompiler<IntervalBoundCondition>>
interval_splits_compilers_;
};
absl::StatusOr<ForestEvaluator> ForestEvaluator::Compile(
const DecisionForest& decision_forest,
absl::Span<const TypedSlot> input_slots, absl::Span<const Output> outputs,
CompilationParams params) {
ASSIGN_OR_RETURN(auto tree2group,
SplitTreesByGroups(decision_forest.GetTrees(), outputs));
std::vector<FrameLayout::Slot<float>> output_slots;
output_slots.reserve(outputs.size());
for (const auto& output : outputs) {
output_slots.push_back(output.slot);
}
RegularPredictorsBuilder regular_builder(outputs.size(), input_slots);
BitmaskBuilder bitmask_builder(input_slots, output_slots);
SingleInputBuilder single_input_builder(input_slots, output_slots);
std::vector<std::map<int, double>> consts(outputs.size());
if (tree2group.size() != decision_forest.GetTrees().size()) {
return absl::InternalError("size of tree2group doesn't match trees");
}
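  // Dispatch each tree to the cheapest suitable evaluator: split-free trees
  // are folded into per-submodel constants, trees that read a single supported
  // input go to the single-input evaluator, small or oblivious trees with
  // bitmask-compatible splits go to the bitmask evaluator, and the remaining
  // trees fall back to the regular predictors.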
for (size_t i = 0; i < decision_forest.GetTrees().size(); ++i) {
if (tree2group[i] == -1) {
continue;
}
if (tree2group[i] < 0 || tree2group[i] >= outputs.size()) {
return absl::InternalError("invalid tree2group mapping");
}
const DecisionTree& tree = decision_forest.GetTrees()[i];
if (params.enable_regular_eval && tree.split_nodes.empty()) {
consts[tree2group[i]][tree.tag.submodel_id] +=
tree.adjustments[0] * tree.weight;
continue;
}
if (params.enable_single_input_eval) {
if (std::optional<SplitCondition::InputSignature> input_signature =
GetSingleInputSignature(tree)) {
if (single_input_builder.IsInputTypeSupported(input_signature->type)) {
RETURN_IF_ERROR(single_input_builder.AddTree(tree, *input_signature,
tree2group[i]));
continue;
}
}
}
if (params.enable_bitmask_eval &&
std::all_of(tree.split_nodes.begin(), tree.split_nodes.end(),
BitmaskBuilder::IsSplitNodeSupported)) {
auto oblivious = ToObliviousTree(tree);
if (oblivious.has_value() && (oblivious->layer_splits.size() <=
BitmaskBuilder::kMaxRegionsForBitmask)) {
bitmask_builder.AddObliviousTree(*std::move(oblivious), tree2group[i]);
continue;
}
if (tree.adjustments.size() <= BitmaskBuilder::kMaxRegionsForBitmask) {
bitmask_builder.AddSmallTree(tree, tree2group[i]);
continue;
}
}
if (params.enable_regular_eval) {
RETURN_IF_ERROR(regular_builder.AddTree(tree, tree2group[i]));
} else {
return absl::InvalidArgumentError(
"No suitable evaluator. Use enable_regular_eval=true.");
}
}
for (int group_id = 0; group_id < consts.size(); ++group_id) {
for (const auto& [submodel_id, value] : consts[group_id]) {
DecisionTree tree;
tree.adjustments = {static_cast<float>(value)};
tree.tag.submodel_id = submodel_id;
RETURN_IF_ERROR(regular_builder.AddTree(tree, group_id));
}
}
ASSIGN_OR_RETURN(auto regular_predictors, std::move(regular_builder).Build());
ASSIGN_OR_RETURN(auto bitmask_predictor, std::move(bitmask_builder).Build());
ASSIGN_OR_RETURN(auto single_input_predictor,
std::move(single_input_builder).Build());
return ForestEvaluator(std::move(output_slots), std::move(regular_predictors),
std::move(bitmask_predictor),
std::move(single_input_predictor));
}
void ForestEvaluator::Eval(const ConstFramePtr input_ctx,
FramePtr output_ctx) const {
for (size_t i = 0; i < output_slots_.size(); ++i) {
*output_ctx.GetMutable(output_slots_[i]) =
regular_predictors_[i].Predict(input_ctx);
}
if (bitmask_predictor_) {
bitmask_predictor_->IncrementalEval(input_ctx, output_ctx);
}
single_input_predictor_.IncrementalEval(input_ctx, output_ctx);
}
} | #include "arolla/decision_forest/pointwise_evaluation/forest_evaluator.h"
#include <cmath>
#include <cstdint>
#include <limits>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/random/distributions.h"
#include "absl/random/random.h"
#include "absl/status/status.h"
#include "absl/status/status_matchers.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/types/span.h"
#include "arolla/decision_forest/decision_forest.h"
#include "arolla/decision_forest/split_condition.h"
#include "arolla/decision_forest/split_conditions/interval_split_condition.h"
#include "arolla/decision_forest/split_conditions/set_of_values_split_condition.h"
#include "arolla/decision_forest/testing/test_util.h"
#include "arolla/memory/frame.h"
#include "arolla/memory/memory_allocation.h"
#include "arolla/memory/optional_value.h"
#include "arolla/qtype/optional_qtype.h"
#include "arolla/qtype/qtype.h"
#include "arolla/qtype/typed_slot.h"
#include "arolla/util/bytes.h"
namespace arolla {
namespace {
using ::absl_testing::StatusIs;
using ::testing::HasSubstr;
constexpr float kInf = std::numeric_limits<float>::infinity();
constexpr auto S = DecisionTreeNodeId::SplitNodeId;
constexpr auto A = DecisionTreeNodeId::AdjustmentId;
const ForestEvaluator::CompilationParams kDefaultEval{
.enable_regular_eval = true,
.enable_bitmask_eval = true,
.enable_single_input_eval = true};
const ForestEvaluator::CompilationParams kRegularEval{
.enable_regular_eval = true,
.enable_bitmask_eval = false,
.enable_single_input_eval = false};
const ForestEvaluator::CompilationParams kBitmaskEval{
.enable_regular_eval = false,
.enable_bitmask_eval = true,
.enable_single_input_eval = false};
const ForestEvaluator::CompilationParams kSingleInputEval{
.enable_regular_eval = false,
.enable_bitmask_eval = false,
.enable_single_input_eval = true};
void FillArgs(FramePtr ctx, int row_id, absl::Span<const TypedSlot> slots) {}
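// Recursion base case above; the variadic overload below writes the row_id-th
// element of each input column into the matching optional input slot.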
template <typename T, typename... Tn>
void FillArgs(FramePtr frame, int row_id, absl::Span<const TypedSlot> slots,
const std::vector<OptionalValue<T>>& inputs1,
const std::vector<OptionalValue<Tn>>&... inputsN) {
auto slot = slots[0].ToSlot<OptionalValue<T>>().value();
frame.Set(slot, inputs1[row_id]);
FillArgs(frame, row_id, slots.subspan(1), inputsN...);
}
class SourceLocation {
public:
SourceLocation(int line, const char* filename)
: line_(line), file_name_(filename) {}
const char* file_name() { return file_name_.c_str(); }
constexpr int line() const { return line_; }
static SourceLocation current(int line = __builtin_LINE(),
const char* file_name = __builtin_FILE()) {
return SourceLocation(line, file_name);
}
private:
int line_ = 0;
std::string file_name_;
};
std::string ErrFormat(SourceLocation loc,
ForestEvaluator::CompilationParams params,
const std::string& msg, int row_id) {
return absl::StrFormat(
"%s Test at %s:%d, row_id=%d, params = "
"{enable_regular_eval=%d, enable_bitmask_eval=%d, "
"enable_single_input_eval=%d}",
msg, loc.file_name(), loc.line(), row_id, params.enable_regular_eval,
params.enable_bitmask_eval, params.enable_single_input_eval);
}
template <typename... T>
void TestCases(SourceLocation loc, const DecisionForest& forest,
absl::Span<const TreeFilter> groups,
ForestEvaluator::CompilationParams params,
absl::Span<const std::vector<float>> expected_outputs,
const std::vector<OptionalValue<T>>&... inputs) {
ASSERT_TRUE(((expected_outputs.size() == inputs.size()) && ...))
<< absl::StrCat(
"Input and output vector sizes are different: (",
absl::StrJoin({expected_outputs.size(), inputs.size()...}, ", "),
")");
std::vector<TypedSlot> input_slots;
std::vector<ForestEvaluator::Output> outputs;
FrameLayout::Builder layout_builder;
CreateSlotsForForest(forest, &layout_builder, &input_slots);
outputs.reserve(groups.size());
for (const TreeFilter& filter : groups) {
outputs.push_back({filter, layout_builder.AddSlot<float>()});
}
FrameLayout layout = std::move(layout_builder).Build();
ASSERT_OK_AND_ASSIGN(
auto evaluator,
ForestEvaluator::Compile(forest, input_slots, outputs, params));
MemoryAllocation alloc(&layout);
auto frame = alloc.frame();
for (int i = 0; i < expected_outputs.size(); ++i) {
FillArgs(frame, i, input_slots, inputs...);
evaluator.Eval(frame, frame);
for (int j = 0; j < outputs.size(); ++j) {
EXPECT_EQ(frame.Get(outputs[j].slot), expected_outputs[i][j])
<< ErrFormat(loc, params, "Incorrect output.", i);
}
}
}
void RandomTestAgainstReferenceImplementation(
SourceLocation loc, std::vector<DecisionTree> trees,
const std::vector<ForestEvaluator::CompilationParams>& params,
absl::BitGen* rnd) {
for (int i = 0; i < trees.size(); ++i) {
trees[i].tag.submodel_id = i % 4;
}
TreeFilter group0{.submodels{0, 3}};
TreeFilter group1{.submodels{1, 2}};
ASSERT_OK_AND_ASSIGN(auto forest,
DecisionForest::FromTrees(std::move(trees)));
std::vector<TypedSlot> input_slots;
std::vector<ForestEvaluator::Output> outputs;
FrameLayout::Builder layout_builder;
CreateSlotsForForest(*forest, &layout_builder, &input_slots);
outputs.push_back({group0, layout_builder.AddSlot<float>()});
outputs.push_back({group1, layout_builder.AddSlot<float>()});
FrameLayout layout = std::move(layout_builder).Build();
std::vector<ForestEvaluator> evaluators;
for (auto p : params) {
ASSERT_OK_AND_ASSIGN(auto evaluator, ForestEvaluator::Compile(
*forest, input_slots, outputs, p));
evaluators.push_back(std::move(evaluator));
}
MemoryAllocation alloc(&layout);
FramePtr frame = alloc.frame();
for (int item_id = 0; item_id < 15; ++item_id) {
for (auto slot : input_slots) {
ASSERT_OK(FillWithRandomValue(slot, frame, rnd,
0.25));
}
float reference_implementation_res0 =
DecisionForestNaiveEvaluation(*forest, frame, input_slots, group0);
float reference_implementation_res1 =
DecisionForestNaiveEvaluation(*forest, frame, input_slots, group1);
for (int eval_id = 0; eval_id < evaluators.size(); ++eval_id) {
const ForestEvaluator& evaluator = evaluators[eval_id];
frame.Set(outputs[0].slot, 0.0f);
frame.Set(outputs[1].slot, 0.0f);
evaluator.Eval(frame, frame);
EXPECT_FLOAT_EQ(reference_implementation_res0, frame.Get(outputs[0].slot))
<< ErrFormat(loc, params[eval_id], "Incorrect output #0 in Eval",
item_id);
EXPECT_FLOAT_EQ(reference_implementation_res1, frame.Get(outputs[1].slot))
<< ErrFormat(loc, params[eval_id], "Incorrect output #1 in Eval",
item_id);
}
}
}
TEST(ForestEvaluator, GroupsValidation) {
std::vector<DecisionTree> trees(3);
trees[0].tag.submodel_id = 3;
trees[0].adjustments = {1.0};
trees[1].tag.submodel_id = 2;
trees[1].adjustments = {1.0};
trees[2].tag.submodel_id = 1;
trees[2].adjustments = {1.0};
ASSERT_OK_AND_ASSIGN(auto forest,
DecisionForest::FromTrees(std::move(trees)));
EXPECT_THAT(ForestEvaluator::Compile(*forest, {}, {}).status(),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("at least one output is expected")));
auto fake_slot = FrameLayout::Slot<float>::UnsafeUninitializedSlot();
EXPECT_THAT(
ForestEvaluator::Compile(
*forest, {},
{ForestEvaluator::Output{{.submodels = {1, 3}}, fake_slot},
ForestEvaluator::Output{{.submodels = {2, 3}}, fake_slot}})
.status(),
StatusIs(
absl::StatusCode::kInvalidArgument,
HasSubstr(
"intersection of groups for outputs #0 and #1 is not empty")));
EXPECT_OK(ForestEvaluator::Compile(
*forest, {},
{ForestEvaluator::Output{{.submodels = {1, 3}}, fake_slot},
ForestEvaluator::Output{{.submodels = {2}}, fake_slot}})
.status());
EXPECT_OK(ForestEvaluator::Compile(
*forest, {},
{ForestEvaluator::Output{{.submodels = {1}}, fake_slot},
ForestEvaluator::Output{{.submodels = {2}}, fake_slot}})
.status());
}
TEST(ForestEvaluator, EmptyForest) {
ASSERT_OK_AND_ASSIGN(auto forest, DecisionForest::FromTrees({}));
for (auto params :
{kDefaultEval, kRegularEval, kBitmaskEval, kSingleInputEval}) {
TestCases<>(SourceLocation::current(), *forest,
{{.submodels = {0}}, {.submodels = {1}}}, params,
{{0.0, 0.0}});
}
}
TEST(ForestEvaluator, Constant) {
std::vector<DecisionTree> trees(2);
trees[0].tag = {.submodel_id = 0};
trees[0].adjustments = {1.5};
trees[1].tag = {.submodel_id = 1};
trees[1].adjustments = {2.5};
ASSERT_OK_AND_ASSIGN(auto forest,
DecisionForest::FromTrees(std::move(trees)));
for (auto params : {kDefaultEval, kRegularEval, kBitmaskEval}) {
TestCases<>(SourceLocation::current(), *forest,
{{.submodels = {0}}, {.submodels = {1}}}, params,
{{1.5, 2.5}});
}
}
TEST(ForestEvaluator, SmallForest) {
std::vector<DecisionTree> trees(2);
trees[0].tag = {.submodel_id = 0};
trees[0].adjustments = {0.5, 1.5, 2.5, 3.5};
trees[0].split_nodes = {
{S(1), S(2), IntervalSplit(0, 1.5, kInf)},
{A(0), A(2), SetOfValuesSplit<int64_t>(1, {1, 2}, false)},
{A(1), A(3), IntervalSplit(0, -kInf, 10)}};
trees[1].tag = {.submodel_id = 1};
trees[1].adjustments = {-1.0, 1.0};
trees[1].split_nodes = {{A(0), A(1), IntervalSplit(0, 1, 5)}};
ASSERT_OK_AND_ASSIGN(auto forest,
DecisionForest::FromTrees(std::move(trees)));
for (auto params : {kDefaultEval, kRegularEval, kBitmaskEval}) {
TestCases<float, int64_t>(
SourceLocation::current(), *forest,
{{.submodels = {0}}, {.submodels = {1}}}, params,
{{0.5, -1},
{2.5, -1},
{2.5, 1},
{3.5, 1},
{3.5, -1},
{1.5, -1},
{2.5, -1},
{0.5, -1}},
{0, 0, 1.2, 1.6, 7.0, 13.5, NAN, {}},
{3, 1, 1, 1, 1, 1, 1, {}});
}
}
TEST(ForestEvaluator, RangesSplits) {
DecisionTree tree;
tree.split_nodes = {{S(2), S(1), IntervalSplit(0, -1.0, 1.0)},
{A(1), A(2), IntervalSplit(0, 0.5, 0.5)},
{A(0), A(3), IntervalSplit(0, 2.5, 3.5)}};
tree.adjustments = {0.0, 1.0, 2.0, 3.0};
ASSERT_OK_AND_ASSIGN(auto forest, DecisionForest::FromTrees({tree}));
for (auto params :
{kDefaultEval, kRegularEval, kBitmaskEval, kSingleInputEval}) {
TestCases<float>(
SourceLocation::current(), *forest, {{}}, params,
{{0}, {0}, {0}, {1}, {2}, {3}, {3}, {3}},
{{}, -5, 5, -1, 0.5, 2.5, 3.0, 3.5});
}
}
TEST(ForestEvaluator, EqualSplits) {
DecisionTree tree;
tree.split_nodes = {{S(2), S(1), IntervalSplit(0, 1.0, 1.0)},
{A(1), A(2), IntervalSplit(1, 5.0, 5.0)},
{A(0), A(3), IntervalSplit(1, -5.0, -5.0)}};
tree.adjustments = {0.0, 1.0, 2.0, 3.0};
ASSERT_OK_AND_ASSIGN(auto forest, DecisionForest::FromTrees({tree}));
for (auto params : {kDefaultEval, kRegularEval, kBitmaskEval}) {
TestCases<float, float>(
SourceLocation::current(), *forest, {{}}, params,
{{0}, {0}, {0}, {1}, {1}, {2}, {3}, {3}},
{{}, 0.0, -5.0, 1.0, 1.0, 1.0, 0.0, {}},
{{}, {}, {}, {}, -5.0, +5.0, -5.0, -5.0});
}
}
TEST(ForestEvaluator, BytesInput) {
DecisionTree tree;
tree.split_nodes = {
{A(0), A(1), SetOfValuesSplit<Bytes>(0, {Bytes("X")}, false)}};
tree.adjustments = {0.0, 1.0};
ASSERT_OK_AND_ASSIGN(auto forest, DecisionForest::FromTrees({tree}));
for (auto params : {kDefaultEval, kRegularEval}) {
TestCases<Bytes>(SourceLocation::current(), *forest, {{}},
params,
{{0}, {1}, {0}},
{{}, Bytes("X"), Bytes("Y")});
}
}
TEST(ForestEvaluator, BitmaskNotPossible) {
absl::BitGen rnd;
auto forest =
CreateRandomForest(&rnd, 10, true,
70, 70,
1);
std::vector<TypedSlot> slots;
FrameLayout::Builder layout_builder;
CreateSlotsForForest(*forest, &layout_builder, &slots);
EXPECT_THAT(
SimpleForestEvaluator::Compile(*forest, slots, kBitmaskEval),
StatusIs(
absl::StatusCode::kInvalidArgument,
HasSubstr("No suitable evaluator. Use enable_regular_eval=true.")));
}
TEST(ForestEvaluator, SingleInputEvalNotPossible) {
DecisionTree tree;
tree.split_nodes = {{S(2), S(1), IntervalSplit(0, 1.0, 1.0)},
{A(1), A(2), IntervalSplit(1, 5.0, 5.0)},
{A(0), A(3), IntervalSplit(1, -5.0, -5.0)}};
tree.adjustments = {0.0, 1.0, 2.0, 3.0};
ASSERT_OK_AND_ASSIGN(auto forest, DecisionForest::FromTrees({tree}));
std::vector<TypedSlot> slots;
FrameLayout::Builder layout_builder;
CreateSlotsForForest(*forest, &layout_builder, &slots);
EXPECT_THAT(
SimpleForestEvaluator::Compile(*forest, slots, kSingleInputEval),
StatusIs(
absl::StatusCode::kInvalidArgument,
HasSubstr("No suitable evaluator. Use enable_regular_eval=true.")));
}
TEST(ForestEvaluator, ObliviousTree) {
DecisionTree tree;
std::vector<std::shared_ptr<SplitCondition>> conditions = {
IntervalSplit(0, -5, 5),
IntervalSplit(1, 0, kInf),
SetOfValuesSplit<int64_t>(2, {1, 2}, false),
IntervalSplit(3, -kInf, 3.0),
SetOfValuesSplit<int64_t>(4, {4, 2}, true),
IntervalSplit(5, -1, 7),
IntervalSplit(6, -kInf, -5)};
int layer_size = 1;
for (int layer = 0; layer < conditions.size(); ++layer) {
int layer_offset = tree.split_nodes.size() + layer_size;
for (int i = 0; i < layer_size; ++i) {
auto left =
(layer == conditions.size() - 1) ? A(i * 2) : S(layer_offset + i * 2);
auto right = (layer == conditions.size() - 1)
? A(i * 2 + 1)
: S(layer_offset + i * 2 + 1);
tree.split_nodes.push_back({left, right, conditions[layer]});
}
layer_size *= 2;
}
for (int i = 0; i < layer_size; ++i) {
tree.adjustments.push_back(i);
}
ASSERT_OK_AND_ASSIGN(auto forest, DecisionForest::FromTrees({tree}));
for (auto params : {kDefaultEval, kRegularEval, kBitmaskEval}) {
TestCases<float, float, int64_t, float, int64_t, float, float>(
SourceLocation::current(), *forest, {{}}, params,
{{58}, {86}, {12}, {39}, {112}},
{{}, 3, -7, 15, -4},
{10, -1, {}, 25, 1},
{2, 1, 3, {}, 1},
{0, {}, -5, 8, 14},
{1, 2, {}, 4, 5},
{0, 4, -3, 7, {}},
{10, 5, -3, -8, {}});
}
}
TEST(ForestEvaluator, TestAgainstReferenceOnSmallTrees) {
absl::BitGen rnd;
std::vector<QTypePtr> types;
for (int input_id = 0; input_id < 10; input_id++) {
types.push_back(GetOptionalQType<float>());
}
for (int input_id = 10; input_id < 15; input_id++) {
types.push_back(GetOptionalQType<int64_t>());
}
for (int iteration = 0; iteration < 10; ++iteration) {
std::vector<DecisionTree> trees;
for (int i = 0; i < 10; ++i) {
int num_splits = absl::Uniform<int32_t>(rnd, 0, 32);
trees.push_back(
CreateRandomTree(&rnd, true, num_splits, &types));
}
RandomTestAgainstReferenceImplementation(
SourceLocation::current(), trees,
{kDefaultEval, kRegularEval, kBitmaskEval}, &rnd);
}
}
TEST(ForestEvaluator, TestAgainstReferenceOnSingleInputTrees) {
absl::BitGen rnd;
std::vector<QTypePtr> types;
for (int input_id = 0; input_id < 10; input_id++) {
types.push_back(GetOptionalQType<float>());
}
for (int input_id = 10; input_id < 15; input_id++) {
types.push_back(GetOptionalQType<int64_t>());
}
for (int iteration = 0; iteration < 10; ++iteration) {
std::vector<DecisionTree> trees;
for (int i = 0; i < 10; ++i) {
int num_splits = absl::Uniform<int32_t>(rnd, 1, 1024);
trees.push_back(
CreateRandomTree(&rnd, false, num_splits, &types));
}
RandomTestAgainstReferenceImplementation(
SourceLocation::current(), trees,
{kDefaultEval, kRegularEval, kSingleInputEval}, &rnd);
}
}
TEST(ForestEvaluator, TestAgainstReference) {
absl::BitGen rnd;
std::vector<QTypePtr> types;
for (int feature_id = 0; feature_id < 10; feature_id++) {
types.push_back(GetOptionalQType<float>());
}
for (int feature_id = 10; feature_id < 15; feature_id++) {
types.push_back(GetOptionalQType<int64_t>());
}
for (int iteration = 0; iteration < 5; ++iteration) {
std::vector<DecisionTree> trees;
for (int i = 0; i < 10; ++i) {
int num_splits = absl::Uniform<int32_t>(rnd, 0, 1024);
trees.push_back(
CreateRandomTree(&rnd, true, num_splits, &types));
}
for (int i = 0; i < 10; ++i) {
int num_splits = absl::Uniform<int32_t>(rnd, 0, 1024);
trees.push_back(
CreateRandomTree(&rnd, false, num_splits, &types));
}
for (int i = 0; i < 10; ++i) {
int num_splits = absl::Uniform<int32_t>(rnd, 0, 1024);
trees.push_back(CreateRandomFloatTree(
&rnd, 10, true, num_splits,
0.4, 0.4));
}
for (int i = 0; i < 10; ++i) {
int num_splits = absl::Uniform<int32_t>(rnd, 0, 32);
trees.push_back(
CreateRandomTree(&rnd, true, num_splits, &types));
}
for (int i = 0; i < 5; ++i) {
int depth = absl::Uniform<int32_t>(rnd, 1, 20);
trees.push_back(CreateRandomObliviousTree(&rnd, depth, &types));
}
RandomTestAgainstReferenceImplementation(
SourceLocation::current(), trees, {kDefaultEval, kRegularEval}, &rnd);
}
}
}
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/decision_forest/pointwise_evaluation/forest_evaluator.cc | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/decision_forest/pointwise_evaluation/forest_evaluator_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
6a47b4ce-2bbe-45dc-a620-bf3980bd6292 | cpp | abseil/abseil-cpp | graphcycles | absl/synchronization/internal/graphcycles.cc | absl/synchronization/internal/graphcycles_test.cc | #include "absl/base/attributes.h"
#include "absl/base/internal/low_level_alloc.h"
#ifndef ABSL_LOW_LEVEL_ALLOC_MISSING
#include "absl/synchronization/internal/graphcycles.h"
#include <algorithm>
#include <array>
#include <cinttypes>
#include <limits>
#include "absl/base/internal/hide_ptr.h"
#include "absl/base/internal/raw_logging.h"
#include "absl/base/internal/spinlock.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace synchronization_internal {
namespace {
ABSL_CONST_INIT static absl::base_internal::SpinLock arena_mu(
absl::kConstInit, base_internal::SCHEDULE_KERNEL_ONLY);
ABSL_CONST_INIT static base_internal::LowLevelAlloc::Arena* arena;
static void InitArenaIfNecessary() {
arena_mu.Lock();
if (arena == nullptr) {
arena = base_internal::LowLevelAlloc::NewArena(0);
}
arena_mu.Unlock();
}
static const uint32_t kInline = 8;
template <typename T>
class Vec {
public:
Vec() { Init(); }
~Vec() { Discard(); }
void clear() {
Discard();
Init();
}
bool empty() const { return size_ == 0; }
uint32_t size() const { return size_; }
T* begin() { return ptr_; }
T* end() { return ptr_ + size_; }
const T& operator[](uint32_t i) const { return ptr_[i]; }
T& operator[](uint32_t i) { return ptr_[i]; }
const T& back() const { return ptr_[size_-1]; }
void pop_back() { size_--; }
void push_back(const T& v) {
if (size_ == capacity_) Grow(size_ + 1);
ptr_[size_] = v;
size_++;
}
void resize(uint32_t n) {
if (n > capacity_) Grow(n);
size_ = n;
}
void fill(const T& val) {
for (uint32_t i = 0; i < size(); i++) {
ptr_[i] = val;
}
}
void MoveFrom(Vec<T>* src) {
if (src->ptr_ == src->space_) {
resize(src->size_);
std::copy_n(src->ptr_, src->size_, ptr_);
src->size_ = 0;
} else {
Discard();
ptr_ = src->ptr_;
size_ = src->size_;
capacity_ = src->capacity_;
src->Init();
}
}
private:
T* ptr_;
T space_[kInline];
uint32_t size_;
uint32_t capacity_;
void Init() {
ptr_ = space_;
size_ = 0;
capacity_ = kInline;
}
void Discard() {
if (ptr_ != space_) base_internal::LowLevelAlloc::Free(ptr_);
}
void Grow(uint32_t n) {
while (capacity_ < n) {
capacity_ *= 2;
}
size_t request = static_cast<size_t>(capacity_) * sizeof(T);
T* copy = static_cast<T*>(
base_internal::LowLevelAlloc::AllocWithArena(request, arena));
std::copy_n(ptr_, size_, copy);
Discard();
ptr_ = copy;
}
Vec(const Vec&) = delete;
Vec& operator=(const Vec&) = delete;
};
class NodeSet {
public:
NodeSet() { Init(); }
void clear() { Init(); }
bool contains(int32_t v) const { return table_[FindIndex(v)] == v; }
bool insert(int32_t v) {
uint32_t i = FindIndex(v);
if (table_[i] == v) {
return false;
}
if (table_[i] == kEmpty) {
occupied_++;
}
table_[i] = v;
if (occupied_ >= table_.size() - table_.size()/4) Grow();
return true;
}
void erase(int32_t v) {
uint32_t i = FindIndex(v);
if (table_[i] == v) {
table_[i] = kDel;
}
}
#define HASH_FOR_EACH(elem, eset) \
for (int32_t elem, _cursor = 0; (eset).Next(&_cursor, &elem); )
bool Next(int32_t* cursor, int32_t* elem) {
while (static_cast<uint32_t>(*cursor) < table_.size()) {
int32_t v = table_[static_cast<uint32_t>(*cursor)];
(*cursor)++;
if (v >= 0) {
*elem = v;
return true;
}
}
return false;
}
private:
enum : int32_t { kEmpty = -1, kDel = -2 };
Vec<int32_t> table_;
uint32_t occupied_;
static uint32_t Hash(int32_t a) { return static_cast<uint32_t>(a) * 41; }
uint32_t FindIndex(int32_t v) const {
const uint32_t mask = table_.size() - 1;
uint32_t i = Hash(v) & mask;
uint32_t deleted_index = 0;
bool seen_deleted_element = false;
while (true) {
int32_t e = table_[i];
if (v == e) {
return i;
} else if (e == kEmpty) {
return seen_deleted_element ? deleted_index : i;
} else if (e == kDel && !seen_deleted_element) {
deleted_index = i;
seen_deleted_element = true;
}
i = (i + 1) & mask;
}
}
void Init() {
table_.clear();
table_.resize(kInline);
table_.fill(kEmpty);
occupied_ = 0;
}
void Grow() {
Vec<int32_t> copy;
copy.MoveFrom(&table_);
occupied_ = 0;
table_.resize(copy.size() * 2);
table_.fill(kEmpty);
for (const auto& e : copy) {
if (e >= 0) insert(e);
}
}
NodeSet(const NodeSet&) = delete;
NodeSet& operator=(const NodeSet&) = delete;
};
inline GraphId MakeId(int32_t index, uint32_t version) {
GraphId g;
g.handle =
(static_cast<uint64_t>(version) << 32) | static_cast<uint32_t>(index);
return g;
}
inline int32_t NodeIndex(GraphId id) {
return static_cast<int32_t>(id.handle);
}
inline uint32_t NodeVersion(GraphId id) {
return static_cast<uint32_t>(id.handle >> 32);
}
struct Node {
int32_t rank;
uint32_t version;
int32_t next_hash;
bool visited;
uintptr_t masked_ptr;
NodeSet in;
NodeSet out;
int priority;
int nstack;
void* stack[40];
};
class PointerMap {
public:
explicit PointerMap(const Vec<Node*>* nodes) : nodes_(nodes) {
table_.fill(-1);
}
int32_t Find(void* ptr) {
auto masked = base_internal::HidePtr(ptr);
for (int32_t i = table_[Hash(ptr)]; i != -1;) {
Node* n = (*nodes_)[static_cast<uint32_t>(i)];
if (n->masked_ptr == masked) return i;
i = n->next_hash;
}
return -1;
}
void Add(void* ptr, int32_t i) {
int32_t* head = &table_[Hash(ptr)];
(*nodes_)[static_cast<uint32_t>(i)]->next_hash = *head;
*head = i;
}
int32_t Remove(void* ptr) {
auto masked = base_internal::HidePtr(ptr);
for (int32_t* slot = &table_[Hash(ptr)]; *slot != -1; ) {
int32_t index = *slot;
Node* n = (*nodes_)[static_cast<uint32_t>(index)];
if (n->masked_ptr == masked) {
*slot = n->next_hash;
n->next_hash = -1;
return index;
}
slot = &n->next_hash;
}
return -1;
}
private:
static constexpr uint32_t kHashTableSize = 262139;
const Vec<Node*>* nodes_;
std::array<int32_t, kHashTableSize> table_;
static uint32_t Hash(void* ptr) {
return reinterpret_cast<uintptr_t>(ptr) % kHashTableSize;
}
};
}
struct GraphCycles::Rep {
Vec<Node*> nodes_;
Vec<int32_t> free_nodes_;
PointerMap ptrmap_;
Vec<int32_t> deltaf_;
Vec<int32_t> deltab_;
Vec<int32_t> list_;
Vec<int32_t> merged_;
Vec<int32_t> stack_;
Rep() : ptrmap_(&nodes_) {}
};
static Node* FindNode(GraphCycles::Rep* rep, GraphId id) {
Node* n = rep->nodes_[static_cast<uint32_t>(NodeIndex(id))];
return (n->version == NodeVersion(id)) ? n : nullptr;
}
void GraphCycles::TestOnlyAddNodes(uint32_t n) {
uint32_t old_size = rep_->nodes_.size();
rep_->nodes_.resize(n);
for (auto i = old_size; i < n; ++i) {
rep_->nodes_[i] = nullptr;
}
}
GraphCycles::GraphCycles() {
InitArenaIfNecessary();
rep_ = new (base_internal::LowLevelAlloc::AllocWithArena(sizeof(Rep), arena))
Rep;
}
GraphCycles::~GraphCycles() {
for (auto* node : rep_->nodes_) {
if (node == nullptr) { continue; }
node->Node::~Node();
base_internal::LowLevelAlloc::Free(node);
}
rep_->Rep::~Rep();
base_internal::LowLevelAlloc::Free(rep_);
}
bool GraphCycles::CheckInvariants() const {
Rep* r = rep_;
NodeSet ranks;
for (uint32_t x = 0; x < r->nodes_.size(); x++) {
Node* nx = r->nodes_[x];
void* ptr = base_internal::UnhidePtr<void>(nx->masked_ptr);
if (ptr != nullptr && static_cast<uint32_t>(r->ptrmap_.Find(ptr)) != x) {
ABSL_RAW_LOG(FATAL, "Did not find live node in hash table %" PRIu32 " %p",
x, ptr);
}
if (nx->visited) {
ABSL_RAW_LOG(FATAL, "Did not clear visited marker on node %" PRIu32, x);
}
if (!ranks.insert(nx->rank)) {
ABSL_RAW_LOG(FATAL, "Duplicate occurrence of rank %" PRId32, nx->rank);
}
HASH_FOR_EACH(y, nx->out) {
Node* ny = r->nodes_[static_cast<uint32_t>(y)];
if (nx->rank >= ny->rank) {
ABSL_RAW_LOG(FATAL,
"Edge %" PRIu32 " ->%" PRId32
" has bad rank assignment %" PRId32 "->%" PRId32,
x, y, nx->rank, ny->rank);
}
}
}
return true;
}
GraphId GraphCycles::GetId(void* ptr) {
int32_t i = rep_->ptrmap_.Find(ptr);
if (i != -1) {
return MakeId(i, rep_->nodes_[static_cast<uint32_t>(i)]->version);
} else if (rep_->free_nodes_.empty()) {
Node* n =
new (base_internal::LowLevelAlloc::AllocWithArena(sizeof(Node), arena))
Node;
n->version = 1;
n->visited = false;
n->rank = static_cast<int32_t>(rep_->nodes_.size());
n->masked_ptr = base_internal::HidePtr(ptr);
n->nstack = 0;
n->priority = 0;
rep_->nodes_.push_back(n);
rep_->ptrmap_.Add(ptr, n->rank);
return MakeId(n->rank, n->version);
} else {
int32_t r = rep_->free_nodes_.back();
rep_->free_nodes_.pop_back();
Node* n = rep_->nodes_[static_cast<uint32_t>(r)];
n->masked_ptr = base_internal::HidePtr(ptr);
n->nstack = 0;
n->priority = 0;
rep_->ptrmap_.Add(ptr, r);
return MakeId(r, n->version);
}
}
void GraphCycles::RemoveNode(void* ptr) {
int32_t i = rep_->ptrmap_.Remove(ptr);
if (i == -1) {
return;
}
Node* x = rep_->nodes_[static_cast<uint32_t>(i)];
HASH_FOR_EACH(y, x->out) {
rep_->nodes_[static_cast<uint32_t>(y)]->in.erase(i);
}
HASH_FOR_EACH(y, x->in) {
rep_->nodes_[static_cast<uint32_t>(y)]->out.erase(i);
}
x->in.clear();
x->out.clear();
x->masked_ptr = base_internal::HidePtr<void>(nullptr);
if (x->version == std::numeric_limits<uint32_t>::max()) {
} else {
x->version++;
rep_->free_nodes_.push_back(i);
}
}
void* GraphCycles::Ptr(GraphId id) {
Node* n = FindNode(rep_, id);
return n == nullptr ? nullptr
: base_internal::UnhidePtr<void>(n->masked_ptr);
}
bool GraphCycles::HasNode(GraphId node) {
return FindNode(rep_, node) != nullptr;
}
bool GraphCycles::HasEdge(GraphId x, GraphId y) const {
Node* xn = FindNode(rep_, x);
return xn && FindNode(rep_, y) && xn->out.contains(NodeIndex(y));
}
void GraphCycles::RemoveEdge(GraphId x, GraphId y) {
Node* xn = FindNode(rep_, x);
Node* yn = FindNode(rep_, y);
if (xn && yn) {
xn->out.erase(NodeIndex(y));
yn->in.erase(NodeIndex(x));
}
}
static bool ForwardDFS(GraphCycles::Rep* r, int32_t n, int32_t upper_bound);
static void BackwardDFS(GraphCycles::Rep* r, int32_t n, int32_t lower_bound);
static void Reorder(GraphCycles::Rep* r);
static void Sort(const Vec<Node*>&, Vec<int32_t>* delta);
static void MoveToList(
GraphCycles::Rep* r, Vec<int32_t>* src, Vec<int32_t>* dst);
bool GraphCycles::InsertEdge(GraphId idx, GraphId idy) {
Rep* r = rep_;
const int32_t x = NodeIndex(idx);
const int32_t y = NodeIndex(idy);
Node* nx = FindNode(r, idx);
Node* ny = FindNode(r, idy);
if (nx == nullptr || ny == nullptr) return true;
if (nx == ny) return false;
if (!nx->out.insert(y)) {
return true;
}
ny->in.insert(x);
if (nx->rank <= ny->rank) {
return true;
}
if (!ForwardDFS(r, y, nx->rank)) {
nx->out.erase(y);
ny->in.erase(x);
for (const auto& d : r->deltaf_) {
r->nodes_[static_cast<uint32_t>(d)]->visited = false;
}
return false;
}
BackwardDFS(r, x, ny->rank);
Reorder(r);
return true;
}
static bool ForwardDFS(GraphCycles::Rep* r, int32_t n, int32_t upper_bound) {
r->deltaf_.clear();
r->stack_.clear();
r->stack_.push_back(n);
while (!r->stack_.empty()) {
n = r->stack_.back();
r->stack_.pop_back();
Node* nn = r->nodes_[static_cast<uint32_t>(n)];
if (nn->visited) continue;
nn->visited = true;
r->deltaf_.push_back(n);
HASH_FOR_EACH(w, nn->out) {
Node* nw = r->nodes_[static_cast<uint32_t>(w)];
if (nw->rank == upper_bound) {
return false;
}
if (!nw->visited && nw->rank < upper_bound) {
r->stack_.push_back(w);
}
}
}
return true;
}
static void BackwardDFS(GraphCycles::Rep* r, int32_t n, int32_t lower_bound) {
r->deltab_.clear();
r->stack_.clear();
r->stack_.push_back(n);
while (!r->stack_.empty()) {
n = r->stack_.back();
r->stack_.pop_back();
Node* nn = r->nodes_[static_cast<uint32_t>(n)];
if (nn->visited) continue;
nn->visited = true;
r->deltab_.push_back(n);
HASH_FOR_EACH(w, nn->in) {
Node* nw = r->nodes_[static_cast<uint32_t>(w)];
if (!nw->visited && lower_bound < nw->rank) {
r->stack_.push_back(w);
}
}
}
}
static void Reorder(GraphCycles::Rep* r) {
Sort(r->nodes_, &r->deltab_);
Sort(r->nodes_, &r->deltaf_);
r->list_.clear();
MoveToList(r, &r->deltab_, &r->list_);
MoveToList(r, &r->deltaf_, &r->list_);
r->merged_.resize(r->deltab_.size() + r->deltaf_.size());
std::merge(r->deltab_.begin(), r->deltab_.end(),
r->deltaf_.begin(), r->deltaf_.end(),
r->merged_.begin());
for (uint32_t i = 0; i < r->list_.size(); i++) {
r->nodes_[static_cast<uint32_t>(r->list_[i])]->rank = r->merged_[i];
}
}
static void Sort(const Vec<Node*>& nodes, Vec<int32_t>* delta) {
struct ByRank {
const Vec<Node*>* nodes;
bool operator()(int32_t a, int32_t b) const {
return (*nodes)[static_cast<uint32_t>(a)]->rank <
(*nodes)[static_cast<uint32_t>(b)]->rank;
}
};
ByRank cmp;
cmp.nodes = &nodes;
std::sort(delta->begin(), delta->end(), cmp);
}
static void MoveToList(
GraphCycles::Rep* r, Vec<int32_t>* src, Vec<int32_t>* dst) {
for (auto& v : *src) {
int32_t w = v;
v = r->nodes_[static_cast<uint32_t>(w)]->rank;
r->nodes_[static_cast<uint32_t>(w)]->visited = false;
dst->push_back(w);
}
}
int GraphCycles::FindPath(GraphId idx, GraphId idy, int max_path_len,
GraphId path[]) const {
Rep* r = rep_;
if (FindNode(r, idx) == nullptr || FindNode(r, idy) == nullptr) return 0;
const int32_t x = NodeIndex(idx);
const int32_t y = NodeIndex(idy);
int path_len = 0;
NodeSet seen;
r->stack_.clear();
r->stack_.push_back(x);
while (!r->stack_.empty()) {
int32_t n = r->stack_.back();
r->stack_.pop_back();
if (n < 0) {
path_len--;
continue;
}
if (path_len < max_path_len) {
path[path_len] =
MakeId(n, rep_->nodes_[static_cast<uint32_t>(n)]->version);
}
path_len++;
r->stack_.push_back(-1);
if (n == y) {
return path_len;
}
HASH_FOR_EACH(w, r->nodes_[static_cast<uint32_t>(n)]->out) {
if (seen.insert(w)) {
r->stack_.push_back(w);
}
}
}
return 0;
}
bool GraphCycles::IsReachable(GraphId x, GraphId y) const {
return FindPath(x, y, 0, nullptr) > 0;
}
void GraphCycles::UpdateStackTrace(GraphId id, int priority,
int (*get_stack_trace)(void** stack, int)) {
Node* n = FindNode(rep_, id);
if (n == nullptr || n->priority >= priority) {
return;
}
n->nstack = (*get_stack_trace)(n->stack, ABSL_ARRAYSIZE(n->stack));
n->priority = priority;
}
int GraphCycles::GetStackTrace(GraphId id, void*** ptr) {
Node* n = FindNode(rep_, id);
if (n == nullptr) {
*ptr = nullptr;
return 0;
} else {
*ptr = n->stack;
return n->nstack;
}
}
}
ABSL_NAMESPACE_END
}
#endif | #include "absl/synchronization/internal/graphcycles.h"
#include <climits>
#include <map>
#include <random>
#include <unordered_set>
#include <utility>
#include <vector>
#include "gtest/gtest.h"
#include "absl/base/macros.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace synchronization_internal {
using Nodes = std::vector<int>;
struct Edge {
int from;
int to;
};
using Edges = std::vector<Edge>;
using RandomEngine = std::mt19937_64;
typedef std::map<int, GraphId> IdMap;
static GraphId Get(const IdMap& id, int num) {
auto iter = id.find(num);
return (iter == id.end()) ? InvalidGraphId() : iter->second;
}
static bool IsReachable(Edges *edges, int from, int to,
std::unordered_set<int> *seen) {
seen->insert(from);
if (from == to) return true;
for (const auto &edge : *edges) {
if (edge.from == from) {
if (edge.to == to) {
return true;
} else if (seen->find(edge.to) == seen->end() &&
IsReachable(edges, edge.to, to, seen)) {
return true;
}
}
}
return false;
}
static void PrintEdges(Edges *edges) {
LOG(INFO) << "EDGES (" << edges->size() << ")";
for (const auto &edge : *edges) {
int a = edge.from;
int b = edge.to;
LOG(INFO) << a << " " << b;
}
LOG(INFO) << "---";
}
static void PrintGCEdges(Nodes *nodes, const IdMap &id, GraphCycles *gc) {
LOG(INFO) << "GC EDGES";
for (int a : *nodes) {
for (int b : *nodes) {
if (gc->HasEdge(Get(id, a), Get(id, b))) {
LOG(INFO) << a << " " << b;
}
}
}
LOG(INFO) << "---";
}
static void PrintTransitiveClosure(Nodes *nodes, Edges *edges) {
LOG(INFO) << "Transitive closure";
for (int a : *nodes) {
for (int b : *nodes) {
std::unordered_set<int> seen;
if (IsReachable(edges, a, b, &seen)) {
LOG(INFO) << a << " " << b;
}
}
}
LOG(INFO) << "---";
}
static void PrintGCTransitiveClosure(Nodes *nodes, const IdMap &id,
GraphCycles *gc) {
LOG(INFO) << "GC Transitive closure";
for (int a : *nodes) {
for (int b : *nodes) {
if (gc->IsReachable(Get(id, a), Get(id, b))) {
LOG(INFO) << a << " " << b;
}
}
}
LOG(INFO) << "---";
}
static void CheckTransitiveClosure(Nodes *nodes, Edges *edges, const IdMap &id,
GraphCycles *gc) {
std::unordered_set<int> seen;
for (const auto &a : *nodes) {
for (const auto &b : *nodes) {
seen.clear();
bool gc_reachable = gc->IsReachable(Get(id, a), Get(id, b));
bool reachable = IsReachable(edges, a, b, &seen);
if (gc_reachable != reachable) {
PrintEdges(edges);
PrintGCEdges(nodes, id, gc);
PrintTransitiveClosure(nodes, edges);
PrintGCTransitiveClosure(nodes, id, gc);
LOG(FATAL) << "gc_reachable " << gc_reachable << " reachable "
<< reachable << " a " << a << " b " << b;
}
}
}
}
static void CheckEdges(Nodes *nodes, Edges *edges, const IdMap &id,
GraphCycles *gc) {
int count = 0;
for (const auto &edge : *edges) {
int a = edge.from;
int b = edge.to;
if (!gc->HasEdge(Get(id, a), Get(id, b))) {
PrintEdges(edges);
PrintGCEdges(nodes, id, gc);
LOG(FATAL) << "!gc->HasEdge(" << a << ", " << b << ")";
}
}
for (const auto &a : *nodes) {
for (const auto &b : *nodes) {
if (gc->HasEdge(Get(id, a), Get(id, b))) {
count++;
}
}
}
if (count != edges->size()) {
PrintEdges(edges);
PrintGCEdges(nodes, id, gc);
LOG(FATAL) << "edges->size() " << edges->size() << " count " << count;
}
}
static void CheckInvariants(const GraphCycles &gc) {
CHECK(gc.CheckInvariants()) << "CheckInvariants";
}
static int RandomNode(RandomEngine* rng, Nodes *nodes) {
std::uniform_int_distribution<int> uniform(0, nodes->size()-1);
return uniform(*rng);
}
static int RandomEdge(RandomEngine* rng, Edges *edges) {
std::uniform_int_distribution<int> uniform(0, edges->size()-1);
return uniform(*rng);
}
static int EdgeIndex(Edges *edges, int from, int to) {
int i = 0;
while (i != edges->size() &&
((*edges)[i].from != from || (*edges)[i].to != to)) {
i++;
}
  return i == edges->size() ? -1 : i;
}
TEST(GraphCycles, RandomizedTest) {
int next_node = 0;
Nodes nodes;
Edges edges;
IdMap id;
GraphCycles graph_cycles;
static const int kMaxNodes = 7;
static const int kDataOffset = 17;
int n = 100000;
int op = 0;
RandomEngine rng(testing::UnitTest::GetInstance()->random_seed());
std::uniform_int_distribution<int> uniform(0, 5);
auto ptr = [](intptr_t i) {
return reinterpret_cast<void*>(i + kDataOffset);
};
for (int iter = 0; iter != n; iter++) {
for (const auto &node : nodes) {
ASSERT_EQ(graph_cycles.Ptr(Get(id, node)), ptr(node)) << " node " << node;
}
CheckEdges(&nodes, &edges, id, &graph_cycles);
CheckTransitiveClosure(&nodes, &edges, id, &graph_cycles);
op = uniform(rng);
switch (op) {
case 0:
if (nodes.size() < kMaxNodes) {
int new_node = next_node++;
GraphId new_gnode = graph_cycles.GetId(ptr(new_node));
ASSERT_NE(new_gnode, InvalidGraphId());
id[new_node] = new_gnode;
ASSERT_EQ(ptr(new_node), graph_cycles.Ptr(new_gnode));
nodes.push_back(new_node);
}
break;
case 1:
if (nodes.size() > 0) {
int node_index = RandomNode(&rng, &nodes);
int node = nodes[node_index];
nodes[node_index] = nodes.back();
nodes.pop_back();
graph_cycles.RemoveNode(ptr(node));
ASSERT_EQ(graph_cycles.Ptr(Get(id, node)), nullptr);
id.erase(node);
int i = 0;
while (i != edges.size()) {
if (edges[i].from == node || edges[i].to == node) {
edges[i] = edges.back();
edges.pop_back();
} else {
i++;
}
}
}
break;
case 2:
if (nodes.size() > 0) {
int from = RandomNode(&rng, &nodes);
int to = RandomNode(&rng, &nodes);
if (EdgeIndex(&edges, nodes[from], nodes[to]) == -1) {
if (graph_cycles.InsertEdge(id[nodes[from]], id[nodes[to]])) {
Edge new_edge;
new_edge.from = nodes[from];
new_edge.to = nodes[to];
edges.push_back(new_edge);
} else {
std::unordered_set<int> seen;
ASSERT_TRUE(IsReachable(&edges, nodes[to], nodes[from], &seen))
<< "Edge " << nodes[to] << "->" << nodes[from];
}
}
}
break;
case 3:
if (edges.size() > 0) {
int i = RandomEdge(&rng, &edges);
int from = edges[i].from;
int to = edges[i].to;
ASSERT_EQ(i, EdgeIndex(&edges, from, to));
edges[i] = edges.back();
edges.pop_back();
ASSERT_EQ(-1, EdgeIndex(&edges, from, to));
graph_cycles.RemoveEdge(id[from], id[to]);
}
break;
case 4:
if (nodes.size() > 0) {
int from = RandomNode(&rng, &nodes);
int to = RandomNode(&rng, &nodes);
GraphId path[2*kMaxNodes];
int path_len = graph_cycles.FindPath(id[nodes[from]], id[nodes[to]],
ABSL_ARRAYSIZE(path), path);
std::unordered_set<int> seen;
bool reachable = IsReachable(&edges, nodes[from], nodes[to], &seen);
bool gc_reachable =
graph_cycles.IsReachable(Get(id, nodes[from]), Get(id, nodes[to]));
ASSERT_EQ(path_len != 0, reachable);
ASSERT_EQ(path_len != 0, gc_reachable);
ASSERT_LE(path_len, kMaxNodes + 1);
if (path_len != 0) {
ASSERT_EQ(id[nodes[from]], path[0]);
ASSERT_EQ(id[nodes[to]], path[path_len-1]);
for (int i = 1; i < path_len; i++) {
ASSERT_TRUE(graph_cycles.HasEdge(path[i-1], path[i]));
}
}
}
break;
case 5:
CheckInvariants(graph_cycles);
break;
default:
LOG(FATAL) << "op " << op;
}
std::bernoulli_distribution one_in_1024(1.0 / 1024);
if (one_in_1024(rng)) {
CheckEdges(&nodes, &edges, id, &graph_cycles);
CheckTransitiveClosure(&nodes, &edges, id, &graph_cycles);
for (int i = 0; i != 256; i++) {
int new_node = next_node++;
GraphId new_gnode = graph_cycles.GetId(ptr(new_node));
ASSERT_NE(InvalidGraphId(), new_gnode);
id[new_node] = new_gnode;
ASSERT_EQ(ptr(new_node), graph_cycles.Ptr(new_gnode));
for (const auto &node : nodes) {
ASSERT_NE(node, new_node);
}
nodes.push_back(new_node);
}
for (int i = 0; i != 256; i++) {
ASSERT_GT(nodes.size(), 0);
int node_index = RandomNode(&rng, &nodes);
int node = nodes[node_index];
nodes[node_index] = nodes.back();
nodes.pop_back();
graph_cycles.RemoveNode(ptr(node));
id.erase(node);
int j = 0;
while (j != edges.size()) {
if (edges[j].from == node || edges[j].to == node) {
edges[j] = edges.back();
edges.pop_back();
} else {
j++;
}
}
}
CheckInvariants(graph_cycles);
}
}
}
class GraphCyclesTest : public ::testing::Test {
public:
IdMap id_;
GraphCycles g_;
static void* Ptr(int i) {
return reinterpret_cast<void*>(static_cast<uintptr_t>(i));
}
static int Num(void* ptr) {
return static_cast<int>(reinterpret_cast<uintptr_t>(ptr));
}
GraphCyclesTest() {
for (int i = 0; i < 100; i++) {
id_[i] = g_.GetId(Ptr(i));
}
CheckInvariants(g_);
}
bool AddEdge(int x, int y) {
return g_.InsertEdge(Get(id_, x), Get(id_, y));
}
void AddMultiples() {
for (int x = 1; x < 25; x++) {
EXPECT_TRUE(AddEdge(x, 2*x)) << x;
EXPECT_TRUE(AddEdge(x, 3*x)) << x;
}
CheckInvariants(g_);
}
std::string Path(int x, int y) {
GraphId path[5];
int np = g_.FindPath(Get(id_, x), Get(id_, y), ABSL_ARRAYSIZE(path), path);
std::string result;
for (int i = 0; i < np; i++) {
if (i >= ABSL_ARRAYSIZE(path)) {
result += " ...";
break;
}
if (!result.empty()) result.push_back(' ');
char buf[20];
snprintf(buf, sizeof(buf), "%d", Num(g_.Ptr(path[i])));
result += buf;
}
return result;
}
};
TEST_F(GraphCyclesTest, NoCycle) {
AddMultiples();
CheckInvariants(g_);
}
TEST_F(GraphCyclesTest, SimpleCycle) {
AddMultiples();
EXPECT_FALSE(AddEdge(8, 4));
EXPECT_EQ("4 8", Path(4, 8));
CheckInvariants(g_);
}
TEST_F(GraphCyclesTest, IndirectCycle) {
AddMultiples();
EXPECT_TRUE(AddEdge(16, 9));
CheckInvariants(g_);
EXPECT_FALSE(AddEdge(9, 2));
EXPECT_EQ("2 4 8 16 9", Path(2, 9));
CheckInvariants(g_);
}
TEST_F(GraphCyclesTest, LongPath) {
ASSERT_TRUE(AddEdge(2, 4));
ASSERT_TRUE(AddEdge(4, 6));
ASSERT_TRUE(AddEdge(6, 8));
ASSERT_TRUE(AddEdge(8, 10));
ASSERT_TRUE(AddEdge(10, 12));
ASSERT_FALSE(AddEdge(12, 2));
EXPECT_EQ("2 4 6 8 10 ...", Path(2, 12));
CheckInvariants(g_);
}
TEST_F(GraphCyclesTest, RemoveNode) {
ASSERT_TRUE(AddEdge(1, 2));
ASSERT_TRUE(AddEdge(2, 3));
ASSERT_TRUE(AddEdge(3, 4));
ASSERT_TRUE(AddEdge(4, 5));
g_.RemoveNode(g_.Ptr(id_[3]));
id_.erase(3);
ASSERT_TRUE(AddEdge(5, 1));
}
TEST_F(GraphCyclesTest, ManyEdges) {
const int N = 50;
for (int i = 0; i < N; i++) {
for (int j = 1; j < N; j++) {
ASSERT_TRUE(AddEdge(i, i+j));
}
}
CheckInvariants(g_);
ASSERT_TRUE(AddEdge(2*N-1, 0));
CheckInvariants(g_);
ASSERT_FALSE(AddEdge(10, 9));
CheckInvariants(g_);
}
TEST(GraphCycles, IntegerOverflow) {
GraphCycles graph_cycles;
char *buf = (char *)nullptr;
GraphId prev_id = graph_cycles.GetId(buf);
buf += 1;
GraphId id = graph_cycles.GetId(buf);
ASSERT_TRUE(graph_cycles.InsertEdge(prev_id, id));
graph_cycles.TestOnlyAddNodes(INT_MAX / 40);
buf += 1;
GraphId newid = graph_cycles.GetId(buf);
graph_cycles.HasEdge(prev_id, newid);
graph_cycles.RemoveNode(buf);
}
}
ABSL_NAMESPACE_END
} | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/synchronization/internal/graphcycles.cc | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/synchronization/internal/graphcycles_test.cc | 03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4 |
711898fb-a9b8-4620-bf51-767c85a01c3c | cpp | tensorflow/tensorflow | const_op | tensorflow/compiler/tf2xla/kernels/const_op.cc | tensorflow/cc/ops/const_op_test.cc | #include "tensorflow/compiler/tf2xla/type_util.h"
#include "tensorflow/compiler/tf2xla/xla_compiler.h"
#include "tensorflow/compiler/tf2xla/xla_op_kernel.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
#include "xla/hlo/builder/xla_builder.h"
#include "tensorflow/core/framework/kernel_def_builder.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/framework/types.pb.h"
namespace tensorflow {
namespace {
template <typename DstT, typename SrcT>
DstT CastTo(SrcT src) {
return static_cast<DstT>(src);
}
template <typename DstT,
typename std::enable_if<std::is_same<DstT, Eigen::half>::value ||
std::is_same<DstT, bfloat16>::value>::type* =
nullptr>
DstT CastTo(int32_t src) {
return absl::bit_cast<DstT>(static_cast<uint16>(src));
}
xla::XlaOp GetScalarConst(const TensorProto& proto, xla::XlaBuilder* b) {
if (!proto.tensor_content().empty()) return xla::XlaOp();
TensorShape shape(proto.tensor_shape());
if (shape.num_elements() > 1) {
switch (proto.dtype()) {
#define HANDLE_SPLAT(DTYPE, field_name, xla_type) \
case DTYPE: \
if (proto.field_name##_val_size() == 0) { \
return xla::ConstantR0(b, CastTo<xla_type>(0)); \
} else if (proto.field_name##_val_size() == 1) { \
return xla::ConstantR0(b, CastTo<xla_type>(proto.field_name##_val(0))); \
} \
break;
HANDLE_SPLAT(DT_BOOL, bool, bool);
HANDLE_SPLAT(DT_INT8, int, int8_t);
HANDLE_SPLAT(DT_INT16, int, int16_t);
HANDLE_SPLAT(DT_INT32, int, int32_t);
HANDLE_SPLAT(DT_INT64, int64, int64_t);
HANDLE_SPLAT(DT_UINT8, int, uint8_t);
HANDLE_SPLAT(DT_UINT16, int, uint16_t);
HANDLE_SPLAT(DT_UINT32, uint32, uint32_t);
HANDLE_SPLAT(DT_UINT64, uint64, uint64_t);
HANDLE_SPLAT(DT_FLOAT, float, float);
HANDLE_SPLAT(DT_DOUBLE, double, double);
HANDLE_SPLAT(DT_BFLOAT16, half, bfloat16);
HANDLE_SPLAT(DT_HALF, half, Eigen::half);
#undef HANDLE_SPLAT
#define HANDLE_COMPLEX_SPLAT(DTYPE, field_name, xla_type) \
case DTYPE: \
if (proto.field_name##_val_size() == 2) { \
return xla::ConstantR0<xla_type>( \
b, xla_type(proto.field_name##_val(0), proto.field_name##_val(1))); \
} \
break;
HANDLE_COMPLEX_SPLAT(DT_COMPLEX64, scomplex, xla::complex64);
HANDLE_COMPLEX_SPLAT(DT_COMPLEX128, dcomplex, xla::complex128);
#undef HANDLE_COMPLEX_SPLAT
default:
break;
}
}
return xla::XlaOp();
}
class ConstOp : public XlaOpKernel {
public:
explicit ConstOp(OpKernelConstruction* ctx) : XlaOpKernel(ctx) {
const TensorProto* proto = nullptr;
OP_REQUIRES_OK(ctx, ctx->GetAttr("value", &proto));
proto_ = *proto;
OP_REQUIRES(
ctx, ctx->output_type(0) == proto_.dtype(),
errors::InvalidArgument("Type mismatch between value (",
DataTypeString(proto_.dtype()), ") and dtype (",
DataTypeString(ctx->output_type(0)), ")"));
OP_REQUIRES_OK(ctx, TensorShape::IsValidShape(proto_.tensor_shape()));
}
void Compile(XlaOpKernelContext* ctx) override {
xla::XlaBuilder* b = ctx->builder();
TensorShape shape(proto_.tensor_shape());
if (shape.num_elements() > 1) {
xla::XlaOp value = GetScalarConst(proto_, b);
if (value.valid()) {
ctx->SetOutput(0, xla::Broadcast(value, shape.dim_sizes()));
return;
}
}
Tensor tensor(proto_.dtype());
OP_REQUIRES(ctx, tensor.FromProto(cpu_allocator(), proto_),
errors::InvalidArgument("Cannot parse tensor from proto: ",
proto_.DebugString()));
ctx->SetConstantOutput(0, tensor);
}
private:
TensorProto proto_;
ConstOp(const ConstOp&) = delete;
void operator=(const ConstOp&) = delete;
};
REGISTER_XLA_OP(Name("Const").CompilationOnly(), ConstOp);
}
} | #include "tensorflow/cc/ops/const_op.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
template <typename T>
void ExpectNodeEqual(const Node* n, gtl::ArraySlice<T> values,
TensorShape shape) {
EXPECT_TRUE(n->IsConstant());
Tensor tensor;
TF_EXPECT_OK(GetNodeAttr(n->attrs(), "value", &tensor));
DataType dtype;
TF_EXPECT_OK(GetNodeAttr(n->attrs(), "dtype", &dtype));
EXPECT_EQ(tensor.dtype(), dtype);
test::ExpectTensorEqual<T>(tensor, test::AsTensor(values, shape));
}
void ExpectTypeAndShape(const Node* n, DataType expected_dtype,
TensorShape expected_shape) {
EXPECT_TRUE(n->IsConstant());
Tensor tensor;
TF_EXPECT_OK(GetNodeAttr(n->attrs(), "value", &tensor));
DataType dtype;
TF_EXPECT_OK(GetNodeAttr(n->attrs(), "dtype", &dtype));
EXPECT_EQ(dtype, expected_dtype);
EXPECT_EQ(expected_shape, TensorShape(tensor.shape()));
}
}
TEST(ConstOpTest, Basic) {
Scope root = Scope::NewRootScope();
auto c = ops::Const(root, 42.0f);
TF_EXPECT_OK(root.status());
EXPECT_EQ(c.op().output_type(0), DT_FLOAT);
ExpectNodeEqual<float>(c.node(), {42.0f}, {});
}
TEST(ConstOpTest, MultiDim) {
Scope root = Scope::NewRootScope();
auto c = ops::Const(root, {{2.0}, {3.0}});
TF_CHECK_OK(root.status());
EXPECT_EQ(c.op().output_type(0), DT_DOUBLE);
ExpectNodeEqual<double>(c.node(), {2.0, 3.0}, {2, 1});
}
TEST(ConstOpTest, Empty) {
Scope root = Scope::NewRootScope();
auto c1 = ops::Const(root, {});
TF_CHECK_OK(root.status());
ExpectTypeAndShape(c1.node(), DT_FLOAT, {0});
auto c2 = ops::Const(root, {{}});
TF_CHECK_OK(root.status());
ExpectTypeAndShape(c2.node(), DT_FLOAT, {1, 0});
auto c3 = ops::Const(root, {{{}, {}}});
TF_CHECK_OK(root.status());
ExpectTypeAndShape(c3.node(), DT_FLOAT, {1, 2, 0});
auto c4 = ops::Const<int>(root, {{{}}});
TF_CHECK_OK(root.status());
ExpectTypeAndShape(c4.node(), DT_INT32, {1, 1, 0});
ops::Const(root, {{}, {{}}});
EXPECT_FALSE(root.status().ok());
}
TEST(ConstOpTest, WithExplicitShape) {
Scope root = Scope::NewRootScope();
auto c = ops::Const(root, 42.0, {2, 2});
TF_CHECK_OK(root.status());
EXPECT_EQ(c.op().output_type(0), DT_DOUBLE);
ExpectNodeEqual<double>(c.node(), {42.0, 42.0, 42.0, 42.0}, {2, 2});
auto d = ops::Const(root, {"1", "2", "3", "4", "5", "6"}, {2, 3});
TF_CHECK_OK(root.status());
EXPECT_EQ(d.op().output_type(0), DT_STRING);
ExpectNodeEqual<tstring>(d.node(), {"1", "2", "3", "4", "5", "6"}, {2, 3});
}
TEST(ConstOpTest, FromProto) {
Scope root = Scope::NewRootScope();
TensorProto proto;
proto.set_dtype(DT_DOUBLE);
TensorShape({2, 2}).AsProto(proto.mutable_tensor_shape());
for (int i = 0; i < 4; ++i) {
proto.add_double_val(static_cast<double>(i));
}
auto c = ops::ConstFromProto(root, proto);
TF_CHECK_OK(root.status());
EXPECT_EQ(c.op().output_type(0), DT_DOUBLE);
ExpectNodeEqual<double>(c.node(), {0.0, 1.0, 2.0, 3.0}, {2, 2});
}
TEST(ConstOpTest, InvalidInitializer) {
Scope root = Scope::NewRootScope();
ops::Const(root, {{2.0}, {"df"}});
EXPECT_FALSE(root.status().ok());
}
TEST(ConstOpTest, Names) {
Scope root = Scope::NewRootScope();
auto c = ops::Const(root, {{2.0}, {3.0}});
EXPECT_EQ(c.node()->name(), "Const");
auto c_1 = ops::Const(root, {{2.0}, {3.0}});
EXPECT_EQ(c_1.node()->name(), "Const_1");
auto x = ops::Const(root.WithOpName("x"), 1);
EXPECT_EQ(x.node()->name(), "x");
auto x_1 = ops::Const(root.WithOpName("x"), 1);
EXPECT_EQ(x_1.node()->name(), "x_1");
Scope child = root.NewSubScope("c");
auto c_y = ops::Const(child.WithOpName("y"), 1);
EXPECT_EQ(c_y.node()->name(), "c/y");
auto c_y_1 = ops::Const(child.WithOpName("y"), 1);
EXPECT_EQ(c_y_1.node()->name(), "c/y_1");
}
TEST(ConstOpTest, TemplatedConst) {
Scope root = Scope::NewRootScope();
auto c1 = ops::Const<int>(root, {1, 2});
ExpectTypeAndShape(c1.node(), DT_INT32, {2});
auto c2 = ops::Const<tstring>(root, {{"this"}, {"is"}, {"a"}, {"constant"}});
ExpectTypeAndShape(c2.node(), DT_STRING, {4, 1});
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2xla/kernels/const_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/cc/ops/const_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
d937813a-ec6a-45e2-9277-8cabb12ff3bb | cpp | tensorflow/tensorflow | host_tracer | third_party/xla/xla/backends/profiler/cpu/host_tracer.cc | third_party/xla/xla/backends/profiler/cpu/host_tracer_test.cc | #include "xla/backends/profiler/cpu/host_tracer.h"
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "xla/tsl/profiler/backends/cpu/host_tracer_utils.h"
#include "xla/tsl/profiler/backends/cpu/threadpool_listener.h"
#include "xla/tsl/profiler/backends/cpu/traceme_recorder.h"
#include "xla/tsl/profiler/utils/time_utils.h"
#include "xla/tsl/profiler/utils/xplane_schema.h"
#include "xla/tsl/profiler/utils/xplane_utils.h"
#include "tsl/platform/errors.h"
#include "tsl/profiler/lib/profiler_collection.h"
#include "tsl/profiler/lib/profiler_interface.h"
#include "tsl/profiler/protobuf/xplane.pb.h"
namespace xla {
namespace profiler {
namespace {
class HostTracer : public tsl::profiler::ProfilerInterface {
public:
explicit HostTracer(int host_trace_level);
~HostTracer() override;
absl::Status Start() override;
absl::Status Stop() override;
absl::Status CollectData(
tensorflow::profiler::XSpace* space) override;
private:
const int host_trace_level_;
bool recording_ = false;
uint64_t start_timestamp_ns_ = 0;
tsl::profiler::TraceMeRecorder::Events events_;
};
HostTracer::HostTracer(int host_trace_level)
: host_trace_level_(host_trace_level) {}
HostTracer::~HostTracer() { Stop().IgnoreError(); }
absl::Status HostTracer::Start() {
if (recording_) {
return tsl::errors::Internal("TraceMeRecorder already started");
}
start_timestamp_ns_ = tsl::profiler::GetCurrentTimeNanos();
recording_ = tsl::profiler::TraceMeRecorder::Start(host_trace_level_);
if (!recording_) {
return tsl::errors::Internal("Failed to start TraceMeRecorder");
}
return absl::OkStatus();
}
absl::Status HostTracer::Stop() {
if (!recording_) {
return tsl::errors::Internal("TraceMeRecorder not started");
}
events_ = tsl::profiler::TraceMeRecorder::Stop();
recording_ = false;
return absl::OkStatus();
}
absl::Status HostTracer::CollectData(
tensorflow::profiler::XSpace* space) {
VLOG(2) << "Collecting data to XSpace from HostTracer.";
if (recording_) {
return tsl::errors::Internal("TraceMeRecorder not stopped");
}
if (events_.empty()) {
return absl::OkStatus();
}
tensorflow::profiler::XPlane* plane =
tsl::profiler::FindOrAddMutablePlaneWithName(
space, tsl::profiler::kHostThreadsPlaneName);
ConvertCompleteEventsToXPlane(start_timestamp_ns_, std::exchange(events_, {}),
plane);
return absl::OkStatus();
}
}
std::unique_ptr<tsl::profiler::ProfilerInterface> CreateHostTracer(
const HostTracerOptions& options) {
if (options.trace_level == 0) return nullptr;
std::vector<std::unique_ptr<tsl::profiler::ProfilerInterface>> profilers;
profilers.push_back(std::make_unique<HostTracer>(options.trace_level));
profilers.push_back(
std::make_unique<tsl::profiler::ThreadpoolProfilerInterface>());
return std::make_unique<tsl::profiler::ProfilerCollection>(
std::move(profilers));
}
}
} | #include "xla/backends/profiler/cpu/host_tracer.h"
#include <cstdint>
#include <memory>
#include <optional>
#include <ostream>
#include <string>
#include <gtest/gtest.h>
#include "absl/types/optional.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/tsl/profiler/utils/tf_xplane_visitor.h"
#include "xla/tsl/profiler/utils/timespan.h"
#include "xla/tsl/profiler/utils/xplane_schema.h"
#include "xla/tsl/profiler/utils/xplane_visitor.h"
#include "tsl/platform/blocking_counter.h"
#include "tsl/platform/env.h"
#include "tsl/platform/test.h"
#include "tsl/platform/threadpool.h"
#include "tsl/platform/types.h"
#include "tsl/profiler/lib/profiler_interface.h"
#include "tsl/profiler/lib/traceme.h"
#include "tsl/profiler/protobuf/xplane.pb.h"
namespace xla {
namespace profiler {
namespace {
using ::tsl::Env;
using ::tsl::Thread;
using ::tsl::ThreadOptions;
using ::tsl::profiler::StatType;
using ::tsl::profiler::Timespan;
using ::tsl::profiler::TraceMe;
using ::tsl::profiler::XEventVisitor;
using ::tsl::profiler::XLineVisitor;
using ::tsl::profiler::XPlaneVisitor;
using ::tsl::profiler::XStatVisitor;
TEST(HostTracerTest, CollectsTraceMeEventsAsXSpace) {
tsl::uint32 thread_id;
std::string thread_name = "MyThreadName";
tensorflow::profiler::XSpace space;
std::unique_ptr<Thread> traced_thread(
Env::Default()->StartThread(ThreadOptions(), thread_name, [&] {
ASSERT_TRUE(Env::Default()->GetCurrentThreadName(&thread_name));
thread_id = Env::Default()->GetCurrentThreadId();
auto tracer = CreateHostTracer({});
TF_ASSERT_OK(tracer->Start());
{ TraceMe traceme("hello"); }
{ TraceMe traceme("world"); }
{ TraceMe traceme("contains#inside"); }
{ TraceMe traceme("good#key1=value1#"); }
{ TraceMe traceme("morning#key1=value1,key2=value2#"); }
{ TraceMe traceme("incomplete#key1=value1,key2#"); }
{ TraceMe traceme("Iterator::XXX::YYY::ParallelMap"); }
TF_ASSERT_OK(tracer->Stop());
TF_ASSERT_OK(tracer->CollectData(&space));
}));
traced_thread.reset();
ASSERT_NO_FATAL_FAILURE();
ASSERT_EQ(space.planes_size(), 1);
const auto& plane = space.planes(0);
XPlaneVisitor xplane(&plane);
ASSERT_EQ(plane.name(), ::tsl::profiler::kHostThreadsPlaneName);
ASSERT_EQ(plane.lines_size(), 1);
ASSERT_EQ(plane.event_metadata_size(), 7);
ASSERT_EQ(plane.stat_metadata_size(), 4);
const auto& line = plane.lines(0);
EXPECT_EQ(line.id(), thread_id);
EXPECT_EQ(line.name(), thread_name);
ASSERT_EQ(line.events_size(), 7);
const auto& events = line.events();
XEventVisitor e0(&xplane, &line, &events[0]);
EXPECT_EQ(e0.Name(), "hello");
ASSERT_EQ(events[0].stats_size(), 0);
XEventVisitor e1(&xplane, &line, &events[1]);
EXPECT_EQ(e1.Name(), "world");
ASSERT_EQ(events[1].stats_size(), 0);
XEventVisitor e2(&xplane, &line, &events[2]);
EXPECT_EQ(e2.Name(), "contains#inside");
ASSERT_EQ(events[2].stats_size(), 0);
XEventVisitor e3(&xplane, &line, &events[3]);
EXPECT_EQ(e3.Name(), "good");
ASSERT_EQ(events[3].stats_size(), 1);
{
std::optional<std::string> value;
e3.ForEachStat([&](const XStatVisitor& stat) {
if (stat.Name() == "key1") value = stat.ToString();
});
ASSERT_TRUE(value);
EXPECT_EQ(*value, "value1");
}
XEventVisitor e4(&xplane, &line, &events[4]);
EXPECT_EQ(e4.Name(), "morning");
ASSERT_EQ(events[4].stats_size(), 2);
{
std::optional<std::string> value1, value2;
e4.ForEachStat([&](const XStatVisitor& stat) {
if (stat.Name() == "key1") {
value1 = stat.ToString();
} else if (stat.Name() == "key2") {
value2 = stat.ToString();
}
});
ASSERT_TRUE(value1 && value2);
EXPECT_EQ(*value1, "value1");
EXPECT_EQ(*value2, "value2");
}
XEventVisitor e5(&xplane, &line, &events[5]);
EXPECT_EQ(e5.Name(), "incomplete");
ASSERT_EQ(events[5].stats_size(), 1);
{
std::optional<std::string> value1, value2;
e5.ForEachStat([&](const XStatVisitor& stat) {
if (stat.Name() == "key1") {
value1 = stat.ToString();
} else if (stat.Name() == "key2") {
value2 = stat.ToString();
}
});
ASSERT_TRUE(value1 && !value2);
EXPECT_EQ(*value1, "value1");
}
XEventVisitor e6(&xplane, &line, &events[6]);
EXPECT_EQ(e6.Name(), "Iterator::XXX::YYY::ParallelMap");
EXPECT_EQ(e6.DisplayName(), "Iterator::ParallelMap");
}
TEST(HostTracerTest, CollectEventsFromThreadPool) {
auto thread_pool =
std::make_unique<tsl::thread::ThreadPool>(Env::Default(),
"HostTracerTest",
1);
tsl::BlockingCounter counter(1);
auto tracer = CreateHostTracer({});
TF_EXPECT_OK(tracer->Start());
thread_pool->Schedule([&counter] {
TraceMe traceme("hello");
counter.DecrementCount();
});
counter.Wait();
thread_pool.reset();
TF_EXPECT_OK(tracer->Stop());
tensorflow::profiler::XSpace space;
TF_EXPECT_OK(tracer->CollectData(&space));
EXPECT_THAT(space.planes(), testing::SizeIs(1));
XPlaneVisitor xplane = tsl::profiler::CreateTfXPlaneVisitor(&space.planes(0));
bool has_record_event = false;
bool has_start_region_event = false;
bool has_end_region_event = false;
int64_t record_region_id = 0;
int64_t start_region_id = 0;
Timespan region_timespan;
Timespan traceme_timespan;
xplane.ForEachLine([&](const XLineVisitor& line) {
line.ForEachEvent([&](const XEventVisitor& event) {
if (event.Name() == tsl::profiler::kThreadpoolListenerRecord) {
has_record_event = true;
const auto& stat = event.GetStat(StatType::kProducerId);
EXPECT_TRUE(stat.has_value());
record_region_id = stat->IntOrUintValue();
} else if (event.Name() ==
tsl::profiler::kThreadpoolListenerStartRegion) {
has_start_region_event = true;
const auto& stat = event.GetStat(StatType::kConsumerId);
EXPECT_TRUE(stat.has_value());
start_region_id = stat->IntOrUintValue();
region_timespan = event.GetTimespan();
} else if (event.Name() == tsl::profiler::kThreadpoolListenerStopRegion) {
has_end_region_event = true;
region_timespan = Timespan::FromEndPoints(region_timespan.begin_ps(),
event.GetTimespan().end_ps());
} else if (event.Name() == "hello") {
traceme_timespan = event.GetTimespan();
}
});
});
EXPECT_TRUE(has_record_event);
EXPECT_TRUE(has_start_region_event);
EXPECT_TRUE(has_end_region_event);
EXPECT_EQ(record_region_id, start_region_id);
EXPECT_TRUE(region_timespan.Includes(traceme_timespan));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/backends/profiler/cpu/host_tracer.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/backends/profiler/cpu/host_tracer_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
7fbb648b-bae7-4997-9456-1c16ed08c53c | cpp | tensorflow/tensorflow | dependency_optimizer | tensorflow/core/grappler/optimizers/dependency_optimizer.cc | tensorflow/core/grappler/optimizers/dependency_optimizer_test.cc | #include "tensorflow/core/grappler/optimizers/dependency_optimizer.h"
#include <unordered_set>
#include "absl/container/flat_hash_map.h"
#include "absl/strings/match.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/grappler/costs/graph_properties.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/op_types.h"
#include "tensorflow/core/grappler/optimizers/constant_folding.h"
#include "tensorflow/core/grappler/utils.h"
#include "tensorflow/core/grappler/utils/topological_sort.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/stringpiece.h"
#include "tensorflow/core/lib/gtl/inlined_vector.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/util/device_name_utils.h"
namespace tensorflow {
namespace grappler {
namespace {
bool RemoveControlInput(NodeDef* node, const string& control_input_to_remove,
NodeMap* node_map) {
for (int pos = node->input_size() - 1; pos >= 0; --pos) {
const string& input = node->input(pos);
if (input[0] != '^') break;
if (input == control_input_to_remove) {
node->mutable_input()->SwapElements(pos, node->input_size() - 1);
node->mutable_input()->RemoveLast();
node_map->RemoveOutput(NodeName(input), node->name());
return true;
}
}
return false;
}
}
bool DependencyOptimizer::SafeToRemoveIdentity(const NodeDef& node) const {
if (!IsIdentity(node) && !IsIdentityN(node)) {
return true;
}
if (nodes_to_preserve_.find(node.name()) != nodes_to_preserve_.end()) {
return false;
}
if (!fetch_nodes_known_) {
return false;
}
if (node.input_size() < 1) {
return false;
}
const NodeDef* input = node_map_->GetNode(NodeName(node.input(0)));
if (input == nullptr) {
VLOG(1) << "node = " << node.name() << " input = " << node.input(0);
return false;
}
if (IsVariable(*input) || IsRecv(*input)) {
return false;
}
for (const auto& consumer : node_map_->GetOutputs(node.name())) {
if (node.input_size() > 1 && (IsRetval(*consumer) || IsMerge(*consumer))) {
return false;
}
if (IsSwitch(*input)) {
for (const string& consumer_input : consumer->input()) {
if (consumer_input == AsControlDependency(node.name())) {
return false;
}
}
}
}
return true;
}
bool DependencyOptimizer::SafeToConvertToNoOp(const NodeDef& node) const {
if (HasRegularOutputs(node, *node_map_)) {
VLOG(3) << "Not safe to convert '" << node.name()
<< " to NoOp. Node has outputs.";
return false;
}
if (!fetch_nodes_known_) {
VLOG(3) << "Not safe to convert '" << node.name()
<< " to NoOp. Fetches unknown.";
return false;
}
if (nodes_to_preserve_.find(node.name()) != nodes_to_preserve_.end()) {
VLOG(3) << "Not safe to convert to NoOp: " << node.name()
<< " is in preserve set.";
return false;
}
if (IsMerge(node) || IsSwitch(node) || ModifiesFrameInfo(node)) {
VLOG(3) << "Not safe to convert '" << node.name()
<< " to NoOp. Node modifies frame info.";
return false;
}
static const absl::flat_hash_set<string>* gather_ops =
new absl::flat_hash_set<string>{"Gather", "GatherV2", "GatherNd",
"ResourceGather", "ResourceGatherNd"};
const bool is_variable_read =
IsReadVariableOp(node) || IsReadVariablesOp(node) ||
gather_ops->find(node.op()) != gather_ops->end();
if (!is_variable_read && !IsFreeOfSideEffect(node)) {
VLOG(3) << "Not safe to convert '" << node.name()
<< " to NoOp. Node has side effect.";
return false;
}
if (absl::StartsWith(node.op(), "Submodel")) {
return false;
}
const OpDef* op_def = nullptr;
Status status = OpRegistry::Global()->LookUpOpDef(node.op(), &op_def);
if (!status.ok() || op_def->output_arg_size() == 0) {
return false;
}
const std::unordered_set<string> do_not_rewrite_ops{
"Assert", "CheckNumerics", "_Retval",
"_Arg", "_ParallelConcatUpdate", "TPUExecute",
"TPUCompile", "ControlTrigger"};
if (do_not_rewrite_ops.find(node.op()) != do_not_rewrite_ops.end()) {
return false;
}
if (!SafeToRemoveIdentity(node)) {
return false;
}
return true;
}
int DependencyOptimizer::NumEdgesIfBypassed(
const NodeDef& node, const std::vector<NodeDef*>& output_nodes) const {
const bool is_multi_input_identity_n =
IsIdentityN(node) && !IsIdentityNSingleInput(node);
const int num_outputs = output_nodes.size();
const int num_inputs = node.input_size();
if (is_multi_input_identity_n) {
int num_edges_if_bypassed(0);
for (const string& input_node_name : node.input()) {
if (IsControlInput(input_node_name)) {
num_edges_if_bypassed += num_outputs;
} else {
++num_edges_if_bypassed;
}
}
for (auto consumer : output_nodes) {
for (int j = 0; j < consumer->input_size(); ++j) {
const TensorId consumer_input = ParseTensorName(consumer->input(j));
if (consumer_input.node() == node.name()) {
if (IsControlInput(consumer_input)) {
num_edges_if_bypassed += num_inputs;
} else {
++num_edges_if_bypassed;
}
}
}
}
return num_edges_if_bypassed;
} else {
return num_inputs * num_outputs;
}
}
bool DependencyOptimizer::BypassingNodeIsBeneficial(
const NodeDef& node, const std::vector<NodeDef*>& input_nodes,
const std::vector<NodeDef*>& output_nodes) const {
const bool is_identity = IsIdentity(node) || IsIdentityNSingleInput(node);
const bool is_multi_input_identity_n =
IsIdentityN(node) && !IsIdentityNSingleInput(node);
const int num_outputs = output_nodes.size();
const int num_inputs = node.input_size();
if (NumEdgesIfBypassed(node, output_nodes) > num_inputs + num_outputs) {
return false;
}
if ((num_inputs == 1 && num_outputs > 1 &&
input_nodes[0]->device() != node.device()) ||
(num_inputs > 1 && num_outputs == 1 &&
output_nodes[0]->device() != node.device())) {
return false;
}
const string& node_dev = node.device();
int num_cross_in = 0;
for (NodeDef* input_node : input_nodes) {
num_cross_in += static_cast<int>(input_node->device() != node_dev);
}
int num_cross_out = 0;
for (NodeDef* output_node : output_nodes) {
num_cross_out += static_cast<int>(output_node->device() != node_dev);
}
const int num_cross_before = num_cross_in + num_cross_out;
int num_cross_after = 0;
for (NodeDef* input_node : input_nodes) {
for (NodeDef* output_node : output_nodes) {
num_cross_after +=
static_cast<int>(input_node->device() != output_node->device());
}
}
if (num_cross_after > num_cross_before) {
return false;
}
if ((is_identity || is_multi_input_identity_n) && num_cross_in > 0 &&
num_cross_out > 0 && num_cross_after > 0) {
return false;
}
return true;
}
void DependencyOptimizer::OptimizeNode(int node_idx,
SetVector<int>* nodes_to_simplify,
std::set<int>* nodes_to_delete) {
NodeDef* node = optimized_graph_->mutable_node(node_idx);
const bool is_noop = IsNoOp(*node);
const bool is_identity = IsIdentity(*node) || IsIdentityNSingleInput(*node);
const bool is_multi_input_identity =
IsIdentityN(*node) && !IsIdentityNSingleInput(*node);
const string node_name = node->name();
if (IsConstant(*node) && node->input_size() == 0) {
const auto output_nodes = node_map_->GetOutputs(node_name);
for (NodeDef* fanout : output_nodes) {
bool optimize_fanout = false;
bool data_connection = false;
for (int i = fanout->input_size() - 1; i >= 0; --i) {
const TensorId input_tensor = ParseTensorName(fanout->input(i));
if (input_tensor.node() == node_name) {
if (input_tensor.index() < 0) {
fanout->mutable_input()->SwapElements(i, fanout->input_size() - 1);
fanout->mutable_input()->RemoveLast();
optimize_fanout = true;
} else {
data_connection = true;
}
}
}
if (optimize_fanout) {
nodes_to_simplify->PushBack(node_to_idx_[fanout]);
if (!data_connection) {
node_map_->RemoveOutput(node_name, fanout->name());
}
}
}
if (node_map_->GetOutputs(node_name).empty() && fetch_nodes_known_ &&
nodes_to_preserve_.find(node_name) == nodes_to_preserve_.end()) {
nodes_to_delete->insert(node_to_idx_[node]);
}
return;
}
if (!is_noop && SafeToConvertToNoOp(*node)) {
VLOG(2) << "***** Replacing " << node_name << " (" << node->op()
<< ") with NoOp.";
std::unordered_set<string> ctrl_inputs;
int pos = 0;
while (pos < node->input_size()) {
const string old_input = node->input(pos);
if (IsControlInput(old_input)) {
if (!ctrl_inputs.insert(old_input).second) {
node->mutable_input()->SwapElements(pos, node->input_size() - 1);
node->mutable_input()->RemoveLast();
} else {
++pos;
}
continue;
}
const string ctrl_input = ConstantFolding::AddControlDependency(
old_input, optimized_graph_, node_map_.get());
ctrl_inputs.insert(ctrl_input);
node->set_input(pos, ctrl_input);
node_map_->UpdateInput(node_name, old_input, ctrl_input);
const NodeDef* old_input_node = node_map_->GetNode(old_input);
nodes_to_simplify->PushBack(node_to_idx_[old_input_node]);
++pos;
}
ChangeToNoOp(node);
EraseRegularNodeAttributes(node);
DedupControlInputs(node);
nodes_to_simplify->PushBack(node_to_idx_[node]);
return;
}
if (is_noop || ((is_identity || is_multi_input_identity) &&
SafeToRemoveIdentity(*node))) {
const int num_inputs = node->input_size();
std::vector<NodeDef*> input_nodes;
for (int i = 0; i < num_inputs; ++i) {
NodeDef* input_node = node_map_->GetNode(node->input(i));
if (input_node == nullptr) {
LOG(ERROR) << "Invalid input " << node->input(i);
return;
}
input_nodes.push_back(input_node);
}
const auto& output_node_set = node_map_->GetOutputs(node_name);
const std::vector<NodeDef*> output_nodes(output_node_set.begin(),
output_node_set.end());
if (!BypassingNodeIsBeneficial(*node, input_nodes, output_nodes)) {
return;
}
VLOG(2) << "***** Rerouting input around\n" << node->DebugString();
for (auto consumer : output_nodes) {
bool updated_consumer = false;
VLOG(2) << "consumer before:\n" << consumer->DebugString();
for (int i = 0; i < num_inputs; ++i) {
const NodeDef* input = input_nodes[i];
if ((is_identity && i == 0) ||
(is_multi_input_identity && !IsControlInput(node->input(i)))) {
string new_input;
const string& input_to_forward = node->input(i);
CHECK(!IsControlInput(input_to_forward));
for (int j = 0; j < consumer->input_size(); ++j) {
const TensorId old_input = ParseTensorName(consumer->input(j));
if (old_input.node() == node_name) {
if (old_input.index() == i) {
new_input = input_to_forward;
node_map_->UpdateInput(consumer->name(),
string(old_input.node()), new_input);
consumer->set_input(j, new_input);
} else if (old_input.index() == -1) {
new_input = AsControlDependency(NodeName(input_to_forward));
node_map_->UpdateInput(consumer->name(),
string(old_input.node()), new_input);
consumer->set_input(j, new_input);
}
}
}
updated_consumer = true;
} else {
if (node_map_->GetOutputs(input->name()).count(consumer) == 0) {
consumer->add_input(AsControlDependency(input->name()));
node_map_->AddOutput(input->name(), consumer->name());
nodes_to_simplify->PushBack(node_to_idx_[input]);
updated_consumer = true;
}
}
}
updated_consumer |= RemoveControlInput(
consumer, AsControlDependency(node_name), node_map_.get());
if (updated_consumer) {
nodes_to_simplify->PushBack(node_to_idx_[consumer]);
}
VLOG(2) << "consumer after:\n" << consumer->DebugString();
}
node_map_->RemoveOutputs(node_name);
if (fetch_nodes_known_ &&
nodes_to_preserve_.find(node_name) == nodes_to_preserve_.end()) {
nodes_to_delete->insert(node_idx);
node_map_->RemoveInputs(node_name);
node->clear_input();
}
}
}
void DependencyOptimizer::CleanControlInputs() {
for (int i = 0; i < optimized_graph_->node_size(); ++i) {
DedupControlInputs(optimized_graph_->mutable_node(i));
}
}
Status DependencyOptimizer::OptimizeDependencies() {
SetVector<int> nodes_to_simplify;
std::set<int> nodes_to_delete;
for (int i = 0; i < optimized_graph_->node_size(); ++i) {
const NodeDef& node = optimized_graph_->node(i);
if (IsNoOp(node) || IsIdentity(node) || IsIdentityN(node) ||
IsConstant(node) || SafeToConvertToNoOp(node)) {
nodes_to_simplify.PushBack(i);
}
}
while (!nodes_to_simplify.Empty()) {
int node_to_simplify = nodes_to_simplify.PopBack();
while (nodes_to_delete.find(node_to_simplify) != nodes_to_delete.end()) {
node_to_simplify = nodes_to_simplify.PopBack();
}
OptimizeNode(node_to_simplify, &nodes_to_simplify, &nodes_to_delete);
}
if (fetch_nodes_known_) {
VLOG(1) << "Deleted " << nodes_to_delete.size() << " out of "
<< optimized_graph_->node_size() << " nodes.";
EraseNodesFromGraph(nodes_to_delete, optimized_graph_);
node_map_.reset(new NodeMap(optimized_graph_));
BuildNodeToIdx();
}
return absl::OkStatus();
}
namespace {
enum DistanceFromSource : uint8 { ZERO = 0, ONE = 1, TWO_OR_GREATER = 2 };
void LongestPathsLowerBounds(
int source, const std::pair<int, int>& target_range,
const std::vector<std::vector<int>>& outputs,
std::vector<DistanceFromSource>* longest_distance) {
std::deque<int> queue;
queue.emplace_front(source);
while (!queue.empty()) {
int node = queue.front();
queue.pop_front();
for (int fanout : outputs[node]) {
if (fanout >= target_range.first && fanout <= target_range.second &&
(*longest_distance)[fanout] != TWO_OR_GREATER) {
(*longest_distance)[fanout] =
(*longest_distance)[fanout] == ZERO ? ONE : TWO_OR_GREATER;
queue.emplace_front(fanout);
}
}
}
}
}
Status DependencyOptimizer::TransitiveReduction() {
const int num_nodes = optimized_graph_->node_size();
int num_controls = 0;
std::vector<std::vector<int>> outputs(num_nodes);
std::vector<absl::InlinedVector<std::pair<int, int>, 2UL>> control_outputs(
num_nodes);
std::vector<std::pair<int, int>> target_range(num_nodes, {num_nodes, -1});
for (int node_idx = 0; node_idx < num_nodes; ++node_idx) {
const NodeDef& node = optimized_graph_->node(node_idx);
if (ModifiesFrameInfo(node) || !HasOpDef(node)) {
continue;
}
for (int input_slot = 0; input_slot < node.input_size(); ++input_slot) {
const string& input = node.input(input_slot);
const NodeDef* input_node = node_map_->GetNode(input);
if (ModifiesFrameInfo(*input_node) || IsMerge(*input_node)) {
continue;
}
const int input_node_idx = node_to_idx_[input_node];
outputs[input_node_idx].push_back(node_idx);
target_range[input_node_idx].first =
std::min(target_range[input_node_idx].first, node_idx);
if (IsControlInput(input)) {
++num_controls;
control_outputs[input_node_idx].emplace_back(node_idx, input_slot);
target_range[input_node_idx].second =
std::max(target_range[input_node_idx].second, node_idx);
}
}
}
int num_controls_removed = 0;
std::vector<DistanceFromSource> longest_distance(num_nodes);
typedef std::pair<int, int> InputSlotAndSource;
absl::flat_hash_map<
int, std::set<InputSlotAndSource, std::greater<InputSlotAndSource>>>
control_edges_to_remove;
for (int source = 0; source < num_nodes; ++source) {
if (target_range[source].first >= target_range[source].second ||
target_range[source].second <= source) {
continue;
}
std::fill(longest_distance.begin() + target_range[source].first,
longest_distance.begin() + target_range[source].second + 1, ZERO);
LongestPathsLowerBounds(source, target_range[source], outputs,
&longest_distance);
for (const auto& control_output : control_outputs[source]) {
const int target = control_output.first;
if (longest_distance[target] == TWO_OR_GREATER) {
const int input_slot = control_output.second;
control_edges_to_remove[target].emplace(input_slot, source);
}
}
}
for (const auto& it : control_edges_to_remove) {
const int target = it.first;
NodeDef* target_node = optimized_graph_->mutable_node(target);
for (const InputSlotAndSource& slot_and_source : it.second) {
const int input_slot = slot_and_source.first;
const int source = slot_and_source.second;
const NodeDef& source_node = optimized_graph_->node(source);
CHECK_LT(input_slot, target_node->input_size());
target_node->mutable_input()->SwapElements(input_slot,
target_node->input_size() - 1);
node_map_->RemoveOutput(source_node.name(), target_node->name());
target_node->mutable_input()->RemoveLast();
++num_controls_removed;
}
}
VLOG(1) << "Removed " << num_controls_removed << " out of " << num_controls
<< " control dependencies";
return absl::OkStatus();
}
void DependencyOptimizer::BuildNodeToIdx() {
node_to_idx_.clear();
for (int i = 0; i < optimized_graph_->node_size(); ++i) {
const NodeDef& node = optimized_graph_->node(i);
node_to_idx_[&node] = i;
}
}
void DependencyOptimizer::GroupCrossDeviceControlEdges(bool host_granularity) {
VLOG(1)
<< "DependencyOptimizer::GroupCrossDeviceControlEdges host_granularity="
<< host_granularity;
const int num_nodes = optimized_graph_->node_size();
for (int i = 0; i < num_nodes; ++i) {
NodeDef* node = optimized_graph_->mutable_node(i);
if (node->device().empty()) continue;
string rest, node_device = node->device();
if (host_granularity) {
DeviceNameUtils::SplitDeviceName(node->device(), &node_device, &rest);
}
std::map<string, NodeDef*> noops;
int num_noops = 0;
for (int j = 0; j < node->input_size(); ++j) {
if (IsControlInput(node->input(j))) {
const NodeDef* input = node_map_->GetNode(node->input(j));
if (input == nullptr || input->device().empty()) continue;
string input_device = input->device();
if (host_granularity) {
DeviceNameUtils::SplitDeviceName(input->device(), &input_device,
&rest);
}
if (input_device != node_device) {
VLOG(2) << "Cross-device " << node->name() << " " << input->device()
<< " -> " << node->device();
auto emplace_result = noops.emplace(input_device, nullptr);
if (!emplace_result.second &&
emplace_result.first->second == nullptr) {
VLOG(2) << "Duplicate input device from " << node->name();
string group_name;
NodeDef* noop;
do {
group_name = AddPrefixToNodeName(
node->name(),
strings::StrCat("GroupCrossDeviceControlEdges_", num_noops));
noop = node_map_->GetNode(group_name);
++num_noops;
} while (noop != nullptr);
noop = optimized_graph_->add_node();
noop->set_name(group_name);
noop->set_device(input->device());
noop->set_op("NoOp");
node_map_->AddNode(noop->name(), noop);
emplace_result.first->second = noop;
VLOG(1) << "GroupCrossDeviceControlEdges: Added "
<< SummarizeNodeDef(*noop);
}
}
}
}
int pos = 0;
while (pos < node->input_size()) {
const string& input_name = node->input(pos);
if (IsControlInput(input_name)) {
NodeDef* input = node_map_->GetNode(input_name);
if (input == nullptr) {
++pos;
} else {
string input_device = input->device();
if (host_granularity) {
DeviceNameUtils::SplitDeviceName(input->device(), &input_device,
&rest);
}
auto it = noops.find(input_device);
if (it == noops.end() || it->second == nullptr) {
++pos;
} else {
VLOG(2) << "Rewriting input from " << input_name;
node->mutable_input()->SwapElements(pos, node->input_size() - 1);
node->mutable_input()->RemoveLast();
it->second->add_input(AsControlDependency(*input));
node_map_->UpdateOutput(input_name, node->name(),
it->second->name());
}
}
} else {
++pos;
}
}
for (const auto& entry : noops) {
if (entry.second) {
node->add_input(AsControlDependency(*entry.second));
node_map_->AddOutput(entry.second->name(), node->name());
}
}
}
}
Status DependencyOptimizer::Optimize(Cluster* cluster, const GrapplerItem& item,
GraphDef* optimized_graph) {
optimized_graph_ = optimized_graph;
*optimized_graph_ = item.graph;
nodes_to_preserve_ = item.NodesToPreserve();
fetch_nodes_known_ = !item.fetch.empty();
CleanControlInputs();
const int num_iterations = 2;
for (int iteration = 0; iteration < num_iterations; ++iteration) {
GRAPPLER_RETURN_IF_DEADLINE_EXCEEDED();
Status topo_sort_status;
topo_sort_status = TopologicalSort(optimized_graph_);
node_map_.reset(new NodeMap(optimized_graph_));
BuildNodeToIdx();
if (topo_sort_status.ok()) {
TF_RETURN_IF_ERROR(TransitiveReduction());
} else {
LOG(ERROR) << "Iteration = " << iteration
<< ", topological sort failed with message: "
<< topo_sort_status.message();
}
TF_RETURN_IF_ERROR(OptimizeDependencies());
CleanControlInputs();
GroupCrossDeviceControlEdges(false);
GroupCrossDeviceControlEdges(true);
}
return absl::OkStatus();
}
}
} | #include "tensorflow/core/grappler/optimizers/dependency_optimizer.h"
#include "absl/strings/match.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/framework/full_type.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/inputs/trivial_test_graph_input_yielder.h"
#include "tensorflow/core/grappler/optimizers/constant_folding.h"
#include "tensorflow/core/grappler/optimizers/model_pruner.h"
#include "tensorflow/core/grappler/utils.h"
#include "tensorflow/core/grappler/utils/grappler_test.h"
#include "tensorflow/core/grappler/utils/topological_sort.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace grappler {
namespace {
class DependencyOptimizerTest : public GrapplerTest {};
void VerifyGraphsEqual(const GraphDef& original_graph,
const GraphDef& optimized_graph, const string& func) {
EXPECT_EQ(original_graph.node_size(), optimized_graph.node_size()) << func;
for (int i = 0; i < original_graph.node_size(); ++i) {
const NodeDef& original = original_graph.node(i);
const NodeDef& optimized = optimized_graph.node(i);
EXPECT_EQ(original.name(), optimized.name()) << func;
EXPECT_EQ(original.op(), optimized.op()) << func;
EXPECT_EQ(original.input_size(), optimized.input_size()) << func;
for (int j = 0; j < original.input_size(); ++j) {
EXPECT_EQ(original.input(j), optimized.input(j)) << func;
}
}
}
TEST_F(DependencyOptimizerTest, NoOp) {
TrivialTestGraphInputYielder fake_input(4, 1, 10, false, {"CPU:0"});
GrapplerItem item;
CHECK(fake_input.NextItem(&item));
DependencyOptimizer optimizer;
GraphDef output;
Status status = optimizer.Optimize(nullptr, item, &output);
TF_EXPECT_OK(status);
VerifyGraphsEqual(item.graph, output, __FUNCTION__);
}
TEST_F(DependencyOptimizerTest, DependenciesDrivenByConstants) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output x = ops::Const(s.WithOpName("x"), {1.0f, 2.0f}, {1, 2});
Output y = ops::Const(s.WithOpName("y"), {1.0f, 2.0f}, {1, 2});
Output z = ops::Const(s.WithOpName("z"), {1.0f, 2.0f}, {1, 2});
Output add = ops::Add(s.WithOpName("add"), x, y);
Output id1 =
ops::Identity(s.WithOpName("id1").WithControlDependencies(x), add);
Output id2 = ops::Identity(
s.WithOpName("id2").WithControlDependencies(y).WithControlDependencies(z),
add);
GrapplerItem item;
TF_CHECK_OK(s.ToGraphDef(&item.graph));
item.fetch.push_back("id1");
item.fetch.push_back("id2");
DependencyOptimizer optimizer;
GraphDef output;
Status status = optimizer.Optimize(nullptr, item, &output);
TF_EXPECT_OK(status);
item.graph.Swap(&output);
status = optimizer.Optimize(nullptr, item, &output);
TF_EXPECT_OK(status);
EXPECT_EQ(5, output.node_size());
for (const NodeDef& node : item.graph.node()) {
if (node.name() == "id1" || node.name() == "id2") {
EXPECT_EQ(1, node.input_size());
EXPECT_EQ("add", node.input(0));
}
}
}
TEST_F(DependencyOptimizerTest, ChangeToNoop) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output x = ops::RandomUniform(s.WithOpName("x"), {1, 2}, DT_FLOAT);
Output y = ops::RandomUniform(s.WithOpName("y"), {1, 2}, DT_FLOAT);
Output add = ops::Add(s.WithOpName("add"), x, y);
Output id1 =
ops::Identity(s.WithOpName("id1").WithControlDependencies(add), x);
Output id2 =
ops::Identity(s.WithOpName("id2").WithControlDependencies(add), y);
GrapplerItem item;
TF_CHECK_OK(s.ToGraphDef(&item.graph));
item.fetch.push_back("id1");
item.fetch.push_back("id2");
DependencyOptimizer optimizer;
GraphDef output;
Status status = optimizer.Optimize(nullptr, item, &output);
TF_EXPECT_OK(status);
item.graph.Swap(&output);
status = optimizer.Optimize(nullptr, item, &output);
TF_EXPECT_OK(status);
EXPECT_EQ(item.graph.node_size(), output.node_size());
int found = 0;
for (int i = 0; i < item.graph.node_size(); ++i) {
const NodeDef& node = item.graph.node(i);
EXPECT_NE("add", node.name());
if (node.name() == "id1") {
EXPECT_EQ("Identity", node.op());
EXPECT_EQ(2, node.input_size());
EXPECT_EQ("x", node.input(0));
EXPECT_EQ("^y", node.input(1));
++found;
} else if (node.name() == "id2") {
EXPECT_EQ("Identity", node.op());
EXPECT_EQ(2, node.input_size());
EXPECT_EQ("y", node.input(0));
EXPECT_EQ("^x", node.input(1));
++found;
}
}
EXPECT_EQ(2, found);
}
TEST_F(DependencyOptimizerTest, FullTypeForKeptNoop) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output x = ops::RandomUniform(s.WithOpName("x"), {1, 2}, DT_FLOAT);
Output y = ops::RandomUniform(s.WithOpName("y"), {1, 2}, DT_FLOAT);
Output add = ops::Add(s.WithOpName("add"), x, y);
Output id1 =
ops::Identity(s.WithOpName("id1").WithControlDependencies(add), x);
Output id2 =
ops::Identity(s.WithOpName("id2").WithControlDependencies(add), y);
Output id3 =
ops::Identity(s.WithOpName("id3").WithControlDependencies(add), y);
GrapplerItem item;
TF_CHECK_OK(s.ToGraphDef(&item.graph));
item.fetch.push_back("id1");
item.fetch.push_back("id2");
item.fetch.push_back("id3");
for (int i = 0; i < item.graph.node_size(); ++i) {
NodeDef* node = item.graph.mutable_node(i);
if (node->name() == "add") {
FullTypeDef t;
t.set_type_id(TFT_PRODUCT);
t.add_args()->set_type_id(TFT_TENSOR);
t.mutable_args(0)->add_args()->set_type_id(TFT_FLOAT);
*node->mutable_experimental_type() = t;
break;
}
}
DependencyOptimizer optimizer;
GraphDef output;
Status status = optimizer.Optimize(nullptr, item, &output);
TF_EXPECT_OK(status);
item.graph.Swap(&output);
status = optimizer.Optimize(nullptr, item, &output);
TF_EXPECT_OK(status);
EXPECT_EQ(item.graph.node_size(), output.node_size());
int found = 0;
for (int i = 0; i < item.graph.node_size(); ++i) {
const NodeDef& node = item.graph.node(i);
if (node.name() == "id1") {
EXPECT_EQ("Identity", node.op());
EXPECT_EQ(2, node.input_size());
EXPECT_EQ("x", node.input(0));
EXPECT_EQ("^add", node.input(1));
++found;
} else if (node.name() == "id2") {
EXPECT_EQ("Identity", node.op());
EXPECT_EQ(2, node.input_size());
EXPECT_EQ("y", node.input(0));
EXPECT_EQ("^add", node.input(1));
++found;
} else if (node.name() == "id3") {
EXPECT_EQ("Identity", node.op());
EXPECT_EQ(2, node.input_size());
EXPECT_EQ("y", node.input(0));
EXPECT_EQ("^add", node.input(1));
++found;
} else if (node.name() == "add") {
EXPECT_EQ(node.op(), "NoOp");
FullTypeDef t = node.experimental_type();
EXPECT_TRUE((t.type_id() == TFT_UNSET) ||
((t.type_id() == TFT_PRODUCT) && (t.args_size() == 0)));
++found;
}
}
EXPECT_EQ(4, found);
}
TEST_F(DependencyOptimizerTest, ChangeToNoop_RepeatedInput) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output x = ops::RandomUniform(s.WithOpName("x"), {1, 2}, DT_FLOAT);
Output add = ops::Add(s.WithOpName("add"), x, x);
Output id1 =
ops::Identity(s.WithOpName("id1").WithControlDependencies(add), x);
GrapplerItem item;
TF_CHECK_OK(s.ToGraphDef(&item.graph));
item.fetch = {"id1"};
DependencyOptimizer optimizer;
GraphDef output;
Status status = optimizer.Optimize(nullptr, item, &output);
TF_EXPECT_OK(status);
item.graph.Swap(&output);
status = optimizer.Optimize(nullptr, item, &output);
TF_EXPECT_OK(status);
EXPECT_EQ(item.graph.node_size(), output.node_size());
int found = 0;
for (int i = 0; i < item.graph.node_size(); ++i) {
const NodeDef& node = item.graph.node(i);
EXPECT_NE("add", node.name());
if (node.name() == "id1") {
EXPECT_EQ("Identity", node.op());
EXPECT_EQ(1, node.input_size());
EXPECT_EQ("x", node.input(0));
++found;
}
}
EXPECT_EQ(1, found);
}
TEST_F(DependencyOptimizerTest, ChangeToNoop_SwitchIdentity) {
tensorflow::Scope scope = tensorflow::Scope::NewRootScope();
ops::Variable v_in(scope.WithOpName("v_in"), {3}, DT_FLOAT);
ops::Variable v_ctrl(scope.WithOpName("v_ctrl"), {}, DT_BOOL);
ops::Switch s(scope.WithOpName("switch"), v_in, v_ctrl);
Output neg = ops::Neg(scope.WithOpName("neg"), s.output_true);
Output c1 = ops::Const(scope.WithOpName("c1").WithControlDependencies(neg),
{1.0f, 2.0f}, {1, 2});
Output ctrl_dep_id = ops::Identity(
scope.WithOpName("ConstantFoldingCtrl/switch_1"), s.output_true);
Output c2 =
ops::Const(scope.WithOpName("c2").WithControlDependencies(ctrl_dep_id),
{1.0f, 2.0f}, {1, 2});
Output neg1 = ops::Neg(scope.WithOpName("neg1"), s.output_false);
Output neg2 = ops::Neg(scope.WithOpName("neg2"), ctrl_dep_id);
GrapplerItem item;
TF_CHECK_OK(scope.ToGraphDef(&item.graph));
item.fetch.push_back("c1");
item.fetch.push_back("c2");
item.fetch.push_back("neg1");
item.fetch.push_back("neg2");
DependencyOptimizer optimizer;
GraphDef output;
Status status = optimizer.Optimize(nullptr, item, &output);
TF_EXPECT_OK(status);
EXPECT_EQ(item.graph.node_size() - 1, output.node_size());
for (int i = 0; i < output.node_size(); ++i) {
const NodeDef& node = output.node(i);
EXPECT_NE("neg", node.name());
if (node.name() == "c1") {
EXPECT_EQ("Const", node.op());
EXPECT_EQ(1, node.input_size());
EXPECT_EQ("^ConstantFoldingCtrl/switch_1", node.input(0));
}
}
}
TEST_F(DependencyOptimizerTest, ChangeToNoop_NoFetch) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output x = ops::RandomUniform(s.WithOpName("x"), {1, 2}, DT_FLOAT);
Output y = ops::RandomUniform(s.WithOpName("y"), {1, 2}, DT_FLOAT);
Output add = ops::Add(s.WithOpName("add"), x, y);
Output id1 =
ops::Identity(s.WithOpName("id1").WithControlDependencies(add), x);
Output id2 =
ops::Identity(s.WithOpName("id2").WithControlDependencies(add), y);
GrapplerItem item;
TF_CHECK_OK(s.ToGraphDef(&item.graph));
DependencyOptimizer optimizer;
GraphDef output;
Status status = optimizer.Optimize(nullptr, item, &output);
TF_EXPECT_OK(status);
TF_CHECK_OK(TopologicalSort(&item.graph));
VerifyGraphsEqual(item.graph, output, __FUNCTION__);
}
TEST_F(DependencyOptimizerTest, RemoveNoOps_EmptyInputOrOutput) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output x = ops::RandomUniform(s, {1, 2}, DT_FLOAT);
auto noop1 = ops::NoOp(s);
auto noop2 = ops::NoOp(s.WithControlDependencies(x));
Output id = ops::Identity(s.WithControlDependencies({noop1.operation}), x);
GrapplerItem item;
TF_CHECK_OK(s.ToGraphDef(&item.graph));
item.fetch.push_back("Identity");
DependencyOptimizer optimizer;
GraphDef output;
Status status = optimizer.Optimize(nullptr, item, &output);
TF_EXPECT_OK(status);
item.graph.Swap(&output);
status = optimizer.Optimize(nullptr, item, &output);
TF_EXPECT_OK(status);
EXPECT_EQ(item.graph.node_size(), output.node_size());
for (const NodeDef& node : output.node()) {
if (node.name() == "NoOp" || node.name() == "NoOp_1") {
EXPECT_EQ(0, node.input_size());
} else if (node.name() == "Identity") {
EXPECT_EQ(1, node.input_size());
EXPECT_EQ("RandomUniform", node.input(0));
}
}
}
TEST_F(DependencyOptimizerTest, RemoveNoOps_DeviceBoundaries) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output x = ops::RandomUniform(s.WithOpName("x").WithDevice("/CPU:0"), {1, 2},
DT_FLOAT);
Output y = ops::RandomUniform(s.WithOpName("y").WithDevice("/CPU:0"), {1, 2},
DT_FLOAT);
auto noop = ops::NoOp(s.WithControlDependencies(x).WithDevice("/CPU:1"));
auto noop_1 = ops::NoOp(
s.WithControlDependencies(x).WithControlDependencies(y).WithDevice(
"/CPU:0"));
Output id = ops::Identity(
s.WithControlDependencies({noop.operation}).WithDevice("/CPU:1"), x);
Output id_1 = ops::Identity(
s.WithControlDependencies({noop.operation, noop_1.operation})
.WithDevice("/CPU:1"),
y);
GrapplerItem item;
TF_CHECK_OK(s.ToGraphDef(&item.graph));
item.fetch.push_back("Identity");
item.fetch.push_back("Identity_1");
DependencyOptimizer optimizer;
GraphDef output;
Status status = optimizer.Optimize(nullptr, item, &output);
TF_EXPECT_OK(status);
TF_CHECK_OK(TopologicalSort(&item.graph));
VerifyGraphsEqual(item.graph, output, __FUNCTION__);
}
TEST_F(DependencyOptimizerTest, RemoveIdentityOps_DeviceBoundaries) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output x = ops::RandomUniform(s.WithOpName("x").WithDevice("/CPU:0"), {1, 2},
DT_FLOAT);
Output y = ops::RandomUniform(s.WithOpName("y").WithDevice("/CPU:0"), {1, 2},
DT_FLOAT);
auto id_a = ops::Identity(s.WithOpName("id_a").WithDevice("/CPU:1"), x);
auto id_b = ops::Identity(
s.WithOpName("id_b").WithControlDependencies(y).WithDevice("/CPU:0"), x);
Output id =
ops::Identity(s.WithControlDependencies(id_a).WithDevice("/CPU:1"), id_b);
Output id_1 = ops::Identity(s.WithDevice("/CPU:1"), id_a);
GrapplerItem item;
TF_CHECK_OK(s.ToGraphDef(&item.graph));
item.fetch.push_back("Identity");
item.fetch.push_back("Identity_1");
DependencyOptimizer optimizer;
GraphDef output;
Status status = optimizer.Optimize(nullptr, item, &output);
TF_EXPECT_OK(status);
TF_CHECK_OK(TopologicalSort(&item.graph));
VerifyGraphsEqual(item.graph, output, __FUNCTION__);
}
TEST_F(DependencyOptimizerTest, RemoveIdentityOps_IdenticalDevices) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output x = ops::RandomUniform(s.WithOpName("x").WithDevice("/CPU:0"), {1, 2},
DT_FLOAT);
auto id_a = ops::Identity(s.WithOpName("id_a").WithDevice("/CPU:1"), x);
Output id =
ops::Identity(s.WithControlDependencies(id_a).WithDevice("/CPU:0"), id_a);
GrapplerItem item;
TF_CHECK_OK(s.ToGraphDef(&item.graph));
item.fetch.push_back("Identity");
DependencyOptimizer optimizer;
GraphDef output;
Status status = optimizer.Optimize(nullptr, item, &output);
TF_EXPECT_OK(status);
EXPECT_EQ(item.graph.node_size() - 1, output.node_size());
for (const NodeDef& node : output.node()) {
EXPECT_NE(node.name(), "id_a");
if (node.name() == "Identity") {
EXPECT_EQ(node.input(0), "x");
}
}
}
TEST_F(DependencyOptimizerTest, RemoveNoOps_SingleInputOrOutput) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output x = ops::RandomUniform(s.WithOpName("x"), {1, 2}, DT_FLOAT);
Output y = ops::RandomUniform(s.WithOpName("y"), {1, 2}, DT_FLOAT);
auto noop = ops::NoOp(s.WithControlDependencies(x));
auto noop_1 =
ops::NoOp(s.WithControlDependencies(x).WithControlDependencies(y));
Output id = ops::Identity(s.WithControlDependencies({noop.operation}), x);
Output id_1 = ops::Identity(
s.WithControlDependencies({noop.operation, noop_1.operation}), y);
GrapplerItem item;
TF_CHECK_OK(s.ToGraphDef(&item.graph));
item.fetch.push_back("Identity");
item.fetch.push_back("Identity_1");
DependencyOptimizer optimizer;
GraphDef output;
Status status = optimizer.Optimize(nullptr, item, &output);
TF_EXPECT_OK(status);
item.graph.Swap(&output);
status = optimizer.Optimize(nullptr, item, &output);
TF_EXPECT_OK(status);
EXPECT_EQ(item.graph.node_size(), output.node_size());
for (const NodeDef& node : output.node()) {
if (node.name() == "NoOp" || node.name() == "NoOp_1") {
EXPECT_EQ(0, node.input_size());
} else if (node.name() == "Identity") {
EXPECT_EQ("x", node.input(0));
} else if (node.name() == "Identity_1") {
EXPECT_EQ("y", node.input(0));
EXPECT_EQ("^x", node.input(1));
}
}
}
TEST_F(DependencyOptimizerTest, RemoveIdentity) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output x = ops::RandomUniform(s.WithOpName("x"), {1, 2}, DT_FLOAT);
Output y = ops::RandomUniform(s.WithOpName("y"), {1, 2}, DT_FLOAT);
Output z = ops::RandomUniform(s.WithOpName("z"), {1, 2}, DT_FLOAT);
auto id_a = ops::Identity(s.WithOpName("id_a"), x);
auto id_b = ops::Identity(
s.WithOpName("id_b").WithControlDependencies(y).WithControlDependencies(
z),
x);
auto id_c = ops::Identity(s.WithOpName("id_c").WithControlDependencies(y), x);
Output a_a = ops::Identity(s.WithOpName("a_a"), id_a);
Output a_b = ops::Identity(s.WithOpName("a_b"), id_a);
Output a_c =
ops::Identity(s.WithOpName("a_c").WithControlDependencies(id_a), z);
Output a_d =
ops::Identity(s.WithOpName("a_d").WithControlDependencies(id_a), z);
Output b_a = ops::Identity(s.WithOpName("b_a"), id_b);
Output c_a = ops::Identity(s.WithOpName("c_a"), id_c);
Output c_b =
ops::Identity(s.WithOpName("c_b").WithControlDependencies(id_c), z);
GrapplerItem item;
TF_CHECK_OK(s.ToGraphDef(&item.graph));
item.fetch = {"a_a", "a_b", "a_c", "a_d", "b_a", "c_a", "c_b"};
DependencyOptimizer optimizer;
GraphDef output;
Status status = optimizer.Optimize(nullptr, item, &output);
TF_EXPECT_OK(status);
EXPECT_EQ(item.graph.node_size() - 3, output.node_size());
int found = 0;
for (const NodeDef& node : output.node()) {
EXPECT_NE("id_a", node.name());
EXPECT_NE("id_b", node.name());
EXPECT_NE("id_c", node.name());
if (node.name() == "a_a" || node.name() == "a_b") {
ASSERT_EQ(1, node.input_size());
EXPECT_EQ("x", node.input(0));
++found;
}
if (node.name() == "a_c" || node.name() == "a_d") {
ASSERT_EQ(2, node.input_size());
EXPECT_EQ("z", node.input(0));
EXPECT_EQ("^x", node.input(1));
++found;
}
if (node.name() == "b_a") {
ASSERT_EQ(3, node.input_size());
EXPECT_EQ("x", node.input(0));
EXPECT_EQ("^y", node.input(1));
EXPECT_EQ("^z", node.input(2));
++found;
}
if (node.name() == "c_a") {
ASSERT_EQ(2, node.input_size());
EXPECT_EQ("x", node.input(0));
EXPECT_EQ("^y", node.input(1));
++found;
}
if (node.name() == "c_b") {
ASSERT_EQ(3, node.input_size());
EXPECT_EQ("z", node.input(0));
EXPECT_EQ("^x", node.input(1));
EXPECT_EQ("^y", node.input(2));
++found;
}
}
EXPECT_EQ(found, 7);
}
TEST_F(DependencyOptimizerTest, RemoveIdentity_RepeatedInputs) {
tensorflow::Scope scope = tensorflow::Scope::NewRootScope();
ops::Variable x(scope.WithOpName("x"), {}, DT_BOOL);
ops::Variable y(scope.WithOpName("y"), {}, DT_BOOL);
ops::Switch sw(scope.WithOpName("switch"), x, x);
Output id0 = ops::Identity(scope.WithOpName("id0"), sw.output_true);
Output id1 = ops::Identity(scope.WithOpName("id1"), sw.output_false);
Output or0 = ops::LogicalOr(scope.WithOpName("or0"), id0, id0);
Output or1 = ops::LogicalOr(scope.WithOpName("or1"), id0, y);
Output or2 = ops::LogicalOr(
scope.WithOpName("or2").WithControlDependencies(id1), y, y);
GrapplerItem item;
TF_CHECK_OK(scope.ToGraphDef(&item.graph));
item.fetch.push_back("or0");
item.fetch.push_back("or1");
item.fetch.push_back("or2");
DependencyOptimizer optimizer;
GraphDef output;
Status status = optimizer.Optimize(nullptr, item, &output);
TF_EXPECT_OK(status);
EXPECT_EQ(item.graph.node_size() - 1, output.node_size());
int found = 0;
for (const NodeDef& node : output.node()) {
EXPECT_NE("id0", node.name());
if (node.name() == "or0") {
EXPECT_EQ(2, node.input_size());
EXPECT_EQ("switch:1", node.input(0));
EXPECT_EQ("switch:1", node.input(1));
++found;
}
if (node.name() == "or1") {
EXPECT_EQ(2, node.input_size());
EXPECT_EQ("switch:1", node.input(0));
EXPECT_EQ("y", node.input(1));
++found;
}
if (node.name() == "or2") {
EXPECT_EQ(3, node.input_size());
EXPECT_EQ("y", node.input(0));
EXPECT_EQ("y", node.input(1));
EXPECT_EQ("^id1", node.input(2));
++found;
}
}
EXPECT_EQ(found, 3);
}
TEST_F(DependencyOptimizerTest, Transitive_Reduction_Simple) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output c = ops::Const(s.WithOpName("c"), {1.0f, 2.0f}, {1, 2});
Output x = ops::Square(s.WithOpName("x"), c);
Output neg1 = ops::Neg(s.WithOpName("neg1"), x);
Output neg2 =
ops::Neg(s.WithOpName("neg2").WithControlDependencies({x}), neg1);
GrapplerItem item;
TF_CHECK_OK(s.ToGraphDef(&item.graph));
item.fetch.push_back("neg2");
DependencyOptimizer optimizer;
GraphDef output;
Status status = optimizer.Optimize(nullptr, item, &output);
TF_EXPECT_OK(status);
EXPECT_EQ(4, output.node_size());
EXPECT_EQ("neg2", output.node(3).name());
EXPECT_EQ(1, output.node(3).input_size());
EXPECT_EQ("neg1", output.node(3).input(0));
}
TEST_F(DependencyOptimizerTest, ChangeToNoop_Identity) {
tensorflow::Scope scope = tensorflow::Scope::NewRootScope();
ops::Variable v_in(scope.WithOpName("v_in"), {3}, DT_FLOAT);
Output id_after_var = ops::Identity(scope.WithOpName("id_after_var"), v_in);
ops::Variable v_ctrl(scope.WithOpName("v_ctrl"), {}, DT_BOOL);
ops::Switch s(
scope.WithOpName("switch").WithControlDependencies(id_after_var), v_in,
v_ctrl);
Output id0 = ops::Identity(scope.WithOpName("id0"), s.output_true);
Output grappler_added_id = ops::Identity(
scope.WithOpName("ConstantFoldingCtrl/switch_1"), s.output_true);
Output c1 = ops::Const(scope.WithOpName("c1")
.WithControlDependencies(id_after_var)
.WithControlDependencies(grappler_added_id),
{1.0f, 2.0f}, {1, 2});
Output id1 = ops::Identity(scope.WithOpName("id1"), c1);
Output id2 = ops::Identity(scope.WithOpName("id2"), id0);
Output fetch =
ops::Identity(scope.WithOpName("fetch").WithControlDependencies(id1), c1);
GrapplerItem item;
TF_CHECK_OK(scope.ToGraphDef(&item.graph));
item.fetch.push_back("c1");
item.fetch.push_back("id2");
item.fetch.push_back("fetch");
DependencyOptimizer optimizer;
GraphDef output;
Status status = optimizer.Optimize(nullptr, item, &output);
TF_EXPECT_OK(status);
EXPECT_EQ(item.graph.node_size() - 2, output.node_size());
bool found = false;
for (int i = 0; i < output.node_size(); ++i) {
const NodeDef& node = output.node(i);
EXPECT_NE("id0", node.name());
EXPECT_NE("id1", node.name());
if (node.name() == "c1") {
EXPECT_EQ("Const", node.op());
EXPECT_EQ(1, node.input_size());
EXPECT_EQ("^ConstantFoldingCtrl/switch_1", node.input(0));
found = true;
}
}
EXPECT_TRUE(found);
}
TEST_F(DependencyOptimizerTest, IdentityInputs) {
tensorflow::Scope scope = tensorflow::Scope::NewRootScope();
Output b = ops::Placeholder(scope.WithOpName("b"), DT_BOOL);
Output x = ops::RandomUniform(scope.WithOpName("x"), {1, 2}, DT_FLOAT);
auto s = ops::Switch(scope.WithOpName("s"), x, b);
auto id_f = ops::Identity(scope.WithOpName("id_f"), s.output_false);
auto id_t = ops::Identity(scope.WithOpName("id_t"), s.output_true);
Output out1 = ops::Identity(scope.WithOpName("out1"), id_f);
Output out2 = ops::Identity(scope.WithOpName("out2"), id_t);
GrapplerItem item;
TF_CHECK_OK(scope.ToGraphDef(&item.graph));
item.fetch = {"out1", "out2"};
DependencyOptimizer optimizer;
GraphDef output;
Status status = optimizer.Optimize(nullptr, item, &output);
TF_EXPECT_OK(status);
EXPECT_EQ(6, output.node_size());
EXPECT_EQ("out1", output.node(4).name());
EXPECT_EQ(1, output.node(4).input_size());
EXPECT_EQ("s", output.node(4).input(0));
EXPECT_EQ("out2", output.node(5).name());
EXPECT_EQ(1, output.node(5).input_size());
EXPECT_EQ("s:1", output.node(5).input(0));
}
TEST_F(DependencyOptimizerTest, RemoveIdentityN_SwitchInput) {
tensorflow::Scope scope = tensorflow::Scope::NewRootScope();
Output b = ops::Placeholder(scope.WithOpName("b"), DT_BOOL);
Output x = ops::RandomUniform(scope.WithOpName("x"), {1, 2}, DT_FLOAT);
auto s = ops::Switch(scope.WithOpName("s"), x, b);
auto id_f = ops::IdentityN(scope.WithOpName("id_f"), {s.output_false});
auto id_t = ops::IdentityN(scope.WithOpName("id_t"), {s.output_true});
auto id_b =
ops::IdentityN(scope.WithOpName("id_b"), {s.output_false, s.output_true});
Output out1 = ops::Identity(scope.WithOpName("out1"), id_f[0]);
Output out2 = ops::Identity(scope.WithOpName("out2"), id_t[0]);
Output out3 = ops::Identity(scope.WithOpName("out3"), id_b[0]);
Output out4 = ops::Identity(scope.WithOpName("out4"), id_b[1]);
GrapplerItem item;
TF_CHECK_OK(scope.ToGraphDef(&item.graph));
item.fetch = {"out1", "out2", "out3", "out4"};
DependencyOptimizer optimizer;
GraphDef output;
Status status = optimizer.Optimize(nullptr, item, &output);
TF_EXPECT_OK(status);
EXPECT_EQ(8, output.node_size());
auto out1_node = output.node(7);
EXPECT_EQ("out1", out1_node.name());
EXPECT_EQ(1, out1_node.input_size());
EXPECT_EQ("s", out1_node.input(0));
auto out2_node = output.node(4);
EXPECT_EQ("out2", out2_node.name());
EXPECT_EQ(1, out2_node.input_size());
EXPECT_EQ("s:1", out2_node.input(0));
auto out3_node = output.node(5);
EXPECT_EQ("out3", out3_node.name());
EXPECT_EQ(1, out3_node.input_size());
EXPECT_EQ("s", out3_node.input(0));
auto out4_node = output.node(6);
EXPECT_EQ("out4", out4_node.name());
EXPECT_EQ(1, out4_node.input_size());
EXPECT_EQ("s:1", out4_node.input(0));
}
TEST_F(DependencyOptimizerTest, DoNotRemoveIdentityNWithControlDependency) {
tensorflow::Scope scope = tensorflow::Scope::NewRootScope();
Output input1 = ops::Placeholder(scope.WithOpName("input1"), DT_BOOL);
Output input2 = ops::Const(scope.WithOpName("input2"), {1, 2});
auto id_n = ops::IdentityN(scope.WithOpName("id_n"), {input1, input2});
Output out1 = ops::Identity(scope.WithOpName("out1"), id_n[0]);
Output out2 = ops::Identity(scope.WithOpName("out2"), id_n[1]);
auto out3 =
ops::NoOp(scope.WithOpName("out3").WithControlDependencies(id_n[1]));
GrapplerItem item;
TF_CHECK_OK(scope.ToGraphDef(&item.graph));
item.fetch = {"out1", "out2", "out3"};
DependencyOptimizer optimizer;
GraphDef optimized_graph_def;
Status status = optimizer.Optimize(nullptr, item, &optimized_graph_def);
TF_EXPECT_OK(status);
EXPECT_EQ(6, optimized_graph_def.node_size());
}
TEST_F(DependencyOptimizerTest,
Identity_DeviceCrossing_ConsumerOnDifferentDevice) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output x_on_1 =
ops::Const(s.WithOpName("x_on_1").WithDevice("/gpu:1"), {1.0f}, {});
Output one_on_3 =
ops::Const(s.WithOpName("one_on_3").WithDevice("/gpu:3"), {1.0f}, {});
Output x_on_2 =
ops::Identity(s.WithOpName("x_on_2").WithDevice("/gpu:2"), x_on_1);
Output result =
ops::Add(s.WithOpName("result").WithDevice("/gpu:3"), x_on_2, one_on_3);
GrapplerItem item;
TF_CHECK_OK(s.ToGraphDef(&item.graph));
item.fetch = {"result"};
DependencyOptimizer optimizer;
GraphDef output;
Status status = optimizer.Optimize(nullptr, item, &output);
TF_EXPECT_OK(status);
VerifyGraphsEqual(item.graph, output, __FUNCTION__);
}
TEST_F(DependencyOptimizerTest, Identity_DeviceCrossing_ConsumerOnSameDevice) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output x_on_1 =
ops::Const(s.WithOpName("x_on_1").WithDevice("/gpu:1"), {1.0f}, {});
Output one_on_2 =
ops::Const(s.WithOpName("one_on_2").WithDevice("/gpu:2"), {1.0f}, {});
Output x_on_2 =
ops::Identity(s.WithOpName("x_on_2").WithDevice("/gpu:2"), x_on_1);
Output result =
ops::Add(s.WithOpName("result").WithDevice("/gpu:2"), x_on_2, one_on_2);
GrapplerItem item;
TF_CHECK_OK(s.ToGraphDef(&item.graph));
item.fetch = {"result"};
DependencyOptimizer optimizer;
GraphDef output;
Status status = optimizer.Optimize(nullptr, item, &output);
TF_EXPECT_OK(status);
EXPECT_EQ(3, output.node_size());
for (const auto& node : output.node()) {
EXPECT_NE("x_on_2", node.name());
if (node.name() == "result") {
EXPECT_EQ("x_on_1", node.input(0));
}
}
}
TEST_F(DependencyOptimizerTest, RemoveGreaterEqualWithNoOp) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output x = ops::Placeholder(s.WithOpName("x"), DT_FLOAT,
ops::Placeholder::Shape({}));
Output y = ops::Placeholder(s.WithOpName("y"), DT_FLOAT,
ops::Placeholder::Shape({}));
auto greaterequal = ops::GreaterEqual(s.WithOpName("GreaterEqual"), x, y);
auto noop =
ops::NoOp(s.WithOpName("NoOp").WithControlDependencies(greaterequal));
Output add = ops::Add(
s.WithOpName("z").WithControlDependencies({noop.operation}), x, y);
GrapplerItem item;
TF_CHECK_OK(s.ToGraphDef(&item.graph));
DependencyOptimizer optimizer;
GraphDef output;
item.fetch.push_back("z");
TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &output));
int count = 0;
for (const NodeDef& node : output.node()) {
if (node.name() == "x") {
count++;
EXPECT_EQ("Placeholder", node.op());
EXPECT_EQ(0, node.input_size());
} else if (node.name() == "y") {
count++;
EXPECT_EQ("Placeholder", node.op());
EXPECT_EQ(0, node.input_size());
} else if (node.name() == "GreaterEqual") {
count++;
} else if (node.name() == "NoOp") {
count++;
} else if (node.name() == "z") {
count++;
EXPECT_EQ("Add", node.op());
EXPECT_EQ(2, node.input_size());
EXPECT_EQ("x", node.input(0));
EXPECT_EQ("y", node.input(1));
}
}
EXPECT_EQ(3, count);
}
TEST_F(DependencyOptimizerTest, GroupCrossDeviceControlDeps) {
GrapplerItem item;
{
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output a = ops::RandomUniform(s.WithOpName("a").WithDevice("/CPU:1"),
{1, 2}, DT_FLOAT);
Output b = ops::RandomUniform(s.WithOpName("b").WithDevice("/CPU:2"),
{1, 2}, DT_FLOAT);
Output c = ops::RandomUniform(s.WithOpName("c").WithDevice("/CPU:1"),
{1, 2}, DT_FLOAT);
Output d = ops::RandomUniform(s.WithOpName("d").WithDevice("/CPU:3"),
{1, 2}, DT_FLOAT);
Output e = ops::RandomUniform(s.WithOpName("e").WithDevice("/CPU:0"),
{1, 2}, DT_FLOAT);
auto fetch = ops::Identity(
s.WithOpName("f")
.WithControlDependencies({a.op(), b.op(), c.op(), d.op()})
.WithDevice("/GPU:0"),
{e});
TF_CHECK_OK(s.ToGraphDef(&item.graph));
item.fetch.push_back("f");
}
GraphDef expected;
{
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output a = ops::RandomUniform(s.WithOpName("a").WithDevice("/CPU:1"),
{1, 2}, DT_FLOAT);
Output b = ops::RandomUniform(s.WithOpName("b").WithDevice("/CPU:2"),
{1, 2}, DT_FLOAT);
Output c = ops::RandomUniform(s.WithOpName("c").WithDevice("/CPU:1"),
{1, 2}, DT_FLOAT);
Output d = ops::RandomUniform(s.WithOpName("d").WithDevice("/CPU:3"),
{1, 2}, DT_FLOAT);
Output e = ops::RandomUniform(s.WithOpName("e").WithDevice("/CPU:0"),
{1, 2}, DT_FLOAT);
auto noop = ops::NoOp(s.WithOpName("GroupCrossDeviceControlEdges_0/f")
.WithDevice("/CPU:1")
.WithControlDependencies({a.op(), c.op()}));
auto fetch =
ops::Identity(s.WithOpName("f")
.WithControlDependencies({b.op(), d.op(), noop})
.WithDevice("/GPU:0"),
{e});
TF_CHECK_OK(s.ToGraphDef(&expected));
}
DependencyOptimizer optimizer;
GraphDef output;
TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &output));
CompareGraphs(expected, output);
item.graph.Swap(&output);
output.Clear();
TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &output));
CompareGraphs(expected, output);
}
TEST_F(DependencyOptimizerTest, GroupCrossHostControlDeps) {
GrapplerItem item;
{
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
std::vector<Operation> ops;
Output a = ops::RandomUniform(s.WithOpName("a").WithDevice("/CPU:0"),
{1, 2}, DT_FLOAT);
for (int t = 0; t < 4; ++t) {
for (int c = 0; c < 8; ++c) {
string opname = absl::StrCat("t", t, "/c", c);
string device = absl::StrCat("/task:", t, "/device:TPU:", c);
Output output = ops::RandomUniform(
s.WithOpName(opname).WithDevice(device), {1, 2}, DT_FLOAT);
ops.push_back(output.op());
}
}
auto fetch = ops::Identity(
s.WithOpName("f").WithControlDependencies(ops).WithDevice("/CPU:0"),
{a});
TF_CHECK_OK(s.ToGraphDef(&item.graph));
item.fetch.push_back("f");
}
GraphDef expected;
{
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
TF_CHECK_OK(s.ToGraphDef(&expected));
}
DependencyOptimizer optimizer;
GraphDef output;
TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &output));
EXPECT_EQ(output.node_size(), item.graph.node_size() + 4);
std::set<string> tasks;
for (const auto& n : output.node()) {
if (n.op() == "NoOp") {
EXPECT_TRUE(absl::StartsWith(n.name(), "GroupCrossDeviceControlEdges"));
EXPECT_EQ(n.input_size(), 8);
tasks.insert(n.device());
}
if (n.name() == "f") {
EXPECT_EQ(n.input_size(), 5);
for (const auto& i : n.input()) {
EXPECT_TRUE(i == "a" ||
absl::StartsWith(i, "^GroupCrossDeviceControlEdges"));
}
}
}
EXPECT_EQ(tasks.size(), 4);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/optimizers/dependency_optimizer.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/optimizers/dependency_optimizer_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
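Condensed for reference, this is the invocation pattern the dependency-optimizer tests above repeat: build a small graph, wrap it in a GrapplerItem together with its fetch nodes, then run the optimizer. It is only a sketch — the helper name RunDependencyOptimizerOnce and the tiny example graph are illustrative additions, not part of the record, and it has not been compile-checked against a particular TensorFlow revision.

#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/optimizers/dependency_optimizer.h"
namespace tensorflow {
namespace grappler {
Status RunDependencyOptimizerOnce() {
  // Build a trivial graph with a redundant control dependency.
  tensorflow::Scope s = tensorflow::Scope::NewRootScope();
  Output x = ops::Const(s.WithOpName("x"), {1.0f, 2.0f}, {1, 2});
  Output y = ops::Const(s.WithOpName("y"), {1.0f, 2.0f}, {1, 2});
  Output add = ops::Add(s.WithOpName("add"), x, y);
  Output id =
      ops::Identity(s.WithOpName("id").WithControlDependencies(x), add);
  // Fetch nodes are preserved; everything else may be rewritten or removed.
  GrapplerItem item;
  Status to_graph = s.ToGraphDef(&item.graph);
  if (!to_graph.ok()) return to_graph;
  item.fetch.push_back("id");
  DependencyOptimizer optimizer;
  GraphDef output;
  // A Cluster* is optional; the tests above also pass nullptr.
  return optimizer.Optimize(nullptr, item, &output);
}
}  // namespace grappler
}  // namespace tensorflow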
2f9e7e83-870e-495c-80a5-8d17755d9c9a | cpp | tensorflow/tensorflow | tfrt_fallback_util | tensorflow/compiler/mlir/tfrt/ir/tfrt_fallback_util.cc | tensorflow/compiler/mlir/tfrt/tests/ir/tfrt_fallback_util_test.cc | #include "tensorflow/compiler/mlir/tfrt/ir/tfrt_fallback_util.h"
#include "tensorflow/compiler/mlir/tfrt/ir/tfrt_fallback_async.h"
namespace tfrt {
namespace fallback_async {
bool IsArgConsumedByFallback(mlir::func::FuncOp func, int arg_index) {
auto arg = func.getArgument(arg_index);
for (mlir::Operation *user : arg.getUsers()) {
if (llvm::isa<FallbackAsyncDialect>(user->getDialect())) return true;
}
return false;
}
void ForEachArgConsumedByFallback(
mlir::func::FuncOp func, llvm::function_ref<void(int arg_index)> action) {
for (int arg_index = 0; arg_index < func.getNumArguments(); ++arg_index) {
if (IsArgConsumedByFallback(func, arg_index)) action(arg_index);
}
}
void ForEachArgConsumedByFallback(
mlir::ModuleOp module,
llvm::function_ref<void(llvm::StringRef func_name, int arg_index)> action) {
for (auto func : module.getOps<mlir::func::FuncOp>()) {
ForEachArgConsumedByFallback(
func, [func_name = func.getName(), action](int arg_index) {
action(func_name, arg_index);
});
}
}
}
} | #include "tensorflow/compiler/mlir/tfrt/ir/tfrt_fallback_util.h"
#include <string>
#include <utility>
#include <vector>
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/DialectRegistry.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/Parser/Parser.h"
#include "tensorflow/compiler/mlir/tfrt/ir/tfrt_fallback_async.h"
#include "tensorflow/compiler/mlir/tfrt/ir/tfrt_fallback_sync.h"
#include "tensorflow/core/platform/resource_loader.h"
#include "tensorflow/core/platform/test.h"
#include "tfrt/init_tfrt_dialects.h"
namespace tfrt {
namespace fallback_async {
namespace {
TEST(SavedModelTest, MapFallbackArgs) {
std::string saved_model_mlir_path = tensorflow::GetDataDependencyFilepath(
"tensorflow/compiler/mlir/tfrt/tests/ir/testdata/test.mlir");
mlir::DialectRegistry registry;
RegisterTFRTDialects(registry);
registry.insert<tfrt::fallback_async::FallbackAsyncDialect>();
registry.insert<tfrt::fallback_sync::FallbackSyncDialect>();
mlir::MLIRContext context(registry);
auto module =
mlir::parseSourceFile<mlir::ModuleOp>(saved_model_mlir_path, &context);
ASSERT_TRUE(module);
std::vector<std::pair<std::string, int>> func_and_index;
ForEachArgConsumedByFallback(
module.get(),
[&func_and_index](llvm::StringRef func_name, int arg_index) {
func_and_index.push_back({func_name.str(), arg_index});
});
ASSERT_EQ(func_and_index.size(), 1);
EXPECT_EQ(func_and_index[0].first, "test");
EXPECT_EQ(func_and_index[0].second, 2);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tfrt/ir/tfrt_fallback_util.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tfrt/tests/ir/tfrt_fallback_util_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
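A minimal sketch of how the fallback-argument helpers above might be driven once a module has already been parsed. The helper name CollectFallbackArgs is hypothetical, and the includes assume the same build environment as the test in this record.

#include <string>
#include <utility>
#include <vector>
#include "mlir/IR/BuiltinOps.h"
#include "tensorflow/compiler/mlir/tfrt/ir/tfrt_fallback_util.h"
// Collects every (function name, argument index) pair whose argument is
// consumed by a fallback op, mirroring the callback used in the test above.
std::vector<std::pair<std::string, int>> CollectFallbackArgs(
    mlir::ModuleOp module) {
  std::vector<std::pair<std::string, int>> result;
  tfrt::fallback_async::ForEachArgConsumedByFallback(
      module, [&result](llvm::StringRef func_name, int arg_index) {
        result.push_back({func_name.str(), arg_index});
      });
  return result;
}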
e0f4dcb8-c860-4741-8d6b-fcd45186e83e | cpp | google/cel-cpp | null_value | common/values/null_value.cc | common/values/null_value_test.cc | #include <cstddef>
#include <string>
#include <utility>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/cord.h"
#include "absl/strings/string_view.h"
#include "common/any.h"
#include "common/casting.h"
#include "common/json.h"
#include "common/value.h"
#include "internal/serialize.h"
#include "internal/status_macros.h"
namespace cel {
absl::Status NullValue::SerializeTo(AnyToJsonConverter&,
absl::Cord& value) const {
return internal::SerializeValue(kJsonNull, value);
}
absl::Status NullValue::Equal(ValueManager&, const Value& other,
Value& result) const {
result = BoolValue{InstanceOf<NullValue>(other)};
return absl::OkStatus();
}
absl::StatusOr<Value> NullValue::Equal(ValueManager& value_manager,
const Value& other) const {
Value result;
CEL_RETURN_IF_ERROR(Equal(value_manager, other, result));
return result;
}
} | #include <sstream>
#include "absl/strings/cord.h"
#include "absl/strings/string_view.h"
#include "absl/types/optional.h"
#include "common/any.h"
#include "common/casting.h"
#include "common/json.h"
#include "common/native_type.h"
#include "common/value.h"
#include "common/value_testing.h"
#include "internal/testing.h"
namespace cel {
namespace {
using ::absl_testing::IsOkAndHolds;
using ::testing::An;
using ::testing::Ne;
using NullValueTest = common_internal::ThreadCompatibleValueTest<>;
TEST_P(NullValueTest, Kind) {
EXPECT_EQ(NullValue().kind(), NullValue::kKind);
EXPECT_EQ(Value(NullValue()).kind(), NullValue::kKind);
}
TEST_P(NullValueTest, DebugString) {
{
std::ostringstream out;
out << NullValue();
EXPECT_EQ(out.str(), "null");
}
{
std::ostringstream out;
out << Value(NullValue());
EXPECT_EQ(out.str(), "null");
}
}
TEST_P(NullValueTest, ConvertToJson) {
EXPECT_THAT(NullValue().ConvertToJson(value_manager()),
IsOkAndHolds(Json(kJsonNull)));
}
TEST_P(NullValueTest, NativeTypeId) {
EXPECT_EQ(NativeTypeId::Of(NullValue()), NativeTypeId::For<NullValue>());
EXPECT_EQ(NativeTypeId::Of(Value(NullValue())),
NativeTypeId::For<NullValue>());
}
TEST_P(NullValueTest, InstanceOf) {
EXPECT_TRUE(InstanceOf<NullValue>(NullValue()));
EXPECT_TRUE(InstanceOf<NullValue>(Value(NullValue())));
}
TEST_P(NullValueTest, Cast) {
EXPECT_THAT(Cast<NullValue>(NullValue()), An<NullValue>());
EXPECT_THAT(Cast<NullValue>(Value(NullValue())), An<NullValue>());
}
TEST_P(NullValueTest, As) {
EXPECT_THAT(As<NullValue>(Value(NullValue())), Ne(absl::nullopt));
}
INSTANTIATE_TEST_SUITE_P(
NullValueTest, NullValueTest,
::testing::Combine(::testing::Values(MemoryManagement::kPooling,
MemoryManagement::kReferenceCounting)),
NullValueTest::ToString);
}
} | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/common/values/null_value.cc | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/common/values/null_value_test.cc | 4552db5798fb0853b131b783d8875794334fae7f |
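A minimal sketch of the NullValue accessors that the test exercises without needing a ValueManager. The helper name NullValueBasics is hypothetical and the includes assume the cel-cpp headers used by the test above.

#include <sstream>
#include "common/casting.h"
#include "common/value.h"
void NullValueBasics() {
  // Wrap a NullValue in the general Value variant.
  cel::Value value = cel::Value(cel::NullValue());
  bool is_null = cel::InstanceOf<cel::NullValue>(value);  // true
  // The debug form streams as "null".
  std::ostringstream out;
  out << value;
  (void)is_null;
}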
bc2dbb66-ecd6-4962-8879-565a8becbf6f | cpp | google/tensorstore | byte_strided_pointer | tensorstore/util/byte_strided_pointer.h | tensorstore/util/byte_strided_pointer_test.cc | #ifndef TENSORSTORE_UTIL_BYTE_STRIDED_POINTER_H_
#define TENSORSTORE_UTIL_BYTE_STRIDED_POINTER_H_
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <type_traits>
#include "tensorstore/internal/integer_overflow.h"
#include "tensorstore/util/element_traits.h"
namespace tensorstore {
template <typename T>
class ByteStridedPointer {
public:
using element_type = T;
using difference_type = std::ptrdiff_t;
constexpr static size_t alignment =
alignof(std::conditional_t<std::is_void_v<T>, char, T>);
ByteStridedPointer() = default;
template <
typename U,
std::enable_if_t<IsElementTypeImplicitlyConvertible<U, T>>* = nullptr>
ByteStridedPointer(U* value)
: value_(reinterpret_cast<std::uintptr_t>(value)) {
assert(value_ % alignment == 0);
}
template <
typename U,
std::enable_if_t<IsElementTypeOnlyExplicitlyConvertible<U, T>>* = nullptr>
explicit ByteStridedPointer(U* value)
: value_(reinterpret_cast<std::uintptr_t>(value)) {
assert(value_ % alignment == 0);
}
template <
typename U,
std::enable_if_t<IsElementTypeImplicitlyConvertible<U, T>>* = nullptr>
ByteStridedPointer(ByteStridedPointer<U> value)
: value_(reinterpret_cast<std::uintptr_t>(value.get())) {
assert(value_ % alignment == 0);
}
template <
typename U,
std::enable_if_t<IsElementTypeOnlyExplicitlyConvertible<U, T>>* = nullptr>
explicit ByteStridedPointer(ByteStridedPointer<U> value)
: value_(reinterpret_cast<std::uintptr_t>(value.get())) {
assert(value_ % alignment == 0);
}
T* get() const {
assert(value_ % alignment == 0);
return reinterpret_cast<T*>(value_);
}
T* operator->() const { return get(); }
template <typename U = T>
U& operator*() const {
return *static_cast<U*>(get());
}
operator T*() const { return get(); }
template <
typename U,
std::enable_if_t<IsElementTypeOnlyExplicitlyConvertible<T, U>>* = nullptr>
explicit operator U*() const {
return static_cast<U*>(get());
}
template <typename Integer>
std::enable_if_t<std::is_integral_v<Integer>, ByteStridedPointer&> operator+=(
Integer byte_offset) {
value_ = internal::wrap_on_overflow::Add(
value_, static_cast<std::uintptr_t>(byte_offset));
assert(value_ % alignment == 0);
return *this;
}
template <typename Integer>
std::enable_if_t<std::is_integral_v<Integer>, ByteStridedPointer&> operator-=(
Integer byte_offset) {
value_ = internal::wrap_on_overflow::Subtract(
value_, static_cast<std::uintptr_t>(byte_offset));
assert(value_ % alignment == 0);
return *this;
}
template <typename Integer>
std::enable_if_t<std::is_integral_v<Integer>, T>& operator[](
Integer byte_offset) const {
ByteStridedPointer x = *this;
x += byte_offset;
assert(x.value_ % alignment == 0);
return *x;
}
template <typename U>
friend std::ptrdiff_t operator-(ByteStridedPointer<T> a,
ByteStridedPointer<U> b) {
return reinterpret_cast<const char*>(a.get()) -
reinterpret_cast<const char*>(b.get());
}
template <typename Integer>
friend std::enable_if_t<std::is_integral_v<Integer>, ByteStridedPointer<T>>
operator+(ByteStridedPointer<T> ptr, Integer byte_offset) {
ptr += static_cast<std::uintptr_t>(byte_offset);
return ptr;
}
template <typename Integer>
friend inline std::enable_if_t<std::is_integral_v<Integer>,
ByteStridedPointer<T>>
operator+(Integer byte_offset, ByteStridedPointer<T> ptr) {
ptr += static_cast<std::uintptr_t>(byte_offset);
return ptr;
}
template <typename Integer>
friend inline std::enable_if_t<std::is_integral_v<Integer>,
ByteStridedPointer<T>>
operator-(ByteStridedPointer<T> ptr, Integer byte_offset) {
ptr -= static_cast<std::uintptr_t>(byte_offset);
return ptr;
}
private:
std::uintptr_t value_;
};
}
#endif | #include "tensorstore/util/byte_strided_pointer.h"
#include <limits>
#include <type_traits>
#include <gtest/gtest.h>
namespace {
using ::tensorstore::ByteStridedPointer;
struct Base {};
struct Derived : Base {};
static_assert(std::is_convertible_v<int*, ByteStridedPointer<int>>);
static_assert(std::is_constructible_v<int*, ByteStridedPointer<void>>);
static_assert(!std::is_constructible_v<int*, ByteStridedPointer<const void>>);
static_assert(std::is_convertible_v<ByteStridedPointer<int>, int*>);
static_assert(std::is_convertible_v<ByteStridedPointer<int>, const int*>);
static_assert(
std::is_convertible_v<ByteStridedPointer<int>, ByteStridedPointer<void>>);
static_assert(std::is_convertible_v<ByteStridedPointer<const int>,
ByteStridedPointer<const void>>);
static_assert(!std::is_convertible_v<ByteStridedPointer<const int>,
ByteStridedPointer<void>>);
static_assert(
!std::is_convertible_v<ByteStridedPointer<void>, ByteStridedPointer<int>>);
static_assert(
std::is_constructible_v<ByteStridedPointer<int>, ByteStridedPointer<void>>);
static_assert(!std::is_convertible_v<ByteStridedPointer<const int>,
ByteStridedPointer<const float>>);
static_assert(!std::is_convertible_v<ByteStridedPointer<Derived>,
ByteStridedPointer<Base>>);
static_assert(!std::is_convertible_v<ByteStridedPointer<Base>,
ByteStridedPointer<Derived>>);
TEST(ByteStridedPointerTest, DefaultConstructor) {
ByteStridedPointer<int> ptr;
static_cast<void>(ptr);
}
TEST(ByteStridedPointerTest, ConstructFromRaw) {
int value;
ByteStridedPointer<int> ptr = &value;
EXPECT_EQ(&value, ptr.get());
}
TEST(ByteStridedPointerTest, ConstructFromRawConvertImplicit) {
int value;
ByteStridedPointer<const int> ptr = &value;
EXPECT_EQ(&value, ptr.get());
}
TEST(ByteStridedPointerTest, ConstructFromRawConvertExplicit) {
int value;
ByteStridedPointer<const int> ptr(static_cast<void*>(&value));
EXPECT_EQ(&value, ptr.get());
}
TEST(ByteStridedPointerTest, ConstructFromOther) {
int value;
ByteStridedPointer<int> ptr = ByteStridedPointer<int>(&value);
EXPECT_EQ(&value, ptr.get());
}
TEST(ByteStridedPointerTest, ConstructFromOtherConvertImplicit) {
int value;
ByteStridedPointer<const int> ptr = ByteStridedPointer<int>(&value);
EXPECT_EQ(&value, ptr.get());
}
TEST(ByteStridedPointerTest, ConstructFromOtherConvertExplicit) {
int value;
ByteStridedPointer<const int> ptr{ByteStridedPointer<void>(&value)};
EXPECT_EQ(&value, ptr.get());
}
TEST(ByteStridedPointerTest, ArrowOperator) {
int value;
ByteStridedPointer<const int> x(&value);
EXPECT_EQ(&value, x.operator->());
}
TEST(ByteStridedPointerTest, Dereference) {
int value = 3;
ByteStridedPointer<const int> x(&value);
EXPECT_EQ(3, *x);
EXPECT_EQ(&value, &*x);
}
TEST(ByteStridedPointerTest, CastImplicit) {
int value = 3;
ByteStridedPointer<const int> x(&value);
const int* p = x;
EXPECT_EQ(&value, p);
}
TEST(ByteStridedPointerTest, CastExplicit) {
int value = 3;
ByteStridedPointer<void> x(&value);
const int* p = static_cast<const int*>(x);
EXPECT_EQ(&value, p);
}
TEST(ByteStridedPointerTest, Add) {
int arr[] = {1, 2, 3};
ByteStridedPointer<int> x(&arr[0]);
x += sizeof(int);
EXPECT_EQ(x, ByteStridedPointer<int>(&arr[0]) + sizeof(int));
EXPECT_EQ(x, sizeof(int) + ByteStridedPointer<int>(&arr[0]));
EXPECT_EQ(&arr[1], x.get());
}
TEST(ByteStridedPointerTest, Subtract) {
int arr[] = {1, 2, 3};
ByteStridedPointer<int> x(&arr[2]);
x -= sizeof(int);
EXPECT_EQ(x, ByteStridedPointer<int>(&arr[2]) - sizeof(int));
EXPECT_EQ(&arr[1], x.get());
}
TEST(ByteStridedPointerTest, AddWrapOnOverflow) {
int arr[] = {1, 2, 3};
ByteStridedPointer<int> x(&arr[0]);
const std::uintptr_t base_index =
std::numeric_limits<std::uintptr_t>::max() - 99;
x -= base_index;
x += (base_index + sizeof(int));
EXPECT_EQ(x, ByteStridedPointer<int>(&arr[0]) + sizeof(int));
EXPECT_EQ(x, sizeof(int) + ByteStridedPointer<int>(&arr[0]));
EXPECT_EQ(&arr[1], x.get());
}
TEST(ByteStridedPointerTest, Difference) {
int arr[] = {1, 2, 3};
ByteStridedPointer<int> x(&arr[2]);
ByteStridedPointer<int> y(&arr[1]);
EXPECT_EQ(4, x - y);
}
TEST(ByteStridedPointerTest, Comparison) {
int arr[] = {1, 2, 3};
ByteStridedPointer<int> x(&arr[2]);
ByteStridedPointer<int> y = x;
EXPECT_TRUE(x == y);
x -= sizeof(int);
EXPECT_FALSE(x == y);
EXPECT_TRUE(x < y);
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/util/byte_strided_pointer.h | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/util/byte_strided_pointer_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
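A minimal sketch of the intended use of ByteStridedPointer: indexing with byte offsets computed from explicit row and column byte strides rather than element indices. The helper name SumWithByteStrides and the 2x3 array are illustrative only; the operations used are exactly those declared in the header above.

#include <cstddef>
#include <cstdint>
#include "tensorstore/util/byte_strided_pointer.h"
std::int64_t SumWithByteStrides() {
  int data[2][3] = {{1, 2, 3}, {4, 5, 6}};
  // Byte strides for a row-major int[2][3] layout.
  const std::ptrdiff_t row_stride = 3 * sizeof(int);
  const std::ptrdiff_t col_stride = sizeof(int);
  // int* converts implicitly to ByteStridedPointer<const int>.
  tensorstore::ByteStridedPointer<const int> base = &data[0][0];
  std::int64_t sum = 0;
  for (int i = 0; i < 2; ++i) {
    for (int j = 0; j < 3; ++j) {
      // operator[] takes a byte offset, not an element index; the offset
      // must keep the pointer aligned for int.
      sum += base[i * row_stride + j * col_stride];
    }
  }
  return sum;  // 21
}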
7d28bedf-da1c-40ae-8533-c0db0131ec7d | cpp | tensorflow/tensorflow | device_compiler | tensorflow/compiler/jit/device_compiler.h | tensorflow/compiler/jit/device_compiler_test.cc | #ifndef TENSORFLOW_COMPILER_JIT_DEVICE_COMPILER_H_
#define TENSORFLOW_COMPILER_JIT_DEVICE_COMPILER_H_
#include <memory>
#include <numeric>
#include <optional>
#include <string>
#include <utility>
#include <variant>
#include <vector>
#include "absl/base/call_once.h"
#include "absl/container/flat_hash_map.h"
#include "absl/types/optional.h"
#include "absl/types/span.h"
#include "tensorflow/compiler/jit/device_compilation_cache.h"
#include "tensorflow/compiler/jit/device_compilation_cluster_signature.h"
#include "tensorflow/compiler/jit/device_compilation_profiler.h"
#include "tensorflow/compiler/jit/device_compiler_client.h"
#include "tensorflow/compiler/jit/device_executable_persistor.h"
#include "tensorflow/compiler/jit/flags.h"
#include "tensorflow/compiler/jit/tf_graph_to_hlo_compiler.h"
#include "tensorflow/compiler/jit/xla_compile_util.h"
#include "tensorflow/compiler/tf2xla/xla_compiler.h"
#include "xla/client/local_client.h"
#include "tensorflow/core/framework/metrics.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/lib/core/threadpool.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/thread_annotations.h"
namespace tensorflow {
template <typename ExecutableType, typename ClientType>
class DeviceCompiler : public ResourceBase {
public:
DeviceCompiler(
std::unique_ptr<DeviceExecutablePersistor<ExecutableType, ClientType>>
persistor,
std::unique_ptr<DeviceCompilerClient<ExecutableType, ClientType>>
compiler_client);
~DeviceCompiler() override;
enum class CompileScope {
kOp,
kFunction,
};
Status CompileIfNeeded(
const XlaCompiler::Options& options, const NameAttrList& function,
const std::vector<XlaCompiler::Argument>& args,
const XlaCompiler::CompileOptions& compile_options,
DeviceCompileMode compile_mode, DeviceCompilationProfiler* profiler,
const XlaCompiler::CompilationResult** out_compilation_result,
ExecutableType** out_executable);
Status CompileSingleOpIfNeeded(
const XlaCompiler::Options& options,
const std::vector<XlaCompiler::Argument>& args,
const XlaCompiler::CompileOptions& compile_options, OpKernelContext* ctx,
DeviceCompilationProfiler* profiler,
const XlaCompiler::CompilationResult** out_compilation_result,
ExecutableType** out_executable);
ClientType* client() const { return compiler_client_->client(); }
const DeviceType& device_type() const { return persistor_->device_type(); }
DeviceCompilationCache<ExecutableType>* cache() { return cache_.get(); }
DeviceExecutablePersistor<ExecutableType, ClientType>* persistor() {
return persistor_.get();
}
DeviceCompilerClient<ExecutableType, ClientType>* compiler_client() {
return compiler_client_.get();
}
string DebugString() const override;
private:
Status CompileImpl(
const XlaCompiler::CompileOptions& compile_options,
const XlaCompiler::Options& options, const NameAttrList& function,
const std::vector<XlaCompiler::Argument>& args, CompileScope scope,
DeviceCompileMode compile_mode, OpKernelContext* ctx,
DeviceCompilationProfiler* profiler,
const XlaCompiler::CompilationResult** out_compilation_result,
ExecutableType** out_executable);
StatusOr<typename DeviceCompilationCache<ExecutableType>::Value>
CompileStrict(
const DeviceCompilationClusterSignature& sig,
const XlaCompiler::CompileOptions& compile_options,
const XlaCompiler::Options& options,
const std::vector<XlaCompiler::Argument>& args,
const NameAttrList& function,
typename DeviceCompilationCache<ExecutableType>::Value cache_value,
CompileScope scope, OpKernelContext* ctx,
DeviceCompilationProfiler* profiler, mutex* mu)
TF_EXCLUSIVE_LOCKS_REQUIRED(*mu);
Status CompileAsynchronous(const DeviceCompilationClusterSignature& sig,
const XlaCompiler::CompileOptions& compile_options,
const XlaCompiler::Options& options,
const std::vector<XlaCompiler::Argument>& args,
const NameAttrList& function, CompileScope scope,
OpKernelContext* ctx,
DeviceCompilationProfiler* profiler);
std::unique_ptr<DeviceExecutablePersistor<ExecutableType, ClientType>>
persistor_;
std::unique_ptr<DeviceCompilerClient<ExecutableType, ClientType>>
compiler_client_;
std::unique_ptr<DeviceCompilationCache<ExecutableType>> cache_;
std::unique_ptr<thread::ThreadPool> async_compiler_threads_;
mutex cluster_mutexes_mu_;
absl::flat_hash_map<DeviceCompilationClusterSignature, std::unique_ptr<mutex>,
DeviceCompilationClusterSignature::Hash>
cluster_mutexes_ TF_GUARDED_BY(cluster_mutexes_mu_);
DeviceCompiler(const DeviceCompiler&) = delete;
void operator=(const DeviceCompiler&) = delete;
};
namespace device_compiler_internal {
inline void LogOnceXlaCompiledFirstCluster() {
static absl::once_flag log_once;
absl::call_once(log_once, [] {
LOG(INFO) << "Compiled cluster using XLA! This line is logged at most "
"once for the lifetime of the process.";
});
}
template <typename ExecutableType>
inline Status EligibleToPersist(DeviceCompileState compile_state,
const ExecutableType* executable) {
if (compile_state != DeviceCompileState::kCompiled) {
return errors::FailedPrecondition(
"Cache entry to serialize is not compiled.");
}
if (executable == nullptr) {
return errors::FailedPrecondition(
"LocalExecutable not found for cache entry to serialize.");
}
return absl::OkStatus();
}
}
template <typename ExecutableType, typename ClientType>
DeviceCompiler<ExecutableType, ClientType>::DeviceCompiler(
std::unique_ptr<DeviceExecutablePersistor<ExecutableType, ClientType>>
persistor,
std::unique_ptr<DeviceCompilerClient<ExecutableType, ClientType>>
compiler_client)
: persistor_(std::move(persistor)),
compiler_client_(std::move(compiler_client)) {
cache_ = std::make_unique<DeviceCompilationCache<ExecutableType>>();
async_compiler_threads_ = std::make_unique<tensorflow::thread::ThreadPool>(
tensorflow::Env::Default(), "async_compiler_threads",
kNumAsyncDeviceCompilerThreads);
}
template <typename ExecutableType, typename ClientType>
DeviceCompiler<ExecutableType, ClientType>::~DeviceCompiler() {
compiler_client_->WaitForProgramsToFinish();
async_compiler_threads_.reset();
}
template <typename ExecutableType, typename ClientType>
string DeviceCompiler<ExecutableType, ClientType>::DebugString() const {
return "DeviceCompiler";
}
template <typename ExecutableType, typename ClientType>
Status DeviceCompiler<ExecutableType, ClientType>::CompileIfNeeded(
const XlaCompiler::Options& options, const NameAttrList& function,
const std::vector<XlaCompiler::Argument>& args,
const XlaCompiler::CompileOptions& compile_options,
DeviceCompileMode compile_mode, DeviceCompilationProfiler* profiler,
const XlaCompiler::CompilationResult** out_compilation_result,
ExecutableType** out_executable) {
return CompileImpl(compile_options, options, function, args,
CompileScope::kFunction, compile_mode, nullptr,
profiler, out_compilation_result, out_executable);
}
template <typename ExecutableType, typename ClientType>
Status DeviceCompiler<ExecutableType, ClientType>::CompileSingleOpIfNeeded(
const XlaCompiler::Options& options,
const std::vector<XlaCompiler::Argument>& args,
const XlaCompiler::CompileOptions& compile_options, OpKernelContext* ctx,
DeviceCompilationProfiler* profiler,
const XlaCompiler::CompilationResult** out_compilation_result,
ExecutableType** out_executable) {
const NodeDef& def = ctx->op_kernel().def();
NameAttrList name;
name.set_name(def.op());
*name.mutable_attr() = def.attr();
name.mutable_attr()->erase("_class");
return CompileImpl(compile_options, options, name, args, CompileScope::kOp,
DeviceCompileMode::kStrict, ctx, profiler,
out_compilation_result, out_executable);
}
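// Synchronously compiles the cluster identified by `sig`; the per-signature
// mutex `mu` must already be held. First tries to load a serialized executable
// through the persistor; otherwise builds one, attempts to persist it, then
// stores the result in the compilation cache and reports the compile time to
// the profiler.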
template <typename ExecutableType, typename ClientType>
StatusOr<typename DeviceCompilationCache<ExecutableType>::Value>
DeviceCompiler<ExecutableType, ClientType>::CompileStrict(
const DeviceCompilationClusterSignature& sig,
const XlaCompiler::CompileOptions& compile_options,
const XlaCompiler::Options& options,
const std::vector<XlaCompiler::Argument>& args,
const NameAttrList& function,
typename DeviceCompilationCache<ExecutableType>::Value cache_value,
CompileScope scope, OpKernelContext* ctx,
DeviceCompilationProfiler* profiler, mutex* mu) {
tensorflow::Env* env = tensorflow::Env::Default();
const uint64 compile_start_us = env->NowMicros();
TfGraphToHloCompiler compiler(options);
cache_value.compile_state = DeviceCompileState::kCompiled;
std::unique_ptr<ExecutableType> out_executable;
auto out_compilation_result =
std::make_unique<XlaCompiler::CompilationResult>();
if (scope == CompileScope::kOp) {
cache_value.compilation_status = compiler.CompileSingleOp(
compile_options, ctx, args, out_compilation_result.get());
} else {
CHECK(scope == CompileScope::kFunction);
cache_value.compilation_status = compiler.Compile(
compile_options, function, args, out_compilation_result.get());
}
TF_RETURN_IF_ERROR(cache_value.compilation_status);
TF_RET_CHECK(cache_value.executable == nullptr);
TF_RET_CHECK(out_compilation_result->computation != nullptr);
auto loaded_executable = persistor_->TryToLoadExecutable(
DeviceCompilationClusterSignature::Hash()(sig), sig.HumanString(),
options, *out_compilation_result, compiler_client_.get());
if (loaded_executable.has_value()) {
cache_value.compilation_status = loaded_executable->status();
if (loaded_executable->ok()) {
out_executable = *std::move(*loaded_executable);
metrics::UpdatePersistentCacheLoadCount();
}
} else {
auto built_executable =
compiler_client_->BuildExecutable(options, *out_compilation_result);
TF_RETURN_IF_ERROR(built_executable.status());
out_executable = *std::move(built_executable);
TF_RETURN_IF_ERROR(
device_compiler_internal::EligibleToPersist<ExecutableType>(
cache_value.compile_state, out_executable.get()));
TF_RETURN_IF_ERROR(persistor_->TryToPersistExecutable(
DeviceCompilationClusterSignature::Hash()(sig), sig.HumanString(),
options, *out_compilation_result, *out_executable,
compiler_client_.get()));
}
cache_value.compilation_result = out_compilation_result.get();
cache_value.executable = out_executable.get();
cache_->Store(sig, cache_value.compile_state, cache_value.compilation_status,
std::move(out_compilation_result), std::move(out_executable));
const uint64 compile_end_us = env->NowMicros();
const uint64 compile_time_us = compile_end_us - compile_start_us;
device_compiler_internal::LogOnceXlaCompiledFirstCluster();
TF_RETURN_IF_ERROR(profiler->RegisterCompilation(
function, compile_time_us, loaded_executable.has_value()));
return cache_value;
}
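// Marks the cache entry as kCompiling and schedules a strict compilation of
// the cluster on the async compiler thread pool; a failed compilation status
// is written back to the cache entry.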
template <typename ExecutableType, typename ClientType>
Status DeviceCompiler<ExecutableType, ClientType>::CompileAsynchronous(
const DeviceCompilationClusterSignature& signature,
const XlaCompiler::CompileOptions& compile_options,
const XlaCompiler::Options& options,
const std::vector<XlaCompiler::Argument>& args,
const NameAttrList& function, CompileScope scope, OpKernelContext* ctx,
DeviceCompilationProfiler* profiler) {
cache_->Store(signature, DeviceCompileState::kCompiling, std::nullopt,
std::nullopt, std::nullopt);
profiler->IncrementOngoingAsyncCompilations();
const std::string& function_name = function.name();
async_compiler_threads_->Schedule([=] {
VLOG(2) << "Starting asynchronous compilation of cluster " << function_name
<< '.';
mutex mu;
mutex_lock lock(mu);
auto cache_value = typename DeviceCompilationCache<ExecutableType>::Value();
auto s = CompileStrict(signature, compile_options, options, args, function,
cache_value, scope, ctx, profiler, &mu);
VLOG(2) << "Finished asynchronous compililation of cluster "
<< function_name << '.';
profiler->DecrementOngoingAsyncCompilations();
if (!s.ok()) {
cache_->Store(signature, std::nullopt, s.status(), std::nullopt,
std::nullopt);
}
});
return absl::OkStatus();
}
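// Shared implementation behind CompileIfNeeded and CompileSingleOpIfNeeded:
// builds the cluster signature, grabs the per-cluster mutex, and then either
// returns the cached result or dispatches to strict/asynchronous compilation
// depending on the cache state and `compile_mode`.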
template <typename ExecutableType, typename ClientType>
Status DeviceCompiler<ExecutableType, ClientType>::CompileImpl(
const XlaCompiler::CompileOptions& compile_options,
const XlaCompiler::Options& options, const NameAttrList& function,
const std::vector<XlaCompiler::Argument>& args, CompileScope scope,
DeviceCompileMode compile_mode, OpKernelContext* ctx,
DeviceCompilationProfiler* profiler,
const XlaCompiler::CompilationResult** out_compilation_result,
ExecutableType** out_executable) {
DCHECK_NE(out_executable, nullptr);
VLOG(2) << "DeviceCompiler::Compile " << DebugString();
if (VLOG_IS_ON(2)) {
VLOG(2) << "num_inputs=" << args.size();
for (int i = 0, end = args.size(); i < end; i++) {
VLOG(3) << i << ": " << args[i].HumanString();
}
}
TF_ASSIGN_OR_RETURN(auto signature,
DeviceCompilationClusterSignature::Build(function, args));
mutex* cluster_mutex;
{
mutex_lock lock(cluster_mutexes_mu_);
auto it =
cluster_mutexes_.emplace(signature, std::make_unique<mutex>()).first;
cluster_mutex = it->second.get();
}
profiler->RegisterExecution(function);
string human_signature;
if (VLOG_IS_ON(2)) {
human_signature = VLOG_IS_ON(3) ? signature.HumanString() : function.name();
VLOG(2) << "DeviceCompilationClusterSignature: " << human_signature;
}
mutex_lock cluster_compile_lock(*cluster_mutex);
auto cache_value = cache_->LookupOrCreate(signature);
int64_t current_request_count = cache_value.request_count;
VLOG(2) << "Compilation cache entry hit: "
<< static_cast<int>(cache_value.compile_state)
<< " signature: " << human_signature << " with request count "
<< current_request_count;
DeviceCompileState state = cache_value.compile_state;
*out_compilation_result = nullptr;
*out_executable = nullptr;
if (state == DeviceCompileState::kUncompiled && FailOnXlaCompilation()) {
VLOG(1) << "XLA compilation disabled: " << function.name() << "\n"
<< absl::StrJoin(
args, "\n",
[](std::string* out, const XlaCompiler::Argument& arg) {
absl::StrAppend(out, " arg: ", arg.HumanString());
});
return errors::Internal("XLA compilation disabled");
}
if (state == DeviceCompileState::kUncompiled) {
XLA_SCOPED_LOGGING_TIMER("Compilation of XLA executable");
if (!profiler->ShouldCompileCluster(function, compile_mode,
current_request_count)) {
VLOG(2) << "Not compiling for signature: " << human_signature;
return absl::OkStatus();
} else if (compile_mode == DeviceCompileMode::kAsync) {
VLOG(2) << "Queueing asynchronous compilation for signature: "
<< human_signature;
TF_RETURN_IF_ERROR(CompileAsynchronous(signature, compile_options,
options, args, function, scope,
ctx, profiler));
return absl::OkStatus();
} else {
VLOG(2) << "Instantly compiling for signature: " << human_signature;
TF_ASSIGN_OR_RETURN(
cache_value,
CompileStrict(signature, compile_options, options, args, function,
cache_value, scope, ctx, profiler, cluster_mutex));
}
} else if (state == DeviceCompileState::kCompiling) {
VLOG(2) << "Ongoing asynchronous compilation for signature: "
<< human_signature;
return absl::OkStatus();
} else if (state == DeviceCompileState::kCompiled) {
VLOG(2) << "Already Compiled for signature: " << human_signature;
}
TF_RETURN_IF_ERROR(cache_value.compilation_status);
*out_compilation_result = cache_value.compilation_result;
*out_executable = cache_value.executable;
return absl::OkStatus();
}
}
#endif | #include "tensorflow/compiler/jit/device_compiler.h"
#include <iostream>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/cc/framework/scope.h"
#include "tensorflow/cc/ops/function_ops.h"
#include "tensorflow/cc/ops/math_ops.h"
#include "tensorflow/compiler/jit/device_compilation_cluster_signature.h"
#include "tensorflow/compiler/jit/device_compiler_client.h"
#include "tensorflow/compiler/jit/tests/device_compiler_test_helper.h"
#include "tensorflow/compiler/jit/xla_device_compiler_client.h"
#include "xla/client/client_library.h"
#include "xla/stream_executor/platform_manager.h"
#include "tensorflow/core/framework/fake_input.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/graph_to_functiondef.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/kernels/ops_testutil.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/notification.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/status_matchers.h"
#include "tensorflow/core/platform/statusor.h"
namespace tensorflow {
namespace {
using ::testing::_;
using ::testing::Return;
using XlaDeviceCompiler =
DeviceCompiler<xla::LocalExecutable, xla::LocalClient>;
using XlaDeviceExecutablePersistor =
DeviceExecutablePersistor<xla::LocalExecutable, xla::LocalClient>;
using Signature = DeviceCompilationClusterSignature;
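// The local client below is created for the "cuda" platform, so these tests
// require an XLA-capable GPU device.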
xla::LocalClient* GetLocalClient() {
auto platform = se::PlatformManager::PlatformWithName("cuda").value();
return xla::ClientLibrary::GetOrCreateLocalClient(platform).value();
}
XlaDeviceCompiler* CreateXlaDeviceCompiler(bool enable_persistence = false) {
auto xla_compiler_client =
std::make_unique<XlaDeviceCompilerClient>(GetLocalClient());
auto xla_persistor = std::make_unique<XlaDeviceExecutablePersistor>(
XlaDeviceExecutablePersistor::Config{
enable_persistence ? testing::TmpDir() : "", false, "xla"},
DeviceType(DEVICE_GPU_XLA_JIT));
return new XlaDeviceCompiler(std::move(xla_persistor),
std::move(xla_compiler_client));
}
absl::StatusOr<std::unique_ptr<Graph>> SampleGraphAddXY() {
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
Scope scope = Scope::NewRootScope().ExitOnError();
auto a = ops::_Arg(scope.WithOpName("A"), DT_INT32, 0);
auto b = ops::_Arg(scope.WithOpName("B"), DT_INT32, 1);
auto c = ops::Add(scope.WithOpName("C"), a, b);
auto d = ops::_Retval(scope.WithOpName("D"), c, 0);
TF_RETURN_IF_ERROR(scope.ToGraph(graph.get()));
return graph;
}
absl::StatusOr<FunctionDef> SampleFunctionAddXY(const std::string& name) {
TF_ASSIGN_OR_RETURN(auto graph, SampleGraphAddXY());
FunctionDef fdef;
TF_RETURN_IF_ERROR(GraphToFunctionDef(*graph, name, &fdef));
return fdef;
}
std::vector<XlaCompiler::Argument> SampleArgsForAddXY() {
std::vector<XlaCompiler::Argument> args(2);
args[0].kind = XlaCompiler::Argument::kParameter;
args[0].type = DT_INT32;
args[0].shape = TensorShape({2});
args[1].kind = XlaCompiler::Argument::kParameter;
args[1].type = DT_INT32;
args[1].shape = TensorShape({2});
return args;
}
class MockXlaDeviceExecutablePersistor
: public DeviceExecutablePersistor<xla::LocalExecutable, xla::LocalClient> {
public:
MockXlaDeviceExecutablePersistor()
: DeviceExecutablePersistor<xla::LocalExecutable, xla::LocalClient>(
Config{testing::TmpDir(), false, "xla"},
DeviceType(DEVICE_CPU_XLA_JIT)) {}
MOCK_METHOD(Status, TryToPersistExecutable,
(uint64, const std::string&, const XlaCompiler::Options&,
const XlaCompiler::CompilationResult&,
const xla::LocalExecutable&,
(DeviceCompilerClient<xla::LocalExecutable, xla::LocalClient>*)),
(const, override));
};
class MockDeviceCompilationProfiler : public DeviceCompilationProfiler {
public:
MOCK_METHOD(bool, ShouldCompileCluster,
(const NameAttrList& function, DeviceCompileMode compile_mode,
int64_t current_request_count),
(override));
MOCK_METHOD(Status, RegisterCompilation,
(const NameAttrList& function, int64_t compile_time_us,
bool used_persistent_cache),
(override));
};
class DeviceCompilerTest : public ::testing::Test {
protected:
void SetUp() override {
flib_def_ = std::make_unique<FunctionLibraryDefinition>(
OpRegistry::Global(), FunctionDefLibrary());
    TF_ASSERT_OK_AND_ASSIGN(auto fdef, SampleFunctionAddXY("foo"));
TF_ASSERT_OK(flib_def_->AddFunctionDef(fdef));
profiler_ = new DeviceCompilationProfiler();
profiler_ref_ = std::make_unique<core::ScopedUnref>(profiler_);
mock_profiler_ = new MockDeviceCompilationProfiler();
mock_profiler_ref_ = std::make_unique<core::ScopedUnref>(mock_profiler_);
xla_device_compiler_ = CreateXlaDeviceCompiler();
xla_device_compiler_ref_ =
std::make_unique<core::ScopedUnref>(xla_device_compiler_);
auto listener = std::make_unique<JitCompilationListener>();
listener_ = listener.get();
RegisterXlaActivityListener(std::move(listener));
}
XlaCompiler::Options GetDefaultXlaOptions() {
XlaCompiler::Options options;
options.device_type = DeviceType(DEVICE_GPU_XLA_JIT);
options.client = xla_device_compiler_->client();
options.flib_def = flib_def_.get();
return options;
}
absl::StatusOr<std::unique_ptr<xla::LocalExecutable>>
BuildSampleXlaExecutable() {
TF_ASSIGN_OR_RETURN(auto graph, SampleGraphAddXY());
auto args = SampleArgsForAddXY();
XlaCompiler compiler(GetDefaultXlaOptions());
XlaCompiler::CompilationResult compilation_result;
TF_RETURN_IF_ERROR(compiler.CompileGraph(XlaCompiler::CompileOptions(),
"graph", std::move(graph), args,
&compilation_result));
return xla_device_compiler_->compiler_client()->BuildExecutable(
GetDefaultXlaOptions(), compilation_result);
}
std::unique_ptr<FunctionLibraryDefinition> flib_def_;
JitCompilationListener* listener_;
DeviceCompilationProfiler* profiler_;
std::unique_ptr<core::ScopedUnref> profiler_ref_;
MockDeviceCompilationProfiler* mock_profiler_;
std::unique_ptr<core::ScopedUnref> mock_profiler_ref_;
XlaDeviceCompiler* xla_device_compiler_;
std::unique_ptr<core::ScopedUnref> xla_device_compiler_ref_;
};
TEST_F(DeviceCompilerTest, CompileStrictSuccess) {
const XlaCompiler::CompilationResult* compilation_result = nullptr;
xla::LocalExecutable* xla_executable = nullptr;
XlaCompiler::Options options = GetDefaultXlaOptions();
NameAttrList fn;
fn.set_name("foo");
TF_EXPECT_OK(xla_device_compiler_->CompileIfNeeded(
options, fn, SampleArgsForAddXY(), XlaCompiler::CompileOptions{},
DeviceCompileMode::kStrict, profiler_, &compilation_result,
&xla_executable));
EXPECT_TRUE(compilation_result != nullptr);
EXPECT_TRUE(xla_executable != nullptr);
}
TEST_F(DeviceCompilerTest, CompileShouldCompileClusterFalse) {
const XlaCompiler::CompilationResult* compilation_result = nullptr;
xla::LocalExecutable* xla_executable = nullptr;
XlaCompiler::Options options = GetDefaultXlaOptions();
NameAttrList fn;
fn.set_name("foo");
EXPECT_CALL(*mock_profiler_,
ShouldCompileCluster(_, DeviceCompileMode::kLazy, 1))
.WillOnce(Return(false));
TF_EXPECT_OK(xla_device_compiler_->CompileIfNeeded(
options, fn, SampleArgsForAddXY(), XlaCompiler::CompileOptions{},
DeviceCompileMode::kLazy, mock_profiler_, &compilation_result,
&xla_executable));
EXPECT_TRUE(compilation_result == nullptr);
EXPECT_TRUE(xla_executable == nullptr);
}
TEST_F(DeviceCompilerTest, CompileCacheHit) {
const XlaCompiler::CompilationResult* compilation_result = nullptr;
xla::LocalExecutable* xla_executable = nullptr;
XlaCompiler::Options options = GetDefaultXlaOptions();
NameAttrList fn;
fn.set_name("foo");
TF_EXPECT_OK(xla_device_compiler_->CompileIfNeeded(
options, fn, SampleArgsForAddXY(), XlaCompiler::CompileOptions{},
DeviceCompileMode::kStrict, profiler_, &compilation_result,
&xla_executable));
EXPECT_TRUE(compilation_result != nullptr);
EXPECT_TRUE(xla_executable != nullptr);
const XlaCompiler::CompilationResult* new_compilation_result = nullptr;
xla::LocalExecutable* new_xla_executable = nullptr;
TF_EXPECT_OK(xla_device_compiler_->CompileIfNeeded(
options, fn, SampleArgsForAddXY(), XlaCompiler::CompileOptions{},
DeviceCompileMode::kStrict, profiler_, &new_compilation_result,
&new_xla_executable));
EXPECT_EQ(compilation_result, new_compilation_result);
EXPECT_EQ(xla_executable, new_xla_executable);
}
TEST_F(DeviceCompilerTest, CompileAsyncSuccess) {
const XlaCompiler::CompilationResult* compilation_result = nullptr;
xla::LocalExecutable* xla_executable = nullptr;
XlaCompiler::Options options = GetDefaultXlaOptions();
NameAttrList fn;
fn.set_name("foo");
Notification done;
EXPECT_CALL(*mock_profiler_,
ShouldCompileCluster(_, DeviceCompileMode::kAsync, 1))
.WillOnce(Return(true));
EXPECT_CALL(*mock_profiler_, RegisterCompilation(_, _, false))
.WillOnce([&done] {
done.Notify();
return absl::OkStatus();
});
auto args = SampleArgsForAddXY();
TF_EXPECT_OK(xla_device_compiler_->CompileIfNeeded(
options, fn, args, XlaCompiler::CompileOptions{},
DeviceCompileMode::kAsync, mock_profiler_, &compilation_result,
&xla_executable));
EXPECT_TRUE(compilation_result == nullptr);
EXPECT_TRUE(xla_executable == nullptr);
auto xla_cache = xla_device_compiler_->cache();
TF_ASSERT_OK_AND_ASSIGN(auto signature, Signature::Build(fn, args));
auto cache_value = xla_cache->Lookup(signature);
EXPECT_TRUE(cache_value);
EXPECT_TRUE(cache_value->compile_state != DeviceCompileState::kUncompiled);
done.WaitForNotification();
cache_value = xla_cache->Lookup(signature);
EXPECT_TRUE(cache_value);
EXPECT_TRUE(cache_value->compile_state == DeviceCompileState::kCompiled);
EXPECT_TRUE(cache_value->compilation_result != nullptr);
EXPECT_TRUE(cache_value->executable != nullptr);
EXPECT_TRUE(cache_value->compilation_status.ok());
}
TEST_F(DeviceCompilerTest, CompilePersistentCacheEnabled) {
auto xla_device_compiler =
CreateXlaDeviceCompiler(true);
core::ScopedUnref xla_device_compiler_ref(xla_device_compiler);
NameAttrList fn;
fn.set_name("foo");
auto args = SampleArgsForAddXY();
XlaCompiler::Options options = GetDefaultXlaOptions();
const XlaCompiler::CompilationResult* compilation_result = nullptr;
xla::LocalExecutable* xla_executable = nullptr;
TF_EXPECT_OK(xla_device_compiler->CompileIfNeeded(
options, fn, args, XlaCompiler::CompileOptions{},
DeviceCompileMode::kStrict, profiler_, &compilation_result,
&xla_executable));
EXPECT_TRUE(compilation_result != nullptr);
EXPECT_TRUE(xla_executable != nullptr);
std::vector<XlaJitCompilationActivity> activity_history =
listener_->GetListenerHistory();
EXPECT_EQ(activity_history.size(), 1);
EXPECT_EQ(activity_history[0].cluster_name(), fn.name());
EXPECT_EQ(activity_history[0].compile_count(), 1);
EXPECT_FALSE(activity_history[0].used_persistent_cache());
listener_->ClearListenerHistory();
auto xla_device_compiler_2 =
CreateXlaDeviceCompiler(true);
core::ScopedUnref xla_device_compiler_ref_2(xla_device_compiler_2);
auto profiler = new DeviceCompilationProfiler();
core::ScopedUnref profiler_ref(profiler);
const XlaCompiler::CompilationResult* compilation_result_2 = nullptr;
xla::LocalExecutable* xla_executable_2 = nullptr;
TF_EXPECT_OK(xla_device_compiler_2->CompileIfNeeded(
options, fn, args, XlaCompiler::CompileOptions{},
DeviceCompileMode::kStrict, profiler, &compilation_result_2,
&xla_executable_2));
EXPECT_TRUE(compilation_result_2 != nullptr);
EXPECT_TRUE(xla_executable_2 != nullptr);
activity_history = listener_->GetListenerHistory();
EXPECT_EQ(activity_history.size(), 1);
EXPECT_EQ(activity_history[0].cluster_name(), fn.name());
EXPECT_EQ(activity_history[0].compile_count(), 1);
EXPECT_TRUE(activity_history[0].used_persistent_cache());
}
TEST_F(DeviceCompilerTest, CompileFailedToLoadFromPersistentCache) {
auto xla_device_compiler =
CreateXlaDeviceCompiler(true);
core::ScopedUnref xla_device_compiler_ref(xla_device_compiler);
NameAttrList fn;
fn.set_name("foo");
auto args = SampleArgsForAddXY();
XlaCompiler::Options options = GetDefaultXlaOptions();
const XlaCompiler::CompilationResult* compilation_result = nullptr;
xla::LocalExecutable* xla_executable = nullptr;
TF_EXPECT_OK(xla_device_compiler->CompileIfNeeded(
options, fn, args, XlaCompiler::CompileOptions{},
DeviceCompileMode::kStrict, profiler_, &compilation_result,
&xla_executable));
std::vector<string> files;
TF_ASSERT_OK(Env::Default()->GetChildren(testing::TmpDir(), &files));
std::string const* serialized_executable_filename = nullptr;
for (const auto& file : files) {
if (absl::StartsWith(file, "xla__")) {
serialized_executable_filename = &file;
break;
}
}
EXPECT_TRUE(serialized_executable_filename != nullptr);
std::string serialized_executable_filepath =
io::JoinPath(testing::TmpDir(), *serialized_executable_filename);
std::unique_ptr<WritableFile> serialized_executable_file;
TF_ASSERT_OK(Env::Default()->NewWritableFile(serialized_executable_filepath,
&serialized_executable_file));
TF_ASSERT_OK(serialized_executable_file->Append("Garbage."));
TF_ASSERT_OK(serialized_executable_file->Close());
auto xla_device_compiler_2 =
CreateXlaDeviceCompiler(true);
core::ScopedUnref xla_device_compiler_ref_2(xla_device_compiler_2);
const XlaCompiler::CompilationResult* compilation_result_2 = nullptr;
xla::LocalExecutable* xla_executable_2 = nullptr;
EXPECT_FALSE(xla_device_compiler_2
->CompileIfNeeded(options, fn, args,
XlaCompiler::CompileOptions{},
DeviceCompileMode::kStrict, profiler_,
&compilation_result_2, &xla_executable_2)
.ok());
EXPECT_TRUE(compilation_result_2 == nullptr);
EXPECT_TRUE(xla_executable_2 == nullptr);
}
TEST_F(DeviceCompilerTest, CompileStrictPersistentCacheFailedToPersist) {
auto xla_compiler_client =
std::make_unique<XlaDeviceCompilerClient>(GetLocalClient());
auto xla_persistor = std::make_unique<MockXlaDeviceExecutablePersistor>();
auto xla_device_compiler = new XlaDeviceCompiler(
std::move(xla_persistor), std::move(xla_compiler_client));
core::ScopedUnref xla_device_compiler_ref(xla_device_compiler);
NameAttrList fn;
fn.set_name("foo");
auto args = SampleArgsForAddXY();
XlaCompiler::Options options = GetDefaultXlaOptions();
const XlaCompiler::CompilationResult* compilation_result = nullptr;
xla::LocalExecutable* xla_executable = nullptr;
auto persistor = down_cast<MockXlaDeviceExecutablePersistor*>(
xla_device_compiler->persistor());
TF_ASSERT_OK_AND_ASSIGN(auto signature, Signature::Build(fn, args));
EXPECT_CALL(*persistor,
TryToPersistExecutable(Signature::Hash()(signature),
signature.HumanString(), _, _, _, _))
.WillOnce(Return(errors::FailedPrecondition("Random error.")));
EXPECT_THAT(xla_device_compiler->CompileIfNeeded(
options, fn, args, XlaCompiler::CompileOptions{},
DeviceCompileMode::kStrict, profiler_, &compilation_result,
&xla_executable),
testing::StatusIs(error::FAILED_PRECONDITION,
::testing::HasSubstr("Random error.")));
EXPECT_TRUE(compilation_result == nullptr);
EXPECT_TRUE(xla_executable == nullptr);
}
TEST_F(OpsTestBase, CompileSingleOpSuccess) {
TF_EXPECT_OK(NodeDefBuilder("identity_op", "Identity")
.Input(FakeInput(DT_FLOAT))
.Attr("T", DT_FLOAT)
.Finalize(node_def()));
TF_EXPECT_OK(InitOp());
AddInputFromArray<float>(TensorShape({1, 2}), {6.9, 4.2});
TF_EXPECT_OK(RunOpKernel());
auto xla_device_compiler = CreateXlaDeviceCompiler();
core::ScopedUnref xla_device_compiler_ref(xla_device_compiler);
auto profiler = new DeviceCompilationProfiler();
core::ScopedUnref profiler_ref(profiler);
const XlaCompiler::CompilationResult* compilation_result = nullptr;
xla::LocalExecutable* xla_executable = nullptr;
XlaOpRegistry::RegisterCompilationKernels();
auto flib_def = std::make_unique<FunctionLibraryDefinition>(
OpRegistry::Global(), FunctionDefLibrary());
XlaCompiler::Options options;
options.device_type = DeviceType(DEVICE_GPU_XLA_JIT);
options.client = GetLocalClient();
options.flib_def = flib_def.get();
std::vector<XlaCompiler::Argument> args(1);
args[0].kind = XlaCompiler::Argument::kConstant;
args[0].type = DT_FLOAT;
args[0].shape = TensorShape({1, 2});
args[0].constant_value = GetInput(0);
args[0].initialized = true;
NameAttrList fn;
fn.set_name("foo");
TF_EXPECT_OK(xla_device_compiler->CompileSingleOpIfNeeded(
options, args, XlaCompiler::CompileOptions{}, context_.get(), profiler,
&compilation_result, &xla_executable));
EXPECT_TRUE(compilation_result != nullptr);
EXPECT_TRUE(xla_executable != nullptr);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/jit/device_compiler.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/jit/device_compiler_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
e8d77878-5c89-4f56-bedc-a9c773970949 | cpp | google/cel-cpp | legacy_type_adapter | eval/public/structs/legacy_type_adapter.h | eval/public/structs/legacy_type_adapter_test.cc | #ifndef THIRD_PARTY_CEL_CPP_EVAL_PUBLIC_STRUCTS_LEGACY_TYPE_ADPATER_H_
#define THIRD_PARTY_CEL_CPP_EVAL_PUBLIC_STRUCTS_LEGACY_TYPE_ADPATER_H_
#include <cstdint>
#include <vector>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "base/attribute.h"
#include "common/memory.h"
#include "eval/public/cel_options.h"
#include "eval/public/cel_value.h"
namespace google::api::expr::runtime {
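// Interface for building and mutating instances of a legacy (MessageWrapper
// based) struct type: creating new instances, adapting them to well-known
// types, and setting fields by name or field number.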
class LegacyTypeMutationApis {
public:
virtual ~LegacyTypeMutationApis() = default;
virtual bool DefinesField(absl::string_view field_name) const = 0;
virtual absl::StatusOr<CelValue::MessageWrapper::Builder> NewInstance(
cel::MemoryManagerRef memory_manager) const = 0;
virtual absl::StatusOr<CelValue> AdaptFromWellKnownType(
cel::MemoryManagerRef memory_manager,
CelValue::MessageWrapper::Builder instance) const = 0;
virtual absl::Status SetField(
absl::string_view field_name, const CelValue& value,
cel::MemoryManagerRef memory_manager,
CelValue::MessageWrapper::Builder& instance) const = 0;
virtual absl::Status SetFieldByNumber(
int64_t field_number, const CelValue& value,
cel::MemoryManagerRef memory_manager,
CelValue::MessageWrapper::Builder& instance) const {
return absl::UnimplementedError("SetFieldByNumber is not yet implemented");
}
};
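// Read-only access interface for a legacy struct type: field presence and
// access, qualification, equality, and listing of set fields.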
class LegacyTypeAccessApis {
public:
struct LegacyQualifyResult {
CelValue value;
int qualifier_count;
};
virtual ~LegacyTypeAccessApis() = default;
virtual absl::StatusOr<bool> HasField(
absl::string_view field_name,
const CelValue::MessageWrapper& value) const = 0;
virtual absl::StatusOr<CelValue> GetField(
absl::string_view field_name, const CelValue::MessageWrapper& instance,
ProtoWrapperTypeOptions unboxing_option,
cel::MemoryManagerRef memory_manager) const = 0;
virtual absl::StatusOr<LegacyQualifyResult> Qualify(
absl::Span<const cel::SelectQualifier>,
const CelValue::MessageWrapper& instance, bool presence_test,
cel::MemoryManagerRef memory_manager) const {
return absl::UnimplementedError("Qualify unsupported.");
}
virtual bool IsEqualTo(const CelValue::MessageWrapper&,
const CelValue::MessageWrapper&) const {
return false;
}
virtual std::vector<absl::string_view> ListFields(
const CelValue::MessageWrapper& instance) const = 0;
};
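// Bundles the (possibly null) access and mutation APIs for a legacy type.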
class LegacyTypeAdapter {
public:
LegacyTypeAdapter(const LegacyTypeAccessApis* access,
const LegacyTypeMutationApis* mutation)
: access_apis_(access), mutation_apis_(mutation) {}
const LegacyTypeAccessApis* access_apis() { return access_apis_; }
const LegacyTypeMutationApis* mutation_apis() { return mutation_apis_; }
private:
const LegacyTypeAccessApis* access_apis_;
const LegacyTypeMutationApis* mutation_apis_;
};
}
#endif | #include "eval/public/structs/legacy_type_adapter.h"
#include <vector>
#include "google/protobuf/arena.h"
#include "eval/public/cel_value.h"
#include "eval/public/structs/trivial_legacy_type_info.h"
#include "eval/public/testing/matchers.h"
#include "eval/testutil/test_message.pb.h"
#include "extensions/protobuf/memory_manager.h"
#include "internal/status_macros.h"
#include "internal/testing.h"
namespace google::api::expr::runtime {
namespace {
class TestAccessApiImpl : public LegacyTypeAccessApis {
public:
TestAccessApiImpl() {}
absl::StatusOr<bool> HasField(
absl::string_view field_name,
const CelValue::MessageWrapper& value) const override {
return absl::UnimplementedError("Not implemented");
}
absl::StatusOr<CelValue> GetField(
absl::string_view field_name, const CelValue::MessageWrapper& instance,
ProtoWrapperTypeOptions unboxing_option,
cel::MemoryManagerRef memory_manager) const override {
return absl::UnimplementedError("Not implemented");
}
std::vector<absl::string_view> ListFields(
const CelValue::MessageWrapper& instance) const override {
return std::vector<absl::string_view>();
}
};
TEST(LegacyTypeAdapterAccessApis, DefaultAlwaysInequal) {
TestMessage message;
MessageWrapper wrapper(&message, nullptr);
MessageWrapper wrapper2(&message, nullptr);
TestAccessApiImpl impl;
EXPECT_FALSE(impl.IsEqualTo(wrapper, wrapper2));
}
}
} | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/eval/public/structs/legacy_type_adapter.h | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/eval/public/structs/legacy_type_adapter_test.cc | 4552db5798fb0853b131b783d8875794334fae7f |
dba42569-ea24-4bc0-b340-9f383dc1dacc | cpp | tensorflow/tensorflow | config | tensorflow/compiler/mlir/quantization/stablehlo/cc/config.cc | tensorflow/compiler/mlir/quantization/stablehlo/cc/config_test.cc | #include "tensorflow/compiler/mlir/quantization/stablehlo/cc/config.h"
#include <utility>
#include "tensorflow/compiler/mlir/quantization/stablehlo/quantization_config.pb.h"
namespace stablehlo::quantization {
namespace {
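// Fills in a default calibration method (MIN_MAX) and default histogram
// parameters (num_bins, percentiles) when the user left them unspecified.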
void PopulateDefaultCalibrationOptions(QuantizationConfig& quant_config) {
if (!quant_config.has_calibration_options() ||
quant_config.calibration_options().calibration_method() ==
CalibrationOptions::CALIBRATION_METHOD_UNSPECIFIED) {
quant_config.mutable_calibration_options()->set_calibration_method(
CalibrationOptions::CALIBRATION_METHOD_MIN_MAX);
}
switch (quant_config.calibration_options().calibration_method()) {
case CalibrationOptions::CALIBRATION_METHOD_HISTOGRAM_PERCENTILE:
case CalibrationOptions::CALIBRATION_METHOD_HISTOGRAM_MSE_BRUTEFORCE:
case CalibrationOptions::CALIBRATION_METHOD_HISTOGRAM_MSE_MAX_FREQUENCY:
case CalibrationOptions::CALIBRATION_METHOD_HISTOGRAM_MSE_SYMMETRIC:
if (quant_config.calibration_options()
.calibration_parameters()
.num_bins() == 0) {
quant_config.mutable_calibration_options()
->mutable_calibration_parameters()
->set_num_bins(512);
}
if (quant_config.calibration_options().calibration_method() ==
CalibrationOptions::CALIBRATION_METHOD_HISTOGRAM_PERCENTILE) {
if (quant_config.calibration_options()
.calibration_parameters()
.min_percentile() == 0) {
quant_config.mutable_calibration_options()
->mutable_calibration_parameters()
->set_min_percentile(0.001);
}
if (quant_config.calibration_options()
.calibration_parameters()
.max_percentile() == 0) {
quant_config.mutable_calibration_options()
->mutable_calibration_parameters()
->set_max_percentile(99.999);
}
}
break;
default:
break;
}
}
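// Default static-range PTQ spec: matches every function when full integer
// quantization is enabled, otherwise only dot_general and gather.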
QuantizationSpec GetDefaultStaticRangePtqSpec(StaticRangePtqPreset preset) {
QuantizationSpec spec{};
spec.mutable_matcher()->mutable_function_name()->set_regex(
preset.enable_full_int_quantization() ? ".*"
: "^.*(dot_general|gather).*");
spec.mutable_method()->mutable_static_range_ptq();
return spec;
}
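// Default weight-only PTQ spec: matches conv and dot_general functions and
// registers a quantized type for operand index 1 (the weight) with no explicit
// quantization dimension.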
QuantizationSpec GetDefaultWeightOnlyPtqSpec() {
QuantizationSpec spec{};
spec.mutable_matcher()->mutable_function_name()->set_regex(
"^.*(conv|dot_general).*");
WeightOnlyPtq& weight_only_ptq_spec =
*spec.mutable_method()->mutable_weight_only_ptq();
if (auto [iter, inserted] =
weight_only_ptq_spec.mutable_input_quantized_types()->try_emplace(1);
inserted) {
iter->second.mutable_dimension_specs();
}
return spec;
}
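// For static-range PTQ only, adds a convolution-specific spec that quantizes
// operand index 1 (the filter) along dimension 3.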
QuantizationSpec GetPtqSpecForConvolution(Method::MethodCase method_case) {
QuantizationSpec spec{};
if (method_case != Method::kStaticRangePtq) {
return spec;
}
spec.mutable_matcher()->mutable_function_name()->set_regex(
"composite_conv.*");
QuantizedType conv_weight_quantized_type{};
conv_weight_quantized_type.mutable_dimension_specs()->set_dimension(3);
StaticRangePtq& static_range_ptq_spec =
*spec.mutable_method()->mutable_static_range_ptq();
static_range_ptq_spec.mutable_input_quantized_types()->try_emplace(
1, std::move(conv_weight_quantized_type));
return spec;
}
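// Expands the static-range PTQ preset into explicit specs, copying the
// preset's representative datasets into the calibration options when none are
// set there, and appending any user-provided specs after the expanded ones.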
void ExpandStaticRangePtqPreset(const StaticRangePtqPreset& preset,
QuantizationConfig& config) {
if (config.calibration_options().representative_datasets().empty()) {
auto preset_datasets = preset.representative_datasets();
config.mutable_calibration_options()
->mutable_representative_datasets()
->Add(preset_datasets.begin(), preset_datasets.end());
}
QuantizationSpecs new_specs{};
*new_specs.add_specs() =
GetDefaultStaticRangePtqSpec(config.static_range_ptq_preset());
*new_specs.add_specs() =
GetPtqSpecForConvolution(Method::MethodCase::kStaticRangePtq);
const QuantizationSpecs& previous_specs = config.specs();
new_specs.mutable_specs()->Add(previous_specs.specs().begin(),
previous_specs.specs().end());
config.clear_static_range_ptq_preset();
config.mutable_specs()->Swap(&new_specs);
}
void ExpandWeightOnlyPtqPreset(QuantizationConfig& config) {
QuantizationSpecs new_specs{};
*new_specs.add_specs() = GetDefaultWeightOnlyPtqSpec();
const QuantizationSpecs& previous_specs = config.specs();
new_specs.mutable_specs()->Add(previous_specs.specs().begin(),
previous_specs.specs().end());
config.clear_weight_only_ptq_preset();
config.mutable_specs()->Swap(&new_specs);
}
}
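// Expands any preset set on `config` into explicit quantization specs and
// clears the preset field; configs without a preset are returned unchanged.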
QuantizationConfig ExpandPresets(const QuantizationConfig& config) {
QuantizationConfig new_config = config;
switch (config.preset_case()) {
case QuantizationConfig::kStaticRangePtqPreset:
ExpandStaticRangePtqPreset(config.static_range_ptq_preset(), new_config);
break;
case QuantizationConfig::kWeightOnlyPtqPreset:
ExpandWeightOnlyPtqPreset(new_config);
break;
default:
break;
}
return new_config;
}
bool HasQuantizationMethod(const QuantizationSpecs& specs,
Method::MethodCase method_case) {
for (const auto& spec : specs.specs()) {
if (spec.method().method_case() == method_case) {
return true;
}
}
return false;
}
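// Returns a copy of the user-provided config with default calibration options
// and pipeline settings (unpack_quantized_types) populated.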
QuantizationConfig PopulateDefaults(
const QuantizationConfig& user_provided_config) {
QuantizationConfig config = user_provided_config;
PopulateDefaultCalibrationOptions(config);
PipelineConfig& pipeline_config = *config.mutable_pipeline_config();
if (!pipeline_config.has_unpack_quantized_types()) {
pipeline_config.set_unpack_quantized_types(true);
}
return config;
}
} | #include "tensorflow/compiler/mlir/quantization/stablehlo/cc/config.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/compiler/mlir/quantization/stablehlo/quantization_config.pb.h"
namespace stablehlo::quantization {
namespace {
using ::testing::Eq;
using ::testing::Pair;
using ::testing::SizeIs;
using ::testing::StrEq;
using ::testing::Truly;
using ::testing::UnorderedElementsAre;
TEST(PopulateDefaultsTest, PopulateDefaultsForEmptyConfig) {
QuantizationConfig config{};
const QuantizationConfig new_config = PopulateDefaults(config);
EXPECT_TRUE(new_config.pipeline_config().unpack_quantized_types());
}
TEST(PopulateDefaultsTest, PopulateDefaultsForConfigWithUnpackQuantizedTypes) {
QuantizationConfig config{};
config.mutable_pipeline_config()->set_unpack_quantized_types(false);
const QuantizationConfig new_config = PopulateDefaults(config);
EXPECT_FALSE(new_config.pipeline_config().unpack_quantized_types());
}
TEST(PopulateDefaultsTest, DefaultCalibrationOptionsPopulated) {
QuantizationConfig config{};
const QuantizationConfig new_config = PopulateDefaults(config);
EXPECT_THAT(new_config.calibration_options().calibration_method(),
Eq(CalibrationOptions::CALIBRATION_METHOD_MIN_MAX));
}
TEST(PopulateDefaultsTest,
DefaultCalibrationOptionsPopulatedForUnspecifiedMethod) {
QuantizationConfig config{};
CalibrationOptions& calibration_options =
*config.mutable_calibration_options();
calibration_options.set_calibration_method(
CalibrationOptions::CALIBRATION_METHOD_UNSPECIFIED);
const QuantizationConfig new_config = PopulateDefaults(config);
EXPECT_THAT(new_config.calibration_options().calibration_method(),
Eq(CalibrationOptions::CALIBRATION_METHOD_MIN_MAX));
}
TEST(PopulateDefaultsTest, ExplicitCalibrationOptionsNotOverridden) {
QuantizationConfig config{};
CalibrationOptions& calibration_options =
*config.mutable_calibration_options();
calibration_options.set_calibration_method(
CalibrationOptions::CALIBRATION_METHOD_AVERAGE_MIN_MAX);
calibration_options.mutable_calibration_parameters()->set_num_bins(512);
const QuantizationConfig new_config = PopulateDefaults(config);
EXPECT_THAT(new_config.calibration_options().calibration_method(),
Eq(CalibrationOptions::CALIBRATION_METHOD_AVERAGE_MIN_MAX));
EXPECT_THAT(
new_config.calibration_options().calibration_parameters().num_bins(),
Eq(512));
}
TEST(PopulateDefaultsTest, DefaultNumbersPopulatedForPartOfCalibrationOptions) {
QuantizationConfig config{};
CalibrationOptions& calibration_options =
*config.mutable_calibration_options();
calibration_options.set_calibration_method(
CalibrationOptions::CALIBRATION_METHOD_HISTOGRAM_PERCENTILE);
calibration_options.mutable_calibration_parameters()->set_num_bins(512);
const QuantizationConfig new_config = PopulateDefaults(config);
EXPECT_THAT(new_config.calibration_options().calibration_method(),
Eq(CalibrationOptions::CALIBRATION_METHOD_HISTOGRAM_PERCENTILE));
EXPECT_THAT(
new_config.calibration_options().calibration_parameters().num_bins(),
Eq(512));
EXPECT_THAT(new_config.calibration_options()
.calibration_parameters()
.min_percentile(),
Eq(0.001f));
EXPECT_THAT(new_config.calibration_options()
.calibration_parameters()
.max_percentile(),
Eq(99.999f));
}
TEST(PopulateDefaultsTest,
DefaultNumbersPopulatedForCalibrationOptionsOfHistogramMseBruteforce) {
QuantizationConfig config{};
CalibrationOptions& calibration_options =
*config.mutable_calibration_options();
calibration_options.set_calibration_method(
CalibrationOptions::CALIBRATION_METHOD_HISTOGRAM_MSE_BRUTEFORCE);
const QuantizationConfig new_config = PopulateDefaults(config);
EXPECT_THAT(
new_config.calibration_options().calibration_method(),
Eq(CalibrationOptions::CALIBRATION_METHOD_HISTOGRAM_MSE_BRUTEFORCE));
EXPECT_THAT(
new_config.calibration_options().calibration_parameters().num_bins(),
Eq(512));
EXPECT_THAT(new_config.calibration_options()
.calibration_parameters()
.min_percentile(),
Eq(0.0f));
EXPECT_THAT(new_config.calibration_options()
.calibration_parameters()
.max_percentile(),
Eq(0.0f));
}
TEST(ExpandPresetsTest, ExpandUnspecifiedPreset) {
QuantizationConfig config{};
const QuantizationConfig new_config = ExpandPresets(config);
EXPECT_FALSE(new_config.has_specs());
EXPECT_FALSE(new_config.has_calibration_options());
EXPECT_FALSE(new_config.has_pipeline_config());
}
TEST(ExpandPresetsTest, ExpandStaticRangePtqEnableFullIntquantization) {
QuantizationConfig config{};
RepresentativeDatasetConfig& preset_dataset_config =
*config.mutable_static_range_ptq_preset()->add_representative_datasets();
config.mutable_static_range_ptq_preset()->set_enable_full_int_quantization(
true);
preset_dataset_config.mutable_tf_record()->set_path("/test/path");
const QuantizationConfig new_config = ExpandPresets(config);
ASSERT_THAT(new_config.specs().specs(), SizeIs(2));
const QuantizationSpec& default_spec = new_config.specs().specs(0);
EXPECT_THAT(default_spec.matcher().function_name().regex(), StrEq(".*"));
EXPECT_TRUE(default_spec.method().has_static_range_ptq());
const QuantizationSpec& conv_spec = new_config.specs().specs(1);
EXPECT_THAT(conv_spec.matcher().function_name().regex(),
StrEq("composite_conv.*"));
ASSERT_TRUE(conv_spec.method().has_static_range_ptq());
const StaticRangePtq& srq_spec = conv_spec.method().static_range_ptq();
ASSERT_THAT(srq_spec.input_quantized_types(), SizeIs(1));
ASSERT_TRUE(srq_spec.input_quantized_types().contains(1));
ASSERT_TRUE(srq_spec.input_quantized_types().at(1).has_dimension_specs());
const QuantizedDimension& dimension_specs =
srq_spec.input_quantized_types().at(1).dimension_specs();
ASSERT_TRUE(dimension_specs.has_dimension());
EXPECT_THAT(dimension_specs.dimension(), Eq(3));
ASSERT_THAT(new_config.calibration_options().representative_datasets(),
SizeIs(1));
EXPECT_THAT(new_config.calibration_options()
.representative_datasets(0)
.tf_record()
.path(),
StrEq("/test/path"));
}
TEST(ExpandPresetsTest, ExpandStaticRangePtqPresetDefault) {
QuantizationConfig config{};
RepresentativeDatasetConfig& preset_dataset_config =
*config.mutable_static_range_ptq_preset()->add_representative_datasets();
preset_dataset_config.mutable_tf_record()->set_path("/test/path");
const QuantizationConfig new_config = ExpandPresets(config);
ASSERT_THAT(new_config.specs().specs(), SizeIs(2));
const QuantizationSpec& spec = new_config.specs().specs(0);
EXPECT_THAT(spec.matcher().function_name().regex(),
StrEq("^.*(dot_general|gather).*"));
EXPECT_TRUE(spec.method().has_static_range_ptq());
}
TEST(ExpandPresetsTest,
ExpandStaticRangePtqPresetWithTopLevelRepresentativeDataset) {
QuantizationConfig config{};
RepresentativeDatasetConfig& top_level_dataset_config =
*config.mutable_calibration_options()->add_representative_datasets();
top_level_dataset_config.mutable_tf_record()->set_path("/test/path/1");
RepresentativeDatasetConfig& preset_dataset_config =
*config.mutable_static_range_ptq_preset()->add_representative_datasets();
preset_dataset_config.mutable_tf_record()->set_path("/test/path/2");
const QuantizationConfig new_config = ExpandPresets(config);
ASSERT_THAT(new_config.calibration_options().representative_datasets(),
SizeIs(1));
EXPECT_THAT(new_config.calibration_options()
.representative_datasets(0)
.tf_record()
.path(),
StrEq("/test/path/1"));
}
TEST(ExpandPresetsTest, ExpandStaticRangePtqPresetThenAppendExplicitSpecs) {
QuantizationConfig config{};
config.mutable_static_range_ptq_preset()->set_enable_full_int_quantization(
true);
QuantizationSpec& user_provided_spec = *config.mutable_specs()->add_specs();
user_provided_spec.mutable_matcher()->mutable_function_name()->set_regex(
"composite_dot_general_fn_1");
user_provided_spec.mutable_method()->mutable_no_quantization();
const QuantizationConfig new_config = ExpandPresets(config);
ASSERT_THAT(new_config.specs().specs(), SizeIs(3));
const QuantizationSpec& first_spec = new_config.specs().specs(0);
EXPECT_THAT(first_spec.matcher().function_name().regex(), StrEq(".*"));
EXPECT_TRUE(first_spec.method().has_static_range_ptq());
const QuantizationSpec& second_spec = new_config.specs().specs(1);
EXPECT_THAT(second_spec.matcher().function_name().regex(),
StrEq("composite_conv.*"));
EXPECT_TRUE(second_spec.method().has_static_range_ptq());
const QuantizationSpec& third_spec = new_config.specs().specs(2);
EXPECT_THAT(third_spec.matcher().function_name().regex(),
StrEq("composite_dot_general_fn_1"));
EXPECT_TRUE(third_spec.method().has_no_quantization());
}
TEST(ExpandPresetsTest, ExpandWeightOnlyPtqPresetDefault) {
QuantizationConfig config{};
*config.mutable_weight_only_ptq_preset() = WeightOnlyPtqPreset();
const QuantizationConfig new_config = ExpandPresets(config);
ASSERT_THAT(new_config.specs().specs(), SizeIs(1));
const QuantizationSpec& spec = new_config.specs().specs(0);
EXPECT_THAT(spec.matcher().function_name().regex(),
StrEq("^.*(conv|dot_general).*"));
EXPECT_TRUE(spec.method().has_weight_only_ptq());
const WeightOnlyPtq& weight_only_ptq_spec = spec.method().weight_only_ptq();
EXPECT_THAT(weight_only_ptq_spec.input_quantized_types(),
UnorderedElementsAre(Pair(
1, Truly([](const auto& quantized_type) {
return quantized_type.has_dimension_specs() &&
!quantized_type.dimension_specs().has_dimension();
}))));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/quantization/stablehlo/cc/config.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/quantization/stablehlo/cc/config_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
4207ac5e-ac3c-4889-a0b0-7b2ecbf83816 | cpp | tensorflow/tensorflow | tool_params | tensorflow/lite/tools/tool_params.cc | tensorflow/lite/tools/tool_params_test.cc | #include "tensorflow/lite/tools/tool_params.h"
#include <string>
#include <unordered_map>
#include <vector>
#include "tensorflow/lite/tools/logging.h"
namespace tflite {
namespace tools {
void ToolParam::AssertHasSameType(ToolParam::ParamType a,
ToolParam::ParamType b) {
TFLITE_TOOLS_CHECK(a == b) << "Type mismatch while accessing parameter.";
}
template <>
ToolParam::ParamType ToolParam::GetValueType<int32_t>() {
return ToolParam::ParamType::TYPE_INT32;
}
template <>
ToolParam::ParamType ToolParam::GetValueType<bool>() {
return ToolParam::ParamType::TYPE_BOOL;
}
template <>
ToolParam::ParamType ToolParam::GetValueType<float>() {
return ToolParam::ParamType::TYPE_FLOAT;
}
template <>
ToolParam::ParamType ToolParam::GetValueType<std::string>() {
return ToolParam::ParamType::TYPE_STRING;
}
void ToolParams::AssertParamExists(const std::string& name) const {
TFLITE_TOOLS_CHECK(HasParam(name)) << name << " was not found.";
}
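// Copies values from `other` for parameters that already exist in this set;
// parameters only present in `other` are ignored.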
void ToolParams::Set(const ToolParams& other) {
for (const auto& param : params_) {
const ToolParam* other_param = other.GetParam(param.first);
if (other_param == nullptr) continue;
param.second->Set(*other_param);
}
}
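// Adds parameters from `other` that are missing here; when `overwrite` is
// true, parameters present in both take the value from `other`.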
void ToolParams::Merge(const ToolParams& other, bool overwrite) {
for (const auto& one : other.params_) {
auto it = params_.find(one.first);
if (it == params_.end()) {
AddParam(one.first, one.second->Clone());
} else if (overwrite) {
it->second->Set(*one.second);
}
}
}
}
} | #include "tensorflow/lite/tools/tool_params.h"
#include <gtest/gtest.h>
namespace tflite {
namespace tools {
namespace {
TEST(ToolParams, SetTest) {
ToolParams params;
params.AddParam("some-int1", ToolParam::Create<int>(13 ));
params.AddParam("some-int2", ToolParam::Create<int>(17 ));
ToolParams others;
others.AddParam("some-int1", ToolParam::Create<int>(19, 5));
others.AddParam("some-bool", ToolParam::Create<bool>(true, 1));
params.Set(others);
EXPECT_EQ(19, params.Get<int>("some-int1"));
EXPECT_EQ(5, params.GetPosition<int>("some-int1"));
EXPECT_TRUE(params.HasValueSet<int>("some-int1"));
EXPECT_EQ(17, params.Get<int>("some-int2"));
EXPECT_EQ(0, params.GetPosition<int>("some-int2"));
EXPECT_FALSE(params.HasValueSet<int>("some-int2"));
EXPECT_FALSE(params.HasParam("some-bool"));
}
TEST(ToolParams, MergeTestOverwriteTrue) {
ToolParams params;
params.AddParam("some-int1", ToolParam::Create<int>(13 ));
params.AddParam("some-int2", ToolParam::Create<int>(17 ));
ToolParams others;
others.AddParam("some-int1", ToolParam::Create<int>(19, 5));
others.AddParam("some-bool", ToolParam::Create<bool>(true ));
params.Merge(others, true );
EXPECT_EQ(19, params.Get<int>("some-int1"));
EXPECT_EQ(5, params.GetPosition<int>("some-int1"));
EXPECT_EQ(17, params.Get<int>("some-int2"));
EXPECT_TRUE(params.Get<bool>("some-bool"));
}
TEST(ToolParams, MergeTestOverwriteFalse) {
ToolParams params;
params.AddParam("some-int1", ToolParam::Create<int>(13 ));
params.AddParam("some-int2", ToolParam::Create<int>(17 ));
ToolParams others;
others.AddParam("some-int1", ToolParam::Create<int>(19, 5));
others.AddParam("some-bool", ToolParam::Create<bool>(true ));
params.Merge(others);
EXPECT_EQ(13, params.Get<int>("some-int1"));
EXPECT_EQ(0, params.GetPosition<int>("some-int1"));
EXPECT_EQ(17, params.Get<int>("some-int2"));
EXPECT_TRUE(params.Get<bool>("some-bool"));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/tools/tool_params.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/tools/tool_params_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
0dc92b90-ee98-47c9-89c7-7aa60cc39d16 | cpp | google/tensorstore | nditerable_util | tensorstore/internal/nditerable_util.cc | tensorstore/internal/nditerable_util_test.cc | #include "tensorstore/internal/nditerable_util.h"
#include <stddef.h>
#include <algorithm>
#include <cassert>
#include "absl/base/optimization.h"
#include "absl/status/status.h"
#include "tensorstore/contiguous_layout.h"
#include "tensorstore/index.h"
#include "tensorstore/internal/elementwise_function.h"
#include "tensorstore/internal/integer_overflow.h"
#include "tensorstore/internal/nditerable.h"
#include "tensorstore/rank.h"
#include "tensorstore/util/iterate.h"
#include "tensorstore/util/span.h"
namespace tensorstore {
namespace internal {
namespace {
#ifndef NDEBUG
bool nditerable_use_unit_block_size = false;
#endif
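// Computes the (simplified or full) iteration layout: chooses a direction for
// each dimension, marks the layout empty if any extent is zero, skips
// size-1/skippable dimensions, orders the remaining dimensions per the
// constraints, combines adjacent dimensions when the iterable allows it, and
// pads the iteration rank to at least 2 using placeholder dimension -1.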
template <bool Full>
void GetNDIterationLayoutInfo(const NDIterableLayoutConstraint& iterable,
tensorstore::span<const Index> shape,
IterationConstraints constraints,
NDIterationLayoutInfo<Full>* info) {
info->shape.assign(shape.begin(), shape.end());
info->directions.resize(shape.size());
info->iteration_dimensions.clear();
info->iteration_shape.clear();
if constexpr (Full) {
info->full_iteration_dimensions.clear();
}
info->empty = false;
using DirectionPref = NDIterableLayoutConstraint::DirectionPref;
DirectionPref direction_prefs[kMaxRank];
std::fill_n(
direction_prefs, shape.size(),
constraints.repeated_elements_constraint() == skip_repeated_elements
? DirectionPref::kCanSkip
: DirectionPref::kEither);
iterable.UpdateDirectionPrefs(direction_prefs);
for (DimensionIndex dim_i = 0; dim_i < shape.size(); ++dim_i) {
const Index size = shape[dim_i];
if (size == 0) {
info->empty = true;
} else if ((size == 1 &&
direction_prefs[dim_i] != DirectionPref::kForwardRequired) ||
direction_prefs[dim_i] == DirectionPref::kCanSkip) {
if constexpr (Full) {
info->full_iteration_dimensions.push_back(dim_i);
}
continue;
}
info->iteration_dimensions.push_back(dim_i);
}
if (info->iteration_dimensions.empty()) {
info->iteration_dimensions.push_back(-1);
info->iteration_dimensions.push_back(-1);
info->iteration_shape.push_back(1);
info->iteration_shape.push_back(1);
} else {
if (constraints.order_constraint() == ContiguousLayoutOrder::fortran) {
std::reverse(info->iteration_dimensions.begin(),
info->iteration_dimensions.end());
} else if (constraints.order_constraint() == unspecified_order) {
std::sort(info->iteration_dimensions.begin(),
info->iteration_dimensions.end(),
[&](DimensionIndex dim_i, DimensionIndex dim_j) {
return iterable.GetDimensionOrder(dim_i, dim_j) < 0;
});
}
DimensionIndex dim_i = info->iteration_dimensions[0];
Index size_i = shape[dim_i];
info->iteration_shape.push_back(size_i);
int dir_i =
NDIterableLayoutConstraint::GetDirection(direction_prefs[dim_i]);
info->directions[dim_i] = dir_i;
auto next_iteration_dim_it = info->iteration_dimensions.begin();
if constexpr (Full) {
info->full_iteration_dimensions.push_back(dim_i);
}
for (DimensionIndex i = 1;
i < static_cast<DimensionIndex>(info->iteration_dimensions.size());
++i) {
DimensionIndex dim_j = info->iteration_dimensions[i];
Index size_j = shape[dim_j];
int dir_j =
NDIterableLayoutConstraint::GetDirection(direction_prefs[dim_j]);
info->directions[dim_j] = dir_j;
if constexpr (Full) {
info->full_iteration_dimensions.push_back(dim_j);
}
Index size_combined;
if (iterable.CanCombineDimensions(dim_i, dir_i, dim_j, dir_j, size_j) &&
!MulOverflow(size_i, size_j, &size_combined)) {
size_j = size_combined;
info->iteration_shape.back() = size_combined;
} else {
info->iteration_shape.push_back(size_j);
++next_iteration_dim_it;
}
*next_iteration_dim_it = dim_j;
dim_i = dim_j;
size_i = size_j;
dir_i = dir_j;
}
info->iteration_dimensions.erase(next_iteration_dim_it + 1,
info->iteration_dimensions.end());
}
if (info->iteration_dimensions.size() < 2) {
assert(info->iteration_dimensions.size() == 1);
info->iteration_dimensions.insert(info->iteration_dimensions.begin(), -1);
info->iteration_shape.insert(info->iteration_shape.begin(), 1);
}
}
}
void GetNDIterationLayoutInfo(const NDIterableLayoutConstraint& iterable,
tensorstore::span<const Index> shape,
IterationConstraints constraints,
NDIterationSimplifiedLayoutInfo* info) {
GetNDIterationLayoutInfo<false>(iterable, shape, constraints, info);
}
void GetNDIterationLayoutInfo(const NDIterableLayoutConstraint& iterable,
tensorstore::span<const Index> shape,
IterationConstraints constraints,
NDIterationFullLayoutInfo* info) {
GetNDIterationLayoutInfo<true>(iterable, shape, constraints, info);
}
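// Chooses a 2-d block shape targeting roughly 24KiB of working memory per
// block; when no working memory per element is needed, the full extents of the
// last two iteration dimensions are used. Test builds may force 1x1 blocks.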
IterationBufferShape GetNDIterationBlockShape(
ptrdiff_t working_memory_bytes_per_element,
tensorstore::span<const Index> iteration_shape) {
#ifdef TENSORSTORE_INTERNAL_NDITERABLE_TEST_UNIT_BLOCK_SIZE
return {1, 1};
#else
#if !defined(NDEBUG)
if (nditerable_use_unit_block_size) {
return {1, 1};
}
#endif
constexpr Index kTargetMemoryUsage = 24 * 1024;
const Index penultimate_dimension_size =
iteration_shape[iteration_shape.size() - 2];
const Index last_dimension_size = iteration_shape[iteration_shape.size() - 1];
if (working_memory_bytes_per_element == 0) {
return {penultimate_dimension_size, last_dimension_size};
} else {
const Index target_size = std::max(
Index(8), kTargetMemoryUsage / Index(working_memory_bytes_per_element));
const Index block_inner_size =
std::max(Index(1), std::min(last_dimension_size, target_size));
Index block_outer_size = 1;
if (block_inner_size < target_size) {
block_outer_size =
std::min(penultimate_dimension_size, target_size / block_inner_size);
}
return {block_outer_size, block_inner_size};
}
#endif
}
IterationBufferShape GetNDIterationBlockShape(
const NDIterableBufferConstraint& iterable,
NDIterable::IterationLayoutView layout, IterationBufferKind buffer_kind) {
return GetNDIterationBlockShape(
iterable.GetWorkingMemoryBytesPerElement(layout, buffer_kind),
layout.iteration_shape);
}
void GetNDIterationBufferInfo(const NDIterableBufferConstraint& iterable,
NDIterable::IterationLayoutView layout,
NDIterationBufferInfo* buffer_info) {
buffer_info->buffer_kind =
iterable.GetIterationBufferConstraint(layout).min_buffer_kind;
buffer_info->block_shape =
GetNDIterationBlockShape(iterable, layout, buffer_info->buffer_kind);
}
#ifndef NDEBUG
void SetNDIterableTestUnitBlockSize(bool value) {
nditerable_use_unit_block_size = value;
}
#endif
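// Writes back a partially modified block: updates the fully modified rows
// first, then the trailing partial row, and returns the number of elements
// successfully updated.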
Index UpdatePartialBlock(NDIterator& iterator,
tensorstore::span<const Index> indices,
IterationBufferShape block_shape,
IterationBufferKind buffer_kind,
IterationBufferPointer buffer, Index modified_count,
absl::Status* status) {
Index full_rows = modified_count / block_shape[1];
Index final_row_count = modified_count % block_shape[1];
Index updated = 0;
if (full_rows != 0) {
updated = iterator.UpdateBlock(indices, {full_rows, block_shape[1]}, buffer,
status);
if (ABSL_PREDICT_FALSE(updated != full_rows * block_shape[1])) {
return updated;
}
}
if (final_row_count != 0) {
buffer.AddElementOffset(buffer_kind, full_rows, 0);
Index final_row_indices[kMaxRank];
std::copy(indices.begin(), indices.end(), final_row_indices);
final_row_indices[indices.size() - 2] += full_rows;
updated += iterator.UpdateBlock(
tensorstore::span<const Index>(final_row_indices, indices.size()),
{1, final_row_count}, buffer, status);
}
return updated;
}
}
} | #include "tensorstore/internal/nditerable_util.h"
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorstore/index.h"
#include "tensorstore/util/span.h"
namespace {
using ::tensorstore::Index;
using ::tensorstore::internal::GetNDIterationBlockShape;
using ::tensorstore::internal::NDIterationPositionStepper;
using ::tensorstore::internal::ResetBufferPositionAtBeginning;
using ::tensorstore::internal::ResetBufferPositionAtEnd;
using ::tensorstore::internal::StepBufferPositionBackward;
using ::tensorstore::internal::StepBufferPositionForward;
using ::testing::ElementsAre;
using ::testing::ElementsAreArray;
TEST(GetNDIterationBlockShape, Basic) {
#ifndef TENSORSTORE_INTERNAL_NDITERABLE_TEST_UNIT_BLOCK_SIZE
constexpr auto expected_block_size = [](Index block_size) {
return block_size;
};
#else
constexpr auto expected_block_size = [](Index block_size) { return 1; };
#endif
EXPECT_THAT(
GetNDIterationBlockShape(0,
tensorstore::span<const Index>({3, 4, 1000000})),
ElementsAre(expected_block_size(4), expected_block_size(1000000)));
EXPECT_THAT(
GetNDIterationBlockShape(1,
tensorstore::span<const Index>({3, 4, 15})),
ElementsAre(expected_block_size(4), expected_block_size(15)));
EXPECT_THAT(
GetNDIterationBlockShape(1,
tensorstore::span<const Index>({3, 4, 1000000})),
ElementsAre(1, expected_block_size(24 * 1024)));
EXPECT_THAT(
GetNDIterationBlockShape(32,
tensorstore::span<const Index>({3, 4, 1000000})),
ElementsAre(1, expected_block_size(768)));
EXPECT_THAT(
GetNDIterationBlockShape(64,
tensorstore::span<const Index>({3, 4, 1000000})),
ElementsAre(1, expected_block_size(384)));
}
TEST(ResetBufferPositionTest, OneDimensional) {
std::vector<Index> shape{10};
std::vector<Index> position{42};
ResetBufferPositionAtBeginning(position);
EXPECT_THAT(position, ElementsAre(0));
ResetBufferPositionAtEnd(shape, 1, position.data());
EXPECT_THAT(position, ElementsAre(9));
ResetBufferPositionAtEnd(shape, 4, position.data());
EXPECT_THAT(position, ElementsAre(6));
}
TEST(ResetBufferPositionTest, TwoDimensional) {
std::vector<Index> shape{10, 15};
std::vector<Index> position{42, 43};
ResetBufferPositionAtBeginning(position);
EXPECT_THAT(position, ElementsAre(0, 0));
ResetBufferPositionAtEnd(shape, 4, position.data());
EXPECT_THAT(position, ElementsAre(9, 11));
}
TEST(ResetBufferPositionTest, ThreeDimensional) {
std::vector<Index> shape{10, 15, 19};
std::vector<Index> position{42, 43, 44};
ResetBufferPositionAtBeginning(position);
EXPECT_THAT(position, ElementsAre(0, 0, 0));
ResetBufferPositionAtEnd(shape, 4, position.data());
EXPECT_THAT(position, ElementsAre(9, 14, 15));
}
TEST(StepBufferPositionForwardTest, OneDimensional) {
std::vector<Index> shape{10};
std::vector<Index> position{0};
EXPECT_EQ(4, StepBufferPositionForward(
shape, 4, 4, position.data()));
EXPECT_THAT(position, ElementsAre(4));
EXPECT_EQ(2, StepBufferPositionForward(
shape, 4, 4, position.data()));
EXPECT_THAT(position, ElementsAre(8));
EXPECT_EQ(0, StepBufferPositionForward(
shape, 2, 4, position.data()));
EXPECT_THAT(position, ElementsAre(10));
}
TEST(StepBufferPositionForwardTest, TwoDimensional) {
std::vector<Index> shape{2, 10};
std::vector<Index> position{0, 0};
EXPECT_EQ(4, StepBufferPositionForward(
shape, 4, 4, position.data()));
EXPECT_THAT(position, ElementsAre(0, 4));
EXPECT_EQ(2, StepBufferPositionForward(
shape, 4, 4, position.data()));
EXPECT_THAT(position, ElementsAre(0, 8));
EXPECT_EQ(4, StepBufferPositionForward(
shape, 2, 4, position.data()));
EXPECT_THAT(position, ElementsAre(1, 0));
EXPECT_EQ(4, StepBufferPositionForward(
shape, 4, 4, position.data()));
EXPECT_THAT(position, ElementsAre(1, 4));
EXPECT_EQ(2, StepBufferPositionForward(
shape, 4, 4, position.data()));
EXPECT_THAT(position, ElementsAre(1, 8));
EXPECT_EQ(0, StepBufferPositionForward(
shape, 2, 4, position.data()));
EXPECT_THAT(position, ElementsAre(2, 0));
}
TEST(StepBufferPositionForwardTest, ThreeDimensional) {
std::vector<Index> shape{2, 2, 6};
std::vector<Index> position{0, 0, 0};
EXPECT_EQ(2, StepBufferPositionForward(
shape, 4, 4, position.data()));
EXPECT_THAT(position, ElementsAre(0, 0, 4));
EXPECT_EQ(4, StepBufferPositionForward(
shape, 2, 4, position.data()));
EXPECT_THAT(position, ElementsAre(0, 1, 0));
EXPECT_EQ(2, StepBufferPositionForward(
shape, 4, 4, position.data()));
EXPECT_THAT(position, ElementsAre(0, 1, 4));
EXPECT_EQ(4, StepBufferPositionForward(
shape, 2, 4, position.data()));
EXPECT_THAT(position, ElementsAre(1, 0, 0));
EXPECT_EQ(2, StepBufferPositionForward(
shape, 4, 4, position.data()));
EXPECT_THAT(position, ElementsAre(1, 0, 4));
EXPECT_EQ(4, StepBufferPositionForward(
shape, 2, 4, position.data()));
EXPECT_THAT(position, ElementsAre(1, 1, 0));
EXPECT_EQ(2, StepBufferPositionForward(
shape, 4, 4, position.data()));
EXPECT_THAT(position, ElementsAre(1, 1, 4));
EXPECT_EQ(0, StepBufferPositionForward(
shape, 2, 4, position.data()));
EXPECT_THAT(position, ElementsAre(2, 0, 0));
}
TEST(StepBufferPositionBackwardTest, OneDimensional) {
std::vector<Index> shape{10};
std::vector<Index> position{6};
EXPECT_EQ(4, StepBufferPositionBackward(shape, 4,
position.data()));
EXPECT_THAT(position, ElementsAre(2));
EXPECT_EQ(2, StepBufferPositionBackward(shape, 4,
position.data()));
EXPECT_THAT(position, ElementsAre(0));
EXPECT_EQ(0, StepBufferPositionBackward(shape, 4,
position.data()));
EXPECT_THAT(position, ElementsAre(0));
}
TEST(StepBufferPositionBackwardTest, TwoDimensional) {
std::vector<Index> shape{2, 10};
std::vector<Index> position{1, 6};
EXPECT_EQ(4, StepBufferPositionBackward(shape, 4,
position.data()));
EXPECT_THAT(position, ElementsAre(1, 2));
EXPECT_EQ(2, StepBufferPositionBackward(shape, 4,
position.data()));
EXPECT_THAT(position, ElementsAre(1, 0));
EXPECT_EQ(4, StepBufferPositionBackward(shape, 4,
position.data()));
EXPECT_THAT(position, ElementsAre(0, 6));
EXPECT_EQ(4, StepBufferPositionBackward(shape, 4,
position.data()));
EXPECT_THAT(position, ElementsAre(0, 2));
EXPECT_EQ(2, StepBufferPositionBackward(shape, 4,
position.data()));
EXPECT_THAT(position, ElementsAre(0, 0));
EXPECT_EQ(0, StepBufferPositionBackward(shape, 4,
position.data()));
EXPECT_THAT(position, ElementsAre(0, 0));
}
TEST(StepBufferPositionBackwardTest, ThreeDimensional) {
std::vector<Index> shape{2, 2, 10};
std::vector<Index> position{1, 1, 6};
EXPECT_EQ(4, StepBufferPositionBackward(shape, 4,
position.data()));
EXPECT_THAT(position, ElementsAre(1, 1, 2));
EXPECT_EQ(2, StepBufferPositionBackward(shape, 4,
position.data()));
EXPECT_THAT(position, ElementsAre(1, 1, 0));
EXPECT_EQ(4, StepBufferPositionBackward(shape, 4,
position.data()));
EXPECT_THAT(position, ElementsAre(1, 0, 6));
EXPECT_EQ(4, StepBufferPositionBackward(shape, 4,
position.data()));
EXPECT_THAT(position, ElementsAre(1, 0, 2));
EXPECT_EQ(2, StepBufferPositionBackward(shape, 4,
position.data()));
EXPECT_THAT(position, ElementsAre(1, 0, 0));
EXPECT_EQ(4, StepBufferPositionBackward(shape, 4,
position.data()));
EXPECT_THAT(position, ElementsAre(0, 1, 6));
EXPECT_EQ(4, StepBufferPositionBackward(shape, 4,
position.data()));
EXPECT_THAT(position, ElementsAre(0, 1, 2));
EXPECT_EQ(2, StepBufferPositionBackward(shape, 4,
position.data()));
EXPECT_THAT(position, ElementsAre(0, 1, 0));
EXPECT_EQ(4, StepBufferPositionBackward(shape, 4,
position.data()));
EXPECT_THAT(position, ElementsAre(0, 0, 6));
EXPECT_EQ(4, StepBufferPositionBackward(shape, 4,
position.data()));
EXPECT_THAT(position, ElementsAre(0, 0, 2));
EXPECT_EQ(2, StepBufferPositionBackward(shape, 4,
position.data()));
EXPECT_THAT(position, ElementsAre(0, 0, 0));
EXPECT_EQ(0, StepBufferPositionBackward(shape, 4,
position.data()));
EXPECT_THAT(position, ElementsAre(0, 0, 0));
}
TEST(NDIterationPositionStepperTest, Forward) {
std::vector<Index> shape({2, 3, 7});
NDIterationPositionStepper stepper(shape, 4);
EXPECT_THAT(stepper.shape(), ElementsAreArray(shape));
using PositionsAndBlockSizes =
std::vector<std::pair<std::vector<Index>, Index>>;
PositionsAndBlockSizes expected_results{
{{0, 0, 0}, 4}, {{0, 0, 4}, 3},
{{0, 1, 0}, 4}, {{0, 1, 4}, 3},
{{0, 2, 0}, 4}, {{0, 2, 4}, 3},
{{1, 0, 0}, 4}, {{1, 0, 4}, 3},
{{1, 1, 0}, 4}, {{1, 1, 4}, 3},
{{1, 2, 0}, 4}, {{1, 2, 4}, 3},
};
PositionsAndBlockSizes results;
for (Index block_size = stepper.ResetAtBeginning(); block_size;
block_size = stepper.StepForward(block_size)) {
results.emplace_back(
std::vector(stepper.position().begin(), stepper.position().end()),
block_size);
}
EXPECT_THAT(results, ElementsAreArray(expected_results));
}
TEST(NDIterationPositionStepperTest, Backward) {
std::vector<Index> shape({2, 3, 7});
NDIterationPositionStepper stepper(shape, 4);
EXPECT_THAT(stepper.shape(), ElementsAreArray(shape));
using PositionsAndBlockSizes =
std::vector<std::pair<std::vector<Index>, Index>>;
PositionsAndBlockSizes expected_results{
{{1, 2, 3}, 4}, {{1, 2, 0}, 3},
{{1, 1, 3}, 4}, {{1, 1, 0}, 3},
{{1, 0, 3}, 4}, {{1, 0, 0}, 3},
{{0, 2, 3}, 4}, {{0, 2, 0}, 3},
{{0, 1, 3}, 4}, {{0, 1, 0}, 3},
{{0, 0, 3}, 4}, {{0, 0, 0}, 3},
};
PositionsAndBlockSizes results;
for (Index block_size = stepper.ResetAtEnd(); block_size;
block_size = stepper.StepBackward()) {
results.emplace_back(
std::vector(stepper.position().begin(), stepper.position().end()),
block_size);
}
EXPECT_THAT(results, ElementsAreArray(expected_results));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/nditerable_util.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/nditerable_util_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
55717161-4c5c-47f4-9796-477dd280f8de | cpp | tensorflow/tensorflow | op_kernel_runner | tensorflow/core/tfrt/fallback/op_kernel_runner.cc | tensorflow/core/tfrt/fallback/op_kernel_runner_test.cc | #include "tensorflow/core/tfrt/fallback/op_kernel_runner.h"
#include <functional>
#include <memory>
#include <string>
#include <utility>
#include "tensorflow/core/platform/errors.h"
namespace tensorflow {
namespace tfrt_stub {
namespace {
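// The TFRT kernel fallback does not support ref-typed arguments, so reject
// any op whose inputs or outputs are refs.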
Status CheckOpDefCompatibility(const tensorflow::OpDef& op_def) {
auto check_arg_def = [&](const auto& arg_def) {
if (arg_def.is_ref())
return tensorflow::errors::Internal(
"TFRT kernel fallback error: Unsupported ref args in ",
op_def.name());
return absl::OkStatus();
};
for (const auto& arg_def : op_def.input_arg())
TF_RETURN_IF_ERROR(check_arg_def(arg_def));
for (const auto& arg_def : op_def.output_arg())
TF_RETURN_IF_ERROR(check_arg_def(arg_def));
return absl::OkStatus();
}
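// Builds a NodeDef for `op_def` with `num_args` placeholder inputs and
// attributes produced by `attr_builder`; OpDef defaults are inserted only for
// attributes the builder did not already set (map insert does not overwrite).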
absl::StatusOr<tensorflow::NodeDef> BuildNodeDef(
const tensorflow::OpDef& op_def, absl::string_view node_name, int num_args,
const std::function<Status(tensorflow::AttrValueMap*)>& attr_builder) {
tensorflow::NodeDef node_def;
node_def.set_name(std::string(node_name));
node_def.set_op(op_def.name());
for (int i = 0; i < num_args; ++i) {
node_def.add_input("dummy_input");
}
auto* attr_value_map = node_def.mutable_attr();
TF_RETURN_IF_ERROR(attr_builder(attr_value_map));
for (const auto& attr_def : op_def.attr()) {
if (attr_def.has_default_value()) {
attr_value_map->insert({attr_def.name(), attr_def.default_value()});
}
}
return node_def;
}
tensorflow::Status CreateOpKernel(
tensorflow::FunctionLibraryRuntime* flr, tensorflow::NodeDef ndef,
std::unique_ptr<tensorflow::OpKernel>* result) {
std::shared_ptr<const tensorflow::NodeProperties> props;
TF_RETURN_IF_ERROR(tensorflow::NodeProperties::CreateFromNodeDef(
std::move(ndef), flr->GetFunctionLibraryDefinition(), &props));
tensorflow::OpKernel* k = nullptr;
TF_RETURN_IF_ERROR(flr->CreateKernel(props, &k));
result->reset(k);
return absl::OkStatus();
}
}
absl::StatusOr<OpKernelRunner> OpKernelRunner::Create(
absl::string_view op_name, absl::string_view node_name,
absl::string_view device_name, int num_args,
const std::function<Status(tensorflow::AttrValueMap*)>& attr_builder,
const tensorflow::DeviceMgr& device_manager,
const tensorflow::ProcessFunctionLibraryRuntime&
process_function_library_runtime) {
tensorflow::Device* device = nullptr;
Status s = device_manager.LookupDevice(device_name, &device);
if (!s.ok()) {
LOG_EVERY_N_SEC(WARNING, 30)
<< "Failed to find device " << device_name
<< " when creating OpKernel: " << op_name << ". Error: " << s
<< ", fallback to host device instead";
device = device_manager.HostCPU();
}
return Create(op_name, node_name, num_args, attr_builder,
process_function_library_runtime, device);
}
absl::StatusOr<OpKernelRunner> OpKernelRunner::Create(
absl::string_view op_name, absl::string_view node_name, int num_args,
const std::function<Status(tensorflow::AttrValueMap*)>& attr_builder,
const tensorflow::ProcessFunctionLibraryRuntime&
process_function_library_runtime,
tensorflow::Device* device) {
const OpDef* op_def = nullptr;
TF_RETURN_IF_ERROR(tensorflow::OpRegistry::Global()->LookUpOpDef(
std::string(op_name), &op_def));
TF_RETURN_IF_ERROR(CheckOpDefCompatibility(*op_def));
VLOG(1) << "KernelFallbackExecuteCompat creating op from OpDef: "
<< op_def->DebugString();
TF_ASSIGN_OR_RETURN(auto node_def,
BuildNodeDef(*op_def, node_name, num_args, attr_builder));
VLOG(1) << "KernelFallbackExecuteCompat created NodeDef: "
<< node_def.DebugString();
tensorflow::FunctionLibraryRuntime* function_library_runtime = nullptr;
function_library_runtime =
process_function_library_runtime.GetFLR(device->name());
std::unique_ptr<OpKernel> op_kernel;
TF_RETURN_IF_ERROR(CreateOpKernel(function_library_runtime,
std::move(node_def), &op_kernel));
return OpKernelRunner(device, function_library_runtime, std::move(op_kernel));
}
OpKernelRunner::OpKernelRunner(
tensorflow::Device* device,
tensorflow::FunctionLibraryRuntime* function_library_runtime,
std::unique_ptr<tensorflow::OpKernel> op_kernel)
: op_kernel_(std::move(op_kernel)), info_(std::make_unique<Info>()) {
DCHECK(device);
DCHECK(function_library_runtime);
info_->device = device;
info_->function_library_runtime = function_library_runtime;
info_->resource_manager = device->resource_manager();
info_->is_async = (op_kernel_->AsAsync() != nullptr);
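  // Precompute per-input and per-output allocator attributes (host vs. device
  // memory) from the kernel's declared memory types.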
const auto& input_memory_types = op_kernel_->input_memory_types();
auto& input_alloc_attrs = info_->input_alloc_attrs;
auto& output_alloc_attrs = info_->output_alloc_attrs;
input_alloc_attrs.resize(op_kernel_->num_inputs());
for (size_t i = 0, e = op_kernel_->num_inputs(); i < e; ++i) {
input_alloc_attrs[i].set_on_host(input_memory_types[i] ==
tensorflow::HOST_MEMORY);
}
const auto& output_memory_types = op_kernel_->output_memory_types();
output_alloc_attrs.resize(op_kernel_->num_outputs());
for (size_t i = 0, e = output_alloc_attrs.size(); i < e; ++i) {
output_alloc_attrs[i].set_on_host(output_memory_types[i] ==
tensorflow::HOST_MEMORY);
}
input_alloc_attrs_ = input_alloc_attrs;
output_alloc_attrs_ = output_alloc_attrs;
}
void OpKernelRunner::RunAsync(OpKernelContext* context,
AsyncOpKernel::DoneCallback done_callback) const {
DVLOG(1) << "KernelFallbackExecuteCompat Running Async Op: "
<< op_kernel_->def().DebugString()
<< ", on Device: " << context->device()->name();
AsyncOpKernel* async = op_kernel_->AsAsync();
DCHECK(async);
async->ComputeAsync(context, std::move(done_callback));
}
}
} | #include "tensorflow/core/tfrt/fallback/op_kernel_runner.h"
#include <memory>
#include <string>
#include <vector>
#include "tensorflow/core/framework/device_base.h"
#include "tensorflow/core/framework/device_factory.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/public/session_options.h"
#include "tensorflow/core/tfrt/fallback/fallback_state.h"
#include "tensorflow/core/tfrt/fallback/op_kernel_runner_cache.h"
namespace tensorflow {
namespace tfrt_stub {
namespace {
using ::testing::IsNull;
using ::testing::SizeIs;
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
constexpr const char* kDeviceType = "GPU";
#else
constexpr const char* kDeviceType = "CPU";
#endif
class TestOpKernel : public OpKernel {
public:
using OpKernel::OpKernel;
~TestOpKernel() override = default;
void Compute(OpKernelContext* context) override {
context->set_output(0, context->input(0));
}
};
REGISTER_KERNEL_BUILDER(Name("TestOp").Device(DEVICE_CPU), TestOpKernel);
REGISTER_OP("TestOp").Input("x: int32").Output("y: int32");
TEST(OpKernelRunnerTest, Create) {
tensorflow::SessionOptions session_options;
tensorflow::FunctionDefLibrary fdef_lib;
TF_ASSERT_OK_AND_ASSIGN(auto fallback_state,
FallbackState::Create(session_options, fdef_lib));
TF_ASSERT_OK_AND_ASSIGN(
auto runner,
OpKernelRunner::Create(
"TestOp", "TestOp_node_name",
"/job:localhost/replica:0/task:0/device:CPU:0",
1,
[](tensorflow::AttrValueMap*) { return absl::OkStatus(); },
fallback_state->device_manager(),
fallback_state->process_function_library_runtime()));
ASSERT_TRUE(runner);
EXPECT_EQ(runner.op_kernel()->name(), "TestOp_node_name");
}
TEST(OpKernelRunnerTest, OpKernelRunnerCache) {
tensorflow::SessionOptions session_options;
tensorflow::FunctionDefLibrary fdef_lib;
TF_ASSERT_OK_AND_ASSIGN(auto fallback_state,
FallbackState::Create(session_options, fdef_lib));
OpKernelRunnerCache cache;
tfrt::Location loc(nullptr, 100);
TF_ASSERT_OK_AND_ASSIGN(
auto* runner,
cache.GetOrCreate(
loc,
"TestOp",
"/job:localhost/replica:0/task:0/device:CPU:0",
1,
[](tensorflow::AttrValueMap*) { return absl::OkStatus(); },
fallback_state->device_manager(),
fallback_state->process_function_library_runtime()));
ASSERT_TRUE(runner);
EXPECT_EQ(runner->op_kernel()->name(), "TestOp_100_0");
TF_ASSERT_OK_AND_ASSIGN(
runner,
cache.GetOrCreate(
loc,
"TestOp",
"/job:localhost/replica:0/task:0/device:CPU:0",
1,
[](tensorflow::AttrValueMap*) { return absl::OkStatus(); },
fallback_state->device_manager(),
fallback_state->process_function_library_runtime()));
ASSERT_TRUE(runner);
EXPECT_EQ(runner->op_kernel()->name(), "TestOp_100_0");
}
TEST(OpKernelRunnerTest, OpKernelRunState) {
SessionOptions options;
auto* device_count = options.config.mutable_device_count();
device_count->insert({kDeviceType, 1});
std::vector<std::unique_ptr<Device>> devices;
TF_ASSERT_OK(DeviceFactory::GetFactory(kDeviceType)
->CreateDevices(options,
"/job:a/replica:0/task:0",
&devices));
ASSERT_EQ(devices.size(), 1);
OpKernelContext::Params params;
params.device = devices[0].get();
params.ensure_eigen_gpu_device();
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
ASSERT_THAT(params.eigen_gpu_device, ::testing::NotNull());
#endif
Tensor a(DT_FLOAT, TensorShape({}));
Tensor b(DT_INT32, TensorShape({}));
absl::InlinedVector<TensorValue, 4UL> inputs{TensorValue(&a),
TensorValue(&b)};
params.inputs = inputs;
Tensor c(DT_UINT8, TensorShape({}));
absl::InlinedVector<TensorValue, 4UL> new_inputs{TensorValue(&c)};
OpKernelRunState run_state(new_inputs, params);
EXPECT_THAT(run_state.input_tf_tensors, SizeIs(1));
EXPECT_THAT(run_state.input_tf_tensor_values, SizeIs(1));
EXPECT_EQ(run_state.params.inputs.data(),
run_state.input_tf_tensor_values.data());
EXPECT_THAT(run_state.params.eigen_gpu_device, IsNull());
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tfrt/fallback/op_kernel_runner.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tfrt/fallback/op_kernel_runner_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
0b3cd866-7823-44dc-8ae9-23c7a7b89396 | cpp | tensorflow/tensorflow | resource_handle | tensorflow/core/framework/resource_handle.cc | tensorflow/core/framework/resource_handle_test.cc | #include "tensorflow/core/framework/resource_handle.h"
#include <string>
#include <utility>
#include <vector>
#include "absl/strings/str_format.h"
#include "tensorflow/core/framework/resource_handle.pb.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/demangle.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/macros.h"
namespace tensorflow {
namespace {
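// Renders a handle's dtype/shape pairs as a human-readable list, e.g.
// "[ DType enum: 3, Shape: [4,8] ]".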
std::string DtypeAndShapesToString(
const std::vector<DtypeAndPartialTensorShape>& dtype_and_shapes) {
std::vector<std::string> dtype_and_shape_strings;
dtype_and_shape_strings.reserve(dtype_and_shapes.size());
for (const DtypeAndPartialTensorShape& dtype_and_shape : dtype_and_shapes) {
dtype_and_shape_strings.push_back(
absl::StrFormat("DType enum: %d, Shape: %s", dtype_and_shape.dtype,
dtype_and_shape.shape.DebugString()));
}
return absl::StrFormat("[ %s ]", absl::StrJoin(dtype_and_shape_strings, ","));
}
}
constexpr const char* ResourceHandle::ANONYMOUS_NAME;
ResourceHandle::ResourceHandle() {}
ResourceHandle::ResourceHandle(const ResourceHandleProto& proto) {
TF_CHECK_OK(FromProto(proto));
}
Status ResourceHandle::BuildResourceHandle(const ResourceHandleProto& proto,
ResourceHandle* out) {
if (out == nullptr)
return errors::Internal(
"BuildResourceHandle() was called with nullptr for the output");
return out->FromProto(proto);
}
ResourceHandle::~ResourceHandle() {}
void ResourceHandle::AsProto(ResourceHandleProto* proto) const {
proto->set_device(device());
proto->set_container(container());
proto->set_name(name());
proto->set_hash_code(hash_code());
proto->set_maybe_type_name(maybe_type_name());
for (const auto& dtype_and_shape_pair : dtypes_and_shapes_) {
auto dtype_and_shape = proto->add_dtypes_and_shapes();
dtype_and_shape->set_dtype(dtype_and_shape_pair.dtype);
dtype_and_shape_pair.shape.AsProto(dtype_and_shape->mutable_shape());
}
}
Status ResourceHandle::FromProto(const ResourceHandleProto& proto) {
set_device(proto.device());
set_container(proto.container());
set_name(proto.name());
set_hash_code(proto.hash_code());
set_maybe_type_name(proto.maybe_type_name());
std::vector<DtypeAndPartialTensorShape> dtypes_and_shapes;
for (const auto& dtype_and_shape : proto.dtypes_and_shapes()) {
DataType dtype = dtype_and_shape.dtype();
PartialTensorShape shape;
Status s = PartialTensorShape::BuildPartialTensorShape(
dtype_and_shape.shape(), &shape);
if (!s.ok()) {
return s;
}
dtypes_and_shapes.push_back(DtypeAndPartialTensorShape{dtype, shape});
}
dtypes_and_shapes_ = std::move(dtypes_and_shapes);
return absl::OkStatus();
}
string ResourceHandle::SerializeAsString() const {
ResourceHandleProto proto;
AsProto(&proto);
return proto.SerializeAsString();
}
bool ResourceHandle::ParseFromString(const string& s) {
ResourceHandleProto proto;
return proto.ParseFromString(s) && FromProto(proto).ok();
}
string ResourceHandle::DebugString() const {
return absl::StrFormat(
"device: %s container: %s name: %s hash_code: 0x%X maybe_type_name %s, "
"dtype and shapes : %s",
device(), container(), name(), hash_code(),
port::Demangle(maybe_type_name()),
DtypeAndShapesToString(dtypes_and_shapes()));
}
string ResourceHandle::SummarizeValue() const {
return absl::StrFormat(
"ResourceHandle(name=\"%s\", device=\"%s\", container=\"%s\", "
"type=\"%s\", dtype and shapes : \"%s\")",
name(), device(), container(), port::Demangle(maybe_type_name()),
DtypeAndShapesToString(dtypes_and_shapes()));
}
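// Creates a handle that keeps `resource` alive through reference counting
// rather than a resource-manager container; the handle name embeds a
// process-wide unique id.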
ResourceHandle ResourceHandle::MakeRefCountingHandle(
ResourceBase* resource, const string& device_name,
const TypeIndex& type_index,
const std::vector<DtypeAndPartialTensorShape>& dtypes_and_shapes,
const absl::optional<ManagedStackTrace>& definition_stack_trace) {
ResourceHandle result;
result.resource_.reset(resource, false);
result.set_device(device_name);
result.set_container("Anonymous");
result.set_definition_stack_trace(definition_stack_trace);
auto resource_id = GenerateUniqueId();
std::string handle_name = resource->MakeRefCountingHandleName(resource_id);
result.set_name(handle_name);
result.set_hash_code(type_index.hash_code());
result.set_maybe_type_name(type_index.name());
result.set_dtypes_and_shapes(dtypes_and_shapes);
return result;
}
Status ResourceHandle::ValidateType(const TypeIndex& type_index) const {
if (type_index.hash_code() != hash_code()) {
return errors::InvalidArgument(
"Trying to access a handle's resource using the wrong type. ",
"The handle points to a resource (name '", name(), "') of type '",
port::Demangle(maybe_type_name()), "' (hash code ", hash_code(),
") but you are trying to access the resource as type '",
port::Demangle(type_index.name()), "' (hash code ",
type_index.hash_code(), ")");
}
return absl::OkStatus();
}
std::atomic<int64_t> ResourceHandle::current_id_;
int64_t ResourceHandle::GenerateUniqueId() { return current_id_.fetch_add(1); }
string ProtoDebugString(const ResourceHandle& handle) {
return handle.DebugString();
}
void EncodeResourceHandleList(const ResourceHandle* p, int64_t n,
std::unique_ptr<port::StringListEncoder> e) {
ResourceHandleProto proto;
for (int i = 0; i < n; ++i) {
p[i].AsProto(&proto);
e->Append(proto);
}
e->Finalize();
}
bool DecodeResourceHandleList(std::unique_ptr<port::StringListDecoder> d,
ResourceHandle* ps, int64_t n) {
std::vector<uint32> sizes(n);
if (!d->ReadSizes(&sizes)) return false;
ResourceHandleProto proto;
for (int i = 0; i < n; ++i) {
if (!proto.ParseFromArray(d->Data(sizes[i]), sizes[i])) {
return false;
}
if (!ps[i].FromProto(proto).ok()) {
return false;
}
}
return true;
}
} | #include "tensorflow/core/framework/resource_handle.h"
#include <memory>
#include <string>
#include "tensorflow/core/framework/resource_handle.pb.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
class MockResource : public ResourceBase {
public:
MockResource(bool* alive, int payload) : alive_(alive), payload_(payload) {
if (alive_ != nullptr) {
*alive_ = true;
}
}
~MockResource() override {
if (alive_ != nullptr) {
*alive_ = false;
}
}
string DebugString() const override { return ""; }
bool* alive_;
int payload_;
};
class ResourceHandleTest : public ::testing::Test {};
TEST_F(ResourceHandleTest, RefCounting) {
const int payload = -123;
bool alive = false;
auto resource = new MockResource(&alive, payload);
EXPECT_TRUE(alive);
{
auto handle =
ResourceHandle::MakeRefCountingHandle(resource, "cpu", {}, {});
EXPECT_TRUE(alive);
EXPECT_EQ(resource->RefCount(), 1);
{
auto handle_copy = handle;
EXPECT_TRUE(alive);
EXPECT_EQ(resource->RefCount(), 2);
}
EXPECT_TRUE(alive);
EXPECT_EQ(resource->RefCount(), 1);
}
EXPECT_FALSE(alive);
}
TEST_F(ResourceHandleTest, SummarizeValue) {
ResourceHandleProto proto;
TensorShapeProto shape;
shape.add_dim()->set_size(4);
shape.add_dim()->set_size(8);
proto.set_device("cpu:0");
proto.set_container("test_container");
proto.set_name("test_var");
auto dtypes_and_shapes = proto.add_dtypes_and_shapes();
dtypes_and_shapes->set_dtype(DT_INT32);
dtypes_and_shapes->mutable_shape()->MergeFrom(shape);
auto handle = std::make_unique<ResourceHandle>(proto);
EXPECT_EQ(handle->SummarizeValue(),
"ResourceHandle(name=\"test_var\", device=\"cpu:0\", "
"container=\"test_container\", type=\"\", dtype and shapes : \"[ "
"DType enum: 3, Shape: [4,8] ]\")");
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/framework/resource_handle.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/framework/resource_handle_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
cadcf136-1281-4209-bb13-7b782228d794 | cpp | tensorflow/tensorflow | object_detection_average_precision_stage | tensorflow/lite/tools/evaluation/stages/object_detection_average_precision_stage.cc | tensorflow/lite/tools/evaluation/stages/object_detection_average_precision_stage_test.cc | #include "tensorflow/lite/tools/evaluation/stages/object_detection_average_precision_stage.h"
#include <stdint.h>
#include <numeric>
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/tools/evaluation/proto/evaluation_config.pb.h"
#include "tensorflow/lite/tools/evaluation/proto/evaluation_stages.pb.h"
#include "tensorflow/lite/tools/evaluation/stages/utils/image_metrics.h"
namespace tflite {
namespace evaluation {
namespace {
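// Translates a proto ObjectInstance (normalized bounding box plus score) into
// the image::Detection struct used by the AP computation, tagged with the
// image it came from.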
image::Detection ConvertProtoToDetection(
const ObjectDetectionResult::ObjectInstance& input, int image_id) {
image::Detection detection;
detection.box.x.min = input.bounding_box().normalized_left();
detection.box.x.max = input.bounding_box().normalized_right();
detection.box.y.min = input.bounding_box().normalized_top();
detection.box.y.max = input.bounding_box().normalized_bottom();
detection.imgid = image_id;
detection.score = input.score();
return detection;
}
}
TfLiteStatus ObjectDetectionAveragePrecisionStage::Init() {
num_classes_ = config_.specification()
.object_detection_average_precision_params()
.num_classes();
if (num_classes_ <= 0) {
LOG(ERROR) << "num_classes cannot be <= 0";
return kTfLiteError;
}
for (int i = 0; i < num_classes_; ++i) {
ground_truth_object_vectors_.emplace_back();
predicted_object_vectors_.emplace_back();
}
return kTfLiteOk;
}
TfLiteStatus ObjectDetectionAveragePrecisionStage::Run() {
for (int i = 0; i < ground_truth_objects_.objects_size(); ++i) {
const int class_id = ground_truth_objects_.objects(i).class_id();
if (class_id >= num_classes_) {
LOG(ERROR) << "Encountered invalid class ID: " << class_id;
return kTfLiteError;
}
ground_truth_object_vectors_[class_id].push_back(ConvertProtoToDetection(
ground_truth_objects_.objects(i), current_image_index_));
}
for (int i = 0; i < predicted_objects_.objects_size(); ++i) {
const int class_id = predicted_objects_.objects(i).class_id();
if (class_id >= num_classes_) {
LOG(ERROR) << "Encountered invalid class ID: " << class_id;
return kTfLiteError;
}
predicted_object_vectors_[class_id].push_back(ConvertProtoToDetection(
predicted_objects_.objects(i), current_image_index_));
}
current_image_index_++;
return kTfLiteOk;
}
EvaluationStageMetrics ObjectDetectionAveragePrecisionStage::LatestMetrics() {
EvaluationStageMetrics metrics;
if (current_image_index_ == 0) return metrics;
metrics.set_num_runs(current_image_index_);
auto* ap_metrics = metrics.mutable_process_metrics()
->mutable_object_detection_average_precision_metrics();
auto& ap_params =
config_.specification().object_detection_average_precision_params();
std::vector<float> iou_thresholds;
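  // Default to IoU thresholds 0.5, 0.55, ..., 0.95 (the COCO convention) when
  // the config does not specify any.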
if (ap_params.iou_thresholds_size() == 0) {
float threshold = 0.5;
for (int i = 0; i < 10; ++i) {
iou_thresholds.push_back(threshold + i * 0.05);
}
} else {
for (auto& threshold : ap_params.iou_thresholds()) {
iou_thresholds.push_back(threshold);
}
}
image::AveragePrecision::Options opts;
opts.num_recall_points = ap_params.num_recall_points();
float ap_sum = 0;
int num_total_aps = 0;
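  // Overall mAP averages AP over every (IoU threshold, class) pair that has
  // ground-truth or predicted objects; a class with predictions but no ground
  // truth contributes an AP of 0.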
for (float threshold : iou_thresholds) {
float threshold_ap_sum = 0;
int num_counted_classes = 0;
for (int i = 0; i < num_classes_; ++i) {
if (ground_truth_object_vectors_[i].empty() &&
predicted_object_vectors_[i].empty())
continue;
float ap_value = 0.0;
if (!ground_truth_object_vectors_[i].empty()) {
opts.iou_threshold = threshold;
ap_value = image::AveragePrecision(opts).FromBoxes(
ground_truth_object_vectors_[i], predicted_object_vectors_[i]);
}
ap_sum += ap_value;
num_total_aps += 1;
threshold_ap_sum += ap_value;
num_counted_classes += 1;
}
if (num_counted_classes == 0) continue;
auto* threshold_ap = ap_metrics->add_individual_average_precisions();
threshold_ap->set_average_precision(threshold_ap_sum / num_counted_classes);
threshold_ap->set_iou_threshold(threshold);
}
if (num_total_aps == 0) return metrics;
ap_metrics->set_overall_mean_average_precision(ap_sum / num_total_aps);
return metrics;
}
}
} | #include "tensorflow/lite/tools/evaluation/stages/object_detection_average_precision_stage.h"
#include <stdint.h>
#include <string>
#include <gtest/gtest.h>
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/tools/evaluation/proto/evaluation_config.pb.h"
#include "tensorflow/lite/tools/evaluation/proto/evaluation_stages.pb.h"
namespace tflite {
namespace evaluation {
namespace {
constexpr char kAveragePrecisionStageName[] =
"object_detection_average_precision";
EvaluationStageConfig GetAveragePrecisionStageConfig(int num_classes) {
EvaluationStageConfig config;
config.set_name(kAveragePrecisionStageName);
auto* params = config.mutable_specification()
->mutable_object_detection_average_precision_params();
params->add_iou_thresholds(0.5);
params->add_iou_thresholds(0.999);
params->set_num_classes(num_classes);
return config;
}
ObjectDetectionResult GetGroundTruthDetectionResult() {
ObjectDetectionResult ground_truth;
ground_truth.set_image_name("some_image.jpg");
auto* object_1 = ground_truth.add_objects();
object_1->set_class_id(1);
auto* object_1_bbox = object_1->mutable_bounding_box();
object_1_bbox->set_normalized_top(0.5);
object_1_bbox->set_normalized_bottom(1.0);
object_1_bbox->set_normalized_left(0.5);
object_1_bbox->set_normalized_right(1.0);
auto* object_2 = ground_truth.add_objects();
object_2->set_class_id(1);
auto* object_2_bbox = object_2->mutable_bounding_box();
object_2_bbox->set_normalized_top(0);
object_2_bbox->set_normalized_bottom(1.0);
object_2_bbox->set_normalized_left(0);
object_2_bbox->set_normalized_right(1.0);
auto* object_3 = ground_truth.add_objects();
object_3->set_class_id(2);
auto* object_3_bbox = object_3->mutable_bounding_box();
object_3_bbox->set_normalized_top(0.5);
object_3_bbox->set_normalized_bottom(1.0);
object_3_bbox->set_normalized_left(0.5);
object_3_bbox->set_normalized_right(1.0);
return ground_truth;
}
ObjectDetectionResult GetPredictedDetectionResult() {
ObjectDetectionResult predicted;
auto* object_1 = predicted.add_objects();
object_1->set_class_id(1);
object_1->set_score(0.8);
auto* object_1_bbox = object_1->mutable_bounding_box();
object_1_bbox->set_normalized_top(0.091);
object_1_bbox->set_normalized_bottom(1.0);
object_1_bbox->set_normalized_left(0.091);
object_1_bbox->set_normalized_right(1.0);
auto* object_2 = predicted.add_objects();
object_2->set_class_id(1);
object_2->set_score(0.9);
auto* object_2_bbox = object_2->mutable_bounding_box();
object_2_bbox->set_normalized_top(0.474);
object_2_bbox->set_normalized_bottom(1.0);
object_2_bbox->set_normalized_left(0.474);
object_2_bbox->set_normalized_right(1.0);
auto* object_3 = predicted.add_objects();
object_3->set_class_id(1);
object_3->set_score(0.95);
auto* object_3_bbox = object_3->mutable_bounding_box();
object_3_bbox->set_normalized_top(0.474);
object_3_bbox->set_normalized_bottom(1.0);
object_3_bbox->set_normalized_left(0.474);
object_3_bbox->set_normalized_right(1.0);
return predicted;
}
TEST(ObjectDetectionAveragePrecisionStage, ZeroClasses) {
EvaluationStageConfig config = GetAveragePrecisionStageConfig(0);
ObjectDetectionAveragePrecisionStage stage =
ObjectDetectionAveragePrecisionStage(config);
EXPECT_EQ(stage.Init(), kTfLiteError);
}
TEST(ObjectDetectionAveragePrecisionStage, SampleInputs) {
EvaluationStageConfig config = GetAveragePrecisionStageConfig(3);
ObjectDetectionAveragePrecisionStage stage =
ObjectDetectionAveragePrecisionStage(config);
EXPECT_EQ(stage.Init(), kTfLiteOk);
const ObjectDetectionResult ground_truth = GetGroundTruthDetectionResult();
const ObjectDetectionResult predicted = GetPredictedDetectionResult();
stage.SetEvalInputs(ObjectDetectionResult(), ground_truth);
EXPECT_EQ(stage.Run(), kTfLiteOk);
EvaluationStageMetrics metrics = stage.LatestMetrics();
ObjectDetectionAveragePrecisionMetrics detection_metrics =
metrics.process_metrics().object_detection_average_precision_metrics();
EXPECT_FLOAT_EQ(detection_metrics.overall_mean_average_precision(), 0.0);
EXPECT_EQ(detection_metrics.individual_average_precisions_size(), 2);
stage.SetEvalInputs(ground_truth, ground_truth);
EXPECT_EQ(stage.Run(), kTfLiteOk);
metrics = stage.LatestMetrics();
detection_metrics =
metrics.process_metrics().object_detection_average_precision_metrics();
EXPECT_FLOAT_EQ(detection_metrics.overall_mean_average_precision(),
0.50495052);
EXPECT_EQ(metrics.num_runs(), 2);
stage.SetEvalInputs(predicted, ground_truth);
EXPECT_EQ(stage.Run(), kTfLiteOk);
metrics = stage.LatestMetrics();
detection_metrics =
metrics.process_metrics().object_detection_average_precision_metrics();
EXPECT_FLOAT_EQ(
detection_metrics.individual_average_precisions(0).iou_threshold(), 0.5);
EXPECT_FLOAT_EQ(
detection_metrics.individual_average_precisions(0).average_precision(),
0.4841584);
EXPECT_FLOAT_EQ(
detection_metrics.individual_average_precisions(1).iou_threshold(),
0.999);
EXPECT_FLOAT_EQ(
detection_metrics.individual_average_precisions(1).average_precision(),
0.33663365);
EXPECT_FLOAT_EQ(detection_metrics.overall_mean_average_precision(),
0.41039604);
}
TEST(ObjectDetectionAveragePrecisionStage, DefaultIoUThresholds) {
EvaluationStageConfig config = GetAveragePrecisionStageConfig(3);
auto* params = config.mutable_specification()
->mutable_object_detection_average_precision_params();
params->clear_iou_thresholds();
ObjectDetectionAveragePrecisionStage stage =
ObjectDetectionAveragePrecisionStage(config);
EXPECT_EQ(stage.Init(), kTfLiteOk);
const ObjectDetectionResult ground_truth = GetGroundTruthDetectionResult();
const ObjectDetectionResult predicted = GetPredictedDetectionResult();
stage.SetEvalInputs(ground_truth, ground_truth);
EXPECT_EQ(stage.Run(), kTfLiteOk);
EvaluationStageMetrics metrics = stage.LatestMetrics();
ObjectDetectionAveragePrecisionMetrics detection_metrics =
metrics.process_metrics().object_detection_average_precision_metrics();
EXPECT_FLOAT_EQ(detection_metrics.overall_mean_average_precision(), 1.0);
EXPECT_EQ(detection_metrics.individual_average_precisions_size(), 10);
EXPECT_FLOAT_EQ(
detection_metrics.individual_average_precisions(0).iou_threshold(), 0.5);
EXPECT_FLOAT_EQ(
detection_metrics.individual_average_precisions(9).iou_threshold(), 0.95);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/tools/evaluation/stages/object_detection_average_precision_stage.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/tools/evaluation/stages/object_detection_average_precision_stage_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
8244d340-5db9-4480-9ac7-8d78b3bb8bef | cpp | tensorflow/tensorflow | op_resolver_internal | tensorflow/lite/core/api/op_resolver_internal.h | tensorflow/lite/core/api/op_resolver_internal_test.cc | #ifndef TENSORFLOW_LITE_CORE_API_OP_RESOLVER_INTERNAL_H_
#define TENSORFLOW_LITE_CORE_API_OP_RESOLVER_INTERNAL_H_
#include <memory>
#include "tensorflow/lite/core/api/op_resolver.h"
namespace tflite {
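// Static-only helper (constructor deleted) that exposes OpResolver internals
// to other parts of TFLite: whether a resolver may contain user-defined ops,
// and its shared operator cache.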
class OpResolverInternal {
public:
OpResolverInternal() = delete;
static bool MayContainUserDefinedOps(const OpResolver& op_resolver) {
return op_resolver.MayContainUserDefinedOps();
}
static std::shared_ptr<::tflite::internal::OperatorsCache> GetSharedCache(
const ::tflite::OpResolver& op_resolver) {
return op_resolver.registration_externals_cache_;
}
};
}
#endif | #include "tensorflow/lite/core/api/op_resolver_internal.h"
#include <gtest/gtest.h>
#include "tensorflow/lite/core/api/op_resolver.h"
#include "tensorflow/lite/core/kernels/builtin_op_kernels.h"
#include "tensorflow/lite/core/kernels/register.h"
#include "tensorflow/lite/mutable_op_resolver.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
using ops::builtin::BuiltinOpResolver;
using ops::builtin::BuiltinOpResolverWithoutDefaultDelegates;
namespace {
TEST(OpResolverInternal, ObjectSlicing) {
BuiltinOpResolver op_resolver1;
EXPECT_FALSE(op_resolver1.GetDelegateCreators().empty());
BuiltinOpResolverWithoutDefaultDelegates op_resolver2;
EXPECT_TRUE(op_resolver2.GetDelegateCreators().empty());
BuiltinOpResolver op_resolver3(op_resolver2);
EXPECT_TRUE(op_resolver3.GetDelegateCreators().empty());
MutableOpResolver op_resolver4(op_resolver1);
EXPECT_FALSE(op_resolver4.GetDelegateCreators().empty());
MutableOpResolver op_resolver5(op_resolver2);
EXPECT_TRUE(op_resolver5.GetDelegateCreators().empty());
}
TEST(OpResolverInternal, BuiltinOpResolverContainsOnlyPredefinedOps) {
BuiltinOpResolver builtin_op_resolver;
EXPECT_EQ(OpResolverInternal::MayContainUserDefinedOps(builtin_op_resolver),
false);
}
TEST(OpResolverInternal, EmptyMutableOpResolverContainsOnlyPredefinedOps) {
MutableOpResolver empty_mutable_op_resolver;
EXPECT_EQ(
OpResolverInternal::MayContainUserDefinedOps(empty_mutable_op_resolver),
false);
}
TEST(OpResolverInternal,
MutableOpResolverAddBuiltinNullptrContainsOnlyPredefinedOps) {
MutableOpResolver mutable_op_resolver;
mutable_op_resolver.AddBuiltin(BuiltinOperator_ADD, nullptr, 1);
EXPECT_EQ(OpResolverInternal::MayContainUserDefinedOps(mutable_op_resolver),
false);
}
TEST(OpResolverInternal,
MutableOpResolverRedefineBuiltinDoesNotContainOnlyPredefinedOps) {
MutableOpResolver mutable_op_resolver;
mutable_op_resolver.AddBuiltin(BuiltinOperator_ADD,
tflite::ops::builtin::Register_MUL(), 1);
EXPECT_EQ(OpResolverInternal::MayContainUserDefinedOps(mutable_op_resolver),
true);
}
TEST(OpResolverInternal,
MutableOpResolverAddCustomDoesNotContainOnlyPredefinedOps) {
MutableOpResolver mutable_op_resolver;
mutable_op_resolver.AddCustom("my_custom_op",
tflite::ops::builtin::Register_ADD(), 1);
EXPECT_EQ(OpResolverInternal::MayContainUserDefinedOps(mutable_op_resolver),
true);
}
class ChainableOpResolver : public MutableOpResolver {
public:
using MutableOpResolver::ChainOpResolver;
};
TEST(OpResolverInternal, ChainedBuiltinOpResolverContainOnlyPredefinedOps) {
BuiltinOpResolver builtin_op_resolver;
ChainableOpResolver chainable_op_resolver;
chainable_op_resolver.ChainOpResolver(&builtin_op_resolver);
EXPECT_EQ(OpResolverInternal::MayContainUserDefinedOps(chainable_op_resolver),
false);
}
TEST(OpResolverInternal,
ChainedCustomOpResolverDoesNotContainOnlyPredefinedOps) {
MutableOpResolver mutable_op_resolver;
mutable_op_resolver.AddCustom("my_custom_op",
tflite::ops::builtin::Register_ADD(), 1);
ChainableOpResolver chainable_op_resolver;
chainable_op_resolver.ChainOpResolver(&mutable_op_resolver);
EXPECT_EQ(OpResolverInternal::MayContainUserDefinedOps(chainable_op_resolver),
true);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/core/api/op_resolver_internal.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/core/api/op_resolver_internal_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
ea068d7a-4c02-4749-9466-cbba7ff68004 | cpp | google/cel-cpp | data | common/data.h | common/data_test.cc | #ifndef THIRD_PARTY_CEL_CPP_COMMON_DATA_H_
#define THIRD_PARTY_CEL_CPP_COMMON_DATA_H_
#include <cstdint>
#include "absl/base/nullability.h"
#include "absl/log/absl_check.h"
#include "common/internal/metadata.h"
#include "google/protobuf/arena.h"
namespace cel {
class Data;
template <typename T>
struct Ownable;
template <typename T>
struct Borrowable;
namespace common_internal {
class ReferenceCount;
void SetDataReferenceCount(
absl::Nonnull<const Data*> data,
absl::Nonnull<const ReferenceCount*> refcount) noexcept;
absl::Nullable<const ReferenceCount*> GetDataReferenceCount(
absl::Nonnull<const Data*> data) noexcept;
}
class Data {
public:
virtual ~Data() = default;
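  // `owner_` packs either an arena pointer or a reference-count pointer
  // together with a tag bit that records which one is stored.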
absl::Nullable<google::protobuf::Arena*> GetArena() const noexcept {
return (owner_ & kOwnerBits) == kOwnerArenaBit
? reinterpret_cast<google::protobuf::Arena*>(owner_ & kOwnerPointerMask)
: nullptr;
}
protected:
Data() noexcept : Data(nullptr) {}
Data(const Data&) = default;
Data(Data&&) = default;
Data& operator=(const Data&) = default;
Data& operator=(Data&&) = default;
explicit Data(absl::Nullable<google::protobuf::Arena*> arena) noexcept
: owner_(reinterpret_cast<uintptr_t>(arena) |
(arena != nullptr ? kOwnerArenaBit : kOwnerNone)) {}
private:
static constexpr uintptr_t kOwnerNone = common_internal::kMetadataOwnerNone;
static constexpr uintptr_t kOwnerReferenceCountBit =
common_internal::kMetadataOwnerReferenceCountBit;
static constexpr uintptr_t kOwnerArenaBit =
common_internal::kMetadataOwnerArenaBit;
static constexpr uintptr_t kOwnerBits = common_internal::kMetadataOwnerBits;
static constexpr uintptr_t kOwnerPointerMask =
common_internal::kMetadataOwnerPointerMask;
friend void common_internal::SetDataReferenceCount(
absl::Nonnull<const Data*> data,
absl::Nonnull<const common_internal::ReferenceCount*> refcount) noexcept;
friend absl::Nullable<const common_internal::ReferenceCount*>
common_internal::GetDataReferenceCount(
absl::Nonnull<const Data*> data) noexcept;
template <typename T>
friend struct Ownable;
template <typename T>
friend struct Borrowable;
mutable uintptr_t owner_ = kOwnerNone;
};
namespace common_internal {
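// Attaches `refcount` as the owner of `data`; the data must not already be
// owned by an arena or by another reference count.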
inline void SetDataReferenceCount(
absl::Nonnull<const Data*> data,
absl::Nonnull<const ReferenceCount*> refcount) noexcept {
ABSL_DCHECK_EQ(data->owner_, Data::kOwnerNone);
data->owner_ =
reinterpret_cast<uintptr_t>(refcount) | Data::kOwnerReferenceCountBit;
}
inline absl::Nullable<const ReferenceCount*> GetDataReferenceCount(
absl::Nonnull<const Data*> data) noexcept {
return (data->owner_ & Data::kOwnerBits) == Data::kOwnerReferenceCountBit
? reinterpret_cast<const ReferenceCount*>(data->owner_ &
Data::kOwnerPointerMask)
: nullptr;
}
}
}
#endif | #include "common/data.h"
#include "absl/base/nullability.h"
#include "common/internal/reference_count.h"
#include "internal/testing.h"
#include "google/protobuf/arena.h"
namespace cel {
namespace {
using ::testing::IsNull;
class DataTest final : public Data {
public:
DataTest() noexcept : Data() {}
explicit DataTest(absl::Nullable<google::protobuf::Arena*> arena) noexcept
: Data(arena) {}
};
class DataReferenceCount final : public common_internal::ReferenceCounted {
public:
explicit DataReferenceCount(const Data* data) : data_(data) {}
private:
void Finalize() noexcept override { delete data_; }
const Data* data_;
};
TEST(Data, Arena) {
google::protobuf::Arena arena;
DataTest data(&arena);
EXPECT_EQ(data.GetArena(), &arena);
EXPECT_THAT(common_internal::GetDataReferenceCount(&data), IsNull());
}
TEST(Data, ReferenceCount) {
auto* data = new DataTest();
EXPECT_THAT(data->GetArena(), IsNull());
auto* refcount = new DataReferenceCount(data);
common_internal::SetDataReferenceCount(data, refcount);
EXPECT_EQ(common_internal::GetDataReferenceCount(data), refcount);
common_internal::StrongUnref(refcount);
}
}
} | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/common/data.h | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/common/data_test.cc | 4552db5798fb0853b131b783d8875794334fae7f |
440601c2-2c57-4815-83a7-8087f34eb744 | cpp | tensorflow/tensorflow | c_api_experimental_reader | tensorflow/c/eager/c_api_experimental_reader.cc | tensorflow/c/eager/c_api_experimental_reader_test.cc | #include "tensorflow/c/eager/c_api_experimental_reader.h"
#include "tensorflow/c/eager/tfe_monitoring_reader_internal.h"
template <typename... LabelType>
int64_t TFE_MonitoringCounterReader::Read(const LabelType&... labels) {
return counter->Read(labels...);
}
TFE_MonitoringCounterReader* TFE_MonitoringNewCounterReader(const char* name) {
auto* result = new TFE_MonitoringCounterReader(name);
return result;
}
int64_t TFE_MonitoringReadCounter0(TFE_MonitoringCounterReader* cell_reader) {
int64_t result = cell_reader->Read();
return result;
}
int64_t TFE_MonitoringReadCounter1(TFE_MonitoringCounterReader* cell_reader,
const char* label) {
int64_t result = cell_reader->Read(label);
return result;
} | #include "tensorflow/c/eager/c_api_experimental_reader.h"
#include <cstdint>
#include "tensorflow/c/eager/c_api_experimental.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
TFE_MonitoringCounter0* CreateCounter0(const char* counter_name);
TFE_MonitoringCounter1* CreateCounter1(const char* counter_name,
const char* label);
void IncrementCounter0(TFE_MonitoringCounter0* counter, int64_t delta = 1);
void IncrementCounter1(TFE_MonitoringCounter1* counter, const char* label,
int64_t delta = 1);
TEST(CAPI, MonitoringCellReader0) {
auto counter_name = "test/counter0";
auto* counter = CreateCounter0(counter_name);
auto* reader = TFE_MonitoringNewCounterReader(counter_name);
IncrementCounter0(counter);
int64_t actual = TFE_MonitoringReadCounter0(reader);
CHECK_EQ(actual, 1);
}
TEST(CAPI, MonitoringCellReader1) {
auto counter_name = "test/counter1";
auto label_name = "test/label";
auto* counter = CreateCounter1(counter_name, label_name);
auto* reader = TFE_MonitoringNewCounterReader(counter_name);
IncrementCounter1(counter, label_name);
int64_t actual = TFE_MonitoringReadCounter1(reader, label_name);
CHECK_EQ(actual, 1);
}
TFE_MonitoringCounter0* CreateCounter0(const char* counter_name) {
TF_Status* status = TF_NewStatus();
auto* counter =
TFE_MonitoringNewCounter0(counter_name, status, "description");
TF_DeleteStatus(status);
return counter;
}
void IncrementCounter0(TFE_MonitoringCounter0* counter, int64_t delta) {
auto* cell = TFE_MonitoringGetCellCounter0(counter);
TFE_MonitoringCounterCellIncrementBy(cell, delta);
}
TFE_MonitoringCounter1* CreateCounter1(const char* counter_name,
const char* label) {
TF_Status* status = TF_NewStatus();
auto* counter =
TFE_MonitoringNewCounter1(counter_name, status, "description", label);
TF_DeleteStatus(status);
return counter;
}
void IncrementCounter1(TFE_MonitoringCounter1* counter, const char* label,
int64_t delta) {
auto* cell = TFE_MonitoringGetCellCounter1(counter, label);
TFE_MonitoringCounterCellIncrementBy(cell, delta);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/c/eager/c_api_experimental_reader.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/c/eager/c_api_experimental_reader_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
3545902a-4b31-4b27-abc6-663c18776ccb | cpp | abseil/abseil-cpp | strerror | absl/base/internal/strerror.cc | absl/base/internal/strerror_test.cc | #include "absl/base/internal/strerror.h"
#include <array>
#include <cerrno>
#include <cstddef>
#include <cstdio>
#include <cstring>
#include <string>
#include <type_traits>
#include "absl/base/internal/errno_saver.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace base_internal {
namespace {
const char* StrErrorAdaptor(int errnum, char* buf, size_t buflen) {
#if defined(_WIN32)
int rc = strerror_s(buf, buflen, errnum);
buf[buflen - 1] = '\0';
if (rc == 0 && strncmp(buf, "Unknown error", buflen) == 0) *buf = '\0';
return buf;
#else
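  // strerror_r has two incompatible signatures: XSI returns int, GNU returns
  // char*. Branch on the deduced return type so both variants compile and
  // behave correctly.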
auto ret = strerror_r(errnum, buf, buflen);
if (std::is_same<decltype(ret), int>::value) {
if (ret) *buf = '\0';
return buf;
} else {
return reinterpret_cast<const char*>(ret);
}
#endif
}
std::string StrErrorInternal(int errnum) {
char buf[100];
const char* str = StrErrorAdaptor(errnum, buf, sizeof buf);
if (*str == '\0') {
snprintf(buf, sizeof buf, "Unknown error %d", errnum);
str = buf;
}
return str;
}
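// Messages for errno values in [0, kSysNerr) are computed once and cached in
// a table; values outside that range fall back to StrErrorInternal.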
constexpr int kSysNerr = 135;
std::array<std::string, kSysNerr>* NewStrErrorTable() {
auto* table = new std::array<std::string, kSysNerr>;
for (size_t i = 0; i < table->size(); ++i) {
(*table)[i] = StrErrorInternal(static_cast<int>(i));
}
return table;
}
}
std::string StrError(int errnum) {
absl::base_internal::ErrnoSaver errno_saver;
static const auto* table = NewStrErrorTable();
if (errnum >= 0 && static_cast<size_t>(errnum) < table->size()) {
return (*table)[static_cast<size_t>(errnum)];
}
return StrErrorInternal(errnum);
}
}
ABSL_NAMESPACE_END
} | #include "absl/base/internal/strerror.h"
#include <atomic>
#include <cerrno>
#include <cstdio>
#include <cstring>
#include <string>
#include <thread>
#include <vector>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/strings/match.h"
namespace {
using ::testing::AnyOf;
using ::testing::Eq;
TEST(StrErrorTest, ValidErrorCode) {
errno = ERANGE;
EXPECT_THAT(absl::base_internal::StrError(EDOM), Eq(strerror(EDOM)));
EXPECT_THAT(errno, Eq(ERANGE));
}
TEST(StrErrorTest, InvalidErrorCode) {
errno = ERANGE;
EXPECT_THAT(absl::base_internal::StrError(-1),
AnyOf(Eq("No error information"), Eq("Unknown error -1")));
EXPECT_THAT(errno, Eq(ERANGE));
}
TEST(StrErrorTest, MultipleThreads) {
const int kNumCodes = 1000;
std::vector<std::string> expected_strings(kNumCodes);
for (int i = 0; i < kNumCodes; ++i) {
expected_strings[i] = strerror(i);
}
std::atomic_int counter(0);
auto thread_fun = [&]() {
for (int i = 0; i < kNumCodes; ++i) {
++counter;
errno = ERANGE;
const std::string value = absl::base_internal::StrError(i);
int check_err = errno;
EXPECT_THAT(check_err, Eq(ERANGE));
if (!absl::StartsWith(value, "Unknown error ")) {
EXPECT_THAT(value, Eq(expected_strings[i]));
}
}
};
const int kNumThreads = 100;
std::vector<std::thread> threads;
for (int i = 0; i < kNumThreads; ++i) {
threads.push_back(std::thread(thread_fun));
}
for (auto& thread : threads) {
thread.join();
}
EXPECT_THAT(counter, Eq(kNumThreads * kNumCodes));
}
} | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/base/internal/strerror.cc | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/base/internal/strerror_test.cc | 03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4 |
570c48f7-236d-4c3e-8e7d-44cba5bf1ea5 | cpp | tensorflow/tensorflow | buffer_assignment | third_party/xla/xla/service/buffer_assignment.cc | third_party/xla/xla/service/buffer_assignment_test.cc | #include "xla/service/buffer_assignment.h"
#include <algorithm>
#include <cstdint>
#include <deque>
#include <iterator>
#include <memory>
#include <optional>
#include <ostream>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/btree_map.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/memory/memory.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_op_metadata.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_live_range.h"
#include "xla/map_util.h"
#include "xla/service/buffer_value.h"
#include "xla/service/buffer_value_containers.h"
#include "xla/service/heap_simulator/heap_simulator.h"
#include "xla/service/hlo.pb.h"
#include "xla/service/hlo_alias_analysis.h"
#include "xla/service/hlo_buffer.h"
#include "xla/service/hlo_value.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/types.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/numbers.h"
namespace xla {
namespace {
using absl::flat_hash_map;
using absl::flat_hash_set;
using absl::StrAppend;
using absl::StrAppendFormat;
using memory_space_assignment::PresetAssignments;
using ::tsl::strings::HumanReadableNumBytes;
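// Builds a lookup from an instruction's unique id to the instruction itself,
// covering every computation in the module.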
absl::flat_hash_map<int64_t, const HloInstruction*> BuildIdToHloInstructionMap(
const HloModule* module) {
absl::flat_hash_map<int64_t, const HloInstruction*> id_to_hlo_instruction;
for (const HloComputation* computation : module->computations()) {
for (const HloInstruction* instruction : computation->instructions()) {
id_to_hlo_instruction[instruction->unique_id()] = instruction;
}
}
return id_to_hlo_instruction;
}
absl::StatusOr<absl::flat_hash_map<int64_t, const HloValue*>>
BuildIdToLogicalBufferMap(
const BufferAssignmentProto& proto,
const absl::flat_hash_map<int64_t, const HloInstruction*>&
id_to_hlo_instruction,
const std::unique_ptr<HloAliasAnalysis>& alias_analysis) {
absl::flat_hash_map<int64_t, const HloValue*> id_to_logical_buffer;
for (const LogicalBufferProto& logical_buffer_proto :
proto.logical_buffers()) {
TF_RET_CHECK(logical_buffer_proto.has_defined_at())
<< "Expected logical buffer to have location information in the proto.";
TF_RET_CHECK(id_to_hlo_instruction.contains(
logical_buffer_proto.defined_at().instruction_id()))
<< "Expected hlo instruction "
<< "with the id '" << logical_buffer_proto.defined_at().instruction_id()
<< "' in the proto to also exist in the "
"HLO module.";
const HloInstruction* hlo_instruction = id_to_hlo_instruction.at(
logical_buffer_proto.defined_at().instruction_id());
std::vector<int64_t> shape_idx_vals;
absl::c_copy(logical_buffer_proto.defined_at().shape_index(),
std::back_inserter(shape_idx_vals));
ShapeIndex proto_shape_index(shape_idx_vals);
auto& logical_buffer = alias_analysis->dataflow_analysis().GetUniqueValueAt(
hlo_instruction, proto_shape_index);
logical_buffer.set_color(logical_buffer_proto.color());
id_to_logical_buffer[logical_buffer_proto.id()] = &logical_buffer;
}
return id_to_logical_buffer;
}
}
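// Walks the call graph from the entry computation and classifies each
// computation as needing global allocations (reached via call, while,
// conditional, or async ops) or thread-local allocations (reached via ops
// such as fusion, reduce, and sort), failing if a computation is required to
// be both.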
absl::Status GatherComputationsByAllocationType(
const HloModule* module,
std::vector<const HloComputation*>* thread_local_computations,
std::vector<const HloComputation*>* global_computations) {
std::deque<std::pair<const HloComputation*, bool>> worklist;
worklist.push_back(std::make_pair(module->entry_computation(),
false));
flat_hash_set<const HloComputation*> thread_local_set;
flat_hash_set<const HloComputation*> global_set;
while (!worklist.empty()) {
auto worklist_front = worklist.front();
worklist.pop_front();
const HloComputation* computation = worklist_front.first;
bool is_thread_local = worklist_front.second;
bool in_thread_local_set = thread_local_set.contains(computation);
bool in_global_set = global_set.contains(computation);
if ((is_thread_local && in_thread_local_set) ||
(!is_thread_local && in_global_set)) {
continue;
}
if ((is_thread_local && in_global_set) ||
(!is_thread_local && in_thread_local_set)) {
return InvalidArgument(
"computation %s has conflicting allocation requirements (global "
"and thread-local)",
computation->name());
}
if (is_thread_local) {
thread_local_set.insert(computation);
} else {
global_set.insert(computation);
}
for (auto* instruction : computation->instructions()) {
for (HloComputation* subcomputation :
instruction->called_computations()) {
switch (instruction->opcode()) {
case HloOpcode::kCall:
case HloOpcode::kConditional:
case HloOpcode::kWhile:
case HloOpcode::kAsyncStart:
case HloOpcode::kAsyncUpdate:
case HloOpcode::kAsyncDone:
if (is_thread_local) {
return InvalidArgument(
"computation %s cannot contain call/while op because it "
"requires thread-local buffer allocations",
computation->name());
}
            worklist.push_back(std::make_pair(subcomputation,
                                              /*is_thread_local=*/false));
break;
case HloOpcode::kCustomCall:
case HloOpcode::kAllReduce:
case HloOpcode::kReduceScatter:
case HloOpcode::kAllReduceStart:
case HloOpcode::kMap:
case HloOpcode::kReduce:
case HloOpcode::kReduceWindow:
case HloOpcode::kScatter:
case HloOpcode::kSelectAndScatter:
case HloOpcode::kSort:
case HloOpcode::kFusion:
            worklist.push_back(std::make_pair(subcomputation,
                                              /*is_thread_local=*/true));
break;
default:
return Internal("Unexpected calling opcode: %s",
HloOpcodeString(instruction->opcode()));
}
}
}
}
for (auto* computation : module->MakeComputationPostOrder()) {
if (thread_local_set.contains(computation)) {
thread_local_computations->push_back(computation);
} else if (global_set.contains(computation)) {
global_computations->push_back(computation);
}
}
return absl::OkStatus();
}
std::string BufferAllocation::Slice::ToString() const {
return absl::StrCat("{index:", allocation_ == nullptr ? -1 : index(),
", offset:", offset_, ", size:", size_, "}");
}
BufferAllocation::Slice BufferAllocation::GetSlice(
const HloValue& buffer) const {
const OffsetSize os = FindOrDie(assigned_buffers_, &buffer);
return Slice(this, os.offset, os.size);
}
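// Records that `buffer` occupies [offset, offset + size) within this
// allocation. CHECK-fails if the buffer is already assigned, does not fit in
// the allocation, or has a mismatching color (unless this is a preallocated
// temp allocation with a non-default color). Also propagates the buffer's
// color into the memory space of the layout at each of the buffer's
// positions.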
void BufferAllocation::AddAssignment(const HloValue& buffer, int64_t offset,
int64_t size) {
VLOG(4) << "Adding the following buffer to allocation #" << index()
<< absl::StrFormat(" (size=%d, offset=%d) %s", size, offset,
buffer.ToShortString());
CHECK(!assigned_buffers_.contains(&buffer))
<< "LogicalBuffer " << buffer << " already assigned to allocation "
<< index_;
CHECK_LE(offset, size_) << "LogicalBuffer " << buffer
<< " offset out of range";
CHECK_LE(offset + size, size_)
<< "LogicalBuffer " << buffer
<< " size out of range at offset: " << offset << " with size: " << size;
if (!(IsPreallocatedTempBuffer() && color() != 0)) {
CHECK_EQ(buffer.color(), color())
<< "Buffer color " << buffer.color() << " for buffer " << buffer
<< " does not match allocation color " << color() << ".";
}
OffsetSize offset_size;
offset_size.offset = offset;
offset_size.size = size;
assigned_buffers_.emplace(&buffer, offset_size);
for (HloPosition position : buffer.positions()) {
Shape* shape = ShapeUtil::GetMutableSubshape(
position.instruction->mutable_shape(), position.index);
if (shape->has_layout()) {
shape->mutable_layout()->set_memory_space(buffer.color());
}
}
}
BufferAllocationProto BufferAllocation::ToProto() const {
BufferAllocationProto proto;
proto.set_index(index_);
proto.set_size(size_);
proto.set_is_thread_local(is_thread_local_);
proto.set_is_tuple(is_tuple_);
proto.set_color(color_);
if (is_entry_computation_parameter_) {
proto.set_is_entry_computation_parameter(true);
for (int64_t idx : param_shape_index()) {
proto.add_parameter_shape_index(idx);
}
proto.set_parameter_number(parameter_number_);
}
proto.set_is_constant(is_constant_);
proto.set_maybe_live_out(maybe_live_out_);
for (const auto& buffer_offset_size : assigned_buffers_) {
BufferAllocationProto::Assigned* proto_assigned = proto.add_assigned();
proto_assigned->set_logical_buffer_id(buffer_offset_size.first->id());
proto_assigned->set_offset(buffer_offset_size.second.offset);
proto_assigned->set_size(buffer_offset_size.second.size);
}
absl::c_sort(*proto.mutable_assigned(),
[](const BufferAllocationProto::Assigned& assign1,
const BufferAllocationProto::Assigned& assign2) {
return assign1.logical_buffer_id() <
assign2.logical_buffer_id();
});
return proto;
}
static bool CompareHloValuesById(const HloValue* a, const HloValue* b) {
return a->id() < b->id();
}
static const HloInstruction* GetEntryParameterInstruction(
const BufferAllocation& alloc) {
for (const auto& p : alloc.assigned_buffers()) {
const HloValue* value = p.first;
const HloInstruction* instr = value->instruction();
if (instr->opcode() == HloOpcode::kParameter &&
instr->parent() == instr->GetModule()->entry_computation()) {
return instr;
}
}
return nullptr;
}
static const HloInstruction* GetOutputInstruction(
const BufferAllocation& alloc) {
for (const auto& p : alloc.assigned_buffers()) {
const HloValue* value = p.first;
for (const HloPosition& position : value->positions()) {
const HloInstruction* instr = position.instruction;
if (position.index.empty() &&
instr->parent()->root_instruction() == instr &&
instr->parent()->IsEntryComputation()) {
return instr;
}
}
}
return nullptr;
}
std::string BufferAllocation::ToShortString() const {
std::string output;
StrAppendFormat(&output, "allocation %d: size %d", index_, size());
if (color() != 0) {
StrAppend(&output, ", color ", color());
}
if (is_entry_computation_parameter()) {
const HloInstruction* param = GetEntryParameterInstruction(*this);
StrAppend(&output, ", parameter ", parameter_number(), ", shape |",
              param ? param->shape().ToString(/*print_layout=*/false)
: "<unknown shape>",
"| at ShapeIndex ", param_shape_index().ToString());
}
if (const HloInstruction* instr = GetOutputInstruction(*this)) {
StrAppend(&output, ", output shape is |",
instr->shape().ToString(false), "|");
}
if (is_constant()) {
StrAppend(&output, ", constant");
}
if (is_thread_local()) {
StrAppend(&output, ", thread-local");
}
if (maybe_live_out()) {
StrAppend(&output, ", maybe-live-out");
}
if (IsPreallocatedTempBuffer()) {
StrAppend(&output, ", preallocated-temp");
}
StrAppend(&output, ":\n");
return output;
}
std::string BufferAllocation::ToString() const {
std::string output = ToShortString();
std::vector<const HloValue*> sorted_buffers;
for (const auto& buffer_offset_size : assigned_buffers_) {
sorted_buffers.push_back(buffer_offset_size.first);
}
absl::c_sort(sorted_buffers, &CompareHloValuesById);
for (const HloValue* buffer : sorted_buffers) {
const OffsetSize& offset_size = FindOrDie(assigned_buffers_, buffer);
StrAppend(&output,
absl::StrFormat(
" value: %s (size=%d,offset=%d): %s\n",
buffer->ToShortString(), offset_size.size, offset_size.offset,
ShapeUtil::HumanStringWithLayout(buffer->shape())));
}
return output;
}
std::ostream& operator<<(std::ostream& out, const BufferAllocation& buffer) {
out << buffer.ToString();
return out;
}
std::ostream& operator<<(std::ostream& out, const BufferAllocation::Slice& s) {
out << s.ToString();
return out;
}
bool BufferAssignment::HasAllocation(const HloValue& value) const {
return allocation_index_for_value_.contains(&value);
}
bool BufferAssignment::HasAllocation(HloValue::Id value_id) const {
return HasAllocation(dataflow_analysis().GetValue(value_id));
}
bool BufferAssignment::HasAllocation(const HloBuffer& buffer) const {
return allocation_index_for_value_.contains(buffer.values()[0]);
}
const BufferAllocation& BufferAssignment::GetAssignedAllocation(
const HloValue& value) const {
CHECK(HasAllocation(value));
return GetAllocation(allocation_index_for_value_.at(&value));
}
const BufferAllocation& BufferAssignment::GetAssignedAllocation(
const HloBuffer& hlo_buffer) const {
return GetAssignedAllocation(*hlo_buffer.values()[0]);
}
BufferAllocation* BufferAssignment::GetMutableAssignedAllocation(
const HloBuffer& buffer) {
return const_cast<BufferAllocation*>(&GetAssignedAllocation(buffer));
}
std::set<BufferAllocation::Slice> BufferAssignment::GetAllSlices(
const HloInstruction* instruction, const ShapeIndex& index) const {
std::set<BufferAllocation::Slice> result;
for (const HloValue* value :
dataflow_analysis().GetValueSet(instruction, index).values()) {
if (HasAllocation(*value)) {
result.insert(GetAssignedAllocation(*value).GetSlice(*value));
}
}
return result;
}
const BufferAllocation& BufferAssignment::GetAllocation(
BufferAllocation::Index index) const {
CHECK_GE(index, 0);
CHECK_LT(index, allocations_.size());
return allocations_[index];
}
const BufferAllocation* BufferAssignment::GetInstructionAllocation(
const HloInstruction* hlo, const ShapeIndex& shape_index) const {
const HloValue* value =
dataflow_analysis().GetValueSet(hlo, shape_index).values()[0];
if (!HasAllocation(*value)) {
return nullptr;
}
const BufferAllocation& instruction_allocation =
GetAssignedAllocation(*value);
return &instruction_allocation;
}
BufferAllocation* BufferAssignment::GetMutableAllocation(
BufferAllocation::Index index) {
return const_cast<BufferAllocation*>(&GetAllocation(index));
}
bool BufferAssignment::HasAllocationAt(const HloInstruction* instruction,
const ShapeIndex& index) const {
return absl::c_any_of(
dataflow_analysis().GetValueSet(instruction, index).values(),
IsKeyIn(allocation_index_for_value_));
}
bool BufferAssignment::HasTopLevelAllocation(
const HloInstruction* instruction) const {
return HasAllocationAt(instruction, {});
}
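// Returns the single slice assigned to `instruction` at `index`. Fails if no
// value at that position has an allocation, or if different values map to
// different slices, i.e. the slice cannot be determined at compile time.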
absl::StatusOr<BufferAllocation::Slice> BufferAssignment::GetUniqueSlice(
const HloInstruction* instruction, const ShapeIndex& index) const {
VLOG(3) << "Trying to find unique slice for " << instruction->name() << " ["
<< index << "]";
BufferAllocation::Slice result;
for (const HloValue* value :
dataflow_analysis().GetValueSet(instruction, index).values()) {
VLOG(3) << "Examining value " << *value;
if (HasAllocation(*value)) {
VLOG(3) << "Has allocation";
const BufferAllocation::Slice slice =
GetAssignedAllocation(*value).GetSlice(*value);
if (result.allocation() == nullptr) {
result = slice;
} else if (result != slice) {
return FailedPrecondition(
"BufferAllocation::Slice for instruction %s at index %s cannot "
"be determined at compile-time.",
instruction->name(), index.ToString());
}
} else {
VLOG(3) << "No allocation";
}
}
if (result.allocation() == nullptr) {
return FailedPrecondition(
"BufferAllocation::Slice not assigned for instruction %s at index %s",
instruction->name(), index.ToString());
}
return result;
}
absl::StatusOr<BufferAllocation::Slice>
BufferAssignment::GetUniqueTopLevelSlice(
const HloInstruction* instruction) const {
return GetUniqueSlice(instruction, {});
}
bool BufferAssignment::SharesSliceAtIndex(
const HloInstruction* hlo_a, const ShapeIndex& shape_index_a,
const HloInstruction* hlo_b, const ShapeIndex& shape_index_b) const {
return GetUniqueSlice(hlo_a, shape_index_a).value() ==
GetUniqueSlice(hlo_b, shape_index_b).value();
}
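// Conservatively determines whether `hlo_a` and `hlo_b` occupy disjoint
// memory: returns true only when every subshape of both instructions has at
// least one assigned slice and the two slice sets do not intersect.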
bool BufferAssignment::HaveDisjointSlices(const HloInstruction* hlo_a,
const HloInstruction* hlo_b) const {
using SliceSet = flat_hash_set<BufferAllocation::Slice>;
auto collect_slices = [&](const HloInstruction* instr) -> SliceSet {
SliceSet slices;
absl::Status status = ShapeUtil::ForEachSubshapeWithStatus(
instr->shape(),
[&](const Shape& ,
const ShapeIndex& index) -> absl::Status {
auto shape_slices = GetAllSlices(instr, index);
if (shape_slices.empty()) {
return InvalidArgument("No slices assigned to part of instr.");
}
slices.insert(shape_slices.begin(), shape_slices.end());
return absl::OkStatus();
});
if (!status.ok()) {
return {};
}
return slices;
};
SliceSet slices_a = collect_slices(hlo_a);
SliceSet slices_b = collect_slices(hlo_b);
return !slices_a.empty() && !slices_b.empty() &&
absl::c_none_of(slices_a, [&](const BufferAllocation::Slice& slice) {
return slices_b.contains(slice);
});
}
absl::StatusOr<BufferAllocation::Slice>
BufferAssignment::GetUniqueTopLevelOutputSlice() const {
return GetUniqueTopLevelSlice(
module_->entry_computation()->root_instruction());
}
BufferAllocation* BufferAssignment::NewEmptyAllocation(
int64_t size, LogicalBuffer::Color color) {
BufferAllocation::Index index = allocations_.size();
allocations_.emplace_back(index, size, color);
BufferAllocation* allocation = &allocations_.back();
return allocation;
}
BufferAllocation* BufferAssignment::NewAllocation(const HloBuffer& buffer,
int64_t size) {
BufferAllocation* allocation = NewEmptyAllocation(size, buffer.color());
AddAssignment(allocation, buffer, 0, size);
allocation->peak_buffers_.push_back(buffer.values()[0]);
return allocation;
}
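// Assigns every HloValue contained in `buffer` to `allocation` at
// [offset, offset + size), and marks the allocation as maybe-live-out if the
// buffer escapes the module.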
void BufferAssignment::AddAssignment(BufferAllocation* allocation,
const HloBuffer& buffer, int64_t offset,
int64_t size) {
CHECK(allocation->is_reusable() || allocation->assigned_buffers().empty())
<< "Non-reusable allocation already assigned a buffer: "
<< allocation->ToString();
for (const HloValue* buffer_value : buffer.values()) {
CHECK(!allocation_index_for_value_.contains(buffer_value))
<< "BufferValue " << buffer_value << " already has an allocation.";
allocation->AddAssignment(*buffer_value, offset, size);
allocation_index_for_value_[buffer_value] = allocation->index();
}
if (alias_analysis().BufferLivesOut(buffer)) {
VLOG(3) << "HloBuffer lives out: " << buffer.ToString();
VLOG(3) << "Set maybe live out: " << allocation->ToString();
allocation->set_maybe_live_out(true);
}
}
void BufferAssignment::AddAssignment(BufferAllocation* allocation,
const HloValue& value, int64_t offset,
int64_t size) {
allocation->AddAssignment(value, offset, size);
allocation_index_for_value_[&value] = allocation->index();
const HloValue& hlo_value =
*CHECK_NOTNULL(dynamic_cast<const HloValue*>(&value));
if (alias_analysis().ValueLivesOut(hlo_value)) {
VLOG(3) << "HloValue lives out: " << hlo_value.ToString();
VLOG(3) << "Set maybe live out: " << allocation->ToString();
allocation->set_maybe_live_out(true);
}
}
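// Merges preallocated temp allocations of the same color into a single
// combined allocation, starting a new one whenever the per-heap size
// constraint would be exceeded. Allocations with private-stack colors are
// overlapped at offset 0 and sized to the largest member instead of being
// stacked end to end. Afterwards the allocations are re-indexed and the
// value-to-allocation map is rebuilt.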
void BufferAssignment::CombineTempAllocations(
const absl::flat_hash_set<BufferValue::Color>& private_stack_colors,
std::optional<BufferValue::Color> temp_buffer_color) {
VLOG(1) << "CombineTempAllocations()";
std::deque<BufferAllocation> combined_allocations;
flat_hash_map<BufferValue::Color, BufferAllocation*> combined_allocation_map;
const auto first_temp_it =
std::partition(allocations_.begin(), allocations_.end(),
[](const BufferAllocation& allocation) {
return !allocation.IsPreallocatedTempBuffer();
});
if (first_temp_it != allocations_.end()) {
for (auto it = first_temp_it; it != allocations_.end(); ++it) {
BufferAllocation& temp_allocation = *it;
BufferValue::Color color = temp_allocation.color();
auto combined_it = combined_allocation_map.find(color);
if (combined_it == combined_allocation_map.end()) {
VLOG(1) << "Combined temp allocation for color " << color
<< " is: " << temp_allocation;
combined_allocations.emplace_back(temp_allocation);
combined_allocation_map.emplace(color, &combined_allocations.back());
continue;
}
if (combined_it->second->size() + it->size() >=
multiheap_size_constraint_per_heap_) {
VLOG(1) << "Due to size constraint, reset temp allocation for color "
<< color << " to: " << temp_allocation;
combined_allocations.emplace_back(temp_allocation);
combined_allocation_map.emplace(color, &combined_allocations.back());
continue;
}
BufferAllocation* combined_allocation = combined_it->second;
VLOG(1) << "Combined allocation absorbing temp allocation: "
<< temp_allocation;
int64_t alignment = color_alignment_(color);
int64_t base;
bool is_private_stack = private_stack_colors.contains(color);
      if (is_private_stack) {
        base = 0;
        combined_allocation->set_size(
            std::max(combined_allocation->size(), temp_allocation.size()));
} else {
base = RoundUpTo(combined_allocation->size(), alignment);
combined_allocation->set_size(base + temp_allocation.size());
}
for (const auto& buffer_offset_size : temp_allocation.assigned_buffers_) {
const HloValue* value = buffer_offset_size.first;
const int64_t offset = buffer_offset_size.second.offset;
const int64_t size = buffer_offset_size.second.size;
combined_allocation->AddAssignment(*value, base + offset, size);
}
if (!temp_allocation.HeapTraces().empty()) {
CHECK_EQ(temp_allocation.HeapTraces().size(), 1);
combined_allocation->AddHeapTrace(temp_allocation.HeapTraces().front());
}
if (is_private_stack) {
if (temp_allocation.size() == combined_allocation->size()) {
combined_allocation->peak_buffers_ = temp_allocation.peak_buffers_;
}
} else {
combined_allocation->peak_buffers_.insert(
combined_allocation->peak_buffers_.end(),
temp_allocation.peak_buffers_.begin(),
temp_allocation.peak_buffers_.end());
}
if (temp_buffer_color.has_value()) {
if (combined_allocation->color() == 0) {
combined_allocation->set_color(temp_buffer_color.value());
}
}
}
allocations_.erase(first_temp_it, allocations_.end());
for (BufferAllocation& combined : combined_allocations) {
temp_allocation_total_size_ += combined.size();
allocations_.push_back(std::move(combined));
}
}
  allocation_index_for_value_.clear();
for (size_t index = 0; index < allocations_.size(); ++index) {
BufferAllocation* allocation = &allocations_[index];
allocation->set_index(index);
std::vector<const HloValue*> sorted_values;
sorted_values.reserve(allocation->assigned_buffers_.size());
for (const auto& buffer_offset_size : allocation->assigned_buffers_) {
const HloValue* value = buffer_offset_size.first;
sorted_values.emplace(sorted_values.end(), value);
}
absl::c_sort(sorted_values, &CompareHloValuesById);
for (const HloValue* value : sorted_values) {
allocation_index_for_value_[value] = index;
}
}
}
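// Accumulates per-category allocation counts and byte totals into stats_.
// If every non-fusion computation has a sequential order, also computes the
// total fragmentation as the difference between the total allocated bytes and
// the heap-simulator lower bound for the whole module.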
absl::Status BufferAssignment::ComputeSummaryStats() {
for (auto& allocation : Allocations()) {
if (allocation.is_entry_computation_parameter()) {
stats_.parameter_allocation_count++;
stats_.parameter_allocation_bytes += allocation.size();
}
if (allocation.is_constant()) {
stats_.constant_allocation_count++;
stats_.constant_allocation_bytes += allocation.size();
}
if (allocation.maybe_live_out()) {
stats_.maybe_live_out_allocation_count++;
stats_.maybe_live_out_allocation_bytes += allocation.size();
}
if (allocation.IsPreallocatedTempBuffer()) {
stats_.preallocated_temp_allocation_count++;
stats_.preallocated_temp_allocation_bytes += allocation.size();
}
stats_.total_allocation_count++;
stats_.total_allocation_bytes += allocation.size();
}
HloSchedule schedule(module_);
bool schedule_complete = true;
for (const auto& computation : module_->computations()) {
if (!computation->IsFusionComputation()) {
const HloInstructionSequence* sequence =
hlo_ordering().SequentialOrder(*computation);
if (sequence == nullptr) {
schedule_complete = false;
} else {
schedule.set_sequence(computation, *sequence);
}
}
}
if (schedule_complete) {
TF_RETURN_IF_ERROR(schedule.Verify());
TF_ASSIGN_OR_RETURN(
const int64_t min_size,
HeapSimulator::MinimumMemoryForModule(schedule, buffer_size_));
stats_.total_fragmentation_bytes = stats_.total_allocation_bytes - min_size;
}
return absl::OkStatus();
}
std::string BufferAssignment::Stats::ToString() const {
std::string s;
StrAppendFormat(&s, "BufferAssignment stats:\n");
StrAppendFormat(&s, " parameter allocation: %10s\n",
HumanReadableNumBytes(parameter_allocation_bytes));
StrAppendFormat(&s, " constant allocation: %10s\n",
HumanReadableNumBytes(constant_allocation_bytes));
StrAppendFormat(&s, " maybe_live_out allocation: %10s\n",
HumanReadableNumBytes(maybe_live_out_allocation_bytes));
StrAppendFormat(&s, " preallocated temp allocation: %10s\n",
HumanReadableNumBytes(preallocated_temp_allocation_bytes));
if (preallocated_temp_fragmentation_bytes >= 0) {
const double percent = 100. * preallocated_temp_fragmentation_bytes /
preallocated_temp_allocation_bytes;
StrAppendFormat(
&s, " preallocated temp fragmentation: %10s (%.2f%%)\n",
HumanReadableNumBytes(preallocated_temp_fragmentation_bytes), percent);
}
StrAppendFormat(&s, " total allocation: %10s\n",
HumanReadableNumBytes(total_allocation_bytes));
if (total_fragmentation_bytes >= 0) {
const double percent =
100. * total_fragmentation_bytes / total_allocation_bytes;
StrAppendFormat(&s, " total fragmentation: %10s (%.2f%%)\n",
HumanReadableNumBytes(total_fragmentation_bytes), percent);
}
return s;
}
std::string BufferAssignment::ToString() const {
std::string output;
absl::StrAppend(&output, "BufferAssignment:\n");
std::vector<const HloValue*> used_values;
int64_t total_size = 0;
for (auto& allocation : allocations_) {
total_size += allocation.size();
absl::StrAppend(&output, allocation.ToString());
for (const auto& p : allocation.assigned_buffers()) {
used_values.push_back(p.first);
}
}
absl::StrAppend(&output, "\nTotal bytes used: ", total_size, " (",
HumanReadableNumBytes(total_size), ")\n");
absl::StrAppend(&output, "\nUsed values:\n");
absl::c_sort(used_values, &CompareHloValuesById);
for (const HloValue* value : used_values) {
absl::StrAppend(&output, value->ToString());
}
return output;
}
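// Returns up to `k` of the largest buffers live at the point of peak memory
// usage across `allocations`, as (size, value) pairs in descending size
// order.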
std::vector<std::pair<int64_t, const HloValue*>> TopKPeakBuffers(
    uint64_t k, const std::vector<BufferAllocation>& allocations) {
absl::btree_multimap<int64_t, const HloValue*> topk;
for (const BufferAllocation& allocation : allocations) {
for (const HloValue* value : allocation.PeakMemoryLogicalBuffers()) {
int64_t size = allocation.assigned_buffers().at(value).size;
if (topk.size() < k) {
topk.insert({size, value});
} else {
auto it = topk.begin();
if (size > it->first) {
topk.erase(it);
topk.insert({size, value});
}
}
}
}
std::vector<std::pair<int64_t, const HloValue*>> topk_descending;
topk_descending.reserve(topk.size());
absl::c_reverse_copy(topk, std::back_inserter(topk_descending));
return topk_descending;
}
std::string BufferAssignment::ToVerboseString(
size_t max_buffers_to_show) const {
std::string output =
absl::StrCat("BufferAssignment OOM Debugging.\n", stats_.ToString());
std::vector<std::pair<int64_t, const HloValue*>> peak_buffers =
TopKPeakBuffers(max_buffers_to_show, allocations_);
std::vector<std::string> buf_strs;
for (size_t i = 0; i < std::min(max_buffers_to_show, peak_buffers.size());
++i) {
const HloValue* value = peak_buffers[i].second;
const HloInstruction* instr = value->instruction();
int64_t size = peak_buffers[i].first;
buf_strs.push_back(absl::StrCat("\n\tBuffer ", i + 1, ":\n\t\tSize: ",
xla::HumanReadableNumBytes(size)));
if (!instr->metadata().op_name().empty()) {
buf_strs.push_back(absl::StrCat(
"\n\t\tOperator: ", xla::OpMetadataToString(instr->metadata())));
}
if (instr->opcode() == HloOpcode::kParameter &&
(instr->parent() == instr->GetModule()->entry_computation())) {
buf_strs.push_back(absl::StrCat(
"\n\t\tEntry Parameter Subshape: ",
ShapeUtil::GetSubshape(instr->shape(), value->index()).ToString()));
} else {
buf_strs.push_back(
absl::StrCat("\n\t\tXLA Label: ", HloOpcodeString(instr->opcode()),
"\n\t\tShape: ", value->shape().ToString()));
}
buf_strs.push_back("\n\t\t==========================\n");
}
absl::StrAppend(&output, "Peak buffers:", absl::StrJoin(buf_strs, ""));
return output;
}
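// Emits one CSV row per assigned buffer containing its offset, size,
// definition and end times from the live-range analysis, and the schedule
// positions and names of its uses. Buffers without a computed live range are
// skipped.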
std::string BufferAssignment::BufferInfoString() const {
std::string binfo;
absl::StrAppend(&binfo,
"buffer_id,buffer_name,offset,size,"
"definition_time,end_time,num_uses,use_times,use_names\n");
const HloLiveRange& live_ranges = hlo_live_range();
const auto& instruction_schedule = live_ranges.instruction_schedule();
const auto& buffer_live_ranges = live_ranges.buffer_live_ranges();
std::vector<std::pair<const HloValue*, BufferAllocation::OffsetSize>> buffers;
for (const BufferAllocation& allocation : allocations_) {
absl::c_copy(allocation.assigned_buffers(), std::back_inserter(buffers));
}
absl::c_sort(
buffers,
[](const std::pair<const HloValue*, BufferAllocation::OffsetSize>& b1,
const std::pair<const HloValue*, BufferAllocation::OffsetSize>& b2) {
return b1.first->id() < b2.first->id();
});
for (const auto& buffer_pair : buffers) {
const HloValue& buffer = *buffer_pair.first;
const BufferAllocation::OffsetSize& offset_size = buffer_pair.second;
if (!buffer_live_ranges.contains(&buffer)) {
continue;
}
std::vector<std::pair<int64_t, std::string>> uses;
uses.reserve(buffer.GetUses().size());
for (const HloUse& use : buffer.GetUses()) {
uses.emplace_back(instruction_schedule.at(use.instruction),
use.ToString());
}
absl::c_sort(uses);
std::vector<int64_t> use_positions;
std::vector<std::string> use_names;
use_positions.reserve(uses.size());
use_names.reserve(uses.size());
for (const auto& use : uses) {
use_positions.push_back(use.first);
use_names.push_back(use.second);
}
const int64_t definition_time =
instruction_schedule.at(buffer.defining_position().instruction);
const int64_t end_t = buffer_live_ranges.at(&buffer).end;
absl::StrAppend(&binfo, buffer.id(), ",");
absl::StrAppend(&binfo, "\"", buffer.ToShortString(), "\",");
absl::StrAppend(&binfo, offset_size.offset, ",");
absl::StrAppend(&binfo, offset_size.size, ",");
absl::StrAppend(&binfo, definition_time, ",");
absl::StrAppend(&binfo, end_t, ",");
absl::StrAppend(&binfo, use_positions.size(), ",");
absl::StrAppend(&binfo, "\"", absl::StrJoin(use_positions, ";"), "\",");
absl::StrAppend(&binfo, "\"", absl::StrJoin(use_names, ";"), "\"");
absl::StrAppend(&binfo, "\n");
}
return binfo;
}
BufferAssignmentProto BufferAssignment::ToProto() const {
BufferAssignmentProto proto;
const HloDataflowAnalysis& dataflow = this->dataflow_analysis();
for (BufferValue::Id id = 0; id < dataflow.values().size(); id++) {
auto& value = dataflow.values().at(id);
if (HasAllocation(*value)) {
LogicalBufferProto proto_buffer = value->ToProto(buffer_size_);
proto.add_logical_buffers()->Swap(&proto_buffer);
for (const HloValue* alias :
alias_analysis().GetBufferContainingValue(*value).values()) {
if (alias->instruction() == value->instruction() &&
alias->index() == value->index()) {
continue;
}
BufferAssignmentProto::BufferAlias* proto_alias =
proto.add_buffer_aliases();
LogicalBufferProto::Location proto_alias_location =
BufferValue::ToLocationProto(*alias->instruction(), alias->index());
proto_alias->set_source_buffer_id(value->id());
proto_alias->mutable_location()->Swap(&proto_alias_location);
}
}
}
for (const BufferAllocation& allocation : Allocations()) {
BufferAllocationProto proto_allocation = allocation.ToProto();
proto.add_buffer_allocations()->Swap(&proto_allocation);
for (const HeapSimulatorTrace& heap_trace : allocation.HeapTraces()) {
*proto.add_heap_simulator_traces() = heap_trace;
}
}
return proto;
}
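// Reconstructs a BufferAssignment from its proto form by re-running alias
// analysis on `module` and replaying the proto's allocations and buffer
// assignments. The resulting assignment carries no HloOrdering,
// color-alignment function, or HloLiveRange; those members are left null.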
absl::StatusOr<std::unique_ptr<BufferAssignment>> BufferAssignment::FromProto(
const BufferAssignmentProto& proto, const HloModule* module,
BufferValue::SizeFunction buffer_size,
HloDataflowAnalysis::CanShareBuffer can_share_buffer) {
TF_ASSIGN_OR_RETURN(std::unique_ptr<HloAliasAnalysis> alias_analysis,
HloAliasAnalysis::Run(module, can_share_buffer));
auto id_to_hlo_instruction = BuildIdToHloInstructionMap(module);
absl::flat_hash_map<int64_t, const HloValue*> id_to_logical_buffer;
TF_ASSIGN_OR_RETURN(
id_to_logical_buffer,
BuildIdToLogicalBufferMap(proto, id_to_hlo_instruction, alias_analysis));
std::unique_ptr<BufferAssignment> buffer_assignment =
absl::WrapUnique(new BufferAssignment(
          module, /*hlo_ordering=*/nullptr, std::move(buffer_size),
          /*color_alignment=*/nullptr, std::move(alias_analysis),
          /*hlo_live_range=*/nullptr));
for (const auto& alloc_proto : proto.buffer_allocations()) {
BufferAllocation* allocation = buffer_assignment->NewEmptyAllocation(
alloc_proto.size(), alloc_proto.color());
CHECK(allocation->index() == alloc_proto.index())
<< "Expected allocations in BufferAssignment proto to be sorted by "
"index.";
allocation->set_is_thread_local(alloc_proto.is_thread_local());
allocation->set_is_tuple(alloc_proto.is_tuple());
allocation->set_constant(alloc_proto.is_constant());
if (alloc_proto.is_entry_computation_parameter()) {
std::vector<int64_t> shape_idx_vals;
absl::c_copy(alloc_proto.parameter_shape_index(),
std::back_inserter(shape_idx_vals));
ShapeIndex shape_index(shape_idx_vals);
allocation->set_entry_computation_parameter(
          alloc_proto.parameter_number(), shape_index,
          /*parameter_has_alias=*/false);
}
for (const auto& assignee : alloc_proto.assigned()) {
HloValue::Id logical_buffer_id = assignee.logical_buffer_id();
const auto& buffer_val = id_to_logical_buffer[logical_buffer_id];
buffer_assignment->AddAssignment(allocation, *buffer_val,
assignee.offset(), assignee.size());
}
CHECK_EQ(allocation->maybe_live_out(), alloc_proto.maybe_live_out())
<< "Dataflow analysis differs from proto.";
}
TF_RET_CHECK(proto.logical_buffers_size() ==
buffer_assignment->allocation_index_for_value_.size());
for (auto& logical_buffer_proto : proto.logical_buffers()) {
TF_RET_CHECK(buffer_assignment->HasAllocation(
*id_to_logical_buffer[logical_buffer_proto.id()]));
}
return buffer_assignment;
}
absl::StatusOr<std::unique_ptr<BufferAssignment>> BufferAssigner::Run(
const HloModule* module, std::unique_ptr<HloOrdering> hlo_ordering,
BufferValue::SizeFunction buffer_size,
LogicalBuffer::AlignmentFunction color_alignment,
bool allocate_buffers_for_constants, BufferAssigner::Colorer colorer,
std::optional<BufferAssigner::MustNotLiveOut> must_not_live_out,
HloDataflowAnalysis::CanShareBuffer can_share_buffer,
std::unique_ptr<PresetAssignments> preset_assignments,
const PrivateStacks& private_stacks,
GlobalDecreasingSizeBestFitHeap<HloValue>::BufferIntervalCompare
heap_buffer_interval_compare,
std::optional<BufferAssignment::BufferIsolationOptions> isolation_options,
std::optional<BufferValue::Color> temp_buffer_color) {
BufferAssigner assigner(allocate_buffers_for_constants, std::move(colorer),
must_not_live_out, std::move(preset_assignments));
return assigner.CreateAssignment(
module, std::move(hlo_ordering), std::move(buffer_size),
std::move(color_alignment), std::move(can_share_buffer), private_stacks,
heap_buffer_interval_compare, isolation_options, temp_buffer_color);
}
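// Returns true if the scheduled live ranges of `buffer1` and `buffer2`
// overlap in a way that forbids sharing a slice. Back-to-back live ranges
// (one ends exactly where the other starts) may still share, but only when
// dataflow analysis confirms the later value can reuse the earlier value's
// buffer as an operand.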
bool BufferAssigner::LiveRangeInterferes(const HloValue* buffer1,
const HloValue* buffer2,
BufferAssignment* assignment) {
CHECK((assignment->hlo_live_range().total_order_scheduled()));
const HloLiveRange& hlo_live_range = assignment->hlo_live_range();
const auto& buffer_live_ranges = hlo_live_range.buffer_live_ranges();
auto live_range_it1 = buffer_live_ranges.find(buffer1);
CHECK(live_range_it1 != buffer_live_ranges.end())
<< "Buffer doesn't have a proper live range:" << buffer1->ToString();
auto live_range_it2 = buffer_live_ranges.find(buffer2);
CHECK(live_range_it2 != buffer_live_ranges.end())
<< "Buffer doesn't have a proper live range:" << buffer2->ToString();
auto can_share_as_operand =
[&assignment](const HloValue* user_value, const HloValue* operand_value,
const HloLiveRange::TimeBound& operand_live_range) {
HloPosition operand_end_position = operand_live_range.end_position;
return user_value->instruction()->opcode() != HloOpcode::kCopy &&
user_value->instruction()->IsUserOf(
operand_end_position.instruction) &&
assignment->dataflow_analysis().CanShareOperandBufferWithUser(
operand_end_position.instruction, operand_end_position.index,
user_value->instruction(), user_value->index());
};
const auto& live_range_1 = live_range_it1->second;
const auto& live_range_2 = live_range_it2->second;
if (!(live_range_1.start > live_range_2.end ||
live_range_2.start > live_range_1.end)) {
if (live_range_1.end == live_range_2.start) {
auto operand_value = buffer1;
auto user_value = buffer2;
if (!can_share_as_operand(user_value, operand_value, live_range_1)) {
VLOG(4) << "End of live range of " << buffer1->ToShortString()
<< " is equal to the start of live range of "
<< buffer2->ToShortString() << ", buffer cannot be shared.";
return true;
}
} else if (live_range_2.end == live_range_1.start) {
auto operand_value = buffer2;
auto user_value = buffer1;
if (!can_share_as_operand(user_value, operand_value, live_range_2)) {
VLOG(4) << "End of live range of " << buffer2->ToShortString()
<< " is equal to the start of live range of "
<< buffer1->ToShortString() << ", buffer cannot be shared.";
return true;
}
} else {
VLOG(4) << "Can't assign: assignee " << *buffer1 << " may interfere with "
<< *buffer2;
VLOG(4) << "assigned_buffer.start: " << live_range_1.start;
VLOG(4) << "assigned_buffer.end: " << live_range_1.end;
VLOG(4) << "live_range_2.start" << live_range_2.start;
VLOG(4) << "live_range_2.end" << live_range_2.end;
return true;
}
}
return false;
}
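// Attempts to place `hlo_buffer` into `allocation`. The assignment is
// rejected if the colors or sizes are incompatible, the allocation is
// read-only or not reusable, a must-not-live-out constraint would be
// violated, any already-assigned value may interfere with (or is copied by)
// a value in the buffer, or the buffer lives out and its size differs from
// the allocation size. Returns true iff the assignment was made.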
bool BufferAssigner::MaybeAssignBuffer(BufferAllocation* allocation,
const HloBuffer& hlo_buffer,
BufferAssignment* assignment) {
CHECK(!assignment->HasAllocation(hlo_buffer))
<< "buffer " << hlo_buffer << " already has an allocation assigned.";
VLOG(4) << "Trying to assign " << hlo_buffer << " size "
<< assignment->HloBufferSize(hlo_buffer)
<< " to allocation: " << *allocation;
if (hlo_buffer.color() != allocation->color()) {
VLOG(4) << "Can't assign: buffer has color " << hlo_buffer.color()
<< " and allocation has color " << allocation->color() << ".";
return false;
}
if (assignment->HloBufferSize(hlo_buffer) > allocation->size()) {
VLOG(4) << "Can't assign: buffer is larger than allocation ("
<< assignment->HloBufferSize(hlo_buffer) << " > "
<< allocation->size() << ")";
return false;
}
if (allocation->is_readonly()) {
VLOG(4) << "Can't assign: allocation is readonly";
return false;
}
if (must_not_live_out_.has_value()) {
if (allocation->maybe_live_out()) {
for (const HloValue* value : hlo_buffer.values()) {
if ((*must_not_live_out_)(assignment->alias_analysis(),
value->instruction(), value->index())) {
VLOG(4) << "Can't assign: " << value->instruction()->ToString()
<< " cannot live out of the module";
return false;
}
}
}
if (assignment->alias_analysis().BufferLivesOut(hlo_buffer)) {
for (const auto& buffer_offset_size : allocation->assigned_buffers()) {
const HloValue* value = buffer_offset_size.first;
if ((*must_not_live_out_)(assignment->alias_analysis(),
value->instruction(), value->index())) {
VLOG(4) << "Can't assign: " << value->instruction()
<< " cannot live out of the module";
return false;
}
}
}
}
if (!allocation->is_reusable()) {
VLOG(4) << "Can't assign: allocation is not reusable";
return false;
}
for (const auto& buffer_offset_size : allocation->assigned_buffers()) {
const HloValue& assigned_buffer =
*CHECK_NOTNULL(dynamic_cast<const HloValue*>(buffer_offset_size.first));
for (const HloValue* new_value : hlo_buffer.values()) {
if (assignment->hlo_live_range().total_order_scheduled()) {
if (LiveRangeInterferes(new_value, &assigned_buffer, assignment)) {
VLOG(4) << "Can't assign: assignee " << assigned_buffer
<< " live range interferes with "
<< new_value->ToShortString();
return false;
}
} else if (assignment->hlo_ordering().MayInterfere(
assigned_buffer, *new_value,
assignment->dataflow_analysis())) {
VLOG(4) << "Can't assign: assignee " << assigned_buffer
<< " may interfere with " << new_value->ToShortString();
return false;
}
if (new_value->instruction()->opcode() == HloOpcode::kCopy) {
for (const HloPosition& assigned_buffer_position :
assigned_buffer.positions()) {
if (new_value->instruction()->IsUserOf(
assigned_buffer_position.instruction)) {
VLOG(4) << "Can't assign: assignee " << assigned_buffer
<< " is used at copy instruction "
<< new_value->ToShortString();
return false;
}
}
}
}
}
if (assignment->alias_analysis().BufferLivesOut(hlo_buffer) &&
allocation->size() != assignment->HloBufferSize(hlo_buffer)) {
VLOG(4) << "Can't assign: buffer " << hlo_buffer
<< "is live out and size not the same as allocation";
return false;
}
assignment->AddAssignment(allocation, hlo_buffer, 0,
assignment->HloBufferSize(hlo_buffer));
return true;
}
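// Assigns a single HloBuffer. Constants (when enabled), entry-computation
// parameters, thread-local buffers, and tuple-shaped buffers each get a fresh
// allocation. Otherwise the buffer is offered to its operands' allocations
// and to previously created allocations for reuse. Buffers that do not live
// out of the module and whose computations are fully scheduled are deferred
// to the sequential heap-simulator pass instead of being assigned here.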
absl::Status BufferAssigner::AssignSingleHloBuffer(
const HloBuffer* hlo_buffer, bool is_thread_local,
absl::flat_hash_map<const HloComputation*,
absl::flat_hash_set<const HloValue*>>*
buffers_to_assign_sequentially,
std::vector<BufferAllocation::Index>* allocation_indices,
BufferAssignment* assignment) {
const int64_t buffer_size = assignment->HloBufferSize(*hlo_buffer);
for (const HloValue* value : hlo_buffer->values()) {
if (value->instruction()->opcode() == HloOpcode::kConstant) {
if (allocate_buffers_for_constants_) {
BufferAllocation* allocation =
assignment->NewAllocation(*hlo_buffer, buffer_size);
allocation->set_constant(true);
VLOG(3) << "New allocation #" << allocation->index() << " for constant "
<< *hlo_buffer << " value ptr: " << value;
}
VLOG(3) << "Not allocating buffer for constant";
return absl::OkStatus();
}
const HloInstruction* instruction = value->instruction();
const bool is_entry_parameter =
instruction->opcode() == HloOpcode::kParameter &&
instruction->parent() == instruction->GetModule()->entry_computation();
if (is_entry_parameter) {
bool parameter_has_alias =
assignment->module().input_output_alias_config().ParameterHasAlias(
instruction->parameter_number(), value->index());
BufferAllocation* allocation =
assignment->NewAllocation(*hlo_buffer, buffer_size);
allocation->set_entry_computation_parameter(
instruction->parameter_number(), value->index(), parameter_has_alias);
if (parameter_has_alias) {
allocation_indices->push_back(allocation->index());
}
VLOG(3) << "New allocation #" << allocation->index()
<< " marked as entry computation parameter: " << *hlo_buffer;
return absl::OkStatus();
}
}
if (is_thread_local) {
BufferAllocation* allocation =
assignment->NewAllocation(*hlo_buffer, buffer_size);
allocation->set_is_thread_local(true);
VLOG(3) << "New allocation #" << allocation->index()
<< " for thread-local: " << *hlo_buffer;
return absl::OkStatus();
}
for (const HloValue* value : hlo_buffer->values()) {
if (value->shape().IsTuple()) {
BufferAllocation* allocation =
assignment->NewAllocation(*hlo_buffer, buffer_size);
allocation->set_is_tuple(true);
VLOG(3) << "New allocation #" << allocation->index()
<< " for tuple-shaped buffer: " << *hlo_buffer;
return absl::OkStatus();
}
if (value->IsTopLevel() && !value->IsTuple()) {
const HloInstruction* instruction = value->instruction();
for (auto* operand : instruction->operands()) {
for (const auto& operand_slice :
assignment->GetAllSlices(operand, {})) {
BufferAllocation* allocation =
assignment->GetMutableAllocation(operand_slice.index());
if (MaybeAssignBuffer(allocation, *hlo_buffer, assignment)) {
VLOG(3) << "Reusing (operand) allocation #" << allocation->index()
<< " for: " << *hlo_buffer;
return absl::OkStatus();
}
}
}
}
}
for (int allocation_index = allocation_indices->size() - 1;
allocation_index >= 0; allocation_index--) {
BufferAllocation* allocation = assignment->GetMutableAllocation(
allocation_indices->at(allocation_index));
if (MaybeAssignBuffer(allocation, *hlo_buffer, assignment)) {
VLOG(3) << "Reusing allocation #" << allocation->index()
<< " for: " << *hlo_buffer;
return absl::OkStatus();
}
}
if (!assignment->HasAllocation(*hlo_buffer) &&
!assignment->alias_analysis().BufferLivesOut(*hlo_buffer)) {
bool all_computations_have_sequential_order = true;
for (const HloValue* hlo_value : hlo_buffer->values()) {
HloComputation* computation = hlo_value->instruction()->parent();
const bool has_sequential_order =
assignment->hlo_ordering().SequentialOrder(*computation) != nullptr;
all_computations_have_sequential_order &= has_sequential_order;
}
if (all_computations_have_sequential_order) {
for (const HloValue* hlo_value : hlo_buffer->values()) {
HloComputation* computation = hlo_value->instruction()->parent();
(*buffers_to_assign_sequentially)[computation].insert(hlo_value);
VLOG(3) << "Delaying assignment of temp buffer: " << *hlo_value;
}
return absl::OkStatus();
}
}
if (!assignment->HasAllocation(*hlo_buffer)) {
BufferAllocation* allocation =
assignment->NewAllocation(*hlo_buffer, buffer_size);
allocation_indices->push_back(allocation->index());
VLOG(3) << "New allocation #" << allocation->index()
<< " for: " << *hlo_buffer;
}
TF_RET_CHECK(assignment->HasAllocation(*hlo_buffer));
return absl::OkStatus();
}
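// Assigns buffers whose defining instructions live in `computations`. Preset
// assignments are applied first and those buffers skipped. The remaining
// buffers are sorted by size (descending), then by whether they live out,
// then by the post-order position of their defining instruction, and assigned
// one at a time via AssignSingleHloBuffer.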
absl::Status BufferAssigner::AssignBuffersForComputations(
const std::vector<const HloComputation*>& computations,
bool is_thread_local,
absl::flat_hash_map<const HloComputation*,
absl::flat_hash_set<const HloValue*>>*
buffers_to_assign_sequentially,
BufferAssignment* assignment) {
if (computations.empty()) {
return absl::OkStatus();
}
std::vector<const HloBuffer*> sorted_buffers;
absl::flat_hash_set<const HloBuffer*> preset_assigned_buffers;
TF_RETURN_IF_ERROR(AssignPresetBuffers(&preset_assigned_buffers, assignment));
const HloAliasAnalysis& alias_analysis = assignment->alias_analysis();
for (const HloBuffer& buffer : alias_analysis.buffers()) {
if (preset_assigned_buffers.find(&buffer) !=
preset_assigned_buffers.end()) {
VLOG(3) << "Skip allocation for buffer: " << buffer;
continue;
}
TF_RET_CHECK(!buffer.values().empty());
const HloComputation* comp = buffer.values()[0]->instruction()->parent();
if (absl::c_linear_search(computations, comp)) {
sorted_buffers.push_back(&buffer);
}
}
flat_hash_map<const HloInstruction*, int> post_order_position;
int position = 0;
std::vector<const HloComputation*> reverse_post_order_computations;
std::unique_ptr<CallGraph> call_graph =
CallGraph::Build(computations[0]->parent());
TF_RETURN_IF_ERROR(call_graph->VisitNodes([&](const CallGraphNode& node) {
if (absl::c_linear_search(computations, node.computation())) {
reverse_post_order_computations.push_back(node.computation());
}
return absl::OkStatus();
}));
absl::c_reverse(reverse_post_order_computations);
for (auto* computation : reverse_post_order_computations) {
for (auto* instruction : computation->MakeInstructionPostOrder()) {
post_order_position.emplace(instruction, position);
position++;
}
}
HloSchedule schedule(&assignment->module());
for (const HloComputation* computation : computations) {
const HloInstructionSequence* instruction_sequence =
assignment->hlo_ordering().SequentialOrder(*computation);
const bool has_sequential_order = instruction_sequence != nullptr;
if (has_sequential_order && buffers_to_assign_sequentially != nullptr) {
buffers_to_assign_sequentially->emplace(computation,
flat_hash_set<const HloValue*>());
schedule.set_sequence(computation, *instruction_sequence);
}
}
absl::c_sort(
sorted_buffers, [&post_order_position, &alias_analysis, assignment](
const HloBuffer* a, const HloBuffer* b) {
const int64_t a_size = assignment->HloBufferSize(*a);
const int64_t b_size = assignment->HloBufferSize(*b);
if (a_size != b_size) {
return a_size > b_size;
}
const bool a_live_out = alias_analysis.BufferLivesOut(*a);
const bool b_live_out = alias_analysis.BufferLivesOut(*b);
if (a_live_out != b_live_out) {
return a_live_out;
}
auto compare = [&post_order_position](const HloValue* value1,
const HloValue* value2) {
return post_order_position.at(value1->instruction()) <
post_order_position.at(value2->instruction());
};
const HloValue* a_min = *absl::c_min_element(a->values(), compare);
const HloValue* b_min = *absl::c_min_element(b->values(), compare);
if (post_order_position.at(a_min->instruction()) <
post_order_position.at(b_min->instruction())) {
return true;
} else if (post_order_position.at(a_min->instruction()) >
post_order_position.at(b_min->instruction())) {
return false;
}
return a->id() < b->id();
});
std::vector<BufferAllocation::Index> allocation_indices;
for (const HloBuffer* buffer : sorted_buffers) {
VLOG(3) << "=================================================";
VLOG(3) << "Assigning buffer for " << *buffer;
TF_RETURN_IF_ERROR(AssignSingleHloBuffer(buffer, is_thread_local,
buffers_to_assign_sequentially,
&allocation_indices, assignment));
}
return absl::OkStatus();
}
flat_hash_map<LogicalBuffer::Color, flat_hash_set<const HloValue*>>
BufferAssigner::SplitBuffersByColor(
const flat_hash_set<const HloValue*>& buffers) const {
flat_hash_map<LogicalBuffer::Color, flat_hash_set<const HloValue*>> color_map;
for (auto buffer : buffers) {
color_map[buffer->color()].insert(buffer);
}
return color_map;
}
absl::flat_hash_map<const HloComputation*, absl::flat_hash_set<const HloValue*>>
BufferAssigner::SplitBuffersByPrivateStackComputation(
const absl::flat_hash_set<const HloValue*>& buffers,
absl::Span<const HloComputation* const> private_stack_computations,
const CallGraph& call_graph) const {
absl::flat_hash_map<const HloComputation*,
absl::flat_hash_set<const HloValue*>>
computation_map;
for (const HloValue* value : buffers) {
bool found_computation = false;
for (const HloComputation* computation : private_stack_computations) {
if (call_graph.InstructionIsNestedIn(value->instruction(), computation)) {
found_computation = true;
computation_map[computation].insert(value);
break;
}
}
CHECK(found_computation);
}
return computation_map;
}
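// Creates one allocation per color present in the preset assignments, replays
// the preset heap traces and chunk offsets onto those allocations, and
// records the covered HloBuffers so later passes skip them.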
absl::Status BufferAssigner::AssignPresetBuffers(
absl::flat_hash_set<const HloBuffer*>* assigned_buffers,
BufferAssignment* assignment) {
if (!preset_assignments_) {
return absl::OkStatus();
}
absl::flat_hash_map<LogicalBuffer::Color, BufferAllocation*>
preset_allocations;
for (auto& color_and_info : preset_assignments_->assignment_informations()) {
LogicalBuffer::Color color(color_and_info.first);
auto inserted = preset_allocations.emplace(
color,
assignment->NewEmptyAllocation(color_and_info.second.size, color));
BufferAllocation* inserted_allocation = inserted.first->second;
inserted_allocation->AddHeapTrace(
color_and_info.second.heap_simulator_trace);
VLOG(3) << "Created preset buffer allocation "
<< inserted_allocation->index()
<< ", color: " << inserted_allocation->color()
<< ", size: " << inserted_allocation->size();
}
const HloAliasAnalysis& alias_analysis = assignment->alias_analysis();
for (auto& position_and_chunk : preset_assignments_->chunks()) {
const HloPosition& defining_position = position_and_chunk.first;
const HloBuffer& buffer = alias_analysis.GetUniqueBufferAt(
defining_position.instruction, defining_position.index);
for (const HloValue* value : buffer.values()) {
VLOG(3) << "Preset allocation for value: " << value->ToShortString();
const HeapSimulator::Chunk& chunk = position_and_chunk.second;
auto preset_allocations_iter = preset_allocations.find(value->color());
CHECK(preset_allocations_iter != preset_allocations.end())
<< "No preset value allocation for color " << value->color()
<< " for " << value->ToShortString() << " found.";
preset_allocations_iter->second->AddAssignment(*value, chunk.offset,
chunk.size);
}
assigned_buffers->insert(&buffer);
}
preset_assignments_ = {};
return absl::OkStatus();
}
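// Assigns the deferred buffers by running the heap simulator, either once
// over the whole module or once per computation. Buffers are grouped by
// color; colors with private stacks are simulated per private-stack
// computation. Unless a custom buffer-interval compare is supplied, the
// better of a spatial and a temporal global decreasing-size best-fit heap is
// chosen for each color.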
absl::Status BufferAssigner::AssignBuffersWithSequentialOrdering(
const flat_hash_map<const HloComputation*, flat_hash_set<const HloValue*>>&
buffers_to_assign_sequentially,
bool run_whole_module_heap_simulation, BufferAssignment* assignment,
const PrivateStacks& private_stacks,
GlobalDecreasingSizeBestFitHeap<HloValue>::BufferIntervalCompare
heap_buffer_interval_compare,
std::optional<BufferAssignment::BufferIsolationOptions> isolation_options) {
const HloOrdering& hlo_ordering = assignment->hlo_ordering();
auto get_heap_algorithm =
[&](int64_t alignment) -> std::unique_ptr<HeapAlgorithm<HloValue>> {
if (heap_buffer_interval_compare) {
return std::make_unique<ConstrainedGlobalDecreasingSizeBestFitHeap>(
assignment->multiheap_size_constraint_per_heap(), alignment,
GlobalDecreasingSizeBestFitHeap<HloValue>::kCustom,
heap_buffer_interval_compare);
}
auto algorithms = std::make_unique<
std::vector<std::unique_ptr<HeapAlgorithm<HloValue>>>>();
algorithms->push_back(
std::make_unique<ConstrainedGlobalDecreasingSizeBestFitHeap>(
assignment->multiheap_size_constraint_per_heap(), alignment,
GlobalDecreasingSizeBestFitHeap<HloValue>::kSpatial));
algorithms->push_back(
std::make_unique<ConstrainedGlobalDecreasingSizeBestFitHeap>(
assignment->multiheap_size_constraint_per_heap(), alignment,
GlobalDecreasingSizeBestFitHeap<HloValue>::kTemporal));
return std::make_unique<ChooseBestHeapAlgorithm<HloValue>>(
std::move(algorithms));
};
if (run_whole_module_heap_simulation) {
VLOG(1) << "Running whole-module heap simulation";
HloSchedule schedule(&assignment->module());
flat_hash_set<const HloValue*> all_buffers_to_assign;
for (const auto& pair : buffers_to_assign_sequentially) {
const HloComputation* computation = pair.first;
const flat_hash_set<const HloValue*>& buffers_to_assign = pair.second;
const HloInstructionSequence* instruction_sequence =
hlo_ordering.SequentialOrder(*computation);
CHECK(instruction_sequence != nullptr) << computation->name();
schedule.set_sequence(computation, *instruction_sequence);
all_buffers_to_assign.insert(buffers_to_assign.begin(),
buffers_to_assign.end());
}
auto color_map = SplitBuffersByColor(all_buffers_to_assign);
std::vector<LogicalBuffer::Color> sorted_colors;
sorted_colors.reserve(color_map.size());
for (auto& single_colored_set : color_map) {
auto color = single_colored_set.first;
sorted_colors.emplace(sorted_colors.end(), color);
}
absl::c_sort(sorted_colors);
for (auto color : sorted_colors) {
VLOG(2) << "Simulating heap for color " << color;
int64_t alignment = assignment->color_alignment_(color);
HeapSimulator::Options options;
options.alloc_constants = allocate_buffers_for_constants_;
auto private_stacks_it = private_stacks.find(color);
if (private_stacks_it != private_stacks.end()) {
auto computation_map = SplitBuffersByPrivateStackComputation(
color_map[color], private_stacks_it->second,
assignment->alias_analysis().dataflow_analysis().call_graph());
for (const HloComputation* private_stack_computation :
private_stacks_it->second) {
VLOG(2) << "private stack computation: "
<< private_stack_computation->name();
auto computation_map_it =
computation_map.find(private_stack_computation);
CHECK(computation_map_it != computation_map.end());
options.buffers_to_assign = &computation_map_it->second;
const HloInstructionSequence* instruction_sequence =
hlo_ordering.SequentialOrder(*private_stack_computation);
TF_ASSIGN_OR_RETURN(
HeapSimulator::Result<HloValue> result,
HeapSimulator::Run(
get_heap_algorithm(alignment), *private_stack_computation,
*instruction_sequence, assignment->alias_analysis(),
assignment->buffer_size_, &schedule, options));
AssignBuffersFromHeapSimulator(result, assignment, color,
isolation_options);
}
} else {
options.buffers_to_assign = &color_map[color];
TF_ASSIGN_OR_RETURN(
HeapSimulator::Result<HloValue> result,
HeapSimulator::Run(get_heap_algorithm(alignment),
assignment->module(), schedule,
assignment->alias_analysis(),
assignment->buffer_size_, options));
AssignBuffersFromHeapSimulator(result, assignment, color,
isolation_options);
}
}
} else {
VLOG(1) << "Running per-computation heap simulation";
for (const auto& pair : buffers_to_assign_sequentially) {
const HloComputation* computation = pair.first;
const flat_hash_set<const HloValue*>& buffers_to_assign = pair.second;
const HloInstructionSequence* instruction_sequence =
hlo_ordering.SequentialOrder(*computation);
CHECK(instruction_sequence != nullptr) << computation->name();
auto color_map = SplitBuffersByColor(buffers_to_assign);
std::vector<LogicalBuffer::Color> sorted_colors;
sorted_colors.reserve(color_map.size());
for (auto& single_colored_set : color_map) {
auto color = single_colored_set.first;
sorted_colors.emplace(sorted_colors.end(), color);
}
absl::c_sort(sorted_colors);
for (auto color : sorted_colors) {
VLOG(2) << "Simulating heap for color " << color;
int64_t alignment = assignment->color_alignment_(color);
HeapSimulator::Options options;
options.buffers_to_assign = &color_map[color];
TF_ASSIGN_OR_RETURN(
HeapSimulator::Result<HloValue> result,
HeapSimulator::Run(get_heap_algorithm(alignment), *computation,
*instruction_sequence,
assignment->alias_analysis(),
assignment->buffer_size_, options));
AssignBuffersFromHeapSimulator(result, assignment, color,
isolation_options);
}
}
}
return absl::OkStatus();
}
namespace {
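// Replays the heap-simulator trace for `allocation` to find the set of values
// live at the point of peak memory usage, returned sorted by value id.
// Buffers that share an underlying chunk (SHARE_WITH events) are only counted
// once per canonical buffer.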
std::vector<const HloValue*> ComputePeakMemoryLogicalBuffers(
const BufferAllocation& allocation, const HeapSimulatorTrace& heap_trace) {
absl::flat_hash_map<BufferValue::Id, const HloValue*> id_to_value;
absl::flat_hash_map<const HloValue*, int64_t> buffer_sizes;
for (const auto& pair : allocation.assigned_buffers()) {
const HloValue* value = pair.first;
const BufferAllocation::OffsetSize& offset_size = pair.second;
id_to_value[value->id()] = value;
buffer_sizes[value] = offset_size.size;
}
VLOG(1) << "Compute peak memory logical buffers";
absl::flat_hash_map<int64_t, int> num_outstanding_shared_buffers;
absl::flat_hash_map<int64_t, int64_t> shared_canonical_ids;
absl::flat_hash_map<int64_t, int64_t> allocated_sizes;
auto memory_delta = [&](const HeapSimulatorTrace::Event& event) -> int64_t {
const HloValue* buffer = id_to_value.at(event.buffer_id());
const int64_t buffer_size = buffer_sizes.at(buffer);
if (event.kind() == HeapSimulatorTrace::Event::ALLOC) {
num_outstanding_shared_buffers[event.buffer_id()] = 1;
allocated_sizes[event.buffer_id()] = buffer_size;
return buffer_size;
} else if (event.kind() == HeapSimulatorTrace::Event::SHARE_WITH) {
shared_canonical_ids[event.buffer_id()] = event.share_with_canonical_id();
if (++num_outstanding_shared_buffers[event.share_with_canonical_id()] ==
1) {
allocated_sizes[event.buffer_id()] = buffer_size;
return buffer_size;
}
allocated_sizes[event.buffer_id()] = 0;
return 0;
} else if (event.kind() == HeapSimulatorTrace::Event::FREE) {
auto shared_canonical_id_it =
shared_canonical_ids.find(event.buffer_id());
int64_t buffer_id = (shared_canonical_id_it == shared_canonical_ids.end())
? event.buffer_id()
: shared_canonical_id_it->second;
--num_outstanding_shared_buffers[buffer_id];
return -1 * allocated_sizes[event.buffer_id()];
}
LOG(FATAL) << "Unknown event kind: " << event.kind();
};
int64_t max_live_size = 0;
int64_t live_size = 0;
for (const auto& event : heap_trace.events()) {
if (!id_to_value.contains(event.buffer_id())) {
continue;
}
live_size += memory_delta(event);
if (max_live_size < live_size) {
max_live_size = live_size;
}
}
absl::flat_hash_set<const HloValue*> live_values;
live_size = 0;
num_outstanding_shared_buffers.clear();
for (const auto& event : heap_trace.events()) {
if (!id_to_value.contains(event.buffer_id())) {
continue;
}
const HloValue* value = id_to_value.at(event.buffer_id());
int64_t delta = memory_delta(event);
if (delta > 0) {
InsertOrDie(&live_values, value);
} else if (delta < 0) {
CHECK(ContainsKey(live_values, value));
live_values.erase(value);
}
live_size += delta;
if (live_size == max_live_size) {
break;
}
}
CHECK_EQ(live_size, max_live_size);
std::vector<const HloValue*> live_values_vector;
live_values_vector.insert(live_values_vector.end(), live_values.begin(),
live_values.end());
absl::c_sort(live_values_vector, [](const HloValue* a, const HloValue* b) {
return a->id() < b->id();
});
VLOG(4) << "Peak memory buffer:";
for (auto value : live_values_vector) {
VLOG(4) << " " << value->ToString();
}
return live_values_vector;
}
}
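// If buffer isolation is enabled for `color`, relocates up to
// `isolation_fuel` values (in the configured order) to padded offsets beyond
// the simulated heap, shifts the remaining values by the configured base
// offset, and grows the reported heap size accordingly. Which values share an
// allocation is unchanged.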
void BufferAssigner::IsolateHeapBuffers(
std::optional<BufferAssignment::BufferIsolationOptions> isolation_options,
const BufferAssignment* assignment, LogicalBuffer::Color color,
HeapSimulator::Result<HloValue>& result) const {
if (!isolation_options) {
return;
}
result.heap_size = 0;
for (HeapSimulator::HeapResult<HloValue>& heap_result : result.heap_results) {
if (absl::c_find(isolation_options->config.isolation_colors(), color) !=
isolation_options->config.isolation_colors().end()) {
VLOG(1) << "Isolating color: " << color;
int64_t alignment = assignment->color_alignment_(color);
std::vector<const HloValue*> sorted_values;
sorted_values.reserve(heap_result.chunk_map.size());
for (const auto& [value, chunk] : heap_result.chunk_map) {
sorted_values.push_back(value);
}
absl::c_sort(sorted_values, isolation_options->hlo_value_compare);
int64_t isolation_offset =
RoundUpTo(isolation_options->config.base_offset_bytes() +
heap_result.heap_size +
isolation_options->config.isolation_padding_bytes(),
alignment);
int64_t value_index;
for (value_index = 0;
value_index < std::min(static_cast<int64_t>(sorted_values.size()),
isolation_options->config.isolation_fuel());
++value_index) {
const HloValue* value = sorted_values[value_index];
HeapSimulator::Chunk& chunk = heap_result.chunk_map.at(value);
VLOG(1) << "Isolating " << value->ToShortString() << " from "
<< chunk.offset << " to " << isolation_offset;
chunk.offset = isolation_offset;
isolation_offset += RoundUpTo(
chunk.size + isolation_options->config.isolation_padding_bytes(),
alignment);
}
for (; value_index < sorted_values.size(); ++value_index) {
const HloValue* value = sorted_values[value_index];
HeapSimulator::Chunk& chunk = heap_result.chunk_map.at(value);
int64_t new_offset = RoundUpTo(
chunk.offset + isolation_options->config.base_offset_bytes(),
alignment);
VLOG(1) << "Not isolating " << value->ToShortString() << ", from "
<< chunk.offset << " to " << new_offset;
chunk.offset = new_offset;
}
heap_result.heap_size = isolation_offset;
}
result.heap_size += heap_result.heap_size;
}
}
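// Converts a heap-simulator result into allocations: adds the reported
// fragmentation to the assignment stats, creates one allocation per heap
// result, places each value at its chunk offset, and records the peak-memory
// buffers and the heap trace on the new allocation.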
void BufferAssigner::AssignBuffersFromHeapSimulator(
HeapSimulator::Result<HloValue>& result, BufferAssignment* assignment,
BufferValue::Color color,
std::optional<BufferAssignment::BufferIsolationOptions> isolation_options) {
IsolateHeapBuffers(isolation_options, assignment, color, result);
if (assignment->stats_.preallocated_temp_fragmentation_bytes == -1) {
assignment->stats_.preallocated_temp_fragmentation_bytes =
result.fragmentation_size;
} else {
assignment->stats_.preallocated_temp_fragmentation_bytes +=
result.fragmentation_size;
}
VLOG(1) << "Result size from heap simulator: " << result.heap_size;
for (const HeapSimulator::HeapResult<HloValue>& heap_result :
result.heap_results) {
BufferAllocation* allocation =
assignment->NewEmptyAllocation(heap_result.heap_size, color);
for (const auto& [value, chunk] : heap_result.chunk_map) {
assignment->AddAssignment(allocation, *value, chunk.offset, chunk.size);
}
allocation->peak_buffers_ =
ComputePeakMemoryLogicalBuffers(*allocation, result.debug_trace);
XLA_VLOG_LINES(2, allocation->ToString());
allocation->AddHeapTrace(result.debug_trace);
}
}
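// Top-level driver for buffer assignment: runs alias and live-range analysis,
// colors the buffers, splits computations into global and thread-local sets,
// assigns buffers for global computations (deferring temp buffers to the heap
// simulator), assigns buffers for thread-local computations, and then
// post-processes allocations holding live-out buffers.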
absl::StatusOr<std::unique_ptr<BufferAssignment>>
BufferAssigner::CreateAssignment(
const HloModule* module, std::unique_ptr<HloOrdering> hlo_ordering,
BufferValue::SizeFunction buffer_size,
LogicalBuffer::AlignmentFunction color_alignment,
HloDataflowAnalysis::CanShareBuffer can_share_buffer,
const PrivateStacks& private_stacks,
GlobalDecreasingSizeBestFitHeap<HloValue>::BufferIntervalCompare
heap_buffer_interval_compare,
std::optional<BufferAssignment::BufferIsolationOptions> isolation_options,
std::optional<BufferValue::Color> temp_buffer_color) {
TF_ASSIGN_OR_RETURN(std::unique_ptr<HloAliasAnalysis> alias_analysis,
HloAliasAnalysis::Run(module, can_share_buffer));
HloSchedule schedule(module);
for (const HloComputation* computation : module->computations()) {
const HloInstructionSequence* instruction_sequence =
hlo_ordering->SequentialOrder(*computation);
const bool has_sequential_order = instruction_sequence != nullptr;
if (has_sequential_order) {
schedule.set_sequence(computation, *instruction_sequence);
}
}
TF_ASSIGN_OR_RETURN(std::unique_ptr<HloLiveRange> hlo_live_range,
HloLiveRange::Run(schedule, *alias_analysis,
module->entry_computation(), true));
VLOG(1) << "Assigning buffers to module " << module->name();
XLA_VLOG_LINES(3, module->ToString());
XLA_VLOG_LINES(3, alias_analysis->ToString());
XLA_VLOG_LINES(3, alias_analysis->dataflow_analysis().ToString());
VLOG(1) << "Number of buffers to assign: "
<< alias_analysis->buffers().size();
std::unique_ptr<BufferAssignment> assignment(new BufferAssignment(
module, std::move(hlo_ordering), std::move(buffer_size),
std::move(color_alignment), std::move(alias_analysis),
std::move(hlo_live_range)));
TF_RETURN_IF_ERROR(
colorer_(&assignment->alias_analysis(), assignment->hlo_ordering()));
VLOG(3) << "After coloring:";
XLA_VLOG_LINES(3,
assignment->alias_analysis().dataflow_analysis().ToString());
std::vector<const HloComputation*> thread_local_computations;
std::vector<const HloComputation*> global_computations;
TF_RETURN_IF_ERROR(GatherComputationsByAllocationType(
module, &thread_local_computations, &global_computations));
flat_hash_map<const HloComputation*, flat_hash_set<const HloValue*>>
buffers_to_assign_sequentially;
TF_RETURN_IF_ERROR(AssignBuffersForComputations(
global_computations,
false, &buffers_to_assign_sequentially,
assignment.get()));
const bool run_whole_module_heap_simulation =
buffers_to_assign_sequentially.size() == global_computations.size();
VLOG(2) << "Running whole module heap simulation: "
<< run_whole_module_heap_simulation;
const int32_t multiheap_size_constraint_per_heap =
module->config().debug_options().xla_multiheap_size_constraint_per_heap();
VLOG(2) << "Multiheap per heap size limit: "
<< multiheap_size_constraint_per_heap;
TF_RETURN_IF_ERROR(AssignBuffersWithSequentialOrdering(
buffers_to_assign_sequentially, run_whole_module_heap_simulation,
assignment.get(), private_stacks, heap_buffer_interval_compare,
isolation_options));
std::vector<const HloComputation*> thread_local_computations_no_fusion;
for (auto* computation : thread_local_computations) {
TF_RET_CHECK(computation != module->entry_computation());
if (computation->IsFusionComputation()) {
continue;
}
thread_local_computations_no_fusion.push_back(computation);
}
TF_RETURN_IF_ERROR(AssignBuffersForComputations(
thread_local_computations_no_fusion,
true,
nullptr, assignment.get()));
for (const HloBuffer* buffer :
assignment->alias_analysis().LiveOutBuffers()) {
VLOG(3) << "maybe_live_out LogicalBuffer: " << *buffer;
if (assignment->HasAllocation(*buffer)) {
BufferAllocation* alloc =
assignment->GetMutableAssignedAllocation(*buffer);
alloc->set_maybe_live_out(true);
VLOG(3) << "maybe_live_out BufferAllocation: " << *alloc;
}
}
absl::flat_hash_set<BufferValue::Color> private_stack_colors;
for (const auto& [color, computations] : private_stacks) {
private_stack_colors.insert(color);
}
assignment->CombineTempAllocations(private_stack_colors, temp_buffer_color);
XLA_VLOG_LINES(2, assignment->ToString());
TF_RETURN_IF_ERROR(assignment->ComputeSummaryStats());
XLA_VLOG_LINES(1, assignment->GetStats().ToString());
VLOG(1) << "Buffer assignment done.";
return std::move(assignment);
}
} | #include "xla/service/buffer_assignment.h"
#include <cstdint>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/comparison_util.h"
#include "xla/hlo/ir/dfs_hlo_visitor_with_default.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/ir/hlo_schedule.h"
#include "xla/literal.h"
#include "xla/literal_util.h"
#include "xla/service/buffer_value.h"
#include "xla/service/call_graph.h"
#include "xla/service/copy_insertion.h"
#include "xla/service/flatten_call_graph.h"
#include "xla/service/hlo.pb.h"
#include "xla/service/hlo_alias_analysis.h"
#include "xla/service/hlo_buffer.h"
#include "xla/service/hlo_memory_scheduler.h"
#include "xla/service/hlo_ordering.h"
#include "xla/service/hlo_parser.h"
#include "xla/service/hlo_value.h"
#include "xla/service/logical_buffer.h"
#include "xla/service/memory_space_assignment/memory_space_assignment.h"
#include "xla/shape_util.h"
#include "xla/test.h"
#include "xla/test_helpers.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/status.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
using memory_space_assignment::PresetAssignments;
using ::testing::UnorderedElementsAre;
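// Records every instruction reachable from a root during a DFS traversal;
// the tests use it to enumerate a computation's instructions when validating
// buffer assignments.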
class InstructionListVisitor : public DfsHloVisitorWithDefault {
public:
explicit InstructionListVisitor(const HloInstruction* root) : root_(root) {}
absl::Status DefaultAction(HloInstruction* hlo) override {
instructions_.push_back(hlo);
VLOG(0) << "List instruction " << hlo->ToString();
return absl::OkStatus();
}
std::vector<const HloInstruction*> GetInstructions() { return instructions_; }
private:
const HloInstruction* root_;
std::vector<const HloInstruction*> instructions_;
InstructionListVisitor(const InstructionListVisitor&) = delete;
InstructionListVisitor& operator=(const InstructionListVisitor&) = delete;
};
const std::vector<const HloInstruction*> GetInstructions(HloInstruction* root) {
InstructionListVisitor main_list(root);
TF_CHECK_OK(root->Accept(&main_list));
return main_list.GetInstructions();
}
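// Test fixture with helpers that run BufferAssigner under different orderings
// (dependency-based or sequential), colorers, preset assignments, and buffer
// isolation options, plus the shapes shared by the tests below.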
class BufferAssignmentTest : public HloTestBase {
protected:
~BufferAssignmentTest() override {}
std::unique_ptr<BufferAssignment> RunBufferAssignment(HloModule* module,
int64_t alignment = 1) {
return BufferAssigner::Run(
module, std::make_unique<DependencyHloOrdering>(module),
backend().compiler()->BufferSizeBytesFunction(),
[alignment](LogicalBuffer::Color) { return alignment; },
true)
.value();
}
absl::StatusOr<std::unique_ptr<BufferAssignment>> ConvertToProtoAndBack(
const BufferAssignment* buffers, const HloModule* module) {
auto proto = buffers->ToProto();
return BufferAssignment::FromProto(
proto, module, backend().compiler()->BufferSizeBytesFunction(),
nullptr);
}
std::unique_ptr<BufferAssignment> RunBufferAssignmentWithSequentialOrdering(
HloModule* module, int64_t alignment = 1,
BufferAssigner::Colorer colorer = BufferAssigner::DefaultColorer(),
const BufferAssigner::PrivateStacks& private_stacks = {},
std::optional<BufferAssignment::BufferIsolationOptions>
isolation_options = std::nullopt) {
return BufferAssigner::Run(
module,
std::make_unique<SequentialHloOrdering>(module->schedule()),
backend().compiler()->BufferSizeBytesFunction(),
[alignment](LogicalBuffer::Color) { return alignment; },
true, colorer,
std::nullopt, nullptr,
{}, private_stacks,
nullptr, isolation_options)
.value();
}
std::unique_ptr<BufferAssignment> RunBufferAssignmentNoBuffersForConstants(
HloModule* module, int64_t alignment = 1) {
return BufferAssigner::Run(
module, std::make_unique<DependencyHloOrdering>(module),
backend().compiler()->BufferSizeBytesFunction(),
[alignment](LogicalBuffer::Color) { return alignment; },
false)
.value();
}
std::unique_ptr<BufferAssignment> RunBufferAssignmentNoBuffersReuseForAdd(
HloModule* module, int64_t alignment = 1) {
auto must_not_live_out = [](const HloAliasAnalysis& alias_analysis,
const HloInstruction* instruction,
const ShapeIndex&) {
return instruction->opcode() == HloOpcode::kAdd;
};
return BufferAssigner::Run(
module, std::make_unique<DependencyHloOrdering>(module),
backend().compiler()->BufferSizeBytesFunction(),
[alignment](LogicalBuffer::Color) { return alignment; },
false,
BufferAssigner::DefaultColorer(),
must_not_live_out)
.value();
}
std::unique_ptr<BufferAssignment> RunColoredBufferAssignment(
HloModule* module, BufferAssigner::Colorer colorer,
int64_t alignment = 1) {
return BufferAssigner::Run(
module, std::make_unique<DependencyHloOrdering>(module),
backend().compiler()->BufferSizeBytesFunction(),
[alignment](LogicalBuffer::Color) { return alignment; },
true, std::move(colorer))
.value();
}
std::unique_ptr<BufferAssignment> RunBufferAssignmentWithInstructionSequence(
HloModule* module, absl::Span<HloInstruction* const> instruction_sequence,
int64_t alignment = 1) {
HloSchedule schedule(module);
schedule.set_sequence(module->entry_computation(), instruction_sequence);
return BufferAssigner::Run(
module, std::make_unique<SequentialHloOrdering>(schedule),
backend().compiler()->BufferSizeBytesFunction(),
[alignment](LogicalBuffer::Color) { return alignment; },
true)
.value();
}
std::unique_ptr<BufferAssignment> RunBufferAssignmentWithPresetAssignments(
HloModule* module, std::unique_ptr<PresetAssignments> preset_assignments,
int64_t alignment = 1) {
return BufferAssigner::Run(
module, std::make_unique<DependencyHloOrdering>(module),
backend().compiler()->BufferSizeBytesFunction(),
[alignment](LogicalBuffer::Color) { return alignment; },
true,
BufferAssigner::DefaultColorer(),
std::nullopt,
nullptr, std::move(preset_assignments))
.value();
}
std::unique_ptr<BufferAssignment> RunBufferAssignmentWithIsolationOptions(
HloModule* module, std::optional<BufferAssignment::BufferIsolationOptions>
isolation_options = std::nullopt) {
return BufferAssigner::Run(
module,
std::make_unique<SequentialHloOrdering>(module->schedule()),
backend().compiler()->BufferSizeBytesFunction(),
[](LogicalBuffer::Color) { return 1; },
true,
BufferAssigner::DefaultColorer(),
std::nullopt, nullptr,
{}, {},
nullptr, isolation_options)
.value();
}
std::unique_ptr<HloComputation> BuildMapComputationPlus1(
const std::string& name) {
auto builder = HloComputation::Builder(name);
auto param =
builder.AddInstruction(HloInstruction::CreateParameter(0, r0f32_, "x"));
auto value = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
builder.AddInstruction(
HloInstruction::CreateBinary(r0f32_, HloOpcode::kAdd, param, value));
return builder.Build();
}
std::unique_ptr<HloComputation> BuildReduceComputation(
const std::string& name) {
auto builder = HloComputation::Builder(name);
auto param =
builder.AddInstruction(HloInstruction::CreateParameter(0, r0f32_, "x"));
auto param2 =
builder.AddInstruction(HloInstruction::CreateParameter(1, r0f32_, "y"));
builder.AddInstruction(
HloInstruction::CreateBinary(r0f32_, HloOpcode::kAdd, param, param2));
return builder.Build();
}
std::unique_ptr<HloComputation> BuildWhileConditionComputation(
const std::string& name) {
auto builder = HloComputation::Builder(name);
auto const4 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<int>(4)));
auto param = builder.AddInstruction(
HloInstruction::CreateParameter(0, t_s32_f32v4_, "x"));
auto index = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(const4->shape(), param, 0));
builder.AddInstruction(
HloInstruction::CreateCompare(ShapeUtil::MakeShape(PRED, {}), index,
const4, ComparisonDirection::kLt));
return builder.Build();
}
std::unique_ptr<HloComputation> BuildWhileBodyComputation(
const std::string& name) {
auto builder = HloComputation::Builder(name);
auto const1 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<int>(1)));
auto constv = builder.AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR1<float>({1.1f, 2.2f, 3.3f, 4.4f})));
auto param = builder.AddInstruction(
HloInstruction::CreateParameter(0, t_s32_f32v4_, "x"));
auto indexc = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(const1->shape(), param, 0));
auto addc = builder.AddInstruction(HloInstruction::CreateBinary(
indexc->shape(), HloOpcode::kAdd, indexc, const1));
auto indexv = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(constv->shape(), param, 1));
auto addv = builder.AddInstruction(HloInstruction::CreateBinary(
constv->shape(), HloOpcode::kAdd, indexv, constv));
builder.AddInstruction(HloInstruction::CreateTuple({addc, addv}));
return builder.Build();
}
std::unique_ptr<HloComputation> BuildR0F32UnaryOpComputation(
HloOpcode opcode, const std::string& name) {
auto builder = HloComputation::Builder(name);
auto param =
builder.AddInstruction(HloInstruction::CreateParameter(0, r0f32_, "x"));
builder.AddInstruction(HloInstruction::CreateUnary(r0f32_, opcode, param));
return builder.Build();
}
const BufferAllocation& GetAssignedInputAllocation(
const BufferAssignment& buffers, HloInstruction* hlo) {
LOG(INFO) << "Checking input: " << hlo->ToString();
const BufferAllocation& buffer =
*buffers.GetUniqueTopLevelSlice(hlo).value().allocation();
EXPECT_EQ(hlo->parameter_number(), buffer.parameter_number());
return buffer;
}
const BufferAllocation& GetAssignedOutputAllocation(
const BufferAssignment& buffers, HloInstruction* hlo) {
LOG(INFO) << "Checking output: " << hlo->ToString();
const BufferAllocation& buffer = GetTopLevelAllocation(buffers, hlo);
return buffer;
}
const BufferAllocation& GetAllocation(const BufferAssignment& buffers,
const HloInstruction* hlo,
const ShapeIndex& index) {
return *buffers.GetUniqueSlice(hlo, index).value().allocation();
}
const BufferAllocation& GetTopLevelAllocation(const BufferAssignment& buffers,
const HloInstruction* hlo) {
return *buffers.GetUniqueTopLevelSlice(hlo).value().allocation();
}
int64_t ValidateBuffers(
const std::vector<const HloInstruction*>& instructions,
const BufferAssignment& buffers) {
for (const HloInstruction* hlo : instructions) {
if (!buffers.HasTopLevelAllocation(hlo)) {
EXPECT_TRUE(HloOpcode::kConstant == hlo->opcode() ||
HloOpcode::kParameter == hlo->opcode());
continue;
}
}
int64_t total_size = 0;
for (auto& allocation : buffers.Allocations()) {
total_size += allocation.size();
}
return total_size;
}
Shape s32_ = ShapeUtil::MakeShape(xla::S32, {});
Shape r0f32_ = ShapeUtil::MakeShape(xla::F32, {});
Shape f32vec4_ = ShapeUtil::MakeShape(F32, {4});
Shape f32vec10_ = ShapeUtil::MakeShape(F32, {10});
Shape f32vec100_ = ShapeUtil::MakeShape(F32, {100});
Shape f32a100x10_ = ShapeUtil::MakeShape(F32, {100, 10});
Shape t_s32_f32v4_ = ShapeUtil::MakeTupleShape({s32_, f32vec4_});
Shape t_s32_f32v10_ = ShapeUtil::MakeTupleShape({s32_, f32vec10_});
};
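// Returns true iff the instructions in `a` and `b` share no top-level buffer
// slices under the given assignment.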
static bool BuffersDistinct(const std::vector<const HloInstruction*>& a,
const std::vector<const HloInstruction*>& b,
const BufferAssignment& assignment) {
absl::flat_hash_set<BufferAllocation::Slice> a_slices;
for (const HloInstruction* instruction : a) {
if (assignment.HasTopLevelAllocation(instruction)) {
a_slices.insert(assignment.GetUniqueTopLevelSlice(instruction).value());
}
}
for (const HloInstruction* instruction : b) {
if (assignment.HasTopLevelAllocation(instruction)) {
if (a_slices.contains(
assignment.GetUniqueTopLevelSlice(instruction).value())) {
return false;
}
}
}
return true;
}
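// A scalar constant gets a top-level allocation only when buffer assignment
// is asked to allocate buffers for constants.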
TEST_F(BufferAssignmentTest, ScalarConstant) {
auto builder = HloComputation::Builder(TestName());
auto const0 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
{
auto buffers = RunBufferAssignment(module.get());
EXPECT_TRUE(buffers->HasTopLevelAllocation(const0));
}
{
auto buffers = RunBufferAssignmentNoBuffersForConstants(module.get());
EXPECT_FALSE(buffers->HasTopLevelAllocation(const0));
}
}
TEST_F(BufferAssignmentTest, BufferForConst) {
auto builder = HloComputation::Builder(TestName());
auto const0 = builder.AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR1<float>({1.1f, 2.2f, 3.3f, 4.4f})));
auto const1 = builder.AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR1<float>({4.1f, 4.2f, 4.3f, 4.4f})));
auto add = builder.AddInstruction(
HloInstruction::CreateBinary(f32vec4_, HloOpcode::kAdd, const0, const1));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
{
auto buffers = RunBufferAssignment(module.get());
EXPECT_TRUE(buffers->HasTopLevelAllocation(const0));
EXPECT_TRUE(buffers->HasTopLevelAllocation(const1));
GetAssignedOutputAllocation(*buffers, add);
}
{
auto buffers = RunBufferAssignmentNoBuffersForConstants(module.get());
EXPECT_FALSE(buffers->HasTopLevelAllocation(const0));
EXPECT_FALSE(buffers->HasTopLevelAllocation(const1));
GetAssignedOutputAllocation(*buffers, add);
}
}
TEST_F(BufferAssignmentTest, HasAllocationAt) {
auto builder = HloComputation::Builder(TestName());
auto param0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, f32vec100_, "param0"));
auto constant = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<int>(1)));
auto negate = builder.AddInstruction(
HloInstruction::CreateUnary(f32vec100_, HloOpcode::kNegate, param0));
auto tuple = builder.AddInstruction(
HloInstruction::CreateTuple({negate, param0, constant}));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
auto buffers = RunBufferAssignment(module.get());
EXPECT_EQ(buffers->HasTopLevelAllocation(tuple),
buffers->HasAllocationAt(tuple, {}));
EXPECT_EQ(buffers->HasTopLevelAllocation(negate),
buffers->HasAllocationAt(tuple, {0}));
EXPECT_EQ(buffers->HasTopLevelAllocation(param0),
buffers->HasAllocationAt(tuple, {1}));
EXPECT_EQ(buffers->HasTopLevelAllocation(constant),
buffers->HasAllocationAt(tuple, {2}));
}
TEST_F(BufferAssignmentTest, BufferForOutputConst) {
auto builder = HloComputation::Builder(TestName());
auto const0 = builder.AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR1<float>({1.1f, 2.2f, 3.3f, 4.4f})));
auto copy = builder.AddInstruction(
HloInstruction::CreateUnary(const0->shape(), HloOpcode::kCopy, const0));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
auto buffers = RunBufferAssignment(module.get());
GetAssignedOutputAllocation(*buffers, copy);
}
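// paramscalar is broadcast and combined with two vector parameters. Expects
// distinct allocations for the parameters, reuse of the multiply's temp
// buffer by the add, and an assignment that survives a proto round trip.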
TEST_F(BufferAssignmentTest, Basic) {
auto builder = HloComputation::Builder(TestName());
auto paramscalar =
builder.AddInstruction(HloInstruction::CreateParameter(0, r0f32_, "p"));
auto broadcast = builder.AddInstruction(
HloInstruction::CreateBroadcast(f32vec100_, paramscalar, {}));
auto param0 = builder.AddInstruction(
HloInstruction::CreateParameter(1, f32vec100_, "p1"));
auto param1 = builder.AddInstruction(
HloInstruction::CreateParameter(2, f32vec100_, "p2"));
auto mul = builder.AddInstruction(HloInstruction::CreateBinary(
f32vec100_, HloOpcode::kMultiply, broadcast, param0));
auto add = builder.AddInstruction(
HloInstruction::CreateBinary(f32vec100_, HloOpcode::kAdd, mul, param1));
auto sub = builder.AddInstruction(HloInstruction::CreateBinary(
f32vec100_, HloOpcode::kSubtract, add, param1));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
auto buffers_orig = RunBufferAssignment(module.get());
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<BufferAssignment> buffers,
ConvertToProtoAndBack(buffers_orig.get(), module.get()));
BufferAllocation paramscalar_buffer =
GetAssignedInputAllocation(*buffers, paramscalar);
BufferAllocation param0_buffer = GetAssignedInputAllocation(*buffers, param0);
BufferAllocation param1_buffer = GetAssignedInputAllocation(*buffers, param1);
EXPECT_NE(paramscalar_buffer.index(), param0_buffer.index());
EXPECT_NE(paramscalar_buffer.index(), param1_buffer.index());
EXPECT_NE(param0_buffer.index(), param1_buffer.index());
const BufferAllocation& mul_buffer = GetTopLevelAllocation(*buffers, mul);
EXPECT_NE(mul_buffer.index(), param0_buffer.index());
const BufferAllocation& add_buffer = GetTopLevelAllocation(*buffers, add);
EXPECT_EQ(add_buffer.index(), mul_buffer.index());
GetAssignedOutputAllocation(*buffers, sub);
}
TEST_F(BufferAssignmentTest, BasicToFromProto) {
auto builder = HloComputation::Builder(TestName());
auto paramscalar =
builder.AddInstruction(HloInstruction::CreateParameter(0, r0f32_, "p"));
auto broadcast = builder.AddInstruction(
HloInstruction::CreateBroadcast(f32vec100_, paramscalar, {}));
auto param0 = builder.AddInstruction(
HloInstruction::CreateParameter(1, f32vec100_, "p1"));
auto param1 = builder.AddInstruction(
HloInstruction::CreateParameter(2, f32vec100_, "p2"));
auto mul = builder.AddInstruction(HloInstruction::CreateBinary(
f32vec100_, HloOpcode::kMultiply, broadcast, param0));
auto add = builder.AddInstruction(
HloInstruction::CreateBinary(f32vec100_, HloOpcode::kAdd, mul, param1));
builder.AddInstruction(HloInstruction::CreateBinary(
f32vec100_, HloOpcode::kSubtract, add, param1));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
auto buffers_orig = RunBufferAssignment(module.get());
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<BufferAssignment> buffers_from_proto,
ConvertToProtoAndBack(buffers_orig.get(), module.get()));
const HloDataflowAnalysis& dataflow_orig = buffers_orig->dataflow_analysis();
const HloDataflowAnalysis& dataflow_proto =
buffers_from_proto->dataflow_analysis();
EXPECT_EQ(buffers_orig->Allocations().size(),
buffers_from_proto->Allocations().size());
for (BufferValue::Id id = 0; id < dataflow_orig.values().size(); id++) {
auto& orig_value = dataflow_orig.values().at(id);
if (buffers_orig->HasAllocation(*orig_value)) {
auto& value_proto = dataflow_proto.GetUniqueValueAt(
orig_value->instruction(), orig_value->index());
EXPECT_TRUE(buffers_from_proto->HasAllocation(value_proto));
EXPECT_EQ(orig_value->color(), value_proto.color());
EXPECT_EQ(buffers_orig->GetAssignedAllocation(*orig_value).index(),
buffers_from_proto->GetAssignedAllocation(value_proto).index());
}
}
}
TEST_F(BufferAssignmentTest, AliasedParamCanBeReused) {
auto builder = HloComputation::Builder(TestName());
auto param = builder.AddInstruction(
HloInstruction::CreateParameter(0, f32vec100_, "p0"));
auto neg_1 = builder.AddInstruction(
HloInstruction::CreateUnary(f32vec100_, HloOpcode::kNegate, param));
auto neg_2 = builder.AddInstruction(
HloInstruction::CreateUnary(f32vec100_, HloOpcode::kNegate, neg_1));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
TF_ASSERT_OK(module->input_output_alias_config().SetUpAlias({}, 0, {}));
auto buffers_orig = RunBufferAssignment(module.get());
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<BufferAssignment> buffers,
ConvertToProtoAndBack(buffers_orig.get(), module.get()));
BufferAllocation param_buffer = GetAssignedInputAllocation(*buffers, param);
BufferAllocation neg_1_buffer = GetAllocation(*buffers, neg_1, {});
BufferAllocation neg_2_buffer = GetAllocation(*buffers, neg_2, {});
EXPECT_EQ(param_buffer.index(), neg_1_buffer.index());
EXPECT_EQ(neg_2_buffer.index(), neg_1_buffer.index());
}
TEST_F(BufferAssignmentTest, AddCannotReuse) {
auto builder = HloComputation::Builder(TestName());
auto paramscalar =
builder.AddInstruction(HloInstruction::CreateParameter(0, r0f32_, "p"));
auto broadcast = builder.AddInstruction(
HloInstruction::CreateBroadcast(f32vec100_, paramscalar, {}));
auto param0 = builder.AddInstruction(
HloInstruction::CreateParameter(1, f32vec100_, "p1"));
auto param1 = builder.AddInstruction(
HloInstruction::CreateParameter(2, f32vec100_, "p2"));
auto mul = builder.AddInstruction(HloInstruction::CreateBinary(
f32vec100_, HloOpcode::kMultiply, broadcast, param0));
auto add = builder.AddInstruction(
HloInstruction::CreateBinary(f32vec100_, HloOpcode::kAdd, mul, param1));
auto sub = builder.AddInstruction(HloInstruction::CreateBinary(
f32vec100_, HloOpcode::kSubtract, add, param1));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
auto buffers_orig = RunBufferAssignmentNoBuffersReuseForAdd(module.get());
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<BufferAssignment> buffers,
ConvertToProtoAndBack(buffers_orig.get(), module.get()));
BufferAllocation paramscalar_buffer =
GetAssignedInputAllocation(*buffers, paramscalar);
BufferAllocation param0_buffer = GetAssignedInputAllocation(*buffers, param0);
BufferAllocation param1_buffer = GetAssignedInputAllocation(*buffers, param1);
EXPECT_NE(paramscalar_buffer.index(), param0_buffer.index());
EXPECT_NE(paramscalar_buffer.index(), param1_buffer.index());
EXPECT_NE(param0_buffer.index(), param1_buffer.index());
const BufferAllocation& sub_buffer = GetTopLevelAllocation(*buffers, sub);
EXPECT_NE(sub_buffer.index(), param0_buffer.index());
const BufferAllocation& add_buffer = GetTopLevelAllocation(*buffers, add);
EXPECT_NE(add_buffer.index(), sub_buffer.index());
GetAssignedOutputAllocation(*buffers, sub);
}
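// Gives every HLO value a unique color, which prevents the add from sharing
// the multiply's buffer, and checks that layout memory spaces reflect the
// assigned colors.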
TEST_F(BufferAssignmentTest, BasicUniquelyColored) {
auto builder = HloComputation::Builder(TestName());
auto paramscalar =
builder.AddInstruction(HloInstruction::CreateParameter(0, r0f32_, "p"));
auto broadcast = builder.AddInstruction(
HloInstruction::CreateBroadcast(f32vec100_, paramscalar, {}));
auto param0 = builder.AddInstruction(
HloInstruction::CreateParameter(1, f32vec100_, "p1"));
auto param1 = builder.AddInstruction(
HloInstruction::CreateParameter(2, f32vec100_, "p2"));
auto mul = builder.AddInstruction(HloInstruction::CreateBinary(
f32vec100_, HloOpcode::kMultiply, broadcast, param0));
auto add = builder.AddInstruction(
HloInstruction::CreateBinary(f32vec100_, HloOpcode::kAdd, mul, param1));
auto sub = builder.AddInstruction(HloInstruction::CreateBinary(
f32vec100_, HloOpcode::kSubtract, add, param1));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
absl::flat_hash_map<const HloInstruction*, int> color_map;
auto colorer = [&](HloAliasAnalysis* alias_analysis, const HloOrdering&) {
int color = 0;
for (HloValue::Id id = 0;
id < alias_analysis->dataflow_analysis().values().size(); id++) {
auto& value = alias_analysis->dataflow_analysis().GetValue(id);
color_map[value.defining_instruction()] = color;
value.set_color(BufferValue::Color(color++));
}
return absl::OkStatus();
};
auto buffers = RunColoredBufferAssignment(module.get(), colorer);
BufferAllocation paramscalar_buffer =
GetAssignedInputAllocation(*buffers, paramscalar);
BufferAllocation param0_buffer = GetAssignedInputAllocation(*buffers, param0);
BufferAllocation param1_buffer = GetAssignedInputAllocation(*buffers, param1);
EXPECT_NE(paramscalar_buffer.index(), param0_buffer.index());
EXPECT_NE(paramscalar_buffer.index(), param1_buffer.index());
EXPECT_NE(param0_buffer.index(), param1_buffer.index());
const BufferAllocation& mul_buffer = GetTopLevelAllocation(*buffers, mul);
EXPECT_NE(mul_buffer.index(), param0_buffer.index());
const BufferAllocation& add_buffer = GetTopLevelAllocation(*buffers, add);
EXPECT_NE(add_buffer.index(), mul_buffer.index());
GetAssignedOutputAllocation(*buffers, sub);
EXPECT_EQ(param0->shape().layout().memory_space(), color_map[param0]);
EXPECT_EQ(param1->shape().layout().memory_space(), color_map[param1]);
EXPECT_EQ(mul->shape().layout().memory_space(), color_map[mul]);
EXPECT_EQ(add->shape().layout().memory_space(), color_map[add]);
EXPECT_EQ(sub->shape().layout().memory_space(), color_map[sub]);
}
TEST_F(BufferAssignmentTest, BasicPartiallyColored) {
auto builder = HloComputation::Builder(TestName());
auto paramscalar =
builder.AddInstruction(HloInstruction::CreateParameter(0, r0f32_, "p"));
auto broadcast = builder.AddInstruction(
HloInstruction::CreateBroadcast(f32vec100_, paramscalar, {}));
auto param0 = builder.AddInstruction(
HloInstruction::CreateParameter(1, f32vec100_, "p1"));
auto param1 = builder.AddInstruction(
HloInstruction::CreateParameter(2, f32vec100_, "p2"));
auto mul = builder.AddInstruction(HloInstruction::CreateBinary(
f32vec100_, HloOpcode::kMultiply, broadcast, param0));
auto add = builder.AddInstruction(
HloInstruction::CreateBinary(f32vec100_, HloOpcode::kAdd, mul, param1));
auto sub = builder.AddInstruction(HloInstruction::CreateBinary(
f32vec100_, HloOpcode::kSubtract, add, param1));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
auto colorer = [](HloAliasAnalysis* alias_analysis, const HloOrdering&) {
for (HloValue::Id id = 0;
id < alias_analysis->dataflow_analysis().values().size(); id++) {
auto& value = alias_analysis->dataflow_analysis().GetValue(id);
auto& buffer = alias_analysis->GetBufferContainingValue(value);
for (const auto& alias : buffer.values()) {
if (alias->instruction()->opcode() == HloOpcode::kAdd ||
alias->instruction()->opcode() == HloOpcode::kMultiply) {
value.set_color(LogicalBuffer::Color(1));
}
}
if (!value.has_color()) {
value.set_color(LogicalBuffer::Color(0));
}
}
return absl::OkStatus();
};
auto buffers = RunColoredBufferAssignment(module.get(), colorer);
BufferAllocation paramscalar_buffer =
GetAssignedInputAllocation(*buffers, paramscalar);
BufferAllocation param0_buffer = GetAssignedInputAllocation(*buffers, param0);
BufferAllocation param1_buffer = GetAssignedInputAllocation(*buffers, param1);
EXPECT_NE(paramscalar_buffer.index(), param0_buffer.index());
EXPECT_NE(paramscalar_buffer.index(), param1_buffer.index());
EXPECT_NE(param0_buffer.index(), param1_buffer.index());
const BufferAllocation& mul_buffer = GetTopLevelAllocation(*buffers, mul);
EXPECT_NE(mul_buffer.index(), param0_buffer.index());
const BufferAllocation& add_buffer = GetTopLevelAllocation(*buffers, add);
EXPECT_EQ(add_buffer.index(), mul_buffer.index());
GetAssignedOutputAllocation(*buffers, sub);
EXPECT_EQ(mul->shape().layout().memory_space(), 1);
EXPECT_EQ(add->shape().layout().memory_space(), 1);
EXPECT_EQ(sub->shape().layout().memory_space(), 0);
EXPECT_EQ(param0->shape().layout().memory_space(), 0);
EXPECT_EQ(param1->shape().layout().memory_space(), 0);
}
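// mul and add are placed in memory space 1 with preset chunks. Expects both
// to land in a single color-1 allocation at the preset offsets while the
// parameters keep color 0.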
TEST_F(BufferAssignmentTest, PresetAssignments) {
auto builder = HloComputation::Builder(TestName());
auto paramscalar =
builder.AddInstruction(HloInstruction::CreateParameter(0, r0f32_, "p"));
auto broadcast = builder.AddInstruction(
HloInstruction::CreateBroadcast(f32vec100_, paramscalar, {}));
auto param0 = builder.AddInstruction(
HloInstruction::CreateParameter(1, f32vec100_, "p1"));
auto param1 = builder.AddInstruction(
HloInstruction::CreateParameter(2, f32vec100_, "p2"));
Shape f32vec100_color1 = ShapeUtil::MakeShapeWithDenseLayout(
F32, {100}, {0}, {}, 1,
0,
1);
auto mul = builder.AddInstruction(HloInstruction::CreateBinary(
f32vec100_color1, HloOpcode::kMultiply, broadcast, param0));
auto add = builder.AddInstruction(HloInstruction::CreateBinary(
f32vec100_color1, HloOpcode::kAdd, mul, param1));
auto sub = builder.AddInstruction(HloInstruction::CreateBinary(
f32vec100_, HloOpcode::kSubtract, add, param1));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
auto preset_assignments = std::make_unique<PresetAssignments>();
preset_assignments->add_chunk({mul, {}},
HeapSimulator::Chunk::FromOffsetSize(100, 400));
preset_assignments->add_chunk({add, {}},
HeapSimulator::Chunk::FromOffsetSize(550, 400));
preset_assignments->assignment_information_for_space(1)
->size = 950;
auto buffers = RunBufferAssignmentWithPresetAssignments(
module.get(), std::move(preset_assignments));
BufferAllocation paramscalar_buffer =
GetAssignedInputAllocation(*buffers, paramscalar);
BufferAllocation param0_buffer = GetAssignedInputAllocation(*buffers, param0);
BufferAllocation param1_buffer = GetAssignedInputAllocation(*buffers, param1);
EXPECT_NE(paramscalar_buffer.index(), param0_buffer.index());
EXPECT_NE(paramscalar_buffer.index(), param1_buffer.index());
EXPECT_EQ(paramscalar_buffer.color(), LogicalBuffer::Color(0));
EXPECT_NE(param0_buffer.index(), param1_buffer.index());
EXPECT_EQ(param0_buffer.color(), LogicalBuffer::Color(0));
const BufferAllocation& mul_buffer = GetTopLevelAllocation(*buffers, mul);
const BufferAllocation& add_buffer = GetTopLevelAllocation(*buffers, add);
EXPECT_EQ(mul_buffer, add_buffer);
EXPECT_NE(mul_buffer.index(), param0_buffer.index());
EXPECT_EQ(mul_buffer.color(), LogicalBuffer::Color(1));
EXPECT_EQ(mul_buffer.assigned_buffers().size(), 2);
for (const auto& value_and_offsetsize : mul_buffer.assigned_buffers()) {
if (value_and_offsetsize.first->instruction() == mul) {
EXPECT_EQ(value_and_offsetsize.second.offset, 100);
EXPECT_EQ(value_and_offsetsize.second.size, 400);
} else {
EXPECT_EQ(value_and_offsetsize.first->instruction(), add);
EXPECT_EQ(value_and_offsetsize.second.offset, 550);
EXPECT_EQ(value_and_offsetsize.second.size, 400);
}
}
GetAssignedOutputAllocation(*buffers, sub);
}
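// A color-1 value produced before a while loop flows through the loop body.
// Expects every aliased value to share the preset chunk and keep color 1.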
TEST_F(BufferAssignmentTest, PresetAssignmentsWhile) {
auto module = CreateNewVerifiedModule();
Shape f32vec10_color1 = ShapeUtil::MakeShapeWithDenseLayout(
F32, {10}, {0}, {}, 1,
0,
1);
Shape t_s32_f32v10_color1 =
ShapeUtil::MakeTupleShape({s32_, f32vec10_color1});
auto cond_builder = HloComputation::Builder("WhileCond");
HloInstruction* cond_param = cond_builder.AddInstruction(
HloInstruction::CreateParameter(0, t_s32_f32v10_color1, "cond_param"));
HloInstruction* cond_iter = cond_builder.AddInstruction(
HloInstruction::CreateGetTupleElement(s32_, cond_param, 0));
HloInstruction* cond_limit = cond_builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<int>(50)));
cond_builder.AddInstruction(
HloInstruction::CreateCompare(ShapeUtil::MakeShape(PRED, {}), cond_iter,
cond_limit, ComparisonDirection::kLt));
HloComputation* cond_computation =
module->AddEmbeddedComputation(cond_builder.Build());
auto body_builder = HloComputation::Builder("WhileBody");
HloInstruction* body_param = body_builder.AddInstruction(
HloInstruction::CreateParameter(0, t_s32_f32v10_color1, "body_param"));
HloInstruction* body_iter = body_builder.AddInstruction(
HloInstruction::CreateGetTupleElement(s32_, body_param, 0));
HloInstruction* body_data = body_builder.AddInstruction(
HloInstruction::CreateGetTupleElement(f32vec10_color1, body_param, 1));
HloInstruction* body_data_increment = body_builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR1<float>(
{1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f})));
HloInstruction* body_data_next =
body_builder.AddInstruction(HloInstruction::CreateBinary(
f32vec10_color1, HloOpcode::kAdd, body_data, body_data_increment));
HloInstruction* body_iter_increment = body_builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<int>(1)));
HloInstruction* body_iter_next =
body_builder.AddInstruction(HloInstruction::CreateBinary(
s32_, HloOpcode::kAdd, body_iter, body_iter_increment));
body_builder.AddInstruction(
HloInstruction::CreateTuple({body_iter_next, body_data_next}));
HloComputation* body_computation =
module->AddEmbeddedComputation(body_builder.Build());
auto builder = HloComputation::Builder(TestName());
HloInstruction* iter = builder.AddInstruction(
HloInstruction::CreateParameter(0, s32_, "param_iter"));
HloInstruction* data = builder.AddInstruction(
HloInstruction::CreateParameter(1, f32vec10_, "param_data"));
HloInstruction* negate = builder.AddInstruction(
HloInstruction::CreateUnary(f32vec10_color1, HloOpcode::kNegate, data));
HloInstruction* tuple =
builder.AddInstruction(HloInstruction::CreateTuple({iter, negate}));
HloInstruction* while_op = builder.AddInstruction(HloInstruction::CreateWhile(
t_s32_f32v10_color1, cond_computation, body_computation, tuple));
HloInstruction* while_data = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(f32vec10_color1, while_op, 1));
builder.AddInstruction(HloInstruction::CreateBinary(
f32vec10_, HloOpcode::kAdd, while_data, data));
module->AddEntryComputation(builder.Build());
auto preset_assignments = std::make_unique<PresetAssignments>();
preset_assignments->add_chunk({negate, {}},
HeapSimulator::Chunk::FromOffsetSize(100, 40));
preset_assignments->assignment_information_for_space(1)
->size = 140;
auto buffers_orig = RunBufferAssignmentWithPresetAssignments(
module.get(), std::move(preset_assignments));
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<BufferAssignment> buffers,
ConvertToProtoAndBack(buffers_orig.get(), module.get()));
const BufferAllocation& data_buffer = GetTopLevelAllocation(*buffers, negate);
EXPECT_EQ(data_buffer.assigned_buffers().size(), 5);
for (const auto& value_and_offsetsize : data_buffer.assigned_buffers()) {
EXPECT_EQ(value_and_offsetsize.second.offset, 100);
EXPECT_EQ(value_and_offsetsize.second.size, 40);
EXPECT_EQ(value_and_offsetsize.first->color(), LogicalBuffer::Color(1));
}
}
TEST_F(BufferAssignmentTest, MultipleUsersForNode) {
auto builder = HloComputation::Builder(TestName());
auto paramscalar =
builder.AddInstruction(HloInstruction::CreateParameter(0, r0f32_, "p"));
auto broadcast = builder.AddInstruction(
HloInstruction::CreateBroadcast(f32vec100_, paramscalar, {}));
auto param0 = builder.AddInstruction(
HloInstruction::CreateParameter(1, f32vec100_, "p1"));
auto param1 = builder.AddInstruction(
HloInstruction::CreateParameter(2, f32vec100_, "p2"));
auto mul = builder.AddInstruction(HloInstruction::CreateBinary(
f32vec100_, HloOpcode::kMultiply, broadcast, param0));
auto add = builder.AddInstruction(
HloInstruction::CreateBinary(f32vec100_, HloOpcode::kAdd, mul, param1));
auto sub = builder.AddInstruction(
HloInstruction::CreateBinary(f32vec100_, HloOpcode::kSubtract, add, mul));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
auto buffers_orig = RunBufferAssignment(module.get());
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<BufferAssignment> buffers,
ConvertToProtoAndBack(buffers_orig.get(), module.get()));
BufferAllocation paramscalar_buffer =
GetAssignedInputAllocation(*buffers, paramscalar);
BufferAllocation param0_buffer = GetAssignedInputAllocation(*buffers, param0);
BufferAllocation param1_index = GetAssignedInputAllocation(*buffers, param1);
EXPECT_NE(paramscalar_buffer.index(), param0_buffer.index());
EXPECT_NE(paramscalar_buffer.index(), param1_index.index());
EXPECT_NE(param0_buffer.index(), param1_index.index());
const BufferAllocation& mul_buffer = GetTopLevelAllocation(*buffers, mul);
const BufferAllocation& add_buffer = GetTopLevelAllocation(*buffers, add);
EXPECT_NE(add_buffer.index(), mul_buffer.index());
const std::vector<const HloInstruction*> level0 = GetInstructions(sub);
int64_t size0 = ValidateBuffers(level0, *buffers);
LOG(INFO) << "LogicalBuffer count " << buffers->Allocations().size()
<< " for " << level0.size() << " instructions; "
<< "total buffer size " << size0;
}
TEST_F(BufferAssignmentTest, TrivialMap) {
auto module = CreateNewVerifiedModule();
auto map_computation =
module->AddEmbeddedComputation(BuildMapComputationPlus1("f32+1"));
auto inner_last = map_computation->root_instruction();
auto builder = HloComputation::Builder(TestName());
auto param0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, f32a100x10_, "p"));
auto map = builder.AddInstruction(
HloInstruction::CreateMap(f32a100x10_, {param0}, map_computation));
module->AddEntryComputation(builder.Build());
const std::vector<const HloInstruction*> level0 = GetInstructions(map);
EXPECT_EQ(2, level0.size()) << "Invalid main kernel size";
const std::vector<const HloInstruction*> level1 = GetInstructions(inner_last);
EXPECT_EQ(3, level1.size()) << "Invalid nested add+1 size";
auto buffers = RunBufferAssignment(module.get());
int64_t size0 = ValidateBuffers(level0, *buffers);
int64_t size1 = ValidateBuffers(level1, *buffers);
EXPECT_TRUE(BuffersDistinct(level0, level1, *buffers))
<< "Reuse between main kernel and embedded mapping.";
BufferAllocation param0_buffer = GetAssignedInputAllocation(*buffers, param0);
BufferAllocation map_buffer = GetAssignedOutputAllocation(*buffers, map);
EXPECT_NE(param0_buffer.index(), map_buffer.index());
EXPECT_EQ(HloOpcode::kAdd, inner_last->opcode());
const BufferAllocation& inner_add_buffer =
GetTopLevelAllocation(*buffers, inner_last);
EXPECT_NE(inner_add_buffer.index(), map_buffer.index());
LOG(INFO) << "LogicalBuffer count " << buffers->Allocations().size()
<< " for " << level0.size() + level1.size() << " instructions; "
<< "total buffer size " << size0 + size1;
}
TEST_F(BufferAssignmentTest, CannotReuseInputBufferOfReduce) {
auto module = CreateNewVerifiedModule();
auto reduce_computation =
module->AddEmbeddedComputation(BuildReduceComputation("f32+f32"));
auto builder = HloComputation::Builder(TestName());
auto param0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, f32a100x10_, "p"));
auto exp1 = builder.AddInstruction(
HloInstruction::CreateUnary(f32a100x10_, HloOpcode::kExp, param0));
auto exp2 = builder.AddInstruction(
HloInstruction::CreateUnary(f32a100x10_, HloOpcode::kExp, exp1));
auto const0 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(0.0f)));
auto reduce = builder.AddInstruction(HloInstruction::CreateReduce(
f32vec10_,
exp2,
const0,
{0}, reduce_computation));
auto exp3 = builder.AddInstruction(
HloInstruction::CreateUnary(f32vec10_, HloOpcode::kExp, reduce));
module->AddEntryComputation(builder.Build());
auto buffers = RunBufferAssignment(module.get());
const std::vector<const HloInstruction*> instrs = GetInstructions(exp3);
ValidateBuffers(instrs, *buffers);
const BufferAllocation& exp1_buffer = GetTopLevelAllocation(*buffers, exp1);
const BufferAllocation& exp2_buffer = GetTopLevelAllocation(*buffers, exp2);
const BufferAllocation& reduce_buffer =
GetTopLevelAllocation(*buffers, reduce);
EXPECT_EQ(exp1_buffer.index(), exp2_buffer.index());
EXPECT_NE(exp2_buffer.index(), reduce_buffer.index());
}
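// A while loop over an (s32, f32[4]) tuple. Expects buffer reuse between the
// entry computation, the condition, and the body, and that each element of
// the while result aliases the corresponding element of the body root.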
TEST_F(BufferAssignmentTest, ExampleWhile) {
auto module = CreateNewVerifiedModule();
auto condition_computation =
module->AddEmbeddedComputation(BuildWhileConditionComputation("if<4"));
auto body_computation =
module->AddEmbeddedComputation(BuildWhileBodyComputation("add-update"));
auto builder = HloComputation::Builder(TestName());
auto const3 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<int>(0)));
auto const4 = builder.AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR1<float>({1.1f, 2.2f, 3.3f, 4.4f})));
auto tuple =
builder.AddInstruction(HloInstruction::CreateTuple({const3, const4}));
auto while_op = builder.AddInstruction(HloInstruction::CreateWhile(
t_s32_f32v4_, condition_computation, body_computation, tuple));
module->AddEntryComputation(builder.Build());
const std::vector<const HloInstruction*> level0 = GetInstructions(while_op);
EXPECT_EQ(4, level0.size()) << "Invalid while kernel size";
const std::vector<const HloInstruction*> levelc =
GetInstructions(condition_computation->root_instruction());
EXPECT_EQ(4, levelc.size()) << "Invalid nested condition size";
const std::vector<const HloInstruction*> levelb =
GetInstructions(body_computation->root_instruction());
EXPECT_EQ(8, levelb.size()) << "Invalid nested body size";
auto buffers = RunBufferAssignment(module.get());
int64_t size0 = ValidateBuffers(level0, *buffers);
int64_t sizec = ValidateBuffers(levelc, *buffers);
int64_t sizeb = ValidateBuffers(levelb, *buffers);
EXPECT_FALSE(BuffersDistinct(level0, levelc, *buffers))
<< "Should be reuse between main kernel and embedded condition.";
EXPECT_FALSE(BuffersDistinct(levelb, levelc, *buffers))
<< "Should be reuse between embedded condition and body.";
EXPECT_FALSE(BuffersDistinct(level0, levelb, *buffers))
<< "Should be reuse between main kernel and embedded body.";
HloInstruction* body_root = body_computation->root_instruction();
EXPECT_EQ(HloOpcode::kTuple, body_root->opcode());
ShapeUtil::ForEachSubshape(
while_op->shape(),
[this, &buffers, while_op, body_root](const Shape& ,
const ShapeIndex& index) {
auto while_op_allocation = GetAllocation(*buffers, while_op, index);
auto body_root_allocation = GetAllocation(*buffers, body_root, index);
EXPECT_EQ(while_op_allocation.index(), body_root_allocation.index());
});
LOG(INFO) << "LogicalBuffer count " << buffers->Allocations().size()
<< " for " << level0.size() + levelc.size() + levelb.size()
<< " instructions; total buffer size " << size0 + sizec + sizeb;
}
TEST_F(BufferAssignmentTest, ExampleConditional) {
auto module = CreateNewVerifiedModule();
auto true_computation = module->AddEmbeddedComputation(
BuildR0F32UnaryOpComputation(HloOpcode::kCeil, "Ceil"));
auto false_computation = module->AddEmbeddedComputation(
BuildR0F32UnaryOpComputation(HloOpcode::kFloor, "Floor"));
auto builder = HloComputation::Builder(TestName());
auto pred = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(false)));
auto const1 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(56.4f)));
auto const2 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(12.4f)));
auto conditional = builder.AddInstruction(HloInstruction::CreateConditional(
r0f32_, pred, const1, true_computation, const2, false_computation));
module->AddEntryComputation(builder.Build());
const std::vector<const HloInstruction*> conditional_instrs =
GetInstructions(conditional);
const std::vector<const HloInstruction*> true_instrs =
GetInstructions(true_computation->root_instruction());
const std::vector<const HloInstruction*> false_instrs =
GetInstructions(false_computation->root_instruction());
EXPECT_EQ(4, conditional_instrs.size());
EXPECT_EQ(2, true_instrs.size());
EXPECT_EQ(2, false_instrs.size());
auto buffers = RunBufferAssignment(module.get());
ValidateBuffers(conditional_instrs, *buffers);
ValidateBuffers(true_instrs, *buffers);
ValidateBuffers(false_instrs, *buffers);
EXPECT_FALSE(BuffersDistinct(conditional_instrs, true_instrs, *buffers))
<< "Should be reuse between conditional and true computation.";
EXPECT_FALSE(BuffersDistinct(conditional_instrs, false_instrs, *buffers))
<< "Should be reuse between conditional and false computation.";
EXPECT_FALSE(BuffersDistinct(true_instrs, false_instrs, *buffers))
<< "Should be reuse between true and false computations.";
const BufferAllocation& conditional_buffer =
GetTopLevelAllocation(*buffers, conditional);
const BufferAllocation& true_buffer =
GetTopLevelAllocation(*buffers, true_computation->root_instruction());
const BufferAllocation& false_buffer =
GetTopLevelAllocation(*buffers, false_computation->root_instruction());
EXPECT_EQ(conditional_buffer.size(), true_buffer.size());
EXPECT_EQ(conditional_buffer.size(), false_buffer.size());
}
TEST_F(BufferAssignmentTest, UnaryOpReuseChain) {
auto builder = HloComputation::Builder(TestName());
auto param0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, f32vec100_, "p"));
auto exp1 = builder.AddInstruction(
HloInstruction::CreateUnary(f32vec100_, HloOpcode::kExp, param0));
auto tanh = builder.AddInstruction(
HloInstruction::CreateUnary(f32vec100_, HloOpcode::kTanh, exp1));
auto exp2 = builder.AddInstruction(
HloInstruction::CreateUnary(f32vec100_, HloOpcode::kExp, tanh));
auto neg = builder.AddInstruction(
HloInstruction::CreateUnary(f32vec100_, HloOpcode::kNegate, exp2));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
auto assignment = RunBufferAssignment(module.get());
EXPECT_TRUE(assignment->HasTopLevelAllocation(exp1));
auto& buffer_for_exp1 = GetTopLevelAllocation(*assignment, exp1);
EXPECT_EQ(buffer_for_exp1, GetTopLevelAllocation(*assignment, tanh));
EXPECT_EQ(buffer_for_exp1, GetTopLevelAllocation(*assignment, exp2));
EXPECT_EQ(buffer_for_exp1, GetTopLevelAllocation(*assignment, neg));
}
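// The negate's live range ends at the slice, so its value can share an
// allocation with the broadcast even though negate is not an operand of the
// broadcast; the slice itself must stay in a separate allocation.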
TEST_F(BufferAssignmentTest, ReuseNonOperandBuffer) {
auto builder = HloComputation::Builder(TestName());
auto param0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, f32vec100_, "param0"));
auto negate = builder.AddInstruction(
HloInstruction::CreateUnary(f32vec100_, HloOpcode::kNegate, param0));
auto slice = builder.AddInstruction(
HloInstruction::CreateSlice(f32vec10_, negate, {0}, {10}, {1}));
auto broadcast = builder.AddInstruction(
HloInstruction::CreateBroadcast(f32a100x10_, slice, {1}));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
auto assignment = RunBufferAssignment(module.get());
EXPECT_TRUE(assignment->HasTopLevelAllocation(broadcast));
auto& buffer_for_bcast = GetTopLevelAllocation(*assignment, broadcast);
EXPECT_EQ(buffer_for_bcast, GetTopLevelAllocation(*assignment, negate));
EXPECT_NE(buffer_for_bcast, GetTopLevelAllocation(*assignment, slice));
}
TEST_F(BufferAssignmentTest, NoReuseLiveBuffer) {
auto builder = HloComputation::Builder(TestName());
auto param0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, f32vec100_, "param0"));
auto negate = builder.AddInstruction(
HloInstruction::CreateUnary(f32vec100_, HloOpcode::kNegate, param0));
auto slice = builder.AddInstruction(
HloInstruction::CreateSlice(f32vec10_, negate, {0}, {10}, {1}));
auto broadcast = builder.AddInstruction(
HloInstruction::CreateBroadcast(f32a100x10_, slice, {1}));
builder.AddInstruction(HloInstruction::CreateTuple({negate, broadcast}));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
auto assignment_orig = RunBufferAssignment(module.get());
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<BufferAssignment> assignment,
ConvertToProtoAndBack(assignment_orig.get(), module.get()));
EXPECT_NE(GetTopLevelAllocation(*assignment, broadcast),
GetTopLevelAllocation(*assignment, negate));
EXPECT_NE(GetTopLevelAllocation(*assignment, broadcast),
GetTopLevelAllocation(*assignment, slice));
EXPECT_NE(GetTopLevelAllocation(*assignment, negate),
GetTopLevelAllocation(*assignment, slice));
}
TEST_F(BufferAssignmentTest, NoReuseAliasedBuffer) {
auto builder = HloComputation::Builder(TestName());
auto param0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, f32vec100_, "param0"));
auto negate = builder.AddInstruction(
HloInstruction::CreateUnary(f32vec100_, HloOpcode::kNegate, param0));
auto tuple = builder.AddInstruction(HloInstruction::CreateTuple({negate}));
auto tuple_element = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(f32vec100_, tuple, 0));
auto slice = builder.AddInstruction(
HloInstruction::CreateSlice(f32vec10_, tuple_element, {0}, {10}, {1}));
auto broadcast = builder.AddInstruction(
HloInstruction::CreateBroadcast(f32a100x10_, slice, {1}));
builder.AddInstruction(HloInstruction::CreateTuple({tuple, broadcast}));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
auto assignment_orig = RunBufferAssignment(module.get());
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<BufferAssignment> assignment,
ConvertToProtoAndBack(assignment_orig.get(), module.get()));
EXPECT_NE(GetTopLevelAllocation(*assignment, broadcast),
GetTopLevelAllocation(*assignment, negate));
EXPECT_NE(GetTopLevelAllocation(*assignment, broadcast),
GetTopLevelAllocation(*assignment, slice));
EXPECT_NE(GetTopLevelAllocation(*assignment, negate),
GetTopLevelAllocation(*assignment, slice));
}
TEST_F(BufferAssignmentTest, DoNotReuseOversizedOutputBuffer) {
auto builder = HloComputation::Builder(TestName());
auto param0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, f32vec100_, "param0"));
auto negate = builder.AddInstruction(
HloInstruction::CreateUnary(f32vec100_, HloOpcode::kNegate, param0));
auto slice = builder.AddInstruction(
HloInstruction::CreateSlice(f32vec10_, negate, {0}, {10}, {1}));
auto broadcast = builder.AddInstruction(HloInstruction::CreateBroadcast(
ShapeUtil::MakeShape(F32, {10, 4}), slice, {0}));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
auto assignment = RunBufferAssignment(module.get());
EXPECT_NE(GetTopLevelAllocation(*assignment, broadcast),
GetTopLevelAllocation(*assignment, negate));
EXPECT_NE(GetTopLevelAllocation(*assignment, broadcast),
GetTopLevelAllocation(*assignment, slice));
}
TEST_F(BufferAssignmentTest, ReuseOutputBufferIfExactlySized) {
auto builder = HloComputation::Builder(TestName());
auto param0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, f32vec100_, "param0"));
auto negate = builder.AddInstruction(
HloInstruction::CreateUnary(f32vec100_, HloOpcode::kNegate, param0));
auto slice = builder.AddInstruction(
HloInstruction::CreateSlice(f32vec10_, negate, {0}, {10}, {1}));
auto broadcast = builder.AddInstruction(HloInstruction::CreateBroadcast(
ShapeUtil::MakeShape(F32, {10, 10}), slice, {0}));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
auto assignment = RunBufferAssignment(module.get());
EXPECT_TRUE(assignment->HasTopLevelAllocation(broadcast));
auto& buffer_for_bcast = GetTopLevelAllocation(*assignment, broadcast);
EXPECT_EQ(buffer_for_bcast, GetTopLevelAllocation(*assignment, negate));
EXPECT_NE(buffer_for_bcast, GetTopLevelAllocation(*assignment, slice));
}
TEST_F(BufferAssignmentTest, DoNotReuseOversizedOutputBufferInTuple) {
auto builder = HloComputation::Builder(TestName());
auto param0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, f32vec100_, "param0"));
auto negate = builder.AddInstruction(
HloInstruction::CreateUnary(f32vec100_, HloOpcode::kNegate, param0));
auto slice = builder.AddInstruction(
HloInstruction::CreateSlice(f32vec10_, negate, {0}, {10}, {1}));
auto broadcast = builder.AddInstruction(HloInstruction::CreateBroadcast(
ShapeUtil::MakeShape(F32, {10, 4}), slice, {0}));
builder.AddInstruction(HloInstruction::CreateTuple({broadcast}));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
auto assignment = RunBufferAssignment(module.get());
EXPECT_NE(GetTopLevelAllocation(*assignment, broadcast),
GetTopLevelAllocation(*assignment, negate));
EXPECT_NE(GetTopLevelAllocation(*assignment, broadcast),
GetTopLevelAllocation(*assignment, slice));
}
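// Buffers inside the mapped computation must be thread-local and not live
// out, the called computation's parameter shares the entry parameter's
// allocation, and the map output may be live out.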
TEST_F(BufferAssignmentTest, EmbeddedComputationBuffers) {
auto module = CreateNewVerifiedModule();
auto vec_shape = ShapeUtil::MakeShape(F32, {42});
auto scalar_shape = ShapeUtil::MakeShape(F32, {});
auto map_builder = HloComputation::Builder(TestName() + "_map");
auto map_param = map_builder.AddInstruction(
HloInstruction::CreateParameter(0, scalar_shape, "map_param"));
auto map_root = map_builder.AddInstruction(
HloInstruction::CreateUnary(scalar_shape, HloOpcode::kNegate, map_param));
auto map_computation = module->AddEmbeddedComputation(map_builder.Build());
auto call_builder = HloComputation::Builder(TestName() + "_call");
auto call_param = call_builder.AddInstruction(
HloInstruction::CreateParameter(0, vec_shape, "vec_param"));
auto call_root = call_builder.AddInstruction(
HloInstruction::CreateUnary(vec_shape, HloOpcode::kExp, call_param));
auto call_computation = module->AddEmbeddedComputation(call_builder.Build());
auto builder = HloComputation::Builder(TestName());
auto param = builder.AddInstruction(
HloInstruction::CreateParameter(0, vec_shape, "param"));
auto call = builder.AddInstruction(
HloInstruction::CreateCall(vec_shape, {param}, call_computation));
auto map = builder.AddInstruction(
HloInstruction::CreateMap(vec_shape, {call}, map_computation));
module->AddEntryComputation(builder.Build());
auto assignment = RunBufferAssignment(module.get());
auto& map_param_alloc = GetTopLevelAllocation(*assignment, map_param);
EXPECT_FALSE(map_param_alloc.is_entry_computation_parameter());
EXPECT_FALSE(map_param_alloc.maybe_live_out());
EXPECT_TRUE(map_param_alloc.is_thread_local());
auto& map_root_alloc = GetTopLevelAllocation(*assignment, map_root);
EXPECT_FALSE(map_root_alloc.is_entry_computation_parameter());
EXPECT_FALSE(map_root_alloc.maybe_live_out());
EXPECT_TRUE(map_root_alloc.is_thread_local());
auto& call_param_alloc = GetTopLevelAllocation(*assignment, call_param);
EXPECT_TRUE(call_param_alloc.is_entry_computation_parameter());
EXPECT_FALSE(call_param_alloc.maybe_live_out());
EXPECT_FALSE(call_param_alloc.is_thread_local());
auto& call_root_alloc = GetTopLevelAllocation(*assignment, call_root);
EXPECT_FALSE(call_root_alloc.is_entry_computation_parameter());
EXPECT_FALSE(call_root_alloc.is_thread_local());
auto& param_alloc = GetTopLevelAllocation(*assignment, param);
EXPECT_TRUE(param_alloc.is_entry_computation_parameter());
EXPECT_FALSE(param_alloc.maybe_live_out());
EXPECT_FALSE(param_alloc.is_thread_local());
auto& map_alloc = GetTopLevelAllocation(*assignment, map);
EXPECT_FALSE(map_alloc.is_entry_computation_parameter());
EXPECT_TRUE(map_alloc.maybe_live_out());
EXPECT_FALSE(map_alloc.is_thread_local());
}
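// A computation attached to a custom-call is treated like any other embedded
// computation: its parameter and root buffers are thread-local and not
// live-out.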
TEST_F(BufferAssignmentTest, CustomCallEmbeddedComputationBuffers) {
auto module = CreateNewVerifiedModule();
auto scalar_shape = ShapeUtil::MakeShape(F32, {});
auto map_builder = HloComputation::Builder(TestName() + "_map");
auto map_param = map_builder.AddInstruction(
HloInstruction::CreateParameter(0, scalar_shape, "map_param"));
auto map_root = map_builder.AddInstruction(
HloInstruction::CreateUnary(scalar_shape, HloOpcode::kNegate, map_param));
auto map_computation = module->AddEmbeddedComputation(map_builder.Build());
auto builder = HloComputation::Builder(TestName());
auto param = builder.AddInstruction(
HloInstruction::CreateParameter(0, scalar_shape, "param"));
builder.AddInstruction(HloInstruction::CreateCustomCall(
scalar_shape, {param}, map_computation, "call_name"));
module->AddEntryComputation(builder.Build());
auto assignment = RunBufferAssignment(module.get());
auto& map_param_alloc = GetTopLevelAllocation(*assignment, map_param);
EXPECT_FALSE(map_param_alloc.is_entry_computation_parameter());
EXPECT_FALSE(map_param_alloc.maybe_live_out());
EXPECT_TRUE(map_param_alloc.is_thread_local());
auto& map_root_alloc = GetTopLevelAllocation(*assignment, map_root);
EXPECT_FALSE(map_root_alloc.is_entry_computation_parameter());
EXPECT_FALSE(map_root_alloc.maybe_live_out());
EXPECT_TRUE(map_root_alloc.is_thread_local());
}
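// A tuple-shaped entry parameter that is also the root: every subshape gets an
// allocation (4 in total), each marked as an entry parameter and
// maybe-live-out.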
TEST_F(BufferAssignmentTest, TupleParameterAsOutput) {
auto builder = HloComputation::Builder(TestName());
auto tuple_param = builder.AddInstruction(HloInstruction::CreateParameter(
0,
ShapeUtil::MakeTupleShape({ShapeUtil::MakeShape(PRED, {1, 2, 3, 4}),
ShapeUtil::MakeShape(F32, {}),
ShapeUtil::MakeShape(S32, {42})}),
"param0"));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
auto assignment = RunBufferAssignment(module.get());
EXPECT_EQ(4, assignment->Allocations().size());
ShapeUtil::ForEachSubshape(
tuple_param->shape(),
      [this, &assignment, tuple_param](const Shape& /*subshape*/,
const ShapeIndex& index) {
auto allocation = GetAllocation(*assignment, tuple_param, index);
EXPECT_TRUE(allocation.is_entry_computation_parameter());
EXPECT_EQ(0, allocation.parameter_number());
EXPECT_TRUE(allocation.maybe_live_out());
});
}
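// Returning a get-tuple-element of a nested tuple parameter: only the
// extracted subtree is maybe-live-out, and the GTE shares its allocations with
// the corresponding parameter subshapes.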
TEST_F(BufferAssignmentTest, ElementOfNestedTupleParameterAsOutput) {
auto builder = HloComputation::Builder(TestName());
auto tuple_param = builder.AddInstruction(HloInstruction::CreateParameter(
0,
ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeShape(PRED, {1, 2, 3, 4}),
ShapeUtil::MakeTupleShape({ShapeUtil::MakeShape(S32, {42}),
ShapeUtil::MakeShape(S32, {101})})}),
"param0"));
auto tuple_element =
builder.AddInstruction(HloInstruction::CreateGetTupleElement(
ShapeUtil::GetSubshape(tuple_param->shape(), {1}), tuple_param, 1));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
auto assignment = RunBufferAssignment(module.get());
EXPECT_FALSE(
GetAllocation(*assignment, tuple_param, {}).maybe_live_out());
EXPECT_TRUE(
GetAllocation(*assignment, tuple_param, {1}).maybe_live_out());
EXPECT_TRUE(GetAllocation(*assignment, tuple_param, {1, 0})
.maybe_live_out());
EXPECT_TRUE(GetAllocation(*assignment, tuple_param, {1, 1})
.maybe_live_out());
EXPECT_TRUE(
GetTopLevelAllocation(*assignment, tuple_element).maybe_live_out());
EXPECT_EQ(GetAllocation(*assignment, tuple_param, {1, 0}),
GetAllocation(*assignment, tuple_element, {0}));
EXPECT_EQ(GetAllocation(*assignment, tuple_param, {1, 1}),
GetAllocation(*assignment, tuple_element, {1}));
EXPECT_EQ(GetAllocation(*assignment, tuple_param, {1}),
GetTopLevelAllocation(*assignment, tuple_element));
}
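// A tuple constant as the root produces one allocation per subshape,
// including the top-level tuple (3 in total).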
TEST_F(BufferAssignmentTest, TupleConstantAsOutput) {
auto builder = HloComputation::Builder(TestName());
Literal elements[] = {LiteralUtil::CreateR0<int64_t>(0),
LiteralUtil::CreateR0<int64_t>(1)};
builder.AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::MakeTuple({&elements[0], &elements[1]})));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
auto assignment = RunBufferAssignment(module.get());
EXPECT_EQ(3, assignment->Allocations().size());
}
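// A tuple-shaped custom-call as the root: all three allocations (the top-level
// tuple plus both elements) are maybe-live-out.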
TEST_F(BufferAssignmentTest, TupleCustomCallAsOutput) {
auto builder = HloComputation::Builder(TestName());
auto custom_call = builder.AddInstruction(HloInstruction::CreateCustomCall(
ShapeUtil::MakeTupleShape({ShapeUtil::MakeShape(PRED, {1, 2, 3, 4}),
ShapeUtil::MakeShape(S32, {101})}),
{}, "foo_function"));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
auto assignment = RunBufferAssignment(module.get());
EXPECT_EQ(3, assignment->Allocations().size());
EXPECT_TRUE(
GetAllocation(*assignment, custom_call, {}).maybe_live_out());
EXPECT_TRUE(
GetAllocation(*assignment, custom_call, {0}).maybe_live_out());
EXPECT_TRUE(
GetAllocation(*assignment, custom_call, {1}).maybe_live_out());
}
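// output_to_operand_aliasing on a custom-call makes its result share the
// top-level slice with the aliased operand.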
TEST_F(BufferAssignmentTest, CustomCallAliasedBuffer) {
const char* const kModuleString = R"(
HloModule xla_computation_f
ENTRY xla_computation_f {
parameter.1 = f32[2,3,4,5] parameter(0)
parameter.2 = f32[2,3,4,5] parameter(1)
add = f32[2,3,4,5] add(parameter.1, parameter.2)
ROOT custom-call = f32[2,3,4,5] custom-call(add, parameter.2), custom_call_target="dm_softmax", operand_layout_constraints={f32[2,3,4,5], f32[2,3,4,5]}, output_to_operand_aliasing={{}: (0, {})}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<xla::HloModule> module,
ParseAndReturnUnverifiedModule(kModuleString));
std::unique_ptr<BufferAssignment> assignment =
RunBufferAssignment(module.get());
HloInstruction* custom_call = module->entry_computation()->root_instruction();
EXPECT_TRUE(
assignment->SharesTopLevelSlice(custom_call, custom_call->operand(0)));
}
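// A call returning a tuple: the call's allocations alias the callee's tuple
// and parameter buffers, and the entry parameter aliases the callee parameter.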
TEST_F(BufferAssignmentTest, TupleCallAsOutput) {
auto module = CreateNewVerifiedModule();
auto elem_shape = f32vec4_;
auto tuple_shape = ShapeUtil::MakeTupleShape({elem_shape});
auto sub_builder = HloComputation::Builder(TestName() + "_sub");
auto sub_param = sub_builder.AddInstruction(
HloInstruction::CreateParameter(0, elem_shape, "sub_param"));
auto sub_tuple =
sub_builder.AddInstruction(HloInstruction::CreateTuple({sub_param}));
auto sub_computation = module->AddEmbeddedComputation(sub_builder.Build());
auto builder = HloComputation::Builder(TestName());
auto param = builder.AddInstruction(
HloInstruction::CreateParameter(0, elem_shape, "param"));
auto call = builder.AddInstruction(
HloInstruction::CreateCall(tuple_shape, {param}, sub_computation));
module->AddEntryComputation(builder.Build());
auto assignment = RunBufferAssignment(module.get());
EXPECT_EQ(2, assignment->Allocations().size());
EXPECT_EQ(GetAllocation(*assignment, call, {}),
GetAllocation(*assignment, sub_tuple, {}));
EXPECT_EQ(GetAllocation(*assignment, call, {0}),
GetAllocation(*assignment, sub_param, {}));
EXPECT_NE(GetTopLevelAllocation(*assignment, param),
GetTopLevelAllocation(*assignment, sub_tuple));
EXPECT_EQ(GetTopLevelAllocation(*assignment, param),
GetTopLevelAllocation(*assignment, sub_param));
}
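// Chained calls a -> b -> c -> d returning the same tuple: the call results
// and the innermost parameter share allocations, while the entry parameter
// stays distinct from every callee parameter.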
TEST_F(BufferAssignmentTest, TupleChainedCallAsOutput) {
auto module = CreateNewVerifiedModule();
auto elem_shape = f32vec4_;
auto tuple_shape = ShapeUtil::MakeTupleShape({elem_shape});
auto d_builder = HloComputation::Builder(TestName() + "_d");
auto d_param = d_builder.AddInstruction(
HloInstruction::CreateParameter(0, tuple_shape, "d_param"));
auto d_computation = d_builder.Build();
auto c_builder = HloComputation::Builder(TestName() + "_c");
auto c_param = c_builder.AddInstruction(
HloInstruction::CreateParameter(0, tuple_shape, "c_param"));
auto c_call = c_builder.AddInstruction(
HloInstruction::CreateCall(tuple_shape, {c_param}, d_computation.get()));
auto c_computation = c_builder.Build();
auto b_builder = HloComputation::Builder(TestName() + "_b");
auto b_param = b_builder.AddInstruction(
HloInstruction::CreateParameter(0, tuple_shape, "b_param"));
auto b_call = b_builder.AddInstruction(
HloInstruction::CreateCall(tuple_shape, {b_param}, c_computation.get()));
auto b_computation = b_builder.Build();
auto a_builder = HloComputation::Builder(TestName());
auto a_param = a_builder.AddInstruction(
HloInstruction::CreateParameter(0, elem_shape, "param"));
auto a_tuple =
a_builder.AddInstruction(HloInstruction::CreateTuple({a_param}));
auto a_call = a_builder.AddInstruction(
HloInstruction::CreateCall(tuple_shape, {a_tuple}, b_computation.get()));
auto a_computation = a_builder.Build();
module->AddEmbeddedComputation(std::move(d_computation));
module->AddEmbeddedComputation(std::move(c_computation));
module->AddEntryComputation(std::move(a_computation));
module->AddEmbeddedComputation(std::move(b_computation));
auto assignment = RunBufferAssignment(module.get());
EXPECT_EQ(GetAllocation(*assignment, a_call, {}),
GetAllocation(*assignment, b_call, {}));
EXPECT_EQ(GetAllocation(*assignment, b_call, {}),
GetAllocation(*assignment, c_call, {}));
EXPECT_EQ(GetAllocation(*assignment, c_call, {}),
GetAllocation(*assignment, d_param, {}));
EXPECT_EQ(GetAllocation(*assignment, a_call, {0}),
GetAllocation(*assignment, b_call, {0}));
EXPECT_EQ(GetAllocation(*assignment, b_call, {0}),
GetAllocation(*assignment, c_call, {0}));
EXPECT_EQ(GetAllocation(*assignment, c_call, {0}),
GetAllocation(*assignment, d_param, {0}));
EXPECT_TRUE(BuffersDistinct({a_param}, {b_param}, *assignment));
EXPECT_TRUE(BuffersDistinct({a_param}, {c_param}, *assignment));
EXPECT_TRUE(BuffersDistinct({a_param}, {d_param}, *assignment));
EXPECT_EQ(GetAllocation(*assignment, b_param, {0}),
GetAllocation(*assignment, c_param, {0}));
EXPECT_EQ(GetAllocation(*assignment, c_param, {0}),
GetAllocation(*assignment, d_param, {0}));
}
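// A bitcast of a parameter as the root reuses the parameter's allocation, so
// only a single allocation is created.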
TEST_F(BufferAssignmentTest, BitcastAsOutput) {
auto builder = HloComputation::Builder(TestName());
auto param = builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(F32, {42}), "param"));
auto bitcast = builder.AddInstruction(
HloInstruction::CreateBitcast(param->shape(), param));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
auto assignment = RunBufferAssignment(module.get());
EXPECT_EQ(1, assignment->Allocations().size());
EXPECT_EQ(GetTopLevelAllocation(*assignment, param),
GetTopLevelAllocation(*assignment, bitcast));
}
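// A copy of a tuple element must not reuse the tuple's buffer. The assignment
// is also round-tripped through its proto representation.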
TEST_F(BufferAssignmentTest, TupleBufferNotReused) {
auto builder = HloComputation::Builder(TestName());
auto scalar_shape = ShapeUtil::MakeShape(F32, {});
auto param = builder.AddInstruction(
HloInstruction::CreateParameter(0, scalar_shape, "param0"));
auto tuple = builder.AddInstruction(HloInstruction::CreateTuple({param}));
auto tuple_element = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_shape, tuple, 0));
auto copy = builder.AddInstruction(HloInstruction::CreateUnary(
scalar_shape, HloOpcode::kCopy, tuple_element));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
auto assignment_orig = RunBufferAssignment(module.get());
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<BufferAssignment> assignment,
ConvertToProtoAndBack(assignment_orig.get(), module.get()));
EXPECT_EQ(3, assignment->Allocations().size());
EXPECT_NE(GetTopLevelAllocation(*assignment, tuple),
GetTopLevelAllocation(*assignment, copy));
}
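// Two dots feeding a concatenate share a single preallocated temp allocation
// with distinct slices; verifies slice sizes and offsets for alignments of
// 1 and 64 bytes.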
TEST_F(BufferAssignmentTest, OneTempAllocation) {
auto builder = HloComputation::Builder(TestName());
Shape shape_2x3 = ShapeUtil::MakeShape(F32, {2, 3});
Shape shape_2x4 = ShapeUtil::MakeShape(F32, {2, 4});
Shape shape_3x4 = ShapeUtil::MakeShape(F32, {3, 4});
Shape shape_4x4 = ShapeUtil::MakeShape(F32, {4, 4});
Shape shape_5x4 = ShapeUtil::MakeShape(F32, {5, 4});
auto param_a = builder.AddInstruction(
HloInstruction::CreateParameter(0, shape_2x3, "param_a"));
auto param_b = builder.AddInstruction(
HloInstruction::CreateParameter(1, shape_3x4, "param_b"));
auto param_c = builder.AddInstruction(
HloInstruction::CreateParameter(2, shape_4x4, "param_c"));
DotDimensionNumbers dot_dnums;
dot_dnums.add_lhs_contracting_dimensions(1);
dot_dnums.add_rhs_contracting_dimensions(0);
PrecisionConfig precision_config;
precision_config.mutable_operand_precision()->Resize(
2, PrecisionConfig::DEFAULT);
auto dot_ab = builder.AddInstruction(HloInstruction::CreateDot(
shape_2x4, param_a, param_b, dot_dnums, precision_config));
auto dot_bc = builder.AddInstruction(HloInstruction::CreateDot(
shape_3x4, param_b, param_c, dot_dnums, precision_config));
builder.AddInstruction(
HloInstruction::CreateConcatenate(shape_5x4, {dot_ab, dot_bc}, 0));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
auto assignment = RunBufferAssignment(module.get(), 1);
EXPECT_EQ(5, assignment->Allocations().size());
BufferAllocation::Slice slice_ab =
assignment->GetUniqueTopLevelSlice(dot_ab).value();
BufferAllocation::Slice slice_bc =
assignment->GetUniqueTopLevelSlice(dot_bc).value();
EXPECT_EQ(slice_ab.allocation(), slice_bc.allocation());
EXPECT_NE(slice_ab, slice_bc);
EXPECT_EQ(32, slice_ab.size());
EXPECT_EQ(48, slice_bc.size());
EXPECT_EQ(80, slice_ab.allocation()->size());
EXPECT_EQ(80, slice_bc.allocation()->size());
assignment = RunBufferAssignment(module.get(), 64);
EXPECT_EQ(5, assignment->Allocations().size());
slice_ab = assignment->GetUniqueTopLevelSlice(dot_ab).value();
slice_bc = assignment->GetUniqueTopLevelSlice(dot_bc).value();
EXPECT_EQ(slice_ab.allocation(), slice_bc.allocation());
EXPECT_NE(slice_ab, slice_bc);
EXPECT_EQ(32, slice_ab.size());
EXPECT_EQ(48, slice_bc.size());
if (slice_ab.offset() == 0) {
EXPECT_EQ(64, slice_bc.offset());
EXPECT_EQ(64 + 48, slice_ab.allocation()->size());
EXPECT_EQ(64 + 48, slice_bc.allocation()->size());
} else {
EXPECT_EQ(64, slice_ab.offset());
EXPECT_EQ(0, slice_bc.offset());
EXPECT_EQ(64 + 32, slice_ab.allocation()->size());
EXPECT_EQ(64 + 32, slice_bc.allocation()->size());
}
}
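// The temp allocation reused along the mul -> add -> sub chain reports a
// single peak logical buffer: the value defined by 'sub'.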
TEST_F(BufferAssignmentTest, TrivialPeakBuffers) {
auto builder = HloComputation::Builder(TestName());
auto paramscalar =
builder.AddInstruction(HloInstruction::CreateParameter(0, r0f32_, "p"));
auto broadcast = builder.AddInstruction(
HloInstruction::CreateBroadcast(f32vec100_, paramscalar, {}));
auto param0 = builder.AddInstruction(
HloInstruction::CreateParameter(1, f32vec100_, "p1"));
auto param1 = builder.AddInstruction(
HloInstruction::CreateParameter(2, f32vec100_, "p2"));
auto mul = builder.AddInstruction(HloInstruction::CreateBinary(
f32vec100_, HloOpcode::kMultiply, broadcast, param0));
auto add = builder.AddInstruction(
HloInstruction::CreateBinary(f32vec100_, HloOpcode::kAdd, mul, param1));
auto sub = builder.AddInstruction(HloInstruction::CreateBinary(
f32vec100_, HloOpcode::kSubtract, add, param1));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
auto buffers = RunBufferAssignment(module.get());
const BufferAllocation& mul_buffer = GetTopLevelAllocation(*buffers, mul);
const std::vector<const HloValue*>& peak_buffers =
mul_buffer.PeakMemoryLogicalBuffers();
ASSERT_EQ(peak_buffers.size(), 1);
EXPECT_EQ(peak_buffers[0]->instruction(), sub);
}
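// With an explicit instruction sequence, the concatenate's temp allocation
// holds four logical buffers, and 'rev', 'neg', and 'concat' are live
// simultaneously at its memory peak.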
TEST_F(BufferAssignmentTest, PeakBuffers) {
auto builder = HloComputation::Builder(TestName());
auto param = builder.AddInstruction(
HloInstruction::CreateParameter(0, f32vec100_, "p"));
auto log = builder.AddInstruction(
HloInstruction::CreateUnary(f32vec100_, HloOpcode::kLog, param));
auto rev = builder.AddInstruction(
HloInstruction::CreateReverse(f32vec100_, log, {0}));
auto neg = builder.AddInstruction(
HloInstruction::CreateUnary(f32vec100_, HloOpcode::kNegate, param));
const Shape concat_shape = ShapeUtil::MakeShape(F32, {200});
auto concat = builder.AddInstruction(
HloInstruction::CreateConcatenate(concat_shape, {rev, neg}, 0));
auto root = builder.AddInstruction(HloInstruction::CreateSlice(
ShapeUtil::MakeShape(F32, {1}), concat, {0}, {1}, {1}));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
auto buffers = RunBufferAssignmentWithInstructionSequence(
module.get(), {param, log, rev, neg, concat, root});
const BufferAllocation& buffer = GetTopLevelAllocation(*buffers, concat);
EXPECT_FALSE(buffer.IsInputOrOutput());
EXPECT_TRUE(buffer.IsPreallocatedTempBuffer());
ASSERT_EQ(buffer.assigned_buffers().size(), 4);
const std::vector<const HloValue*>& peak_buffers =
buffer.PeakMemoryLogicalBuffers();
ASSERT_EQ(peak_buffers.size(), 3);
std::vector<const HloInstruction*> peak_instructions;
for (const HloValue* logical_buffer : peak_buffers) {
peak_instructions.push_back(logical_buffer->instruction());
}
EXPECT_THAT(peak_instructions, UnorderedElementsAre(rev, neg, concat));
}
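// Values aliased through a while loop must not be double-counted: only one
// array-shaped value may appear in the peak buffers of the shared allocation.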
TEST_F(BufferAssignmentTest, AliasedBuffersShouldntCoexistInPeakBuffers) {
std::string hlo_text = R"(
HloModule test_module, is_scheduled=true
cond {
param = (s32[], s32[]) parameter(0)
ROOT constant = pred[] constant(true)
}
body {
param.0 = (s32[], s32[]) parameter(0)
gte = s32[] get-tuple-element(param.0), index=0
add = s32[] add(gte, gte)
ROOT tuple = (s32[], s32[]) tuple(add, add)
}
ENTRY test_module {
param.3 = s32[] parameter(0)
copy = s32[] copy(param.3)
tuple = (s32[], s32[]) tuple(copy, copy)
while = (s32[], s32[]) while(tuple), condition=cond, body=body
gte = s32[] get-tuple-element(while), index=0
ROOT negate = s32[] negate(gte)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_text));
auto assignment = RunBufferAssignmentWithSequentialOrdering(module.get());
const BufferAllocation& buffer =
GetTopLevelAllocation(*assignment, FindInstruction(module.get(), "copy"));
const std::vector<const HloValue*>& peak_buffers =
buffer.PeakMemoryLogicalBuffers();
int num_peak_buffers = 0;
for (const HloValue* peak_buffer : peak_buffers) {
if (peak_buffer->shape().IsArray()) {
++num_peak_buffers;
}
}
EXPECT_EQ(num_peak_buffers, 1);
}
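// Chained dynamic-update-slices update the loop-state element in place, so
// both share the allocation of the tuple element they update.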
TEST_F(BufferAssignmentTest, InPlaceBuffer) {
const char* hlo_text = R"(
HloModule Module
ENTRY main {
state = (s32[], f32[1280,1,128]{2,1,0}) parameter(0)
constant.1 = f32[] constant(0)
broadcast.6 = f32[128,1,128]{2,1,0} broadcast(constant.1), dimensions={}
get-tuple-element.4 = f32[1280,1,128]{2,1,0} get-tuple-element(state), index=1
get-tuple-element.3 = s32[] get-tuple-element(state), index=0
constant.2 = s32[] constant(128)
add.5 = s32[] add(get-tuple-element.3, constant.2)
constant.3 = s32[] constant(0)
dynamic-update-slice.5 = f32[1280,1,128]{2,1,0} dynamic-update-slice(get-tuple-element.4, broadcast.6, constant.3, constant.3, constant.3)
dynamic-update-slice.9 = f32[1280,1,128]{2,1,0} dynamic-update-slice(dynamic-update-slice.5, broadcast.6, constant.3, constant.3, constant.3)
ROOT tuple.85 = (s32[], f32[1280,1,128]{2,1,0}) tuple(add.5, dynamic-update-slice.9)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(hlo_text));
HloInstruction* parameter =
m->entry_computation()->GetInstructionWithName("get-tuple-element.4");
HloInstruction* dus1 =
m->entry_computation()->GetInstructionWithName("dynamic-update-slice.5");
HloInstruction* dus2 =
m->entry_computation()->GetInstructionWithName("dynamic-update-slice.9");
auto buffers = RunBufferAssignment(m.get());
{
const BufferAllocation& parameter_alloc =
GetTopLevelAllocation(*buffers, parameter);
const BufferAllocation& dus1_alloc = GetTopLevelAllocation(*buffers, dus1);
EXPECT_EQ(parameter_alloc, dus1_alloc);
const BufferAllocation& dus2_alloc = GetTopLevelAllocation(*buffers, dus2);
EXPECT_EQ(parameter_alloc, dus2_alloc);
}
}
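// Constant allocations must not be reused for the outputs of copies or
// conditionals.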
TEST_F(BufferAssignmentTest, ConstantBuffersAreNotReused) {
const char* hlo_text = R"(
HloModule Module
True {
ROOT x.0.1 = f32[] parameter(0)
}
False {
x.0.0 = f32[] parameter(0)
ROOT copy.1 = f32[] copy(x.0.0)
}
ENTRY main {
pred.1.0 = pred[] parameter(0)
constant.1.1 = f32[] constant(56)
copy.2 = f32[] copy(constant.1.1)
constant.1.2 = f32[] constant(12)
ROOT conditional.1.3 = f32[] conditional(pred.1.0, copy.2, constant.1.2),
true_computation=True, false_computation=False
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(hlo_text));
HloInstruction* constant_1 =
m->entry_computation()->GetInstructionWithName("constant.1.1");
HloInstruction* constant_2 =
m->entry_computation()->GetInstructionWithName("constant.1.2");
auto buffers = RunBufferAssignment(m.get());
{
const BufferAllocation& allocation_for_const_1 =
GetTopLevelAllocation(*buffers, constant_1);
EXPECT_TRUE(allocation_for_const_1.is_constant());
for (const auto& buffer_offset_pair :
allocation_for_const_1.assigned_buffers()) {
EXPECT_NE(buffer_offset_pair.first->instruction()->opcode(),
HloOpcode::kCopy);
EXPECT_NE(buffer_offset_pair.first->instruction()->opcode(),
HloOpcode::kConditional);
}
}
{
const BufferAllocation& allocation_for_const_2 =
GetTopLevelAllocation(*buffers, constant_2);
EXPECT_TRUE(allocation_for_const_2.is_constant());
for (const auto& buffer_offset_pair :
allocation_for_const_2.assigned_buffers()) {
EXPECT_NE(buffer_offset_pair.first->instruction()->opcode(),
HloOpcode::kCopy);
EXPECT_NE(buffer_offset_pair.first->instruction()->opcode(),
HloOpcode::kConditional);
}
}
}
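// Fixture for while-loop buffer-assignment tests: provides helpers that build
// a trivial loop condition and a body over an (input, weights, output)
// loop-state tuple, and runs buffer assignment with a sequential schedule.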
class WhileBufferAssignmentTest : public HloTestBase {
protected:
std::unique_ptr<HloComputation> BuildWhileConditionComputation(
const std::string& name) {
auto builder = HloComputation::Builder(name);
builder.AddInstruction(
HloInstruction::CreateParameter(0, loop_state_shape_, "loop_state"));
auto zero = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<int>(0)));
auto ten = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<int>(10)));
builder.AddInstruction(HloInstruction::CreateCompare(
ShapeUtil::MakeShape(PRED, {}), zero, ten, ComparisonDirection::kLt));
return builder.Build();
}
std::unique_ptr<HloComputation> BuildWhileBodyComputation(
const std::string& name) {
auto builder = HloComputation::Builder(name);
auto loop_state = builder.AddInstruction(
HloInstruction::CreateParameter(0, loop_state_shape_, "loop_state"));
auto input = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(data_shape_, loop_state, 0));
auto weights = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(data_shape_, loop_state, 1));
auto output = builder.AddInstruction(HloInstruction::CreateBinary(
data_shape_, HloOpcode::kMultiply, input, weights));
builder.AddInstruction(
HloInstruction::CreateTuple({input, weights, output}));
return builder.Build();
}
std::unique_ptr<BufferAssignment> RunBufferAssignment(HloModule* module,
int64_t alignment = 1) {
HloSchedule schedule = ScheduleModule(module, ByteSizeOf).value();
return BufferAssigner::Run(
module, std::make_unique<SequentialHloOrdering>(schedule),
ByteSizeOf,
[alignment](LogicalBuffer::Color) { return alignment; },
               /*allocate_buffers_for_constants=*/true)
.value();
}
static int64_t ByteSizeOf(const BufferValue& buffer) {
return ShapeUtil::ByteSizeOf(buffer.shape(), sizeof(void*));
}
Shape data_shape_ = ShapeUtil::MakeShape(F32, {4});
Shape loop_state_shape_ =
ShapeUtil::MakeTupleShape({data_shape_, data_shape_, data_shape_});
};
static void RunCopyInsertion(HloModule* module) {
CopyInsertion copy_insertion;
EXPECT_IS_OK(copy_insertion.Run(module).status());
}
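// Two sequential while loops where the first loop's output element feeds the
// second: each loop-state element is colocated with the corresponding operand
// and result.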
TEST_F(WhileBufferAssignmentTest, TwoForwardWhileLoops) {
auto module = CreateNewVerifiedModule();
auto builder = HloComputation::Builder("entry");
auto input0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, data_shape_, "input0"));
auto weights0 = builder.AddInstruction(
HloInstruction::CreateParameter(1, data_shape_, "weights0"));
auto weights1 = builder.AddInstruction(
HloInstruction::CreateParameter(2, data_shape_, "weights1"));
auto zero = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(0.0)));
auto output0 = builder.AddInstruction(
HloInstruction::CreateBroadcast(data_shape_, zero, {}));
auto output1 = builder.AddInstruction(
HloInstruction::CreateBroadcast(data_shape_, zero, {}));
auto cond0 =
module->AddEmbeddedComputation(BuildWhileConditionComputation("cond"));
auto body0 =
module->AddEmbeddedComputation(BuildWhileBodyComputation("body"));
auto tuple0 = builder.AddInstruction(
HloInstruction::CreateTuple({input0, weights0, output0}));
auto while0 = builder.AddInstruction(
HloInstruction::CreateWhile(loop_state_shape_, cond0, body0, tuple0));
auto cond1 =
module->AddEmbeddedComputation(BuildWhileConditionComputation("cond"));
auto body1 =
module->AddEmbeddedComputation(BuildWhileBodyComputation("body"));
auto input1 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(data_shape_, while0, 2));
auto tuple1 = builder.AddInstruction(
HloInstruction::CreateTuple({input1, weights1, output1}));
auto while1 = builder.AddInstruction(
HloInstruction::CreateWhile(loop_state_shape_, cond1, body1, tuple1));
module->AddEntryComputation(builder.Build());
RunCopyInsertion(module.get());
auto assignment = RunBufferAssignment(module.get());
EXPECT_EQ(assignment->GetUniqueSlice(input0, {}).value(),
assignment->GetUniqueSlice(while0, {0}).value());
EXPECT_EQ(assignment->GetUniqueSlice(weights0, {}).value(),
assignment->GetUniqueSlice(while0, {1}).value());
EXPECT_EQ(assignment->GetUniqueSlice(while0, {2}).value(),
assignment->GetUniqueSlice(while1, {0}).value());
EXPECT_EQ(assignment->GetUniqueSlice(weights1, {}).value(),
assignment->GetUniqueSlice(while1, {1}).value());
}
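// A while operand that is an entry parameter keeps the parameter's slice,
// while a second, dependent while loop gets a different slice.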
TEST_F(WhileBufferAssignmentTest, ColocatedBufferWithEntryParameter) {
const Shape r0s32 = ShapeUtil::MakeShape(S32, {});
const char* module_str = R"(
HloModule test_module
%cond.v0 {
%param = s32[] parameter(0)
ROOT %constant = pred[] constant(true)
}
%cond.v1 {
%param.0 = s32[] parameter(0)
ROOT %constant.0 = pred[] constant(true)
}
%body.v0 {
ROOT %param.1 = s32[] parameter(0)
}
%body.v1 {
%param.2 = s32[] parameter(0)
ROOT add = s32[] add(%param.2, %param.2)
}
ENTRY %test_module {
%param.3 = s32[] parameter(0)
%while.0 = s32[] while(%param.3), condition=%cond.v0, body=%body.v0
%mul = s32[] multiply(%while.0, %while.0)
%while.1 = s32[] while(%mul), condition=%cond.v1, body=%body.v1
ROOT %bcast = s32[1024,1024]{1,0} broadcast(s32[] %while.1), dimensions={}
})";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(module_str));
int64_t instruction_count = m->instruction_count();
CopyInsertion copy_insertion;
ASSERT_IS_OK(copy_insertion.Run(m.get()).status());
ASSERT_EQ(instruction_count, m->instruction_count());
const HloInstruction* bcast = m->entry_computation()->root_instruction();
const HloInstruction* param =
m->entry_computation()->parameter_instruction(0);
ASSERT_EQ(bcast->opcode(), HloOpcode::kBroadcast);
const HloInstruction* while1 = bcast->operand(0);
ASSERT_EQ(while1->opcode(), HloOpcode::kWhile);
const HloInstruction* while0 = while1->operand(0)->operand(0);
ASSERT_EQ(while0->opcode(), HloOpcode::kWhile);
auto assignment = RunBufferAssignment(m.get());
TF_ASSERT_OK_AND_ASSIGN(auto slice_param,
assignment->GetUniqueSlice(param, {}));
TF_ASSERT_OK_AND_ASSIGN(auto slice_while0,
assignment->GetUniqueSlice(while0, {}));
TF_ASSERT_OK_AND_ASSIGN(auto slice_while1,
assignment->GetUniqueSlice(while1, {}));
EXPECT_EQ(slice_param, slice_while0);
EXPECT_NE(slice_param, slice_while1);
}
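// Same as above, but the first while loop is fed by a constant instead of an
// entry parameter.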
TEST_F(WhileBufferAssignmentTest, ColocatedBufferWithConstant) {
const Shape r0s32 = ShapeUtil::MakeShape(S32, {});
const char* module_str = R"(
HloModule test_module
%cond.v0 {
%param = s32[] parameter(0)
ROOT %constant = pred[] constant(true)
}
%cond.v1 {
%param.0 = s32[] parameter(0)
ROOT %constant.0 = pred[] constant(true)
}
%body.v0 {
ROOT %param.1 = s32[] parameter(0)
}
%body.v1 {
%param.2 = s32[] parameter(0)
ROOT add = s32[] add(%param.2, %param.2)
}
ENTRY %test_module {
%constant.42 = s32[] constant(42)
%while.0 = s32[] while(%constant.42), condition=%cond.v0, body=%body.v0
%mul = s32[] multiply(%while.0, %while.0)
%while.1 = s32[] while(%mul), condition=%cond.v1, body=%body.v1
ROOT %bcast = s32[1024,1024]{1,0} broadcast(s32[] %while.1), dimensions={}
})";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(module_str));
int64_t instruction_count = m->instruction_count();
CopyInsertion copy_insertion;
ASSERT_IS_OK(copy_insertion.Run(m.get()).status());
ASSERT_EQ(instruction_count, m->instruction_count());
const HloInstruction* bcast = m->entry_computation()->root_instruction();
const HloInstruction* constant =
m->entry_computation()->GetInstructionWithName("constant.42");
ASSERT_EQ(bcast->opcode(), HloOpcode::kBroadcast);
const HloInstruction* while1 = bcast->operand(0);
ASSERT_EQ(while1->opcode(), HloOpcode::kWhile);
const HloInstruction* while0 = while1->operand(0)->operand(0);
ASSERT_EQ(while0->opcode(), HloOpcode::kWhile);
auto assignment = RunBufferAssignment(m.get());
TF_ASSERT_OK_AND_ASSIGN(auto slice_constant,
assignment->GetUniqueSlice(constant, {}));
TF_ASSERT_OK_AND_ASSIGN(auto slice_while0,
assignment->GetUniqueSlice(while0, {}));
TF_ASSERT_OK_AND_ASSIGN(auto slice_while1,
assignment->GetUniqueSlice(while1, {}));
EXPECT_EQ(slice_constant, slice_while0);
EXPECT_NE(slice_constant, slice_while1);
}
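// Three while loops (fed by infeed data, by the previous loop, and by an add):
// the chained loops share one slice, which matches the corresponding element
// of the root tuple, while the third loop gets the other element's slice.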
TEST_F(WhileBufferAssignmentTest, ColocatedBuffers) {
const Shape r0s32 = ShapeUtil::MakeShape(S32, {});
auto build_cond = [&]() {
auto builder = HloComputation::Builder("cond");
auto const4 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<int>(4)));
auto param =
builder.AddInstruction(HloInstruction::CreateParameter(0, r0s32, "x"));
builder.AddInstruction(
HloInstruction::CreateCompare(ShapeUtil::MakeShape(PRED, {}), param,
const4, ComparisonDirection::kLt));
return builder.Build();
};
auto build_body = [&]() {
auto builder = HloComputation::Builder("body");
auto const9 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<int>(9)));
auto param =
builder.AddInstruction(HloInstruction::CreateParameter(0, r0s32, "x"));
builder.AddInstruction(
HloInstruction::CreateBinary(r0s32, HloOpcode::kAdd, param, const9));
return builder.Build();
};
auto module = CreateNewVerifiedModule();
auto builder = HloComputation::Builder("entry");
auto token = builder.AddInstruction(HloInstruction::CreateToken());
auto infeed =
builder.AddInstruction(HloInstruction::CreateInfeed(r0s32, token, ""));
auto infeed_data = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(r0s32, infeed, 0));
auto cond0 = module->AddEmbeddedComputation(build_cond());
auto body0 = module->AddEmbeddedComputation(build_body());
auto while0 = builder.AddInstruction(
HloInstruction::CreateWhile(r0s32, cond0, body0, infeed_data));
auto cond1 = module->AddEmbeddedComputation(build_cond());
auto body1 = module->AddEmbeddedComputation(build_body());
auto while1 = builder.AddInstruction(
HloInstruction::CreateWhile(r0s32, cond1, body1, while0));
auto zero = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32_t>(0)));
auto add = builder.AddInstruction(
HloInstruction::CreateBinary(r0s32, HloOpcode::kAdd, zero, zero));
auto cond2 = module->AddEmbeddedComputation(build_cond());
auto body2 = module->AddEmbeddedComputation(build_body());
auto while2 = builder.AddInstruction(
HloInstruction::CreateWhile(r0s32, cond2, body2, add));
auto tuple =
builder.AddInstruction(HloInstruction::CreateTuple({while2, while1}));
module->AddEntryComputation(builder.Build());
int64_t instruction_count = module->instruction_count();
CopyInsertion copy_insertion;
ASSERT_IS_OK(copy_insertion.Run(module.get()).status());
ASSERT_EQ(instruction_count, module->instruction_count());
TF_ASSERT_OK_AND_ASSIGN(
HloSchedule schedule,
ScheduleModule(module.get(), [](const BufferValue& buffer) {
return ShapeUtil::ByteSizeOf(buffer.shape(),
sizeof(void*));
}));
schedule.set_sequence(
module->entry_computation(),
{token, infeed, infeed_data, while0, while1, zero, add, while2, tuple});
TF_ASSERT_OK(schedule.Verify());
TF_ASSERT_OK_AND_ASSIGN(
auto assignment,
BufferAssigner::Run(
module.get(), std::make_unique<SequentialHloOrdering>(schedule),
backend().compiler()->BufferSizeBytesFunction(),
[](LogicalBuffer::Color) { return 1; },
          /*allocate_buffers_for_constants=*/true));
TF_ASSERT_OK_AND_ASSIGN(auto slice0, assignment->GetUniqueSlice(tuple, {0}));
TF_ASSERT_OK_AND_ASSIGN(auto slice1, assignment->GetUniqueSlice(tuple, {1}));
EXPECT_NE(slice0, slice1);
TF_ASSERT_OK_AND_ASSIGN(auto slice_while0,
assignment->GetUniqueSlice(while0, {}));
TF_ASSERT_OK_AND_ASSIGN(auto slice_while1,
assignment->GetUniqueSlice(while1, {}));
EXPECT_EQ(slice1, slice_while0);
EXPECT_EQ(slice1, slice_while1);
TF_ASSERT_OK_AND_ASSIGN(auto slice_while2,
assignment->GetUniqueSlice(while2, {}));
EXPECT_EQ(slice0, slice_while2);
}
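// A while loop consuming another while loop's full result: both loops share
// the slices of all three loop-state elements.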
TEST_F(WhileBufferAssignmentTest, OneForwardBackwardWhileLoopSet) {
auto module = CreateNewVerifiedModule();
auto builder = HloComputation::Builder("entry");
auto input0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, data_shape_, "input0"));
auto weights0 = builder.AddInstruction(
HloInstruction::CreateParameter(1, data_shape_, "weights0"));
auto zero = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(0.0)));
auto output0 = builder.AddInstruction(
HloInstruction::CreateBroadcast(data_shape_, zero, {}));
auto cond0 =
module->AddEmbeddedComputation(BuildWhileConditionComputation("cond"));
auto body0 =
module->AddEmbeddedComputation(BuildWhileBodyComputation("body"));
auto tuple0 = builder.AddInstruction(
HloInstruction::CreateTuple({input0, weights0, output0}));
auto while0 = builder.AddInstruction(
HloInstruction::CreateWhile(loop_state_shape_, cond0, body0, tuple0));
auto cond1 =
module->AddEmbeddedComputation(BuildWhileConditionComputation("cond"));
auto body1 =
module->AddEmbeddedComputation(BuildWhileBodyComputation("body"));
auto while1 = builder.AddInstruction(
HloInstruction::CreateWhile(loop_state_shape_, cond1, body1, while0));
module->AddEntryComputation(builder.Build());
RunCopyInsertion(module.get());
auto assignment = RunBufferAssignment(module.get());
EXPECT_EQ(assignment->GetUniqueSlice(while0, {0}).value(),
assignment->GetUniqueSlice(while1, {0}).value());
EXPECT_EQ(assignment->GetUniqueSlice(while0, {1}).value(),
assignment->GetUniqueSlice(while1, {1}).value());
EXPECT_EQ(assignment->GetUniqueSlice(while0, {2}).value(),
assignment->GetUniqueSlice(while1, {2}).value());
}
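// Two calls to the same computation (after call-graph flattening) must get
// distinct buffers.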
TEST_F(BufferAssignmentTest, TwoCalls) {
auto module = CreateNewVerifiedModule();
Shape r0f32 = ShapeUtil::MakeShape(xla::F32, {});
HloComputation* sub_computation;
{
auto builder = HloComputation::Builder(TestName() + "_sub_comp");
auto param = builder.AddInstruction(
HloInstruction::CreateParameter(0, r0f32, "param"));
auto constant1 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
auto add = builder.AddInstruction(
HloInstruction::CreateBinary(r0f32, HloOpcode::kAdd, param, constant1));
sub_computation = module->AddEmbeddedComputation(builder.Build(add));
}
auto builder = HloComputation::Builder(TestName());
auto constant2 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0)));
auto constant3 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(3.0)));
auto call1 = builder.AddInstruction(
HloInstruction::CreateCall(r0f32, {constant2}, sub_computation));
auto call2 = builder.AddInstruction(
HloInstruction::CreateCall(r0f32, {constant3}, sub_computation));
auto add1 = builder.AddInstruction(
HloInstruction::CreateBinary(r0f32, HloOpcode::kAdd, call1, constant2));
auto add2 = builder.AddInstruction(
HloInstruction::CreateBinary(r0f32, HloOpcode::kAdd, call2, add1));
module->AddEntryComputation(builder.Build(add2));
{
FlattenCallGraph flatten;
TF_ASSERT_OK_AND_ASSIGN(bool result, flatten.Run(module.get()));
EXPECT_TRUE(result);
std::unique_ptr<CallGraph> call_graph = CallGraph::Build(module.get());
}
RunCopyInsertion(module.get());
auto assignment = RunBufferAssignment(module.get());
EXPECT_TRUE(BuffersDistinct({call1}, {call2}, *assignment));
}
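// Callee parameters are co-allocated with the corresponding caller operands,
// including the nested tuple elements produced by a custom-call.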
TEST_F(BufferAssignmentTest, CallParamCoAllocation) {
const char* hlo_text = R"(
HloModule CallParamCoAllocation
Callee {
param0 = (f32[100],(f32[200],f32[300])) parameter(0)
param1 = s32[20] parameter(1)
ROOT constant = f32[] constant(1)
}
ENTRY Main {
entry_param0 = f32[100] parameter(0)
entry_param1 = s32[20] parameter(1)
custom_call = (f32[200],f32[300]) custom-call(), custom_call_target="call-target"
call_op0 = (f32[100],(f32[200],f32[300])) tuple(entry_param0, custom_call)
ROOT call_result = f32[] call(call_op0, entry_param1), to_apply=Callee
}
)";
HloModuleConfig config;
config.set_debug_options(GetDebugOptionsFromFlags());
TF_ASSERT_OK_AND_ASSIGN(auto m,
ParseAndReturnVerifiedModule(hlo_text, config));
auto buffers = RunBufferAssignment(m.get());
HloComputation* main = m->entry_computation();
HloComputation* callee = m->GetComputationWithName("Callee");
EXPECT_NE(callee, nullptr);
HloInstruction* param0 = callee->parameter_instruction(0);
HloInstruction* param1 = callee->parameter_instruction(1);
HloInstruction* entry_param0 = main->parameter_instruction(0);
HloInstruction* entry_param1 = main->parameter_instruction(1);
HloInstruction* custom_call = main->GetInstructionWithName("custom_call");
EXPECT_EQ(GetAllocation(*buffers, entry_param0, {}),
GetAllocation(*buffers, param0, {0}));
EXPECT_EQ(GetAllocation(*buffers, entry_param1, {}),
GetAllocation(*buffers, param1, {}));
EXPECT_EQ(GetAllocation(*buffers, custom_call, {}),
GetAllocation(*buffers, param0, {1}));
EXPECT_EQ(GetAllocation(*buffers, custom_call, {0}),
GetAllocation(*buffers, param0, {1, 0}));
EXPECT_EQ(GetAllocation(*buffers, custom_call, {1}),
GetAllocation(*buffers, param0, {1, 1}));
}
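// An async call: the called computation's parameters and root alias the
// caller's operands and the call-done result, and its temporaries do not share
// slices with values that are live across the async region in the main thread.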
TEST_F(BufferAssignmentTest, AsyncCall) {
const char* hlo_text = R"(
HloModule AsyncCall, is_scheduled=true
%called_computation (param_0: f32[4096], param_1: f32[4096]) -> f32[4096] {
%param_0 = f32[4096]{0} parameter(0)
%param_1 = f32[4096]{0} parameter(1)
%negate_0 = f32[4096]{0} negate(f32[4096]{0} %param_0)
%negate_1 = f32[4096]{0} negate(f32[4096]{0} %param_1)
%negate_2 = f32[4096]{0} negate(f32[4096]{0} %negate_1)
%negate_3 = f32[4096]{0} negate(f32[4096]{0} %negate_2)
ROOT %result.1 = f32[4096]{0} add(f32[4096]{0} %negate_0, f32[4096]{0} %negate_3)
}
ENTRY %main (a: f32[4096], b: f32[4096]) -> f32[4096] {
%a = f32[4096]{0} parameter(0)
%b = f32[4096]{0} parameter(1)
%async-start = ((f32[4096]{0}, f32[4096]{0}), f32[4096]{0}, u32[]) call-start(f32[4096]{0} %a, f32[4096]{0} %b), to_apply=%called_computation
%negate_4 = f32[4096]{0} negate(f32[4096]{0} %a)
%negate_5 = f32[4096]{0} negate(f32[4096]{0} %b)
%negate_6 = f32[4096]{0} negate(f32[4096]{0} %negate_5)
%negate_7 = f32[4096]{0} negate(f32[4096]{0} %negate_6)
%add_0 = f32[4096]{0} add(f32[4096]{0} %negate_4, f32[4096]{0} %negate_7)
%async-done = f32[4096]{0} call-done(((f32[4096]{0}, f32[4096]{0}), f32[4096]{0}, u32[]) %async-start)
ROOT %add_1 = f32[4096]{0} add(f32[4096]{0} %add_0, f32[4096]{0} %async-done)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(hlo_text));
auto buffers = RunBufferAssignmentWithSequentialOrdering(m.get());
LOG(INFO) << buffers->ToString();
auto get_slice = [&](std::string_view hlo_name, const ShapeIndex& index) {
return buffers->GetUniqueSlice(FindInstruction(m.get(), hlo_name), index)
.value();
};
EXPECT_EQ(get_slice("param_0", {}), get_slice("a", {}));
EXPECT_EQ(get_slice("param_1", {}), get_slice("b", {}));
EXPECT_EQ(get_slice("result.1", {}), get_slice("async-done", {}));
for (const auto& hlo_name :
{"negate_0", "negate_1", "negate_2", "negate_3"}) {
EXPECT_NE(get_slice(hlo_name, {}), get_slice("negate_4", {}));
EXPECT_NE(get_slice(hlo_name, {}), get_slice("negate_5", {}));
EXPECT_NE(get_slice(hlo_name, {}), get_slice("negate_6", {}));
EXPECT_NE(get_slice(hlo_name, {}), get_slice("negate_7", {}));
EXPECT_NE(get_slice(hlo_name, {}), get_slice("add_0", {}));
}
}
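// Same as above, but buffers on the "foobar" execution thread are colored
// separately and assigned to a private stack, so temporaries within the called
// computation may reuse each other's slices.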
TEST_F(BufferAssignmentTest, AsyncCallPrivateStack) {
const char* hlo_text = R"(
HloModule AsyncCall, is_scheduled=true
%called_computation (param_0: f32[4096], param_1: f32[4096]) -> f32[4096] {
%param_0 = f32[4096]{0} parameter(0)
%param_1 = f32[4096]{0} parameter(1)
%negate_0 = f32[4096]{0} negate(f32[4096]{0} %param_0)
%negate_1 = f32[4096]{0} negate(f32[4096]{0} %param_1)
%negate_2 = f32[4096]{0} negate(f32[4096]{0} %negate_1)
%negate_3 = f32[4096]{0} negate(f32[4096]{0} %negate_2)
ROOT %result.1 = f32[4096]{0} add(f32[4096]{0} %negate_0, f32[4096]{0} %negate_3)
}, execution_thread="foobar"
ENTRY %main (a: f32[4096], b: f32[4096]) -> f32[4096] {
%a = f32[4096]{0} parameter(0)
%b = f32[4096]{0} parameter(1)
%async-start = ((f32[4096]{0}, f32[4096]{0}), f32[4096]{0}, u32[]) call-start(f32[4096]{0} %a, f32[4096]{0} %b), async_execution_thread="foobar", to_apply=%called_computation
%negate_4 = f32[4096]{0} negate(f32[4096]{0} %a)
%negate_5 = f32[4096]{0} negate(f32[4096]{0} %b)
%negate_6 = f32[4096]{0} negate(f32[4096]{0} %negate_5)
%negate_7 = f32[4096]{0} negate(f32[4096]{0} %negate_6)
%add_0 = f32[4096]{0} add(f32[4096]{0} %negate_4, f32[4096]{0} %negate_7)
%async-done = f32[4096]{0} call-done(((f32[4096]{0}, f32[4096]{0}), f32[4096]{0}, u32[]) %async-start)
ROOT %add_1 = f32[4096]{0} add(f32[4096]{0} %add_0, f32[4096]{0} %async-done)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(hlo_text));
auto colorer = [](HloAliasAnalysis* alias_analysis, const HloOrdering&) {
for (const HloBuffer& buffer : alias_analysis->buffers()) {
int color = 1;
for (const HloValue* value : buffer.values()) {
if (absl::c_any_of(
value->positions(),
[](const HloPosition& position) {
return position.instruction->parent()->execution_thread() !=
"foobar";
}) ||
absl::c_any_of(value->GetUses(), [](const HloUse& use) {
return use.instruction->parent()->execution_thread() != "foobar";
})) {
color = 0;
}
}
for (const HloValue* value : buffer.values()) {
const HloPosition& defining_position = value->defining_position();
if (defining_position.shape().has_layout()) {
const int memory_space =
defining_position.shape().layout().memory_space();
if (memory_space != 0) {
color = memory_space;
}
}
alias_analysis->dataflow_analysis()
.GetValue(value->id())
.set_color(BufferValue::Color(color));
}
}
return absl::OkStatus();
};
BufferAssigner::PrivateStacks private_stacks;
private_stacks[1] = {FindComputation(m.get(), "called_computation")};
auto buffers = RunBufferAssignmentWithSequentialOrdering(
m.get(), 1, colorer, private_stacks);
LOG(INFO) << buffers->ToString();
auto get_slice = [&](std::string_view hlo_name, const ShapeIndex& index) {
return buffers->GetUniqueSlice(FindInstruction(m.get(), hlo_name), index)
.value();
};
EXPECT_EQ(get_slice("param_0", {}), get_slice("a", {}));
EXPECT_EQ(get_slice("param_1", {}), get_slice("b", {}));
EXPECT_EQ(get_slice("result.1", {}), get_slice("async-done", {}));
for (const auto& hlo_name :
{"negate_0", "negate_1", "negate_2", "negate_3"}) {
EXPECT_NE(get_slice(hlo_name, {}), get_slice("negate_4", {}));
EXPECT_NE(get_slice(hlo_name, {}), get_slice("negate_5", {}));
EXPECT_NE(get_slice(hlo_name, {}), get_slice("negate_6", {}));
EXPECT_NE(get_slice(hlo_name, {}), get_slice("negate_7", {}));
EXPECT_NE(get_slice(hlo_name, {}), get_slice("add_0", {}));
}
EXPECT_NE(get_slice("negate_0", {}), get_slice("negate_1", {}));
EXPECT_EQ(get_slice("negate_1", {}), get_slice("negate_2", {}));
EXPECT_EQ(get_slice("negate_1", {}), get_slice("negate_3", {}));
}
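// Two async computations on the same execution thread share one private stack,
// so temporaries from different computations may reuse the same slices.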
TEST_F(BufferAssignmentTest, MultipleAsyncCallPrivateStack) {
const char* hlo_text = R"(
HloModule AsyncCall, is_scheduled=true
%called_computation1 {
%param_0 = f32[4096]{0} parameter(0)
%param_1 = f32[4096]{0} parameter(1)
%negate_0 = f32[4096]{0} negate(f32[4096]{0} %param_0)
%negate_1 = f32[4096]{0} negate(f32[4096]{0} %param_1)
%negate_2 = f32[4096]{0} negate(f32[4096]{0} %negate_1)
%negate_3 = f32[4096]{0} negate(f32[4096]{0} %negate_2)
ROOT %result.1 = f32[4096]{0} add(f32[4096]{0} %negate_0, f32[4096]{0} %negate_3)
}, execution_thread="foobar"
%called_computation2 {
%param_2 = f32[4096]{0} parameter(0)
%param_3 = f32[4096]{0} parameter(1)
%negate_4 = f32[4096]{0} negate(f32[4096]{0} %param_2)
%negate_5 = f32[4096]{0} negate(f32[4096]{0} %param_3)
ROOT %result.2 = f32[4096]{0} add(f32[4096]{0} %negate_4, f32[4096]{0} %negate_5)
}, execution_thread="foobar"
ENTRY %main (a: f32[4096], b: f32[4096]) -> f32[4096] {
%a = f32[4096]{0} parameter(0)
%b = f32[4096]{0} parameter(1)
%async-start.1 = ((f32[4096]{0}, f32[4096]{0}), f32[4096]{0}, u32[]) call-start(f32[4096]{0} %a, f32[4096]{0} %b), async_execution_thread="foobar", to_apply=%called_computation1
%async-start.2 = ((f32[4096]{0}, f32[4096]{0}), f32[4096]{0}, u32[]) call-start(f32[4096]{0} %b, f32[4096]{0} %a), async_execution_thread="foobar", to_apply=%called_computation2
%negate_6 = f32[4096]{0} negate(f32[4096]{0} %a)
%negate_7 = f32[4096]{0} negate(f32[4096]{0} %b)
%negate_8 = f32[4096]{0} negate(f32[4096]{0} %negate_7)
%negate_9 = f32[4096]{0} negate(f32[4096]{0} %negate_8)
%add_0 = f32[4096]{0} add(f32[4096]{0} %negate_6, f32[4096]{0} %negate_9)
%async-done.1 = f32[4096]{0} call-done(((f32[4096]{0}, f32[4096]{0}), f32[4096]{0}, u32[]) %async-start.1)
%async-done.2 = f32[4096]{0} call-done(((f32[4096]{0}, f32[4096]{0}), f32[4096]{0}, u32[]) %async-start.2)
%add_1 = f32[4096]{0} add(f32[4096]{0} %add_0, f32[4096]{0} %async-done.1)
ROOT %add_2 = f32[4096]{0} add(f32[4096]{0} %add_1, f32[4096]{0} %async-done.2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(hlo_text));
auto colorer = [](HloAliasAnalysis* alias_analysis, const HloOrdering&) {
for (const HloBuffer& buffer : alias_analysis->buffers()) {
int color = 1;
for (const HloValue* value : buffer.values()) {
if (absl::c_any_of(
value->positions(),
[](const HloPosition& position) {
return position.instruction->parent()->execution_thread() !=
"foobar";
}) ||
absl::c_any_of(value->GetUses(), [](const HloUse& use) {
return use.instruction->parent()->execution_thread() != "foobar";
})) {
color = 0;
}
}
for (const HloValue* value : buffer.values()) {
const HloPosition& defining_position = value->defining_position();
if (defining_position.shape().has_layout()) {
const int memory_space =
defining_position.shape().layout().memory_space();
if (memory_space != 0) {
color = memory_space;
}
}
alias_analysis->dataflow_analysis()
.GetValue(value->id())
.set_color(BufferValue::Color(color));
}
}
return absl::OkStatus();
};
BufferAssigner::PrivateStacks private_stacks;
private_stacks[1] = {FindComputation(m.get(), "called_computation1"),
FindComputation(m.get(), "called_computation2")};
auto buffers = RunBufferAssignmentWithSequentialOrdering(
m.get(), 1, colorer, private_stacks);
LOG(INFO) << buffers->ToString();
auto get_slice = [&](std::string_view hlo_name, const ShapeIndex& index) {
return buffers->GetUniqueSlice(FindInstruction(m.get(), hlo_name), index)
.value();
};
EXPECT_EQ(get_slice("param_0", {}), get_slice("a", {}));
EXPECT_EQ(get_slice("param_3", {}), get_slice("a", {}));
EXPECT_EQ(get_slice("param_1", {}), get_slice("b", {}));
EXPECT_EQ(get_slice("param_2", {}), get_slice("b", {}));
EXPECT_EQ(get_slice("result.1", {}), get_slice("async-done.1", {}));
EXPECT_EQ(get_slice("result.2", {}), get_slice("async-done.2", {}));
for (const auto& hlo_name : {"negate_0", "negate_1", "negate_2", "negate_3",
"negate_4", "negate_5"}) {
EXPECT_NE(get_slice(hlo_name, {}), get_slice("negate_6", {}));
EXPECT_NE(get_slice(hlo_name, {}), get_slice("negate_7", {}));
EXPECT_NE(get_slice(hlo_name, {}), get_slice("negate_8", {}));
EXPECT_NE(get_slice(hlo_name, {}), get_slice("negate_9", {}));
EXPECT_NE(get_slice(hlo_name, {}), get_slice("add_0", {}));
}
EXPECT_NE(get_slice("negate_0", {}), get_slice("negate_1", {}));
EXPECT_EQ(get_slice("negate_1", {}), get_slice("negate_2", {}));
EXPECT_EQ(get_slice("negate_1", {}), get_slice("negate_3", {}));
EXPECT_TRUE(get_slice("negate_4", {}) == get_slice("negate_0", {}) ||
get_slice("negate_4", {}) == get_slice("negate_1", {}));
EXPECT_TRUE(get_slice("negate_5", {}) == get_slice("negate_0", {}) ||
get_slice("negate_5", {}) == get_slice("negate_1", {}));
}
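// The called computation operates on f32[4] while the caller passes f32[8];
// the assigned slices are sized for the larger caller shape (32 bytes).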
TEST_F(BufferAssignmentTest, AsyncCallImplicitSharding) {
std::string hlo_string = R"(
HloModule module, is_scheduled=true
called_computation {
param0 = f32[4] parameter(0)
constant = f32[1] constant(1)
dynamic-update-slice = f32[4] dynamic-update-slice(param0, constant, constant)
ROOT negate = f32[4] negate(dynamic-update-slice)
}
ENTRY entry {
p0 = f32[8] parameter(0)
call-start = ((f32[8]), f32[8], s32[]) call-start(p0), async_execution_thread="foo", to_apply=called_computation
ROOT call-done = f32[8] call-done(call-start)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(hlo_string));
auto buffers = RunBufferAssignmentWithSequentialOrdering(module.get());
LOG(INFO) << buffers->ToString();
auto get_slice = [&](std::string_view hlo_name, const ShapeIndex& index) {
return buffers
->GetUniqueSlice(FindInstruction(module.get(), hlo_name), index)
.value();
};
EXPECT_EQ(get_slice("p0", {}).size(), 32);
EXPECT_EQ(get_slice("dynamic-update-slice", {}).size(), 32);
}
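// Without output-to-operand aliasing, an async custom-call's result must not
// share the operand's top-level slice.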
TEST_F(BufferAssignmentTest, AsyncCustomCall) {
const char* hlo_text = R"(
HloModule AsyncCustomCall, is_scheduled=true
ENTRY %main (a: f32[4096]) -> f32[4096] {
%a = f32[4096]{0} parameter(0)
%neg_0 = f32[4096]{0} negate(f32[4096]{0} %a)
%async-start = ((f32[4096]{0}), f32[4096]{0}, u32[])
custom-call-start(f32[4096]{0} %neg_0),
custom_call_target="Foo"
%async-done = f32[4096]{0} custom-call-done(((f32[4096]{0}), f32[4096]{0}, u32[]) %async-start)
ROOT %neg_1 = f32[4096]{0} negate(f32[4096]{0} %async-done)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(hlo_text));
auto buffers = RunBufferAssignmentWithSequentialOrdering(m.get());
HloInstruction* neg_0 = FindInstruction(m.get(), "neg_0");
HloInstruction* async_done = FindInstruction(m.get(), "async-done");
EXPECT_FALSE(buffers->SharesTopLevelSlice(neg_0, async_done));
}
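// With output_to_operand_aliasing, the async custom-call's result shares the
// operand's top-level slice.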
TEST_F(BufferAssignmentTest, AsyncCustomCallWithAliasing) {
const char* hlo_text = R"(
HloModule AsyncCustomCall, is_scheduled=true
ENTRY %main (a: f32[4096]) -> f32[4096] {
%a = f32[4096]{0} parameter(0)
%neg_0 = f32[4096]{0} negate(f32[4096]{0} %a)
%async-start = ((f32[4096]{0}), f32[4096]{0}, u32[])
custom-call-start(f32[4096]{0} %neg_0),
custom_call_target="Foo",
output_to_operand_aliasing={{}: (0, {})}
%async-done = f32[4096]{0} custom-call-done(((f32[4096]{0}), f32[4096]{0}, u32[]) %async-start)
ROOT %neg_1 = f32[4096]{0} negate(f32[4096]{0} %async-done)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(hlo_text));
auto buffers = RunBufferAssignmentWithSequentialOrdering(m.get());
HloInstruction* neg_0 = FindInstruction(m.get(), "neg_0");
HloInstruction* async_done = FindInstruction(m.get(), "async-done");
EXPECT_TRUE(buffers->SharesTopLevelSlice(neg_0, async_done));
}
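// With buffer isolation enabled, the first 'isolation_fuel' values (ordered by
// HloValue id) are laid out sequentially with padding after the non-isolated
// temp allocation, and the remaining values keep their original offsets
// shifted by base_offset_bytes.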
TEST_F(BufferAssignmentTest, BufferIsolation) {
absl::string_view module_str = R"(
HloModule test_module, is_scheduled=true
ENTRY %test_module {
param.0 = s32[1024]{0} parameter(0)
param.1 = s32[1024]{0} parameter(1)
mul1 = s32[1024]{0} multiply(param.0, param.1)
bcast1 = s32[4,1024]{1,0} broadcast(mul1), dimensions={1}
bcast2 = s32[4,1024]{1,0} broadcast(param.0), dimensions={1}
mul2 = s32[1024]{0} multiply(mul1, param.0)
add1 = s32[1024]{0} add(mul1, mul2)
sub2 = s32[1024]{0} subtract(mul1, mul2)
mul3 = s32[1024]{0} multiply(mul2, add1)
mul4 = s32[1024]{0} multiply(mul3, sub2)
bcast3 = s32[4,1024]{1,0} broadcast(mul4), dimensions={1}
add2 = s32[4,1024]{1,0} add(bcast3, bcast2)
ROOT add3 = s32[4,1024]{1,0} add(add2, bcast1)
})";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(module_str));
std::unique_ptr<BufferAssignment> nonisolation_assignment =
RunBufferAssignmentWithIsolationOptions(m.get());
auto nonisolation_allocation =
absl::c_find_if(nonisolation_assignment->Allocations(),
[](const BufferAllocation& allocation) {
return allocation.IsPreallocatedTempBuffer();
});
ASSERT_NE(nonisolation_allocation,
nonisolation_assignment->Allocations().end());
LOG(INFO) << "Non-isolation buffers";
for (const auto& [value, offset_size] :
nonisolation_allocation->assigned_buffers()) {
LOG(INFO) << value->ToShortString() << ": off: " << offset_size.offset
<< ", size: " << offset_size.size;
}
BufferAssignment::BufferIsolationOptions isolation_options;
isolation_options.hlo_value_compare =
[](const HloValue* a, const HloValue* b) { return a->id() < b->id(); };
isolation_options.config.add_isolation_colors(0);
isolation_options.config.set_isolation_order_salt(10);
isolation_options.config.set_isolation_fuel(5);
isolation_options.config.set_isolation_padding_bytes(1024);
isolation_options.config.set_base_offset_bytes(12288);
std::unique_ptr<BufferAssignment> isolation_assignment =
RunBufferAssignmentWithIsolationOptions(m.get(), isolation_options);
auto isolation_allocation =
absl::c_find_if(isolation_assignment->Allocations(),
[](const BufferAllocation& allocation) {
return allocation.IsPreallocatedTempBuffer();
});
ASSERT_NE(isolation_allocation, isolation_assignment->Allocations().end());
std::vector<const HloValue*> ordered_values;
for (const auto& [value, _] : isolation_allocation->assigned_buffers()) {
ordered_values.push_back(value);
}
absl::c_sort(ordered_values, isolation_options.hlo_value_compare);
int i;
int64_t expected_offset = nonisolation_allocation->size() +
isolation_options.config.base_offset_bytes() +
isolation_options.config.isolation_padding_bytes();
ASSERT_GT(ordered_values.size(), isolation_options.config.isolation_fuel());
LOG(INFO) << "Isolation buffers";
for (i = 0; i < isolation_options.config.isolation_fuel(); ++i) {
const HloValue* value = ordered_values[i];
auto offset_size = isolation_allocation->assigned_buffers().at(value);
LOG(INFO) << value->ToShortString() << ": off: " << offset_size.offset
<< ", size: " << offset_size.size;
EXPECT_EQ(offset_size.offset, expected_offset);
expected_offset +=
offset_size.size + isolation_options.config.isolation_padding_bytes();
}
for (; i < ordered_values.size(); ++i) {
const HloValue* value = ordered_values[i];
auto offset_size = isolation_allocation->assigned_buffers().at(value);
auto nonisolation_offset_size = absl::c_find_if(
nonisolation_allocation->assigned_buffers(), [&](const auto& pair) {
return pair.first->defining_position() == value->defining_position();
});
ASSERT_NE(nonisolation_offset_size,
nonisolation_allocation->assigned_buffers().end());
LOG(INFO) << value->ToShortString() << ": off: " << offset_size.offset
<< ", size: " << offset_size.size;
EXPECT_EQ(offset_size.offset,
nonisolation_offset_size->second.offset +
isolation_options.config.base_offset_bytes());
}
}
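// BufferInfoString() emits a CSV describing each buffer's offset, size, live
// range, and uses; compared against a golden string.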
TEST_F(BufferAssignmentTest, BufferInfoStringTest) {
absl::string_view module_str = R"(
HloModule test_module
ENTRY %test_module {
%param.0 = s32[1024]{0} parameter(0)
%param.1 = s32[1024]{0} parameter(1)
%mul = s32[1024]{0} multiply(%param.0, %param.1)
%add = s32[1024]{0} add(%mul, %param.0)
ROOT %bcast = s32[1024,1024]{1,0} broadcast(s32[1024] %add), dimensions={0}
})";
absl::string_view reference_str =
R"(buffer_id,buffer_name,offset,size,definition_time,end_time,num_uses,use_times,use_names
0,"<0 param.0 @0>",0,4096,0,5,2,"2;3","mul, operand 0;add, operand 1"
1,"<1 param.1 @0>",0,4096,1,5,1,"2","mul, operand 1"
2,"<2 mul @0>",0,4096,2,3,1,"3","add, operand 0"
3,"<3 add @0>",0,4096,3,4,1,"4","bcast, operand 0"
4,"<4 bcast @0>",0,4194304,4,5,0,"",""
)";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(module_str));
HloInstruction* const param0 = FindInstruction(m.get(), "param.0");
HloInstruction* const param1 = FindInstruction(m.get(), "param.1");
HloInstruction* const mul = FindInstruction(m.get(), "mul");
HloInstruction* const add = FindInstruction(m.get(), "add");
HloInstruction* const bcast = FindInstruction(m.get(), "bcast");
auto assignment = RunBufferAssignmentWithInstructionSequence(
m.get(), {param0, param1, mul, add, bcast});
const std::string buffer_info_str = assignment->BufferInfoString();
EXPECT_EQ(buffer_info_str, reference_str);
}
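// Two independent while loops whose results both feed the root add: under the
// given schedule their buffers must remain distinct.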
TEST_F(WhileBufferAssignmentTest, WhileLoopsInterferingResultRange) {
auto module = CreateNewVerifiedModule();
auto builder = HloComputation::Builder(TestName());
auto zero = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(0.0)));
auto one = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
auto input0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, data_shape_, "input0"));
auto weights0 = builder.AddInstruction(
HloInstruction::CreateParameter(1, data_shape_, "weights0"));
auto output0 = builder.AddInstruction(
HloInstruction::CreateBroadcast(data_shape_, zero, {}));
auto input1 = builder.AddInstruction(
HloInstruction::CreateParameter(2, data_shape_, "input1"));
auto weights1 = builder.AddInstruction(
HloInstruction::CreateParameter(3, data_shape_, "weights1"));
auto output1 = builder.AddInstruction(
HloInstruction::CreateBroadcast(data_shape_, one, {}));
auto cond =
module->AddEmbeddedComputation(BuildWhileConditionComputation("cond"));
auto body = module->AddEmbeddedComputation(BuildWhileBodyComputation("body"));
auto tuple0 = builder.AddInstruction(
HloInstruction::CreateTuple({input0, weights0, output0}));
auto tuple1 = builder.AddInstruction(
HloInstruction::CreateTuple({input1, weights1, output1}));
auto while0 = builder.AddInstruction(
HloInstruction::CreateWhile(loop_state_shape_, cond, body, tuple0));
auto while1 = builder.AddInstruction(
HloInstruction::CreateWhile(loop_state_shape_, cond, body, tuple1));
auto gte0 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(data_shape_, while0, 0));
auto gte1 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(data_shape_, while1, 1));
auto root_add = builder.AddInstruction(
HloInstruction::CreateBinary(data_shape_, HloOpcode::kAdd, gte0, gte1));
module->AddEntryComputation(builder.Build());
{
FlattenCallGraph flatten;
TF_ASSERT_OK_AND_ASSIGN(bool result, flatten.Run(module.get()));
EXPECT_TRUE(result);
}
RunCopyInsertion(module.get());
HloSchedule schedule = ScheduleModule(module.get(), ByteSizeOf).value();
schedule.set_sequence(
module->entry_computation(),
{input1, weights1, one, output1, while1->mutable_operand(0), while1,
input0, weights0, zero, output0, while0->mutable_operand(0), while0,
gte0, gte1, root_add});
TF_ASSERT_OK(schedule.Verify());
auto assignment =
BufferAssigner::Run(
module.get(), std::make_unique<SequentialHloOrdering>(schedule),
ByteSizeOf, [](LogicalBuffer::Color) { return 1; },
true)
.value();
EXPECT_TRUE(BuffersDistinct({while0}, {while1}, *assignment));
}
TEST_F(WhileBufferAssignmentTest, WhilesDontShareEntryParamIfLiveOut) {
auto module = CreateNewVerifiedModule();
auto builder = HloComputation::Builder("entry");
auto input0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, data_shape_, "input0"));
auto weights0 = builder.AddInstruction(
HloInstruction::CreateParameter(1, data_shape_, "weights0"));
auto zero = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(0.0)));
auto output0 = builder.AddInstruction(
HloInstruction::CreateBroadcast(data_shape_, zero, {}));
auto output1 = builder.AddInstruction(
HloInstruction::CreateBroadcast(data_shape_, zero, {}));
auto cond0 =
module->AddEmbeddedComputation(BuildWhileConditionComputation("cond"));
auto body0 =
module->AddEmbeddedComputation(BuildWhileBodyComputation("body"));
auto tuple0 = builder.AddInstruction(
HloInstruction::CreateTuple({input0, weights0, output0}));
auto while0 = builder.AddInstruction(
HloInstruction::CreateWhile(loop_state_shape_, cond0, body0, tuple0));
auto while0_out = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(data_shape_, while0, 2));
auto cond1 =
module->AddEmbeddedComputation(BuildWhileConditionComputation("cond"));
auto body1 =
module->AddEmbeddedComputation(BuildWhileBodyComputation("body"));
auto tuple1 = builder.AddInstruction(
HloInstruction::CreateTuple({while0_out, weights0, output1}));
auto while1 = builder.AddInstruction(
HloInstruction::CreateWhile(loop_state_shape_, cond1, body1, tuple1));
auto while1_out = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(data_shape_, while1, 2));
module->AddEntryComputation(builder.Build());
RunCopyInsertion(module.get());
auto assignment = RunBufferAssignment(module.get());
auto* root_alloc =
assignment->GetUniqueTopLevelSlice(while1_out).value().allocation();
EXPECT_TRUE(root_alloc->maybe_live_out());
EXPECT_FALSE(root_alloc->is_entry_computation_parameter());
}
TEST_F(WhileBufferAssignmentTest, WhileWithDynamicUpdateSliceShare) {
const char* const hlo_string = R"(
HloModule test
while_body {
state = (s32[], f32[1280,1,128]{2,1,0}) parameter(0)
constant.1 = f32[] constant(0)
broadcast.6 = f32[128,1,128]{2,1,0} broadcast(constant.1), dimensions={}
get-tuple-element.4 = f32[1280,1,128]{2,1,0} get-tuple-element(state), index=1
get-tuple-element.3 = s32[] get-tuple-element(state), index=0
constant.2 = s32[] constant(128)
add.5 = s32[] add(get-tuple-element.3, constant.2)
constant.3 = s32[] constant(0)
dynamic-update-slice.5 = f32[1280,1,128]{2,1,0} dynamic-update-slice(get-tuple-element.4, broadcast.6, constant.3, constant.3, constant.3)
dynamic-update-slice.9 = f32[1280,1,128]{2,1,0} dynamic-update-slice(dynamic-update-slice.5, broadcast.6, constant.3, constant.3, constant.3)
ROOT tuple.85 = (s32[], f32[1280,1,128]{2,1,0}) tuple(add.5, dynamic-update-slice.9)
}
while_condition {
state = (s32[], f32[1280,1,128]{2,1,0}) parameter(0)
get-tuple-element = s32[] get-tuple-element(state), index=0
get-tuple-element.1 = s32[] constant(3)
ROOT less-than.339.338 = pred[] compare(get-tuple-element, get-tuple-element.1), direction=LT
}
ENTRY entry_computation {
constant.7 = s32[] constant(0)
copy.1 = s32[] copy(constant.7)
constant.6 = f32[] constant(0)
broadcast.6 = f32[1280,1,128]{2,1,0} broadcast(constant.6), dimensions={}
tuple.1 = (s32[], f32[1280,1,128]{2,1,0}) tuple(copy.1, broadcast.6)
while.0 = (s32[], f32[1280,1,128]{2,1,0}) while(tuple.1), condition=while_condition, body=while_body
ROOT get-tuple-element.2 = s32[] get-tuple-element(while.0), index=0
}
)";
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
RunCopyInsertion(module.get());
auto assignment = RunBufferAssignment(module.get());
auto dus9 = FindInstruction(module.get(), "dynamic-update-slice.9");
auto dus9_alloc_slice = assignment->GetUniqueTopLevelSlice(dus9).value();
auto dus5 = FindInstruction(module.get(), "dynamic-update-slice.5");
auto dus5_alloc_slice = assignment->GetUniqueTopLevelSlice(dus5).value();
EXPECT_EQ(dus9_alloc_slice.allocation(), dus5_alloc_slice.allocation());
EXPECT_EQ(dus9_alloc_slice, dus5_alloc_slice);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/buffer_assignment.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/buffer_assignment_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
07635265-48b2-4ba1-86d5-a8203c9f03e8 | cpp | tensorflow/tensorflow | attr_value_util | tensorflow/core/framework/attr_value_util.cc | tensorflow/core/framework/attr_value_util_test.cc | #include "tensorflow/core/framework/attr_value_util.h"
#include <string>
#include <unordered_map>
#include <vector>
#include "absl/strings/escaping.h"
#include "tensorflow/core/framework/attr_value.pb_text.h"
#include "tensorflow/core/framework/tensor.pb_text.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb_text.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/stringpiece.h"
#include "tensorflow/core/lib/strings/proto_serialization.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/platform/fingerprint.h"
#include "tensorflow/core/util/overflow.h"
namespace tensorflow {
namespace attr_value_util_internal {
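// Returns the number of bytes needed to hold the tensor described by `t`, or
// -1 if the shape proto is invalid, the number of elements is unknown, or the
// byte count would overflow int64.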
int64_t TensorByteSize(const TensorProto& t) {
auto result = PartialTensorShape::BuildPartialTensorShape(t.tensor_shape());
if (!result.ok()) {
VLOG(1) << "Error encounted while computing computing tensor byte size: "
<< result.status();
return -1;
}
int64_t num_elems = result.value().num_elements();
if (num_elems < 0) {
return -1;
}
int64_t tensor_byte_size =
MultiplyWithoutOverflow(num_elems, DataTypeSize(t.dtype()));
if (tensor_byte_size < 0) {
VLOG(1)
<< "Overflow encountered when computing tensor byte size, multiplying "
<< num_elems << " with " << DataTypeSize(t.dtype());
return -1;
}
return tensor_byte_size;
}
}
namespace {
constexpr int kMaxAttrValueTensorByteSize = 32 * 1024 * 1024;
constexpr int kMaxTensorNestDepth = 100;
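// Hashes a TensorProto by round-tripping it through a Tensor and re-encoding
// with AsProtoTensorContent before hashing, with the effect that equivalent
// protos with different field encodings hash to the same value; if the proto
// cannot be converted to a Tensor, the raw proto is hashed instead.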
uint64 TensorProtoHash(const TensorProto& tp) {
Tensor tensor(tp.dtype());
bool success = tensor.FromProto(tp);
if (success) {
TensorProto p;
tensor.AsProtoTensorContent(&p);
return DeterministicProtoHash64(p);
} else {
return DeterministicProtoHash64(tp);
}
}
uint64 FastTensorProtoHash(const TensorProto& tp) {
if (attr_value_util_internal::TensorByteSize(tp) >
kMaxAttrValueTensorByteSize) {
return DeterministicProtoHash64(tp);
} else {
return TensorProtoHash(tp);
}
}
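// Compares two TensorProtos by content. Mismatched tensor byte sizes are
// unequal. For protos that would expand into large tensors, a serialized-proto
// comparison is tried first as a fast path, and when `allow_false_negatives`
// is set, tensors above kMaxAttrValueTensorByteSize are compared *only* by
// serialized proto. Otherwise both sides are normalized through a Tensor
// round-trip and their re-serialized forms are compared.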
bool AreTensorProtosEqual(const TensorProto& lhs, const TensorProto& rhs,
bool allow_false_negatives) {
const int64_t lhs_tensor_bytes =
attr_value_util_internal::TensorByteSize(lhs);
const int64_t rhs_tensor_bytes =
attr_value_util_internal::TensorByteSize(rhs);
if (lhs_tensor_bytes != rhs_tensor_bytes) {
return false;
}
const int64_t lhs_proto_bytes = lhs.ByteSizeLong();
const bool large_expansion =
(lhs_proto_bytes < 512 && lhs_tensor_bytes > 4096);
const bool only_compare_proto =
(allow_false_negatives && lhs_tensor_bytes > kMaxAttrValueTensorByteSize);
if (large_expansion || only_compare_proto) {
if (AreSerializedProtosEqual(lhs, rhs))
return true;
else if (only_compare_proto)
return false;
}
Tensor lhs_t(lhs.dtype());
bool success = lhs_t.FromProto(lhs);
if (!success) {
return false;
}
Tensor rhs_t(rhs.dtype());
success = rhs_t.FromProto(rhs);
if (!success) {
return false;
}
TensorProto lhs_tp;
lhs_t.AsProtoTensorContent(&lhs_tp);
TensorProto rhs_tp;
rhs_t.AsProtoTensorContent(&rhs_tp);
return AreSerializedProtosEqual(lhs_tp, rhs_tp);
}
using TensorProtoHasher = std::function<uint64(const TensorProto&)>;
uint64 AttrValueHash(const AttrValue& a, const TensorProtoHasher& tensor_hash) {
if (a.has_tensor()) return tensor_hash(a.tensor());
if (a.has_func()) {
const NameAttrList& func = a.func();
uint64 h = Hash64(func.name());
std::map<string, AttrValue> map(func.attr().begin(), func.attr().end());
for (const auto& pair : map) {
h = Hash64(pair.first.data(), pair.first.size(), h);
h = Hash64Combine(AttrValueHash(pair.second, tensor_hash), h);
}
return h;
}
return DeterministicProtoHash64(a);
}
string SummarizeString(const string& str) {
string escaped = absl::CEscape(str);
constexpr int kMaxStringSummarySize = 80;
if (escaped.size() >= kMaxStringSummarySize) {
StringPiece prefix(escaped);
StringPiece suffix = prefix;
prefix.remove_suffix(escaped.size() - 10);
suffix.remove_prefix(escaped.size() - 10);
return strings::StrCat("\"", prefix, "...", suffix, "\"");
} else {
return strings::StrCat("\"", escaped, "\"");
}
}
string SummarizeTensor(const TensorProto& tensor_proto) {
Tensor t;
int64_t tensor_byte_size =
attr_value_util_internal::TensorByteSize(tensor_proto);
if (tensor_byte_size > kMaxAttrValueTensorByteSize ||
      tensor_byte_size == -1) {
return strings::StrCat("<TensorProto: ", tensor_proto.ShortDebugString(),
">");
} else if (!t.FromProto(tensor_proto)) {
return strings::StrCat(
"<Invalid TensorProto: ", tensor_proto.ShortDebugString(), ">");
}
return t.DebugString();
}
string SummarizeFunc(const NameAttrList& func) {
std::vector<string> entries;
for (const auto& p : func.attr()) {
entries.push_back(
strings::StrCat(p.first, "=", SummarizeAttrValue(p.second)));
}
std::sort(entries.begin(), entries.end());
return strings::StrCat(func.name(), "[", absl::StrJoin(entries, ", "), "]");
}
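// Scans the textproto in `to_parse` and counts how deeply '{'/'<' blocks nest,
// returning false once the nesting reaches the limit (the body checks against
// a hard-coded 100, which matches kMaxTensorNestDepth). This guards
// ParseAttrValue against deeply nested tensor attributes before handing the
// string to the proto parser.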
bool ParseAttrValueHelper_TensorNestsUnderLimit(int limit, string to_parse) {
int nests = 0;
int maxed_out = to_parse.length();
int open_curly = to_parse.find('{');
int open_bracket = to_parse.find('<');
int close_curly = to_parse.find('}');
int close_bracket = to_parse.find('>');
if (open_curly == -1) {
open_curly = maxed_out;
}
if (open_bracket == -1) {
open_bracket = maxed_out;
}
int min = std::min(open_curly, open_bracket);
do {
if (open_curly == maxed_out && open_bracket == maxed_out) {
return true;
}
if (min == open_curly) {
nests += 1;
open_curly = to_parse.find('{', open_curly + 1);
if (open_curly == -1) {
open_curly = maxed_out;
}
} else if (min == open_bracket) {
nests += 1;
open_bracket = to_parse.find('<', open_bracket + 1);
if (open_bracket == -1) {
open_bracket = maxed_out;
}
} else if (min == close_curly) {
nests -= 1;
close_curly = to_parse.find('}', close_curly + 1);
if (close_curly == -1) {
close_curly = maxed_out;
}
} else if (min == close_bracket) {
nests -= 1;
close_bracket = to_parse.find('>', close_bracket + 1);
if (close_bracket == -1) {
close_bracket = maxed_out;
}
}
min = std::min({open_curly, open_bracket, close_curly, close_bracket});
} while (nests < 100);
return false;
}
}
string SummarizeAttrValue(const AttrValue& attr_value) {
switch (attr_value.value_case()) {
case AttrValue::kS:
return SummarizeString(attr_value.s());
case AttrValue::kI:
return strings::StrCat(attr_value.i());
case AttrValue::kF:
return strings::StrCat(attr_value.f());
case AttrValue::kB:
return attr_value.b() ? "true" : "false";
case AttrValue::kType:
return EnumName_DataType(attr_value.type());
case AttrValue::kShape:
return PartialTensorShape::DebugString(attr_value.shape());
case AttrValue::kTensor:
return SummarizeTensor(attr_value.tensor());
case AttrValue::kList: {
std::vector<string> pieces;
if (attr_value.list().s_size() > 0) {
for (int i = 0; i < attr_value.list().s_size(); ++i) {
pieces.push_back(SummarizeString(attr_value.list().s(i)));
}
} else if (attr_value.list().i_size() > 0) {
for (int i = 0; i < attr_value.list().i_size(); ++i) {
pieces.push_back(strings::StrCat(attr_value.list().i(i)));
}
} else if (attr_value.list().f_size() > 0) {
for (int i = 0; i < attr_value.list().f_size(); ++i) {
pieces.push_back(strings::StrCat(attr_value.list().f(i)));
}
} else if (attr_value.list().b_size() > 0) {
for (int i = 0; i < attr_value.list().b_size(); ++i) {
pieces.push_back(attr_value.list().b(i) ? "true" : "false");
}
} else if (attr_value.list().type_size() > 0) {
for (int i = 0; i < attr_value.list().type_size(); ++i) {
pieces.push_back(EnumName_DataType(attr_value.list().type(i)));
}
} else if (attr_value.list().shape_size() > 0) {
for (int i = 0; i < attr_value.list().shape_size(); ++i) {
pieces.push_back(
TensorShape::DebugString(attr_value.list().shape(i)));
}
} else if (attr_value.list().tensor_size() > 0) {
for (int i = 0; i < attr_value.list().tensor_size(); ++i) {
pieces.push_back(SummarizeTensor(attr_value.list().tensor(i)));
}
} else if (attr_value.list().func_size() > 0) {
for (int i = 0; i < attr_value.list().func_size(); ++i) {
pieces.push_back(SummarizeFunc(attr_value.list().func(i)));
}
}
constexpr int kMaxListSummarySize = 30;
if (pieces.size() >= kMaxListSummarySize) {
uint64_t fingerprint =
Fingerprint64(absl::StrJoin(pieces.begin(), pieces.end(), ","));
pieces.erase(pieces.begin() + 5, pieces.end() - 6);
pieces[5] = "...";
return strings::StrCat("[", absl::StrJoin(pieces, ", "),
"]{attr_hash=", fingerprint, "}");
} else {
return strings::StrCat("[", absl::StrJoin(pieces, ", "), "]");
}
}
case AttrValue::kFunc: {
return SummarizeFunc(attr_value.func());
}
case AttrValue::kPlaceholder:
return strings::StrCat("$", attr_value.placeholder());
case AttrValue::VALUE_NOT_SET:
return "<Unknown AttrValue type>";
}
return "<Unknown AttrValue type>";
}
Status AttrValueHasType(const AttrValue& attr_value, StringPiece type) {
int num_set = 0;
#define VALIDATE_FIELD(name, type_string, oneof_case) \
do { \
if (attr_value.has_list()) { \
if (attr_value.list().name##_size() > 0) { \
if (type != "list(" type_string ")") { \
return errors::InvalidArgument( \
"AttrValue had value with type 'list(" type_string ")' when '", \
type, "' expected"); \
} \
++num_set; \
} \
} else if (attr_value.value_case() == AttrValue::oneof_case) { \
if (type != type_string) { \
return errors::InvalidArgument( \
"AttrValue had value with type '" type_string "' when '", type, \
"' expected"); \
} \
++num_set; \
} \
} while (false)
VALIDATE_FIELD(s, "string", kS);
VALIDATE_FIELD(i, "int", kI);
VALIDATE_FIELD(f, "float", kF);
VALIDATE_FIELD(b, "bool", kB);
VALIDATE_FIELD(type, "type", kType);
VALIDATE_FIELD(shape, "shape", kShape);
VALIDATE_FIELD(tensor, "tensor", kTensor);
VALIDATE_FIELD(func, "func", kFunc);
#undef VALIDATE_FIELD
if (attr_value.value_case() == AttrValue::kPlaceholder) {
return errors::InvalidArgument(
"AttrValue had value with unexpected type 'placeholder'");
}
if (absl::StartsWith(type, "list(") && !attr_value.has_list()) {
if (num_set) {
return errors::InvalidArgument(
"AttrValue missing value with expected type '", type, "'");
} else {
++num_set;
}
}
if (num_set == 0 && !absl::StartsWith(type, "list(")) {
return errors::InvalidArgument(
"AttrValue missing value with expected type '", type, "'");
}
if (type == "type") {
if (!DataType_IsValid(attr_value.type())) {
return errors::InvalidArgument("AttrValue has invalid DataType enum: ",
attr_value.type());
}
if (IsRefType(attr_value.type())) {
return errors::InvalidArgument(
"AttrValue must not have reference type value of ",
DataTypeString(attr_value.type()));
}
if (attr_value.type() == DT_INVALID) {
return errors::InvalidArgument("AttrValue has invalid DataType");
}
} else if (type == "list(type)") {
for (auto as_int : attr_value.list().type()) {
const DataType dtype = static_cast<DataType>(as_int);
if (!DataType_IsValid(dtype)) {
return errors::InvalidArgument("AttrValue has invalid DataType enum: ",
as_int);
}
if (IsRefType(dtype)) {
return errors::InvalidArgument(
"AttrValue must not have reference type value of ",
DataTypeString(dtype));
}
if (dtype == DT_INVALID) {
return errors::InvalidArgument("AttrValue contains invalid DataType");
}
}
}
return absl::OkStatus();
}
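// Parses `text` as an attribute value of the given `type` (e.g. "int",
// "list(string)", "tensor") by wrapping it into a textproto for the matching
// AttrValue field and running the proto text parser. Illustrative example,
// not taken from this file: ParseAttrValue("list(int)", "[1, 2, 3]", &out)
// yields an AttrValue whose list().i() holds 1, 2, 3.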
bool ParseAttrValue(StringPiece type, StringPiece text, AttrValue* out) {
string field_name;
bool is_list = absl::ConsumePrefix(&type, "list(");
if (absl::ConsumePrefix(&type, "string")) {
field_name = "s";
} else if (absl::ConsumePrefix(&type, "int")) {
field_name = "i";
} else if (absl::ConsumePrefix(&type, "float")) {
field_name = "f";
} else if (absl::ConsumePrefix(&type, "bool")) {
field_name = "b";
} else if (absl::ConsumePrefix(&type, "type")) {
field_name = "type";
} else if (absl::ConsumePrefix(&type, "shape")) {
field_name = "shape";
} else if (absl::ConsumePrefix(&type, "tensor")) {
field_name = "tensor";
} else if (absl::ConsumePrefix(&type, "func")) {
field_name = "func";
} else if (absl::ConsumePrefix(&type, "placeholder")) {
field_name = "placeholder";
} else {
return false;
}
if (is_list && !absl::ConsumePrefix(&type, ")")) {
return false;
}
string to_parse;
if (is_list) {
StringPiece cleaned = text;
str_util::RemoveLeadingWhitespace(&cleaned);
str_util::RemoveTrailingWhitespace(&cleaned);
if (cleaned.size() < 2 || cleaned[0] != '[' ||
cleaned[cleaned.size() - 1] != ']') {
return false;
}
cleaned.remove_prefix(1);
str_util::RemoveLeadingWhitespace(&cleaned);
if (cleaned.size() == 1) {
out->Clear();
out->mutable_list();
return true;
}
to_parse = strings::StrCat("list { ", field_name, ": ", text, " }");
} else {
to_parse = strings::StrCat(field_name, ": ", text);
}
if (field_name == "tensor") {
if (!ParseAttrValueHelper_TensorNestsUnderLimit(kMaxTensorNestDepth,
to_parse)) {
return false;
}
}
return ProtoParseFromString(to_parse, out);
}
void SetAttrValue(const AttrValue& value, AttrValue* out) { *out = value; }
#define DEFINE_SET_ATTR_VALUE_ONE(ARG_TYPE, FIELD) \
void SetAttrValue(ARG_TYPE value, AttrValue* out) { out->set_##FIELD(value); }
#define DEFINE_SET_ATTR_VALUE_LIST(ARG_TYPE, FIELD) \
void SetAttrValue(ARG_TYPE value, AttrValue* out) { \
out->mutable_list()->Clear(); \
for (const auto& v : value) { \
out->mutable_list()->add_##FIELD(v); \
} \
}
#define DEFINE_SET_ATTR_VALUE_BOTH(ARG_TYPE, FIELD) \
DEFINE_SET_ATTR_VALUE_ONE(ARG_TYPE, FIELD) \
DEFINE_SET_ATTR_VALUE_LIST(gtl::ArraySlice<ARG_TYPE>, FIELD)
DEFINE_SET_ATTR_VALUE_ONE(const string&, s)
DEFINE_SET_ATTR_VALUE_LIST(absl::Span<const string>, s)
DEFINE_SET_ATTR_VALUE_BOTH(const char*, s)
DEFINE_SET_ATTR_VALUE_BOTH(int64_t, i)
DEFINE_SET_ATTR_VALUE_BOTH(int32_t, i)
DEFINE_SET_ATTR_VALUE_BOTH(float, f)
DEFINE_SET_ATTR_VALUE_BOTH(double, f)
DEFINE_SET_ATTR_VALUE_BOTH(bool, b)
DEFINE_SET_ATTR_VALUE_LIST(const std::vector<bool>&, b)
DEFINE_SET_ATTR_VALUE_LIST(std::initializer_list<bool>, b)
DEFINE_SET_ATTR_VALUE_BOTH(DataType, type)
void SetAttrValue(const tstring& value, AttrValue* out) {
out->set_s(value.data(), value.size());
}
void SetAttrValue(absl::Span<const tstring> value, AttrValue* out) {
out->mutable_list()->Clear();
for (const auto& v : value) {
out->mutable_list()->add_s(v.data(), v.size());
}
}
void SetAttrValue(StringPiece value, AttrValue* out) {
out->set_s(value.data(), value.size());
}
void SetAttrValue(const absl::Span<const StringPiece> value, AttrValue* out) {
out->mutable_list()->Clear();
for (const auto& v : value) {
out->mutable_list()->add_s(v.data(), v.size());
}
}
void MoveAttrValue(std::vector<string>&& value, AttrValue* out) {
out->mutable_list()->Clear();
for (auto& v : value) {
out->mutable_list()->add_s(std::move(v));
}
}
void SetAttrValue(const TensorShape& value, AttrValue* out) {
value.AsProto(out->mutable_shape());
}
void SetAttrValue(const TensorShapeProto& value, AttrValue* out) {
*out->mutable_shape() = value;
}
void SetAttrValue(const PartialTensorShape& value, AttrValue* out) {
value.AsProto(out->mutable_shape());
}
void SetAttrValue(const absl::Span<const TensorShape> value, AttrValue* out) {
out->mutable_list()->Clear();
for (const auto& v : value) {
v.AsProto(out->mutable_list()->add_shape());
}
}
void SetAttrValue(absl::Span<const TensorShapeProto> value, AttrValue* out) {
out->mutable_list()->Clear();
for (const auto& v : value) {
*out->mutable_list()->add_shape() = v;
}
}
void SetAttrValue(const absl::Span<const PartialTensorShape> value,
AttrValue* out) {
out->mutable_list()->Clear();
for (const auto& v : value) {
v.AsProto(out->mutable_list()->add_shape());
}
}
void SetAttrValue(const Tensor& value, AttrValue* out) {
if (value.NumElements() > 1) {
value.AsProtoTensorContent(out->mutable_tensor());
} else {
value.AsProtoField(out->mutable_tensor());
}
}
void SetAttrValue(const absl::Span<const Tensor> value, AttrValue* out) {
out->mutable_list()->Clear();
for (const auto& v : value) {
if (v.NumElements() > 1) {
v.AsProtoTensorContent(out->mutable_list()->add_tensor());
} else {
v.AsProtoField(out->mutable_list()->add_tensor());
}
}
}
void SetAttrValue(const TensorProto& value, AttrValue* out) {
*out->mutable_tensor() = value;
}
void SetAttrValue(const absl::Span<const TensorProto> value, AttrValue* out) {
out->mutable_list()->Clear();
for (const auto& v : value) {
*out->mutable_list()->add_tensor() = v;
}
}
void SetAttrValue(const NameAttrList& value, AttrValue* out) {
*out->mutable_func() = value;
}
void SetAttrValue(absl::Span<const NameAttrList> value, AttrValue* out) {
out->mutable_list()->Clear();
for (const auto& v : value) {
*out->mutable_list()->add_func() = v;
}
}
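// Attribute equality: DataType attrs compare by enum value, tensor attrs
// compare via AreTensorProtosEqual (honoring `allow_false_negatives`), func
// attrs compare the function name plus an order-insensitive comparison of
// their attr maps (recursing into this function), and every other case falls
// back to comparing the serialized protos.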
bool AreAttrValuesEqual(const AttrValue& a, const AttrValue& b,
bool allow_false_negatives) {
if (a.type() != b.type()) {
return false;
} else if (a.type() != DT_INVALID && b.type() != DT_INVALID) {
return a.type() == b.type();
}
if (a.has_tensor() != b.has_tensor()) {
return false;
} else if (a.has_tensor() && b.has_tensor()) {
return AreTensorProtosEqual(a.tensor(), b.tensor(), allow_false_negatives);
}
if (a.has_func() != b.has_func()) {
return false;
} else if (a.has_func() && b.has_func()) {
const NameAttrList& af = a.func();
const NameAttrList& bf = b.func();
if (af.name() != bf.name()) return false;
std::unordered_map<string, AttrValue> am(af.attr().begin(),
af.attr().end());
for (const auto& bm_pair : bf.attr()) {
const auto& iter = am.find(bm_pair.first);
if (iter == am.end()) return false;
if (!AreAttrValuesEqual(iter->second, bm_pair.second,
allow_false_negatives))
return false;
am.erase(iter);
}
if (!am.empty()) return false;
return true;
}
return AreSerializedProtosEqual(a, b);
}
uint64 AttrValueHash(const AttrValue& a) {
return AttrValueHash(a, TensorProtoHash);
}
uint64 FastAttrValueHash(const AttrValue& a) {
return AttrValueHash(a, FastTensorProtoHash);
}
bool HasPlaceHolder(const AttrValue& val) {
switch (val.value_case()) {
case AttrValue::kList: {
for (const NameAttrList& func : val.list().func()) {
for (const auto& p : func.attr()) {
if (HasPlaceHolder(p.second)) {
return true;
}
}
}
break;
}
case AttrValue::kFunc:
for (const auto& p : val.func().attr()) {
if (HasPlaceHolder(p.second)) {
return true;
}
}
break;
case AttrValue::kPlaceholder:
return true;
default:
break;
}
return false;
}
bool SubstitutePlaceholders(const SubstituteFunc& substitute,
AttrValue* value) {
switch (value->value_case()) {
case AttrValue::kList: {
for (NameAttrList& func : *value->mutable_list()->mutable_func()) {
for (auto& p : *func.mutable_attr()) {
if (!SubstitutePlaceholders(substitute, &p.second)) {
return false;
}
}
}
break;
}
case AttrValue::kFunc:
for (auto& p : *(value->mutable_func()->mutable_attr())) {
if (!SubstitutePlaceholders(substitute, &p.second)) {
return false;
}
}
break;
case AttrValue::kPlaceholder:
return substitute(value->placeholder(), value);
case AttrValue::VALUE_NOT_SET:
return false;
default:
break;
}
return true;
}
} | #include "tensorflow/core/framework/attr_value_util.h"
#include <numeric>
#include <vector>
#include <gtest/gtest.h>
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
template <typename T>
AttrValue V(T value) {
AttrValue ret;
SetAttrValue(value, &ret);
return ret;
}
AttrValue P(const string& p) {
AttrValue ret;
ret.set_placeholder(p);
return ret;
}
AttrValue F(const string& name,
std::vector<std::pair<string, AttrValue>> pairs) {
AttrValue ret;
ret.mutable_func()->set_name(name);
ret.mutable_func()->mutable_attr()->insert(pairs.begin(), pairs.end());
return ret;
}
AttrValue Fs(
std::vector<std::pair<string, std::vector<std::pair<string, AttrValue>>>>
funcs) {
AttrValue ret;
for (const auto& func : funcs) {
NameAttrList* entry = ret.mutable_list()->add_func();
entry->set_name(func.first);
entry->mutable_attr()->insert(func.second.begin(), func.second.end());
}
return ret;
}
TEST(AttrValueUtil, HasType) {
EXPECT_TRUE(AttrValueHasType(V(123), "int").ok());
EXPECT_TRUE(AttrValueHasType(V(1.2), "float").ok());
EXPECT_TRUE(AttrValueHasType(V(DT_FLOAT), "type").ok());
EXPECT_TRUE(AttrValueHasType(F("f", {}), "func").ok());
EXPECT_TRUE(AttrValueHasType(Fs({{"f", {}}, {"g", {}}}), "list(func)").ok());
EXPECT_FALSE(AttrValueHasType(V(123), "func").ok());
EXPECT_FALSE(AttrValueHasType(V(1.2), "int").ok());
EXPECT_FALSE(AttrValueHasType(V(DT_FLOAT), "shape").ok());
EXPECT_FALSE(AttrValueHasType(F("f", {}), "string").ok());
EXPECT_FALSE(AttrValueHasType(P("T"), "float").ok());
EXPECT_FALSE(AttrValueHasType(V(static_cast<DataType>(1000)), "type").ok());
std::vector<DataType> list_type({static_cast<DataType>(1000)});
EXPECT_FALSE(AttrValueHasType(V(list_type), "list(type)").ok());
}
SubstituteFunc ReplaceTWith(const AttrValue& val) {
return [val](const string& placeholder, AttrValue* target) {
if (placeholder == "T") {
*target = val;
return true;
} else {
return false;
}
};
}
TEST(AttrValueUtil, Basic) {
auto v = F("MatMul", {{"dtype", P("T")},
{"transpose_a", V(false)},
{"transpose_b", V(true)},
{"use_cublas", V(true)}});
TF_EXPECT_OK(AttrValueHasType(v, "func"));
EXPECT_TRUE(HasPlaceHolder(v));
EXPECT_EQ(
SummarizeAttrValue(v),
"MatMul[dtype=$T, transpose_a=false, transpose_b=true, use_cublas=true]");
SubstitutePlaceholders(ReplaceTWith(V(DT_FLOAT)), &v);
EXPECT_TRUE(!HasPlaceHolder(v));
EXPECT_EQ(SummarizeAttrValue(v),
"MatMul[dtype=DT_FLOAT, transpose_a=false, transpose_b=true, "
"use_cublas=true]");
}
TEST(AttrValueUtil, Shaped) {
auto v =
F("OpRequiresShape", {{"shape_full", V(TensorShape({1, 0}))},
{"shape_part", V(PartialTensorShape({-1, 1, 0}))}});
TF_EXPECT_OK(AttrValueHasType(v, "func"));
EXPECT_FALSE(HasPlaceHolder(v));
EXPECT_EQ(SummarizeAttrValue(v),
"OpRequiresShape[shape_full=[1,0], shape_part=[?,1,0]]");
}
TEST(AttrValueUtil, DeepAttr) {
auto v = Fs({{"f", {{"T", P("T")}}}, {"g", {{"T", P("T")}}}});
TF_EXPECT_OK(AttrValueHasType(v, "list(func)"));
EXPECT_TRUE(HasPlaceHolder(v));
for (int i = 0; i < 3; ++i) {
v = F("f", {{"T", P("T")}, {"F", v}});
EXPECT_TRUE(HasPlaceHolder(v));
}
EXPECT_EQ(SummarizeAttrValue(v),
"f[F=f[F=f[F=[f[T=$T], g[T=$T]], T=$T], T=$T], T=$T]");
SubstitutePlaceholders(ReplaceTWith(F("x", {})), &v);
EXPECT_TRUE(!HasPlaceHolder(v));
EXPECT_EQ(SummarizeAttrValue(v),
"f[F=f[F=f[F=[f[T=x[]], g[T=x[]]], T=x[]], T=x[]], T=x[]]");
}
TEST(AttrValueUtil, SummarizeAttrValueDoesNotElideShortStrings) {
AttrValue attr_value;
SetAttrValue(string(40, '-'), &attr_value);
EXPECT_EQ(strings::StrCat("\"", string(40, '-'), "\""),
SummarizeAttrValue(attr_value));
}
TEST(AttrValueUtil, SummarizeAttrValueElidesLongStrings) {
AttrValue attr_value;
SetAttrValue(string(80, '-'), &attr_value);
EXPECT_EQ("\"----------...----------\"", SummarizeAttrValue(attr_value));
}
TEST(AttrValueUtil, SummarizeAttrValueDoesNotElideShortLists) {
std::vector<int> alist(10);
std::iota(alist.begin(), alist.end(), 0);
AttrValue attr_value;
SetAttrValue(alist, &attr_value);
EXPECT_EQ("[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]", SummarizeAttrValue(attr_value));
}
TEST(AttrValueUtil, SummarizeAttrValueElidesLongLists) {
std::vector<int> alist(110);
std::iota(alist.begin(), alist.end(), 0);
AttrValue attr_value;
SetAttrValue(alist, &attr_value);
EXPECT_EQ(
"[0, 1, 2, 3, 4, ..., 105, 106, 107, 108, "
"109]{attr_hash=14506120815048308275}",
SummarizeAttrValue(attr_value));
}
TEST(AttrValueUtil, TensorByteSizeNumElementsOverflows) {
TensorProto proto;
proto.mutable_tensor_shape()->add_dim()->set_size(9223372036854775807L);
proto.mutable_tensor_shape()->add_dim()->set_size(2092026309338556617L);
proto.set_dtype(DT_INT32);
EXPECT_EQ(attr_value_util_internal::TensorByteSize(proto), -1);
}
TEST(AttrValueUtil, TensorByteSizeShouldNotOverflow) {
{
TensorProto proto;
proto.mutable_tensor_shape()->add_dim()->set_size(4611686018427387904L);
proto.set_dtype(DT_INT32);
EXPECT_EQ(attr_value_util_internal::TensorByteSize(proto), -1);
}
{
TensorProto proto;
proto.mutable_tensor_shape()->add_dim()->set_size(46123445412334L);
proto.set_dtype(DT_INT32);
EXPECT_NE(attr_value_util_internal::TensorByteSize(proto), -1);
}
}
AttrValue FromText(const string& text) {
AttrValue attr;
EXPECT_TRUE(protobuf::TextFormat::MergeFromString(text, &attr));
return attr;
}
void ExpectDifferent(const AttrValue& a1, const AttrValue& a2) {
EXPECT_FALSE(AreAttrValuesEqual(a1, a2));
EXPECT_FALSE(AreAttrValuesEqual(a2, a1));
EXPECT_NE(AttrValueHash(a1), AttrValueHash(a2));
}
TEST(AttrValueEquality, StringAndFuncTensors) {
AttrValue a = FromText(R"(
tensor {
dtype: DT_STRING
tensor_shape {
dim {
size: 2
}
}
string_val: 'reader_dataset_ops_test/tmphtXHks/text_line.0.txt'
string_val: 'reader_dataset_ops_test/tmphtXHks/text_line.1.txt'
})");
EXPECT_TRUE(AreAttrValuesEqual(a, a));
EXPECT_EQ(AttrValueHash(a), AttrValueHash(a));
AttrValue b = a;
(*b.mutable_tensor()->mutable_string_val(0))[3] = '1';
ExpectDifferent(a, b);
AttrValue c1;
c1.mutable_func()->set_name("func_name");
(*c1.mutable_func()->mutable_attr())["attr1"] = a;
(*c1.mutable_func()->mutable_attr())["attr2"] = b;
EXPECT_TRUE(AreAttrValuesEqual(c1, c1));
EXPECT_EQ(AttrValueHash(c1), AttrValueHash(c1));
ExpectDifferent(c1, a);
AttrValue c2 = c1;
c2.mutable_func()->set_name("func_name2");
ExpectDifferent(c1, c2);
c2 = c1;
(*c2.mutable_func()->mutable_attr())["attr3"] = b;
ExpectDifferent(c1, c2);
c2 = c1;
(*c2.mutable_func()->mutable_attr())["attr2"] = a;
ExpectDifferent(c1, c2);
c2 = c1;
c2.mutable_func()->mutable_attr()->erase("attr2");
ExpectDifferent(c1, c2);
}
TEST(AttrValueEquality, GiantTensors) {
AttrValue tensor = FromText(R"(
tensor {
dtype: DT_INT32
tensor_shape {
dim {
size: 1024
}
dim {
size: 1024
}
dim {
size: 1024
}
dim {
size: 1024
}
}
int_val: 0
})");
EXPECT_TRUE(AreAttrValuesEqual(tensor, tensor));
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/framework/attr_value_util.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/framework/attr_value_util_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
e0d08b23-2f0e-480d-a7ab-790f1efa7514 | cpp | google/arolla | quote | arolla/expr/quote.cc | arolla/expr/quote_test.cc | #include "arolla/expr/quote.h"
#include "absl/log/check.h"
#include "absl/numeric/int128.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/escaping.h"
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
#include "arolla/dense_array/qtype/types.h"
#include "arolla/expr/expr_debug_string.h"
#include "arolla/expr/expr_node.h"
#include "arolla/qtype/optional_qtype.h"
#include "arolla/qtype/simple_qtype.h"
#include "arolla/util/fingerprint.h"
#include "arolla/util/repr.h"
namespace arolla::expr {
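// Fingerprint returned by ExprQuote::expr_fingerprint() when the quote holds
// no expression, giving uninitialized quotes a stable hash value.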
constexpr Fingerprint kEmptyQuoteHash{
absl::MakeUint128(0x5466dba2e1989659, 0x6f2834ee88b8b08b)};
absl::StatusOr<ExprNodePtr> ExprQuote::expr() const {
if (expr_ == nullptr) {
return absl::InvalidArgumentError("uninitialized ExprQuote");
}
return expr_;
}
Fingerprint ExprQuote::expr_fingerprint() const {
return expr_ != nullptr ? expr_->fingerprint() : kEmptyQuoteHash;
}
}
namespace arolla {
void FingerprintHasherTraits<expr::ExprQuote>::operator()(
FingerprintHasher* hasher, const expr::ExprQuote& value) const {
hasher->Combine(absl::string_view("::arolla::expr::ExprQuote"),
value.expr_fingerprint());
}
ReprToken ReprTraits<expr::ExprQuote>::operator()(
const expr::ExprQuote& value) const {
if (!value.has_expr()) {
return ReprToken{"ExprQuote(nullptr)"};
}
return ReprToken{absl::StrFormat(
"ExprQuote('%s')", absl::Utf8SafeCHexEscape(ToDebugString(*value)))};
}
AROLLA_DEFINE_SIMPLE_QTYPE(EXPR_QUOTE, expr::ExprQuote);
AROLLA_DEFINE_OPTIONAL_QTYPE(EXPR_QUOTE, expr::ExprQuote);
AROLLA_DEFINE_DENSE_ARRAY_QTYPE(EXPR_QUOTE, expr::ExprQuote);
} | #include "arolla/expr/quote.h"
#include <memory>
#include <optional>
#include <vector>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/hash/hash_testing.h"
#include "absl/status/status.h"
#include "absl/status/status_matchers.h"
#include "arolla/dense_array/dense_array.h"
#include "arolla/expr/expr.h"
#include "arolla/expr/expr_operator.h"
#include "arolla/expr/expr_operator_signature.h"
#include "arolla/expr/testing/test_operators.h"
#include "arolla/util/fingerprint.h"
#include "arolla/util/repr.h"
#include "arolla/util/text.h"
namespace arolla::expr {
namespace {
using ::absl_testing::IsOk;
using ::absl_testing::StatusIs;
using ::arolla::expr::testing::DummyOp;
using ::testing::Eq;
using ::testing::IsFalse;
using ::testing::IsTrue;
using ::testing::Ne;
class ExprQuoteTest : public ::testing::Test {
protected:
ExprOperatorPtr op_ = std::make_shared<DummyOp>(
"op", ExprOperatorSignature::MakeVariadicArgs());
};
TEST_F(ExprQuoteTest, Empty) {
ExprQuote quote;
EXPECT_THAT(quote.has_expr(), IsFalse());
EXPECT_THAT(quote.expr(), StatusIs(absl::StatusCode::kInvalidArgument,
"uninitialized ExprQuote"));
}
TEST_F(ExprQuoteTest, NotEmpty) {
ASSERT_OK_AND_ASSIGN(auto expr, CallOp(op_, {Leaf("x")}));
ExprQuote quote(expr);
EXPECT_THAT(quote.has_expr(), IsTrue());
ASSERT_THAT(quote.expr(), IsOk());
EXPECT_THAT(quote.expr()->get(), Eq(expr.get()));
EXPECT_THAT(quote->get(), Eq(expr.get()));
}
TEST_F(ExprQuoteTest, DenseArray) {
ASSERT_OK_AND_ASSIGN(auto expr_1, CallOp(op_, {Leaf("x")}));
ASSERT_OK_AND_ASSIGN(auto expr_2, CallOp(op_, {Leaf("y")}));
auto array = CreateDenseArray<expr::ExprQuote>(
{ExprQuote(expr_1), std::nullopt, ExprQuote(expr_2)});
EXPECT_TRUE(array[0].present);
EXPECT_FALSE(array[1].present);
EXPECT_TRUE(array[2].present);
EXPECT_EQ(array[0].value, ExprQuote(expr_1));
EXPECT_EQ(array[2].value, ExprQuote(expr_2));
}
TEST_F(ExprQuoteTest, AbslHash) {
ASSERT_OK_AND_ASSIGN(auto expr_1, CallOp(op_, {Leaf("x")}));
ASSERT_OK_AND_ASSIGN(auto expr_2, CallOp(op_, {Leaf("y")}));
ASSERT_OK_AND_ASSIGN(auto expr_3, CallOp(op_, {Leaf("z")}));
ASSERT_OK_AND_ASSIGN(auto expr_4, CallOp(op_, {Leaf("x")}));
std::vector cases{
ExprQuote(expr_1),
ExprQuote(expr_2),
ExprQuote(expr_3),
ExprQuote(expr_4),
};
EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly(cases));
}
TEST_F(ExprQuoteTest, Fingerprint) {
ASSERT_OK_AND_ASSIGN(auto expr, CallOp(op_, {Leaf("x")}));
EXPECT_THAT(ExprQuote(expr).expr_fingerprint(), Eq(expr->fingerprint()));
EXPECT_THAT(ExprQuote().expr_fingerprint(), Ne(expr->fingerprint()));
}
TEST_F(ExprQuoteTest, FingerprintHasher) {
ASSERT_OK_AND_ASSIGN(auto expr, CallOp(op_, {Leaf("x")}));
ExprQuote quote(expr);
auto quote_fingerprint = FingerprintHasher("").Combine(quote).Finish();
EXPECT_THAT(quote_fingerprint, Ne(expr->fingerprint()));
EXPECT_THAT(FingerprintHasher("").Combine(quote).Finish(),
Eq(quote_fingerprint));
}
TEST_F(ExprQuoteTest, Repr) {
EXPECT_THAT(Repr(ExprQuote()), Eq("ExprQuote(nullptr)"));
Text text_with_quote{"some\"\ntext"};
ASSERT_OK_AND_ASSIGN(auto expr,
CallOp(op_, {Leaf("x"), Literal(text_with_quote)}));
ExprQuote quote{expr};
EXPECT_THAT(Repr(text_with_quote), Eq("'some\\\"\\ntext'"));
EXPECT_THAT(Repr(quote),
Eq("ExprQuote('op(L.x, \\'some\\\\\\\"\\\\ntext\\')')"));
}
}
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/expr/quote.cc | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/expr/quote_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
95425b08-8054-4f6b-a08d-0daf2092292b | cpp | tensorflow/tensorflow | c_api_function | tensorflow/c/c_api_function.cc | tensorflow/c/c_api_function_test.cc | #include <algorithm>
#include <unordered_map>
#include <unordered_set>
#include <utility>
#include "absl/strings/match.h"
#include "tensorflow/c/c_api_internal.h"
#include "tensorflow/c/tf_buffer_internal.h"
#include "tensorflow/core/framework/attr_value_util.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/function.pb.h"
#include "tensorflow/core/framework/graph_to_functiondef.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/platform/base64.h"
#include "tensorflow/core/platform/strcat.h"
#include "tensorflow/core/util/debug_data_dumper.h"
using tensorflow::errors::InvalidArgument;
namespace tensorflow {
namespace {
Status ValidateNonRefOutput(const Node* node, int idx) {
const DataType& dt = node->output_type(idx);
return IsRefType(dt)
? InvalidArgument("Output ", idx, " of node '", node->name(),
"' has a reference type ", DataTypeString(dt))
: absl::OkStatus();
}
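// Validates the requested function inputs: every TF_Output must be a valid,
// non-reference output of a node in `fn_body`, and the same node:index pair
// may appear only once. Collects the inputs as OutputTensors and records, per
// node, which of its output indices are used as function inputs.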
Status ProcessInputs(
const TF_Graph* fn_body, const char* fn_name, int ninputs,
const TF_Output* inputs, std::vector<OutputTensor>* input_tensors,
std::unordered_map<const Node*, std::vector<int>>* input_nodes)
TF_EXCLUSIVE_LOCKS_REQUIRED(fn_body->mu) {
input_tensors->reserve(ninputs);
for (int i = 0; i < ninputs; ++i) {
Node* node = inputs[i].oper ? &inputs[i].oper->node : nullptr;
int idx = inputs[i].index;
TF_RETURN_WITH_CONTEXT_IF_ERROR(
fn_body->graph.IsValidOutputTensor(node, idx),
"Encountered while processing input ", i, " into function '", fn_name,
"'");
TF_RETURN_WITH_CONTEXT_IF_ERROR(ValidateNonRefOutput(node, idx),
"Encountered while processing input ", i,
" into function '", fn_name, "'");
input_tensors->emplace_back(node, idx);
const auto& iter = input_nodes->find(node);
if (iter == input_nodes->end()) {
input_nodes->insert({node, {idx}});
} else {
auto& indices = iter->second;
if (std::find(indices.begin(), indices.end(), idx) != indices.end()) {
return InvalidArgument("TF_Output ", node->name(), ":", idx,
" appears more than once in the input list");
}
indices.push_back(idx);
}
}
return absl::OkStatus();
}
Status ProcessOutputs(const TF_Graph* fn_body, const char* fn_name,
int noutputs, const TF_Output* outputs,
std::vector<OutputTensor>* output_tensors)
TF_EXCLUSIVE_LOCKS_REQUIRED(fn_body->mu) {
output_tensors->reserve(noutputs);
for (int i = 0; i < noutputs; ++i) {
Node* node = outputs[i].oper ? &outputs[i].oper->node : nullptr;
int idx = outputs[i].index;
TF_RETURN_WITH_CONTEXT_IF_ERROR(
fn_body->graph.IsValidOutputTensor(node, idx),
"Encountered while processing output ", i, " from function '", fn_name,
"'");
TF_RETURN_WITH_CONTEXT_IF_ERROR(ValidateNonRefOutput(node, idx),
"Encountered while creating function '",
fn_name, "'");
output_tensors->emplace_back(node, idx);
}
return absl::OkStatus();
}
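// Determines the nodes that make up the function body. With num_opers == -1
// the body is inferred as every op node of `fn_body` that is not an input
// node (input nodes must then have exactly one output); otherwise the
// explicitly listed `opers` are used verbatim.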
Status ComputeBodyNodes(
const TF_Graph* fn_body, const char* fn_name, int num_opers,
const TF_Operation* const* opers,
const std::unordered_map<const Node*, std::vector<int>>& input_nodes,
std::vector<const Node*>* body_nodes)
TF_EXCLUSIVE_LOCKS_REQUIRED(fn_body->mu) {
if (num_opers == -1) {
for (const Node* node : fn_body->graph.op_nodes()) {
const auto& iter = input_nodes.find(node);
if (iter == input_nodes.end()) {
body_nodes->push_back(node);
} else {
if (node->num_outputs() != 1) {
return InvalidArgument(
"When `num_opers` is set to -1, nodes referenced in `inputs` "
"must have a single output. Node ",
node->name(), " has ", node->num_outputs(),
" outputs. Encountered while creating function '", fn_name, "'");
}
}
}
} else {
body_nodes->reserve(num_opers);
for (int i = 0; i < num_opers; ++i) {
const Node* node = &opers[i]->node;
body_nodes->push_back(node);
}
}
return absl::OkStatus();
}
}
}
using tensorflow::Node;
using tensorflow::string;
TF_Function* TF_GraphToFunctionWithControlOutputs(
const TF_Graph* fn_body, const char* fn_name,
unsigned char append_hash_to_fn_name, int num_opers,
const TF_Operation* const* opers, int ninputs, const TF_Output* inputs,
int noutputs, const TF_Output* outputs, const char* const* output_names,
int ncontrol_outputs, const TF_Operation* const* control_outputs,
const char* const* control_output_names, const TF_FunctionOptions* opts,
const char* description, TF_Status* status) {
tensorflow::mutex_lock l(fn_body->mu);
std::vector<tensorflow::OutputTensor> input_tensors;
std::unordered_map<const Node*, std::vector<int>> input_nodes;
status->status = tensorflow::ProcessInputs(fn_body, fn_name, ninputs, inputs,
&input_tensors, &input_nodes);
if (TF_GetCode(status) != TF_OK) return nullptr;
std::vector<tensorflow::OutputTensor> output_tensors;
status->status = tensorflow::ProcessOutputs(fn_body, fn_name, noutputs,
outputs, &output_tensors);
if (TF_GetCode(status) != TF_OK) return nullptr;
std::vector<string> output_names_vec;
if (output_names) {
output_names_vec.reserve(noutputs);
for (int i = 0; i < noutputs; ++i) {
output_names_vec.push_back(string(output_names[i]));
}
}
std::vector<string> control_output_names_vec;
if (control_output_names) {
control_output_names_vec.reserve(ncontrol_outputs);
for (int i = 0; i < ncontrol_outputs; ++i) {
control_output_names_vec.push_back(string(control_output_names[i]));
}
}
std::vector<const Node*> body_nodes;
status->status = tensorflow::ComputeBodyNodes(
fn_body, fn_name, num_opers, opers, input_nodes, &body_nodes);
if (TF_GetCode(status) != TF_OK) return nullptr;
std::vector<const Node*> control_output_nodes;
control_output_nodes.reserve(ncontrol_outputs);
for (int i = 0; i < ncontrol_outputs; ++i) {
control_output_nodes.push_back(&control_outputs[i]->node);
}
DCHECK(append_hash_to_fn_name <= 1);
tensorflow::FunctionDef fdef;
status->status = tensorflow::GraphToFunctionDef(
fn_body->graph, fn_name, append_hash_to_fn_name != 0,
true,
true, body_nodes, input_tensors,
output_tensors, output_names_vec, control_output_nodes,
control_output_names_vec, description, &fdef);
if (TF_GetCode(status) != TF_OK) {
return nullptr;
}
DEBUG_DATA_DUMPER()->DumpOpCreationStackTraces(
fn_name, kDebugGroupOpStacktrace, "initial", &fn_body->graph);
tensorflow::StackTracesMap stack_traces;
for (const Node* n : fn_body->graph.nodes()) {
stack_traces[n->name()] = n->GetStackTrace();
}
TF_Function* tf_function = new TF_Function();
tf_function->record = new tensorflow::FunctionRecord(
std::move(fdef), std::move(stack_traces), false);
return tf_function;
}
TF_Function* TF_GraphToFunction(const TF_Graph* fn_body, const char* fn_name,
unsigned char append_hash_to_fn_name,
int num_opers, const TF_Operation* const* opers,
int ninputs, const TF_Output* inputs,
int noutputs, const TF_Output* outputs,
const char* const* output_names,
const TF_FunctionOptions* opts,
const char* description, TF_Status* status) {
return TF_GraphToFunctionWithControlOutputs(
fn_body, fn_name, append_hash_to_fn_name, num_opers, opers, ninputs,
inputs, noutputs, outputs, output_names, 0, nullptr, nullptr, opts,
description, status);
}
const char* TF_FunctionName(TF_Function* func) {
return func->record->fdef().signature().name().c_str();
}
void TF_GraphCopyFunction(TF_Graph* g, const TF_Function* func,
const TF_Function* grad, TF_Status* status) {
if (func == nullptr) {
status->status = InvalidArgument(
"'func' argument to TF_GraphCopyFunction cannot be null");
return;
}
tensorflow::mutex_lock l(g->mu);
status->status = g->graph.AddFunctionDef(func->record->fdef(),
func->record->stack_traces());
if (TF_GetCode(status) != TF_OK) return;
if (!grad) return;
status->status = g->graph.AddFunctionDef(grad->record->fdef(),
grad->record->stack_traces());
if (TF_GetCode(status) != TF_OK) return;
tensorflow::GradientDef gdef;
gdef.set_function_name(func->record->fdef().signature().name());
gdef.set_gradient_func(grad->record->fdef().signature().name());
status->status = g->graph.AddGradientDef(std::move(gdef));
}
int TF_GraphNumFunctions(TF_Graph* g) {
tensorflow::mutex_lock l(g->mu);
return g->graph.flib_def().num_functions();
}
int TF_GraphGetFunctions(TF_Graph* g, TF_Function** funcs, int max_func,
TF_Status* status) {
tensorflow::FunctionDefLibrary lib;
{
tensorflow::mutex_lock l(g->mu);
lib = g->graph.flib_def().ToProto();
}
const auto len = std::min(max_func, static_cast<int>(lib.function_size()));
for (int i = 0; i < len; ++i) {
TF_Function* func = new TF_Function();
func->record = new tensorflow::FunctionRecord(lib.function(i), {}, false);
funcs[i] = func;
}
status->status = absl::OkStatus();
return len;
}
void TF_FunctionToFunctionDef(TF_Function* func, TF_Buffer* output_func_def,
TF_Status* status) {
status->status = MessageToBuffer(func->record->fdef(), output_func_def);
}
TF_Function* TF_FunctionImportFunctionDef(const void* proto, size_t proto_len,
TF_Status* status) {
tensorflow::FunctionDef fdef;
bool success = fdef.ParseFromArray(proto, proto_len);
if (!success) {
status->status = InvalidArgument(
"Invalid FunctionDef given to TF_FunctionImportFunctionDef");
return nullptr;
}
TF_Function* func = new TF_Function();
func->record = new tensorflow::FunctionRecord(std::move(fdef), {}, false);
status->status = absl::OkStatus();
return func;
}
void TF_FunctionSetAttrValueProto(TF_Function* func, const char* attr_name,
const void* proto, size_t proto_len,
TF_Status* status) {
tensorflow::AttrValue attr_value;
if (!attr_value.ParseFromArray(proto, proto_len)) {
status->status = InvalidArgument(
"Unparseable AttrValue proto passed to "
"TF_FunctionSetAttrValueProto");
return;
}
auto fdef_or = func->record->mutable_fdef();
if (!fdef_or.ok()) {
status->status = fdef_or.status();
return;
}
(*(fdef_or.value()->mutable_attr()))[string(attr_name)] = attr_value;
status->status = absl::OkStatus();
}
void TF_FunctionGetAttrValueProto(TF_Function* func, const char* attr_name,
TF_Buffer* output_attr_value,
TF_Status* status) {
const auto& it = func->record->fdef().attr().find(attr_name);
if (it == func->record->fdef().attr().end()) {
status->status =
InvalidArgument("Function '", func->record->fdef().signature().name(),
"' has no attr named '", attr_name, "'.");
return;
}
status->status = MessageToBuffer(it->second, output_attr_value);
}
void TF_DeleteFunction(TF_Function* func) {
if (func == nullptr) {
return;
}
func->record->Unref();
func->record = nullptr;
delete func;
} | #include "tensorflow/c/c_api.h"
#include "tensorflow/c/c_api_internal.h"
#include "tensorflow/c/c_test_util.h"
#include "tensorflow/core/framework/common_shape_fns.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/function.pb.h"
#include "tensorflow/core/framework/op_def.pb.h"
#include "tensorflow/core/lib/hash/hash.h"
#include "tensorflow/core/lib/strings/proto_serialization.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/str_util.h"
#include "tensorflow/core/platform/strcat.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
typedef std::pair<string, DataType> IOSpec;
const char* kFeedStackToString = "File \"feed.cc\", line 10, in alpha";
const char* kNegStackToString = "File \"neg.cc\", line 15, in beta";
std::vector<IOSpec> M(const std::initializer_list<string>& names) {
std::vector<IOSpec> v;
for (const string& name : names) {
v.push_back(IOSpec(name, DT_INVALID));
}
return v;
}
struct EdgeSpec : public std::pair<string, string> {
typedef std::pair<string, string> Base;
using Base::pair;
string ToString() const { return strings::StrCat(first, "->", second); }
};
class CApiFunctionTest : public ::testing::Test {
protected:
CApiFunctionTest()
: s_(TF_NewStatus()),
func_graph_(TF_NewGraph()),
host_graph_(TF_NewGraph()),
func_(nullptr) {}
void SetUp() override {}
~CApiFunctionTest() override {
TF_DeleteFunction(func_);
TF_DeleteGraph(host_graph_);
TF_DeleteGraph(func_graph_);
TF_DeleteStatus(s_);
}
void Run(const std::vector<std::pair<TF_Operation*, TF_Tensor*>>& inputs,
TF_Operation* output, int32_t expected_result) {
Run(inputs, {{output, 0}}, {expected_result});
}
void RunT(const std::vector<std::pair<TF_Operation*, TF_Tensor*>>& inputs,
std::initializer_list<TF_Output> outputs,
const std::vector<std::vector<int32_t>>& expected_results) {
CSession csession(host_graph_, s_);
ASSERT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);
csession.SetInputs(inputs);
csession.SetOutputs(outputs);
csession.Run(s_);
ASSERT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);
for (int i = 0; i < expected_results.size(); ++i) {
TF_Tensor* out = csession.output_tensor(i);
ASSERT_TRUE(out != nullptr);
EXPECT_EQ(TF_INT32, TF_TensorType(out));
EXPECT_EQ(1, TF_NumDims(out));
CompareInt32Tensor(expected_results[i], out);
}
}
void Run(const std::vector<std::pair<TF_Operation*, TF_Tensor*>>& inputs,
std::initializer_list<TF_Output> outputs,
const std::vector<int32_t>& expected_results) {
CSession csession(host_graph_, s_);
ASSERT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);
csession.SetInputs(inputs);
csession.SetOutputs(outputs);
csession.Run(s_);
ASSERT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);
for (int i = 0; i < expected_results.size(); ++i) {
TF_Tensor* out = csession.output_tensor(i);
ASSERT_TRUE(out != nullptr);
EXPECT_EQ(TF_INT32, TF_TensorType(out));
EXPECT_EQ(0, TF_NumDims(out));
ASSERT_EQ(sizeof(int32_t), TF_TensorByteSize(out));
int32_t* output_contents = static_cast<int32_t*>(TF_TensorData(out));
EXPECT_EQ(expected_results[i], *output_contents);
}
}
void CompareInt32Tensor(const std::vector<int32_t>& expected, TF_Tensor* t) {
int32_t* data = static_cast<int32_t*>(TF_TensorData(t));
size_t size = TF_TensorByteSize(t);
ASSERT_EQ(expected.size() * sizeof(int32_t), size);
for (int i = 0; i < expected.size(); ++i) {
ASSERT_EQ(expected[i], data[i]) << "Different data at index " << i;
}
}
std::vector<TF_Output> ToOutput(const std::vector<TF_Operation*> ops) {
std::vector<TF_Output> out;
for (auto op : ops) {
out.push_back({op, 0});
}
return out;
}
void Define(int num_opers, const std::vector<TF_Operation*>& opers,
const std::vector<TF_Operation*>& inputs,
const std::vector<TF_Operation*>& outputs,
const std::vector<string>& output_names,
bool expect_failure = false) {
DefineT(num_opers, opers, ToOutput(inputs), ToOutput(outputs), output_names,
expect_failure);
}
static const char** ToArray(const std::vector<string>& strs) {
const char** ptr = nullptr;
if (!strs.empty()) {
ptr = new const char*[strs.size()];
for (size_t i = 0; i < strs.size(); ++i) {
ptr[i] = strs[i].c_str();
}
}
return ptr;
}
void DefineT(int num_opers, const std::vector<TF_Operation*>& opers,
const std::vector<TF_Output>& inputs,
const std::vector<TF_Output>& outputs,
const std::vector<string>& output_names,
bool expect_failure = false) {
ASSERT_EQ(func_, nullptr);
const char** output_names_ptr = ToArray(output_names);
func_ = TF_GraphToFunction(func_graph_, func_name_, false, num_opers,
num_opers == -1 ? nullptr : opers.data(),
inputs.size(), inputs.data(), outputs.size(),
outputs.data(), output_names_ptr,
nullptr, nullptr, s_);
delete[] output_names_ptr;
if (expect_failure) {
ASSERT_EQ(func_, nullptr);
return;
}
ASSERT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);
ASSERT_NE(func_, nullptr);
ASSERT_EQ(std::string(func_name_), std::string(TF_FunctionName(func_)));
TF_GraphCopyFunction(host_graph_, func_, nullptr, s_);
ASSERT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);
}
TF_Operation* Use(const std::vector<TF_Operation*>& inputs) {
return UseT(ToOutput(inputs));
}
TF_Operation* UseT(const std::vector<TF_Output>& inputs) {
TF_Operation* op;
UseHelper(inputs, &op);
return op;
}
void UseHelper(const std::vector<TF_Output>& inputs, TF_Operation** op) {
TF_OperationDescription* desc =
TF_NewOperation(host_graph_, func_name_, func_node_name_);
for (auto input : inputs) {
TF_AddInput(desc, input);
}
TF_SetDevice(desc, "/cpu:0");
*op = TF_FinishOperation(desc, s_);
ASSERT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);
ASSERT_NE(*op, nullptr);
}
FunctionDef fdef() {
tensorflow::FunctionDef fdef;
EXPECT_TRUE(GetFunctionDef(func_, &fdef));
return fdef;
}
template <class Container>
string ToString(const Container& v) {
std::stringstream ss;
ss << "{";
size_t i = 0;
for (const auto& e : v) {
if (i != 0) {
ss << ", ";
}
ss << e.ToString();
++i;
}
ss << "}";
return ss.str();
}
void VerifyFDefNodes(const tensorflow::FunctionDef& fdef,
const std::unordered_set<string>& nodes) {
ASSERT_EQ(nodes.size(), fdef.node_def_size())
<< "Got unexpected number of nodes. Expected: ["
<< absl::StrJoin(nodes, ", ")
<< "] Actual nodes in fdef: " << fdef.DebugString();
for (const NodeDef& node_def : fdef.node_def()) {
ASSERT_TRUE(nodes.find(node_def.name()) != nodes.end())
<< "Got unexpected node: " << node_def.name()
<< " in fdef: " << fdef.DebugString();
}
}
void VerifyFDefInputs(const tensorflow::FunctionDef& fdef,
const std::vector<IOSpec>& inputs) {
const OpDef& signature = fdef.signature();
ASSERT_EQ(inputs.size(), signature.input_arg_size());
for (int i = 0; i < inputs.size(); ++i) {
const OpDef::ArgDef& arg = signature.input_arg(i);
const IOSpec& in = inputs[i];
if (in.second != DT_INVALID) {
ASSERT_EQ(arg.type(), in.second)
<< "Got unexpected type for input " << i
<< ". fdef: " << fdef.DebugString();
}
ASSERT_EQ(arg.name(), in.first) << "Got unexpected name for input " << i
<< ". fdef: " << fdef.DebugString();
}
}
void VerifyFDefOutputs(const tensorflow::FunctionDef& fdef,
const std::vector<IOSpec>& outputs) {
const OpDef& signature = fdef.signature();
ASSERT_EQ(outputs.size(), signature.output_arg_size());
for (int i = 0; i < outputs.size(); ++i) {
const OpDef::ArgDef& arg = signature.output_arg(i);
const IOSpec& out = outputs[i];
if (out.second != DT_INVALID) {
ASSERT_EQ(arg.type(), out.second)
<< "Got unexpected type for output " << i
<< ". fdef: " << fdef.DebugString();
}
ASSERT_EQ(arg.name(), out.first) << "Got unexpected name for output " << i
<< ". fdef: " << fdef.DebugString();
}
}
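  // Checks the edge structure of a FunctionDef: collects every actual edge
  // from the node_defs' inputs and from the signature's output-arg -> ret
  // mappings, then asserts that each expected data and control edge is
  // present, and (when is_exact_edges is true) that no other edges exist.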
void VerifyFDefEdges(
const tensorflow::FunctionDef& fdef,
const std::vector<EdgeSpec>& e_edges,
const std::vector<EdgeSpec>& c_edges,
bool is_exact_edges = true) {
std::set<EdgeSpec> a_edges;
for (const NodeDef& node_def : fdef.node_def()) {
for (int i = 0; i < node_def.input_size(); ++i) {
const string& in = node_def.input(i);
const auto& v =
a_edges.insert({in, strings::StrCat(node_def.name(), ":", i)});
ASSERT_TRUE(v.second) << "Duplicate edge " << in << " -> "
<< strings::StrCat(node_def.name(), ":", i)
<< ". fdef: " << fdef.DebugString();
}
}
for (const OpDef::ArgDef& arg : fdef.signature().output_arg()) {
const auto& iter = fdef.ret().find(arg.name());
if (iter != fdef.ret().end()) {
const auto& v = a_edges.insert({iter->second, arg.name()});
ASSERT_TRUE(v.second) << "Duplicate edge " << iter->second << " -> "
<< arg.name() << ". fdef: " << fdef.DebugString();
} else {
const auto& v = a_edges.insert({arg.name(), arg.name()});
ASSERT_TRUE(v.second) << "Duplicate edge " << arg.name() << " -> "
<< arg.name() << ". fdef: " << fdef.DebugString();
}
}
for (const EdgeSpec& e : e_edges) {
ASSERT_TRUE(a_edges.find(e) != a_edges.end())
<< "Failed to find expected edge " << e.ToString()
<< " in fdef: " << fdef.DebugString();
}
for (const EdgeSpec& e : c_edges) {
ASSERT_TRUE(a_edges.find(e) != a_edges.end())
<< "Failed to find expected control edge " << e.ToString()
<< " in fdef: " << fdef.DebugString();
}
if (is_exact_edges) {
ASSERT_EQ(e_edges.size() + c_edges.size(), a_edges.size())
<< "Expected edges: " << ToString(e_edges)
<< " Expected Control edges: " << ToString(c_edges)
<< " Actual edges: " << ToString(a_edges)
<< " in fdef: " << fdef.DebugString();
}
}
void VerifyFDef(const std::unordered_set<string>& nodes,
const std::vector<IOSpec>& inputs,
const std::vector<IOSpec>& outputs,
const std::vector<EdgeSpec>& e_edges,
const std::vector<EdgeSpec>& c_edges,
bool is_exact_edges = true) {
tensorflow::FunctionDef fdef;
ASSERT_TRUE(GetFunctionDef(func_, &fdef));
VerifyFDefNodes(fdef, nodes);
VerifyFDefInputs(fdef, inputs);
VerifyFDefOutputs(fdef, outputs);
VerifyFDefEdges(fdef, e_edges, c_edges, is_exact_edges);
}
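// Round-trips the function: serializes `func_` to a FunctionDef, deletes it,
// and re-imports it with TF_FunctionImportFunctionDef so that subsequent
// checks run against the re-imported copy.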
void Reincarnate() {
tensorflow::FunctionDef fdef;
ASSERT_TRUE(GetFunctionDef(func_, &fdef));
TF_DeleteFunction(func_);
string buf;
ASSERT_TRUE(fdef.SerializeToString(&buf));
func_ = TF_FunctionImportFunctionDef(buf.data(), buf.size(), s_);
ASSERT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);
}
void GetAttr(const char* attr_name, AttrValue* out_attr) {
TF_Buffer* attr_buf = TF_NewBuffer();
TF_FunctionGetAttrValueProto(func_, attr_name, attr_buf, s_);
ASSERT_TRUE(out_attr->ParseFromArray(attr_buf->data, attr_buf->length));
TF_DeleteBuffer(attr_buf);
}
const char* func_name_ = "MyFunc";
const char* func_node_name_ = "MyFunc_0";
TF_Status* s_;
TF_Graph* func_graph_;
TF_Graph* host_graph_;
TF_Function* func_;
std::unordered_set<string> empty_;
};
TEST_F(CApiFunctionTest, OneOp_ZeroInputs_OneOutput) {
TF_Operation* c = ScalarConst(10, func_graph_, s_, "scalar10");
Define(-1, {}, {}, {c}, {});
TF_Operation* func_op = Use({});
Run({}, func_op, 10);
VerifyFDef({"scalar10_0"}, {}, {{"scalar10", DT_INT32}},
{{"scalar10_0:output:0", "scalar10"}}, {});
}
TEST_F(CApiFunctionTest, OneOp_OneInput_OneOutput) {
TF_Operation* feed = Placeholder(func_graph_, s_);
TF_Operation* neg = Neg(feed, func_graph_, s_);
Define(-1, {}, {feed}, {neg}, {});
TF_Operation* func_feed = Placeholder(host_graph_, s_);
TF_Operation* func_op = Use({func_feed});
Run({{func_feed, Int32Tensor(3)}}, func_op, -3);
VerifyFDef({"neg_0"}, {{"feed", DT_INT32}}, {{"neg", DT_INT32}},
{{"feed", "neg_0:0"}, {"neg_0:y:0", "neg"}}, {});
}
TEST_F(CApiFunctionTest, OneOutput_OutputNames) {
TF_Operation* feed = Placeholder(func_graph_, s_);
TF_Operation* neg = Neg(feed, func_graph_, s_);
Define(-1, {}, {feed}, {neg}, {"negated_num"});
TF_Operation* func_feed = Placeholder(host_graph_, s_);
TF_Operation* func_op = Use({func_feed});
Run({{func_feed, Int32Tensor(3)}}, func_op, -3);
VerifyFDef({"neg"}, {{"feed", DT_INT32}}, {{"negated_num", DT_INT32}},
{{"feed", "neg:0"}, {"neg:y:0", "negated_num"}}, {});
}
TEST_F(CApiFunctionTest, OutputNames_SameNameAsInput) {
TF_Operation* feed = Placeholder(func_graph_, s_, "negation");
TF_Operation* neg = Neg(feed, func_graph_, s_, "neg");
Define(-1, {}, {feed}, {neg}, {"negation"});
TF_Operation* func_feed = Placeholder(host_graph_, s_);
TF_Operation* func_op = Use({func_feed});
Run({{func_feed, Int32Tensor(3)}}, func_op, -3);
VerifyFDef({"neg"}, {{"negation_0", DT_INT32}}, {{"negation", DT_INT32}},
{{"negation_0", "neg:0"}, {"neg:y:0", "negation"}}, {});
}
TEST_F(CApiFunctionTest, ZeroOps_Identity) {
TF_Operation* feed = Placeholder(func_graph_, s_);
Define(-1, {}, {feed}, {feed}, {});
TF_Operation* func_feed = Placeholder(host_graph_, s_);
TF_Operation* func_op = Use({func_feed});
Run({{func_feed, Int32Tensor(3)}}, func_op, 3);
VerifyFDef(empty_, {{"feed_0", DT_INT32}}, {{"feed", DT_INT32}},
{{"feed_0", "feed"}}, {});
}
TEST_F(CApiFunctionTest, ZeroOps_Permutation) {
TF_Operation* feed1 = Placeholder(func_graph_, s_, "feed1");
TF_Operation* feed2 = Placeholder(func_graph_, s_, "feed2");
Define(-1, {}, {feed1, feed2}, {feed2, feed1}, {});
TF_Operation* two = ScalarConst(2, host_graph_, s_);
TF_Operation* func_feed = Placeholder(host_graph_, s_);
TF_Operation* func_op = Use({two, func_feed});
Run({{func_feed, Int32Tensor(3)}}, {{func_op, 0}, {func_op, 1}}, {3, 2});
VerifyFDef(empty_, M({{"feed1_0"}, {"feed2_0"}}), M({{"feed2"}, {"feed1"}}),
{{"feed1_0", "feed1"}, {"feed2_0", "feed2"}}, {});
}
TEST_F(CApiFunctionTest, ZeroOps_Permutation_OutputNames) {
TF_Operation* feed1 = Placeholder(func_graph_, s_, "feed1");
TF_Operation* feed2 = Placeholder(func_graph_, s_, "feed2");
Define(-1, {}, {feed1, feed2}, {feed2, feed1}, {"first", "second"});
TF_Operation* two = ScalarConst(2, host_graph_, s_);
TF_Operation* func_feed = Placeholder(host_graph_, s_);
TF_Operation* func_op = Use({two, func_feed});
Run({{func_feed, Int32Tensor(3)}}, {{func_op, 0}, {func_op, 1}}, {3, 2});
VerifyFDef(empty_, M({{"feed1"}, {"feed2"}}), M({{"first"}, {"second"}}),
{{"feed1", "second"}, {"feed2", "first"}}, {});
}
TEST_F(CApiFunctionTest, OneOp_TwoInputs_OneOutput) {
TF_Operation* feed1 = Placeholder(func_graph_, s_, "feed1");
TF_Operation* feed2 = Placeholder(func_graph_, s_, "feed2");
TF_Operation* add = Add(feed1, feed2, func_graph_, s_);
Define(-1, {}, {feed1, feed2}, {add}, {});
TF_Operation* two = ScalarConst(2, host_graph_, s_);
TF_Operation* func_feed = Placeholder(host_graph_, s_);
TF_Operation* func_op = Use({two, func_feed});
Run({{func_feed, Int32Tensor(3)}}, func_op, 2 + 3);
VerifyFDef(
{"add_0"}, M({{"feed1"}, {"feed2"}}), M({{"add"}}),
{{"feed1", "add_0:0"}, {"feed2", "add_0:1"}, {"add_0:sum:0", "add"}}, {});
}
TEST_F(CApiFunctionTest, OneOp_TwoInputs_ZeroOutputs) {
TF_Operation* feed1 = Placeholder(func_graph_, s_, "feed1");
TF_Operation* feed2 = Placeholder(func_graph_, s_, "feed2");
Add(feed1, feed2, func_graph_, s_);
Define(-1, {}, {feed1, feed2}, {}, {});
TF_Operation* two = ScalarConst(2, host_graph_, s_);
TF_Operation* func_feed = Placeholder(host_graph_, s_);
Use({two, func_feed});
VerifyFDef({"add"}, M({{"feed1"}, {"feed2"}}), {},
{{"feed1", "add:0"}, {"feed2", "add:1"}}, {});
}
TEST_F(CApiFunctionTest, TwoOps_ThreeInputs_OneOutput) {
TF_Operation* feed1 = Placeholder(func_graph_, s_, "feed1");
TF_Operation* feed2 = Placeholder(func_graph_, s_, "feed2");
TF_Operation* feed3 = Placeholder(func_graph_, s_, "feed3");
TF_Operation* add1 = Add(feed1, feed2, func_graph_, s_, "add1");
TF_Operation* add2 = Add(add1, feed3, func_graph_, s_, "add2");
Define(-1, {}, {feed1, feed2, feed3}, {add2}, {});
TF_Operation* two = ScalarConst(2, host_graph_, s_, "two");
TF_Operation* ten = ScalarConst(10, host_graph_, s_, "ten");
TF_Operation* func_feed = Placeholder(host_graph_, s_);
TF_Operation* func_op = Use({two, ten, func_feed});
Run({{func_feed, Int32Tensor(3)}}, func_op, 2 + 10 + 3);
VerifyFDef({"add1", "add2_0"}, M({{"feed1"}, {"feed2"}, {"feed3"}}),
M({{"add2"}}),
{{"feed1", "add1:0"},
{"feed2", "add1:1"},
{"add1:sum:0", "add2_0:0"},
{"feed3", "add2_0:1"},
{"add2_0:sum:0", "add2"}},
{});
}
TEST_F(CApiFunctionTest, OneOp_TwoInputs_TwoDuplicateOutputs) {
TF_Operation* feed1 = Placeholder(func_graph_, s_, "feed1");
TF_Operation* feed2 = Placeholder(func_graph_, s_, "feed2");
TF_Operation* add = Add(feed1, feed2, func_graph_, s_);
Define(-1, {}, {feed1, feed2}, {add, add}, {});
TF_Operation* two = ScalarConst(2, host_graph_, s_);
TF_Operation* func_feed = Placeholder(host_graph_, s_);
TF_Operation* func_op = Use({two, func_feed});
Run({{func_feed, Int32Tensor(3)}}, {{func_op, 0}, {func_op, 1}}, {5, 5});
VerifyFDef({"add_1"}, M({{"feed1"}, {"feed2"}}), M({{"add"}, {"add_0"}}),
{{"feed1", "add_1:0"},
{"feed2", "add_1:1"},
{"add_1:sum:0", "add"},
{"add_1:sum:0", "add_0"}},
{});
}
TEST_F(CApiFunctionTest, TwoDuplicateOutputs_OutputNames) {
TF_Operation* feed1 = Placeholder(func_graph_, s_, "feed1");
TF_Operation* feed2 = Placeholder(func_graph_, s_, "feed2");
TF_Operation* add = Add(feed1, feed2, func_graph_, s_);
Define(-1, {}, {feed1, feed2}, {add, add}, {"out1", "out2"});
TF_Operation* two = ScalarConst(2, host_graph_, s_);
TF_Operation* func_feed = Placeholder(host_graph_, s_);
TF_Operation* func_op = Use({two, func_feed});
Run({{func_feed, Int32Tensor(3)}}, {{func_op, 0}, {func_op, 1}}, {5, 5});
VerifyFDef({"add"}, M({{"feed1"}, {"feed2"}}), M({{"out1"}, {"out2"}}),
{{"feed1", "add:0"},
{"feed2", "add:1"},
{"add:sum:0", "out1"},
{"add:sum:0", "out2"}},
{});
}
TEST_F(CApiFunctionTest, TwoOps_ThreeInputs_TwoOutputs) {
TF_Operation* feed1 = Placeholder(func_graph_, s_, "feed1");
TF_Operation* feed2 = Placeholder(func_graph_, s_, "feed2");
TF_Operation* feed3 = Placeholder(func_graph_, s_, "feed3");
TF_Operation* add1 = Add(feed1, feed2, func_graph_, s_, "add1");
TF_Operation* add2 = Add(add1, feed3, func_graph_, s_, "add2");
Define(-1, {}, {feed1, feed2, feed3}, {add1, add2}, {});
TF_Operation* two = ScalarConst(2, host_graph_, s_, "two");
TF_Operation* ten = ScalarConst(10, host_graph_, s_, "ten");
TF_Operation* func_feed = Placeholder(host_graph_, s_);
TF_Operation* func_op = Use({two, ten, func_feed});
Run({{func_feed, Int32Tensor(3)}}, {{func_op, 0}, {func_op, 1}}, {12, 15});
VerifyFDef({"add1_0", "add2_0"}, M({{"feed1"}, {"feed2"}, {"feed3"}}),
M({{"add1"}, {"add2"}}),
{{"feed1", "add1_0:0"},
{"feed2", "add1_0:1"},
{"add1_0:sum:0", "add2_0:0"},
{"feed3", "add2_0:1"},
{"add1_0:sum:0", "add1"},
{"add2_0:sum:0", "add2"}},
{});
}
TEST_F(CApiFunctionTest, FromSubsetOfOps) {
TF_Operation* feed1 = Placeholder(func_graph_, s_, "feed1");
TF_Operation* feed2 = Placeholder(func_graph_, s_, "feed2");
TF_Operation* feed3 = Placeholder(func_graph_, s_, "feed3");
TF_Operation* add1 = Add(feed1, feed2, func_graph_, s_, "add1");
TF_Operation* add2 = Add(add1, feed3, func_graph_, s_, "add2");
Define(1, {add2}, {add1, feed3}, {add2}, {});
TF_Operation* two = ScalarConst(2, host_graph_, s_, "two");
TF_Operation* func_feed = Placeholder(host_graph_, s_);
TF_Operation* func_op = Use({two, func_feed});
Run({{func_feed, Int32Tensor(3)}}, func_op, 2 + 3);
VerifyFDef(
{"add2_0"}, M({{"add1"}, {"feed3"}}), M({{"add2"}}),
{{"add1", "add2_0:0"}, {"feed3", "add2_0:1"}, {"add2_0:sum:0", "add2"}},
{});
}
TEST_F(CApiFunctionTest, UsingOneOutputOfSplit) {
TF_Operation* feed = Placeholder(func_graph_, s_);
TF_Operation* split = Split3(feed, func_graph_, s_);
DefineT(-1, {}, {{feed, 0}}, {{split, 1}}, {});
TF_Operation* func_feed = Placeholder(host_graph_, s_);
TF_Operation* func_op = Use({func_feed});
RunT({{func_feed, Int32Tensor({1, 2, 3, 4, 5, 6})}}, {{func_op, 0}},
{{3, 4}});
VerifyFDef({"split3_const0", "split3_0"}, M({{"feed"}}), M({{"split3"}}),
{{"split3_const0:output:0", "split3_0:0"},
{"feed", "split3_0:1"},
{"split3_0:output:1", "split3"}},
{});
}
TEST_F(CApiFunctionTest, UsingTwoOutputsOfSplit) {
TF_Operation* feed = Placeholder(func_graph_, s_);
TF_Operation* split = Split3(feed, func_graph_, s_);
DefineT(-1, {}, {{feed, 0}}, {{split, 0}, {split, 2}}, {});
TF_Operation* func_feed = Placeholder(host_graph_, s_);
TF_Operation* func_op = Use({func_feed});
RunT({{func_feed, Int32Tensor({1, 2, 3, 4, 5, 6})}},
{{func_op, 0}, {func_op, 1}}, {{1, 2}, {5, 6}});
VerifyFDef({"split3_const0", "split3_1"}, M({{"feed"}}),
M({{"split3"}, {"split3_0"}}),
{{"split3_const0:output:0", "split3_1:0"},
{"feed", "split3_1:1"},
{"split3_1:output:0", "split3"},
{"split3_1:output:2", "split3_0"}},
{});
}
TEST_F(CApiFunctionTest, UsingTwoOutputsOfSplitAsInputs) {
TF_Operation* feed = Placeholder(func_graph_, s_);
TF_Operation* split = Split3(feed, func_graph_, s_);
TF_Operation* add = Add({split, 0}, {split, 2}, func_graph_, s_);
ASSERT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);
DefineT(1, {add}, {{split, 0}, {split, 2}}, {{add, 0}}, {});
TF_Operation* two = ScalarConst(2, host_graph_, s_, "two");
TF_Operation* func_feed = Placeholder(host_graph_, s_);
TF_Operation* func_op = Use({two, func_feed});
Run({{func_feed, Int32Tensor(3)}}, func_op, 2 + 3);
VerifyFDef(
{"add_0"}, M({{"split3"}, {"split3_0"}}), M({{"add"}}),
{{"split3", "add_0:0"}, {"split3_0", "add_0:1"}, {"add_0:sum:0", "add"}},
{});
}
TEST_F(CApiFunctionTest, NodesUsedInInputsMustHaveSingleOutput) {
TF_Tensor* tensor_123 = Int32Tensor({1, 2, 3});
TF_Operation* c = Const(tensor_123, func_graph_, s_, "const_array");
ASSERT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);
TF_Operation* split = Split3(c, func_graph_, s_);
TF_Operation* add = Add({split, 0}, {split, 2}, func_graph_, s_);
ASSERT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);
DefineT(-1, {}, {{split, 0}, {split, 2}}, {{add, 0}}, {}, true);
EXPECT_EQ(TF_INVALID_ARGUMENT, TF_GetCode(s_));
EXPECT_EQ(string("When `num_opers` is set to -1, nodes referenced in "
"`inputs` must have a single output. Node split3 has "
"3 outputs. Encountered while creating function 'MyFunc'"),
string(TF_Message(s_)));
TF_DeleteTensor(tensor_123);
}
TEST_F(CApiFunctionTest, FunctionWithWhileLoop) {
TF_Operation* feed1 = Placeholder(func_graph_, s_, "feed1");
TF_Operation* feed2 = Placeholder(func_graph_, s_, "feed2");
std::vector<TF_Output> outputs;
{
std::vector<TF_Output> inputs = {{feed1, 0}, {feed2, 0}};
std::unique_ptr<TF_WhileParams> params(new TF_WhileParams(
TF_NewWhile(func_graph_, &inputs[0], inputs.size(), s_)));
ASSERT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);
params->name = "test_loop";
outputs.resize(2, {nullptr, -1});
TF_Operation* less_than = LessThan(
params->cond_inputs[0], params->cond_inputs[1], params->cond_graph, s_);
ASSERT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);
params->cond_output = {less_than, 0};
TF_Operation* add1 = Add(params->body_inputs[0], params->body_inputs[1],
params->body_graph, s_, "add1");
ASSERT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);
TF_Operation* one = ScalarConst(1, params->body_graph, s_);
ASSERT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);
TF_Operation* add2 = Add(add1, one, params->body_graph, s_, "add2");
ASSERT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);
params->body_outputs[0] = {add2, 0};
params->body_outputs[1] = params->body_inputs[1];
TF_FinishWhile(params.get(), s_, &outputs[0]);
EXPECT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);
}
DefineT(-1, {}, {{feed1, 0}, {feed2, 0}}, {outputs[0]}, {});
TF_Operation* five = ScalarConst(5, host_graph_, s_, "five");
TF_Operation* func_feed = Placeholder(host_graph_, s_);
TF_Operation* func_op = Use({func_feed, five});
Run({{func_feed, Int32Tensor(2)}}, func_op, 2 + 5 + 1);
tensorflow::FunctionDef fdef;
ASSERT_TRUE(GetFunctionDef(func_, &fdef));
VerifyFDefInputs(fdef, M({{"feed1"}, {"feed2"}}));
VerifyFDefOutputs(fdef, M({{"test_loop_exit"}}));
VerifyFDefEdges(fdef,
{{"feed1", "test_loop/Enter:0"},
{"test_loop/Enter:output:0", "test_loop/Merge:0"},
{"test_loop/Merge:output:0", "test_loop/Switch:0"},
{"test_loop/Switch:output_false:0", "test_loop/Exit:0"},
{"test_loop/Exit:output:0", "test_loop_exit"}},
{}, false);
}
TEST_F(CApiFunctionTest, ControlDependency) {
TF_Operation* feed1 = Placeholder(func_graph_, s_, "feed1");
TF_Operation* feed2 = Placeholder(func_graph_, s_, "feed2");
TF_Operation* five = ScalarConst(5, func_graph_, s_);
TF_Operation* add =
AddWithCtrlDependency(feed1, feed2, func_graph_, five, s_);
EXPECT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);
Define(-1, {}, {feed1, feed2}, {add}, {});
TF_Operation* two = ScalarConst(2, host_graph_, s_);
TF_Operation* func_feed = Placeholder(host_graph_, s_);
TF_Operation* func_op = Use({two, func_feed});
Run({{func_feed, Int32Tensor(3)}}, func_op, 2 + 3);
VerifyFDef(
{"add_0", "scalar"}, M({{"feed1"}, {"feed2"}}), M({{"add"}}),
{{"feed1", "add_0:0"}, {"feed2", "add_0:1"}, {"add_0:sum:0", "add"}},
{{"^scalar", "add_0:2"}});
}
TEST_F(CApiFunctionTest, ControlDependencyOutsideOfBody) {
TF_Operation* feed1 = Placeholder(func_graph_, s_, "feed1");
TF_Operation* feed2 = Placeholder(func_graph_, s_, "feed2");
TF_Operation* five = ScalarConst(5, func_graph_, s_);
TF_Operation* add =
AddWithCtrlDependency(feed1, feed2, func_graph_, five, s_);
EXPECT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);
Define(1, {add}, {feed1, feed2}, {add}, {}, true);
EXPECT_EQ(TF_INVALID_ARGUMENT, TF_GetCode(s_));
EXPECT_EQ(string("The source of control edge [id=3 scalar:-1 -> add:-1] "
"is not in the body. Encountered while creating "
"function 'MyFunc'"),
string(TF_Message(s_)));
}
TEST_F(CApiFunctionTest, ControlDependencyOutsideOfBody_FromInputNode) {
TF_Operation* feed1 = Placeholder(func_graph_, s_, "feed1");
TF_Operation* feed2 = Placeholder(func_graph_, s_, "feed2");
TF_Operation* add =
AddWithCtrlDependency(feed1, feed2, func_graph_, feed1, s_);
EXPECT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);
Define(-1, {}, {feed1, feed2}, {add}, {});
TF_Operation* two = ScalarConst(2, host_graph_, s_);
TF_Operation* func_feed = Placeholder(host_graph_, s_);
TF_Operation* func_op = Use({two, func_feed});
Run({{func_feed, Int32Tensor(3)}}, func_op, 2 + 3);
VerifyFDef(
{"add_0"}, M({{"feed1"}, {"feed2"}}), M({{"add"}}),
{{"feed1", "add_0:0"}, {"feed2", "add_0:1"}, {"add_0:sum:0", "add"}},
{{"^feed1", "add_0:2"}});
}
TEST_F(CApiFunctionTest, DuplicateInputsAreNotAllowed) {
TF_Operation* feed1 = Placeholder(func_graph_, s_, "feed1");
TF_Operation* add = Add(feed1, feed1, func_graph_, s_);
Define(-1, {}, {feed1, feed1}, {add}, {}, true);
EXPECT_EQ(TF_INVALID_ARGUMENT, TF_GetCode(s_));
EXPECT_EQ(
string("TF_Output feed1:0 appears more than once in the input list"),
string(TF_Message(s_)));
}
TEST_F(CApiFunctionTest, DuplicateOutputNamesAreNotAllowed) {
TF_Operation* feed1 = Placeholder(func_graph_, s_, "feed1");
TF_Operation* feed2 = Placeholder(func_graph_, s_, "feed2");
TF_Operation* feed3 = Placeholder(func_graph_, s_, "feed3");
TF_Operation* add1 = Add(feed1, feed2, func_graph_, s_, "add1");
TF_Operation* add2 = Add(add1, feed3, func_graph_, s_, "add2");
Define(-1, {}, {feed1, feed2, feed3}, {add1, add2}, {"my_out", "my_out"},
true);
EXPECT_EQ(TF_INVALID_ARGUMENT, TF_GetCode(s_));
EXPECT_EQ(string("Cannot have duplicate output names. Name 'my_out' "
"appears more than once in 'output_names' array."),
string(TF_Message(s_)));
}
TEST_F(CApiFunctionTest, InvalidInputTensor_HighIndex) {
TF_Operation* feed1 = Placeholder(func_graph_, s_, "feed1");
TF_Operation* feed2 = Placeholder(func_graph_, s_, "feed2");
TF_Operation* add = Add(feed1, feed2, func_graph_, s_);
DefineT(-1, {}, {{feed1, 0}, {feed2, 2}}, {{add, 0}}, {}, true);
EXPECT_EQ(TF_OUT_OF_RANGE, TF_GetCode(s_));
EXPECT_EQ(string("Node 'feed2' (type: 'Placeholder', num of outputs: 1) does "
"not have output 2\n\tEncountered while processing "
"input 1 into function 'MyFunc'"),
string(TF_Message(s_)));
}
TEST_F(CApiFunctionTest, InvalidInputTensor_BadNodePtr) {
TF_Operation* feed1 = Placeholder(func_graph_, s_, "feed1");
TF_Operation* feed2 = Placeholder(func_graph_, s_, "feed2");
TF_Operation* add = Add(feed1, feed2, func_graph_, s_);
DefineT(-1, {}, {{feed1, 0}, {nullptr, 0}}, {{add, 0}}, {}, true);
EXPECT_EQ(TF_INVALID_ARGUMENT, TF_GetCode(s_));
EXPECT_EQ(string("Node is null\n\tEncountered while processing input 1 "
"into function 'MyFunc'"),
string(TF_Message(s_)));
}
TEST_F(CApiFunctionTest, InvalidOutputTensor_HighIndex) {
TF_Operation* feed1 = Placeholder(func_graph_, s_, "feed1");
TF_Operation* feed2 = Placeholder(func_graph_, s_, "feed2");
TF_Operation* add = Add(feed1, feed2, func_graph_, s_);
DefineT(-1, {}, {{feed1, 0}, {feed2, 0}}, {{add, 3}}, {}, true);
EXPECT_EQ(TF_OUT_OF_RANGE, TF_GetCode(s_));
EXPECT_EQ(string("Node 'add' (type: 'AddN', num of outputs: 1) does "
"not have output 3\n\tEncountered while processing "
"output 0 from function 'MyFunc'"),
string(TF_Message(s_)));
}
TEST_F(CApiFunctionTest, InvalidOutputTensor_BadNodePtr) {
TF_Operation* feed1 = Placeholder(func_graph_, s_, "feed1");
TF_Operation* feed2 = Placeholder(func_graph_, s_, "feed2");
Add(feed1, feed2, func_graph_, s_);
DefineT(-1, {}, {{feed1, 0}, {feed2, 0}}, {{nullptr, 3}}, {}, true);
EXPECT_EQ(TF_INVALID_ARGUMENT, TF_GetCode(s_));
EXPECT_EQ(string("Node is null\n\tEncountered while processing output 0 "
"from function 'MyFunc'"),
string(TF_Message(s_)));
}
TEST_F(CApiFunctionTest, NodeMissingInput) {
TF_Operation* feed1 = Placeholder(func_graph_, s_, "feed1");
TF_Operation* feed2 = Placeholder(func_graph_, s_, "feed2");
TF_Operation* add = Add(feed1, feed2, func_graph_, s_);
DefineT(1, {add}, {{feed1, 0}}, {{add, 0}}, {}, true);
EXPECT_EQ(TF_INVALID_ARGUMENT, TF_GetCode(s_));
EXPECT_EQ(string("Input 1, 'feed2:0', of node 'add' in function 'MyFunc' "
"is not available. You might need to include it in inputs "
"or include its source node in the body"),
string(TF_Message(s_)));
}
TEST_F(CApiFunctionTest, OutputOpNotInBody) {
TF_Operation* feed1 = Placeholder(func_graph_, s_, "feed1");
TF_Operation* feed2 = Placeholder(func_graph_, s_, "feed2");
TF_Operation* scalar = ScalarConst(2, func_graph_, s_);
TF_Operation* add = Add(feed1, feed2, func_graph_, s_);
Define(1, {add}, {feed1, feed2}, {add, scalar}, {}, true);
EXPECT_EQ(TF_INVALID_ARGUMENT, TF_GetCode(s_));
EXPECT_EQ(string("TF_Output scalar:0 is neither in the function body nor "
"among function inputs. Encountered while creating "
"function 'MyFunc'"),
string(TF_Message(s_)));
}
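// Builds a standalone graph computing neg(feed), attaches frozen stack traces
// to both nodes, and converts it into a TF_Function named `name` with
// TF_GraphToFunction.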
void DefineFunction(const char* name, TF_Function** func,
const char* description = nullptr,
bool append_hash = false) {
std::unique_ptr<TF_Graph, decltype(&TF_DeleteGraph)> func_graph(
TF_NewGraph(), TF_DeleteGraph);
std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)> s(TF_NewStatus(),
TF_DeleteStatus);
TF_Operation* feed = Placeholder(func_graph.get(), s.get());
TF_Operation* neg = Neg(feed, func_graph.get(), s.get());
std::vector<StackFrame> feed_frames = {{"feed.cc", 10, "alpha"}};
std::vector<StackFrame> neg_frames = {{"neg.cc", 15, "beta"}};
feed->node.SetStackTrace(std::make_shared<FrozenStackTrace>(feed_frames));
neg->node.SetStackTrace(std::make_shared<FrozenStackTrace>(neg_frames));
TF_Output inputs[] = {{feed, 0}};
TF_Output outputs[] = {{neg, 0}};
*func = TF_GraphToFunction(func_graph.get(), name, append_hash,
/*num_opers=*/-1, /*opers=*/nullptr,
/*ninputs=*/1, inputs, /*noutputs=*/1, outputs,
/*output_names=*/nullptr,
/*opts=*/nullptr, description, s.get());
ASSERT_EQ(TF_OK, TF_GetCode(s.get())) << TF_Message(s.get());
ASSERT_NE(*func, nullptr);
}
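// Test-only op whose "index" attr can be supplied via a placeholder; used
// below to verify that placeholder attrs surface as function signature attrs.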
REGISTER_OP("CustomOp")
.Output("output: float32")
.Attr("index: int")
.SetShapeFn(tensorflow::shape_inference::UnknownShape);
void NodeWithPlaceholderAttrHelper(TF_Graph* graph, TF_Status* s,
const char* name, const char* placeholder,
TF_Operation** op) {
TF_OperationDescription* desc = TF_NewOperation(graph, "CustomOp", name);
TF_SetAttrPlaceholder(desc, "index", placeholder);
*op = TF_FinishOperation(desc, s);
ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
ASSERT_NE(*op, nullptr);
}
TEST_F(CApiFunctionTest, GraphToFunctionDefWithPlaceholderAttr) {
std::unique_ptr<TF_Graph, decltype(&TF_DeleteGraph)> func_graph(
TF_NewGraph(), TF_DeleteGraph);
std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)> s(TF_NewStatus(),
TF_DeleteStatus);
TF_Operation *node1, *node2, *node3;
NodeWithPlaceholderAttrHelper(func_graph.get(), s.get(), "node1", "v1",
&node1);
NodeWithPlaceholderAttrHelper(func_graph.get(), s.get(), "node2", "v1",
&node2);
NodeWithPlaceholderAttrHelper(func_graph.get(), s.get(), "node3", "v2",
&node3);
TF_Output outputs[] = {{node1, 0}, {node2, 0}, {node3, 0}};
func_ = TF_GraphToFunction(
func_graph.get(), "func", /*append_hash_to_fn_name=*/false, /*num_opers=*/-1,
/*opers=*/nullptr, /*ninputs=*/0, /*inputs=*/nullptr, /*noutputs=*/3, outputs,
/*output_names=*/nullptr,
/*opts=*/nullptr, /*description=*/nullptr, s.get());
ASSERT_EQ(TF_OK, TF_GetCode(s.get())) << TF_Message(s.get());
ASSERT_NE(func_, nullptr);
ASSERT_EQ(func_->record->fdef().signature().attr().size(), 2);
EXPECT_EQ(func_->record->fdef().signature().attr(0).name(), "v1");
EXPECT_EQ(func_->record->fdef().signature().attr(0).type(), "int");
EXPECT_EQ(func_->record->fdef().signature().attr(1).name(), "v2");
EXPECT_EQ(func_->record->fdef().signature().attr(1).type(), "int");
}
void NodeWithAttrHelper(TF_Graph* graph, TF_Status* s, const char* name,
const char* attr_name, const char* attr_value,
TF_Operation** op) {
TF_OperationDescription* desc = TF_NewOperation(graph, "Placeholder", name);
TF_SetAttrType(desc, "dtype", TF_INT32);
TF_SetAttrString(desc, attr_name, attr_value, strlen(attr_value));
*op = TF_FinishOperation(desc, s);
ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
ASSERT_NE(*op, nullptr);
}
TEST_F(CApiFunctionTest, GraphToFunctionDefWithArgAttr) {
std::unique_ptr<TF_Graph, decltype(&TF_DeleteGraph)> func_graph(
TF_NewGraph(), TF_DeleteGraph);
std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)> s(TF_NewStatus(),
TF_DeleteStatus);
TF_Operation* node;
NodeWithAttrHelper(func_graph.get(), s.get(), "node", "_test_attr", "value",
&node);
TF_Output inputs[] = {{node, 0}};
func_ = TF_GraphToFunction(
func_graph.get(), "func", /*append_hash_to_fn_name=*/false, /*num_opers=*/-1,
/*opers=*/nullptr, /*ninputs=*/1, inputs, /*noutputs=*/0, /*outputs=*/nullptr,
/*output_names=*/nullptr,
/*opts=*/nullptr, /*description=*/nullptr, s.get());
ASSERT_EQ(TF_OK, TF_GetCode(s.get())) << TF_Message(s.get());
ASSERT_NE(func_, nullptr);
ASSERT_EQ(func_->record->fdef().arg_attr_size(), 1);
auto arg_attrs = func_->record->fdef().arg_attr().find(0);
ASSERT_NE(arg_attrs, func_->record->fdef().arg_attr().end());
auto iter = arg_attrs->second.attr().find("_test_attr");
ASSERT_NE(iter, arg_attrs->second.attr().end());
EXPECT_EQ(iter->second.s(), "value");
}
TEST_F(CApiFunctionTest, TFGraphToFunctionWithStackTraces) {
DefineFunction(func_name_, &func_);
auto stack_traces = func_->record->stack_traces();
EXPECT_EQ(stack_traces.size(), 4);
EXPECT_EQ(stack_traces["neg"]->ToString({}), kNegStackToString);
EXPECT_EQ(stack_traces["feed"]->ToString({}), kFeedStackToString);
}
TEST_F(CApiFunctionTest, TFGraphCopyFunctionWithStackTraces) {
DefineFunction(func_name_, &func_);
TF_Function* grad_func;
DefineFunction("MyGrad", &grad_func);
TF_GraphCopyFunction(host_graph_, func_, grad_func, s_);
ASSERT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);
TF_DeleteFunction(grad_func);
const StackTracesMap* func_stack_traces;
const StackTracesMap* grad_stack_traces;
{
mutex_lock l(host_graph_->mu);
auto flib_def = host_graph_->graph.flib_def();
func_stack_traces = flib_def.GetStackTraces(func_name_);
grad_stack_traces = flib_def.GetStackTraces("MyGrad");
}
ASSERT_NE(func_stack_traces, nullptr);
EXPECT_EQ(func_stack_traces->size(), 4);
EXPECT_EQ(func_stack_traces->at("neg")->ToString({}), kNegStackToString);
EXPECT_EQ(func_stack_traces->at("feed")->ToString({}), kFeedStackToString);
ASSERT_NE(grad_stack_traces, nullptr);
EXPECT_EQ(grad_stack_traces->size(), 4);
EXPECT_EQ(grad_stack_traces->at("neg")->ToString({}), kNegStackToString);
EXPECT_EQ(grad_stack_traces->at("feed")->ToString({}), kFeedStackToString);
}
TEST_F(CApiFunctionTest, SetGradientAndRun) {
DefineFunction(func_name_, &func_);
TF_Function* grad_func;
DefineFunction("MyGrad", &grad_func);
TF_GraphCopyFunction(host_graph_, func_, grad_func, s_);
ASSERT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);
GraphDef gdef;
GetGraphDef(host_graph_, &gdef);
std::vector<string> func_names = GetFuncNames(gdef);
ASSERT_EQ(2, func_names.size());
ASSERT_EQ(func_name_, func_names[0]);
ASSERT_EQ("MyGrad", func_names[1]);
std::vector<std::pair<string, string>> grads = GetGradDefs(gdef);
ASSERT_EQ(1, grads.size());
ASSERT_EQ(func_name_, grads[0].first);
ASSERT_EQ("MyGrad", grads[0].second);
TF_GraphCopyFunction(host_graph_, func_, grad_func, s_);
ASSERT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);
TF_GraphCopyFunction(host_graph_, func_, nullptr, s_);
ASSERT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);
TF_DeleteFunction(grad_func);
GraphDef gdef2;
GetGraphDef(host_graph_, &gdef2);
ASSERT_EQ(gdef.DebugString(), gdef2.DebugString());
TF_Operation* func_feed = Placeholder(host_graph_, s_);
TF_Operation* func_op = Use({func_feed});
Run({{func_feed, Int32Tensor(3)}}, func_op, -3);
}
TEST_F(CApiFunctionTest, SameGradForTwoFunctions) {
TF_Function* func1;
TF_Function* func2;
TF_Function* grad_func;
DefineFunction("FooFunc1", &func1);
DefineFunction("FooFunc2", &func2);
DefineFunction("MyGrad", &grad_func);
TF_GraphCopyFunction(host_graph_, func1, grad_func, s_);
ASSERT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);
TF_GraphCopyFunction(host_graph_, func2, grad_func, s_);
ASSERT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);
GraphDef gdef;
GetGraphDef(host_graph_, &gdef);
std::vector<std::pair<string, string>> grads = GetGradDefs(gdef);
ASSERT_EQ(2, grads.size());
ASSERT_EQ("FooFunc1", grads[0].first);
ASSERT_EQ("MyGrad", grads[0].second);
ASSERT_EQ("FooFunc2", grads[1].first);
ASSERT_EQ("MyGrad", grads[1].second);
TF_DeleteFunction(func1);
TF_DeleteFunction(func2);
TF_DeleteFunction(grad_func);
}
TEST_F(CApiFunctionTest, AddFunctionsThenMakeOneGradientOfAnother) {
TF_Function* func;
TF_Function* grad_func;
DefineFunction("FooFunc", &func);
DefineFunction("MyGrad", &grad_func);
TF_GraphCopyFunction(host_graph_, func, nullptr, s_);
ASSERT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);
TF_GraphCopyFunction(host_graph_, grad_func, nullptr, s_);
ASSERT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);
GraphDef gdef;
GetGraphDef(host_graph_, &gdef);
std::vector<string> func_names = GetFuncNames(gdef);
ASSERT_EQ(2, func_names.size());
ASSERT_EQ("FooFunc", func_names[0]);
ASSERT_EQ("MyGrad", func_names[1]);
ASSERT_EQ(0, GetGradDefs(gdef).size());
TF_GraphCopyFunction(host_graph_, func, grad_func, s_);
ASSERT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);
gdef.Clear();
GetGraphDef(host_graph_, &gdef);
std::vector<std::pair<string, string>> grads = GetGradDefs(gdef);
ASSERT_EQ(1, grads.size());
ASSERT_EQ("FooFunc", grads[0].first);
ASSERT_EQ("MyGrad", grads[0].second);
TF_DeleteFunction(func);
TF_DeleteFunction(grad_func);
}
TEST_F(CApiFunctionTest, GradientErrorCases) {
DefineFunction(func_name_, &func_);
TF_Function* grad_func1;
TF_Function* grad_func2;
DefineFunction("MyGrad1", &grad_func1);
DefineFunction("MyGrad2", &grad_func2);
TF_GraphCopyFunction(host_graph_, nullptr, func_, s_);
EXPECT_EQ(TF_INVALID_ARGUMENT, TF_GetCode(s_));
EXPECT_EQ(string("'func' argument to TF_GraphCopyFunction cannot be null"),
string(TF_Message(s_)));
TF_GraphCopyFunction(host_graph_, func_, grad_func1, s_);
ASSERT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);
TF_GraphCopyFunction(host_graph_, func_, grad_func2, s_);
EXPECT_EQ(TF_INVALID_ARGUMENT, TF_GetCode(s_));
EXPECT_EQ(string("Cannot assign gradient function 'MyGrad2' to 'MyFunc' "
"because it already has gradient function 'MyGrad1'"),
string(TF_Message(s_)));
TF_DeleteFunction(grad_func1);
TF_DeleteFunction(grad_func2);
}
TEST_F(CApiFunctionTest, ImportFunctionDef) {
TF_Operation* feed1 = Placeholder(func_graph_, s_, "feed1");
TF_Operation* feed2 = Placeholder(func_graph_, s_, "feed2");
TF_Operation* feed3 = Placeholder(func_graph_, s_, "feed3");
TF_Operation* add1 = Add(feed1, feed2, func_graph_, s_, "add1");
TF_Operation* add2 = Add(add1, feed3, func_graph_, s_, "add2");
Define(-1, {}, {feed1, feed2, feed3}, {add1, add2},
{"internal_out", "final_out"});
Reincarnate();
TF_Operation* two = ScalarConst(2, host_graph_, s_, "two");
TF_Operation* ten = ScalarConst(10, host_graph_, s_, "ten");
TF_Operation* func_feed = Placeholder(host_graph_, s_);
TF_Operation* func_op = Use({two, ten, func_feed});
Run({{func_feed, Int32Tensor(3)}}, {{func_op, 0}, {func_op, 1}}, {12, 15});
VerifyFDef({"add1", "add2"}, M({{"feed1"}, {"feed2"}, {"feed3"}}),
M({{"internal_out"}, {"final_out"}}),
{{"feed1", "add1:0"},
{"feed2", "add1:1"},
{"add1:sum:0", "add2:0"},
{"feed3", "add2:1"},
{"add1:sum:0", "internal_out"},
{"add2:sum:0", "final_out"}},
{});
}
TEST_F(CApiFunctionTest, ImportFunctionDef_InvalidProto) {
char proto[] = {0x0, 0x0, 0x0, 0x0};
func_ = TF_FunctionImportFunctionDef(proto, 4, s_);
EXPECT_TRUE(func_ == nullptr);
EXPECT_EQ(TF_INVALID_ARGUMENT, TF_GetCode(s_));
EXPECT_EQ(string("Invalid FunctionDef given to TF_FunctionImportFunctionDef"),
string(TF_Message(s_)));
}
TEST_F(CApiFunctionTest, Attribute) {
DefineFunction(func_name_, &func_);
TF_Buffer* attr_buf = TF_NewBuffer();
TF_FunctionGetAttrValueProto(func_, "foo_attr", attr_buf, s_);
EXPECT_EQ(TF_INVALID_ARGUMENT, TF_GetCode(s_));
EXPECT_EQ(string("Function 'MyFunc' has no attr named 'foo_attr'."),
string(TF_Message(s_)));
TF_DeleteBuffer(attr_buf);
tensorflow::AttrValue attr;
attr.set_s("test_attr_value");
string bytes;
attr.SerializeToString(&bytes);
TF_FunctionSetAttrValueProto(func_, "test_attr_name", bytes.data(),
bytes.size(), s_);
ASSERT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);
AttrValue read_attr;
GetAttr("test_attr_name", &read_attr);
ASSERT_EQ(attr.DebugString(), read_attr.DebugString());
Reincarnate();
AttrValue read_attr2;
GetAttr("test_attr_name", &read_attr2);
ASSERT_EQ(attr.DebugString(), read_attr2.DebugString());
}
TEST_F(CApiFunctionTest, Description) {
DefineFunction(func_name_, &func_, "Return something");
tensorflow::FunctionDef fdef;
ASSERT_TRUE(GetFunctionDef(func_, &fdef));
ASSERT_EQ(string("Return something"), fdef.signature().description());
}
TEST_F(CApiFunctionTest, Name) {
DefineFunction("long_func_name", &func_, "Return something",
/*append_hash=*/false);
tensorflow::FunctionDef fdef;
ASSERT_TRUE(GetFunctionDef(func_, &fdef));
ASSERT_EQ(string("long_func_name"), fdef.signature().name());
}
TEST_F(CApiFunctionTest, AppendHash) {
DefineFunction("func_name_base", &func_, "Return something",
/*append_hash=*/true);
tensorflow::FunctionDef fdef;
ASSERT_TRUE(GetFunctionDef(func_, &fdef));
#if (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
ASSERT_EQ(string("func_name_base_ZpgUD4x8oqk"), fdef.signature().name());
#else
ASSERT_EQ(string("func_name_base_qaJ8jA8UmGY"), fdef.signature().name());
#endif
}
TEST_F(CApiFunctionTest, GetOpDef) {
DefineFunction(func_name_, &func_);
TF_GraphCopyFunction(host_graph_, func_, nullptr, s_);
ASSERT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);
TF_Buffer* buffer = TF_NewBuffer();
TF_GraphGetOpDef(host_graph_, func_name_, buffer, s_);
ASSERT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);
string data(static_cast<const char*>(buffer->data), buffer->length);
OpDef op_def;
op_def.ParseFromString(data);
EXPECT_EQ(op_def.name(), func_name_);
EXPECT_EQ(op_def.input_arg_size(), 1);
EXPECT_EQ(op_def.output_arg_size(), 1);
EXPECT_FALSE(op_def.is_stateful());
TF_DeleteBuffer(buffer);
}
void DefineStatefulFunction(const char* name, TF_Function** func) {
std::unique_ptr<TF_Graph, decltype(&TF_DeleteGraph)> func_graph(
TF_NewGraph(), TF_DeleteGraph);
std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)> s(TF_NewStatus(),
TF_DeleteStatus);
TF_Tensor* tensor_shape = Int32Tensor({37, 1});
TF_Operation* shape = Const(tensor_shape, func_graph.get(), s.get(), "shape");
TF_Operation* random =
RandomUniform(shape, TF_FLOAT, func_graph.get(), s.get());
TF_Output outputs[] = {{random, 0}};
*func = TF_GraphToFunction(func_graph.get(), name,
/*append_hash_to_fn_name=*/false, /*num_opers=*/-1,
/*opers=*/nullptr, /*ninputs=*/0, /*inputs=*/nullptr, /*noutputs=*/1, outputs,
/*output_names=*/nullptr,
/*opts=*/nullptr, /*description=*/"", s.get());
ASSERT_EQ(TF_OK, TF_GetCode(s.get())) << TF_Message(s.get());
ASSERT_NE(*func, nullptr);
TF_DeleteTensor(tensor_shape);
}
TEST_F(CApiFunctionTest, StatefulOpDef) {
DefineStatefulFunction(func_name_, &func_);
TF_GraphCopyFunction(host_graph_, func_, nullptr, s_);
ASSERT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);
TF_Buffer* buffer = TF_NewBuffer();
TF_GraphGetOpDef(host_graph_, func_name_, buffer, s_);
ASSERT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);
string data(static_cast<const char*>(buffer->data), buffer->length);
OpDef op_def;
op_def.ParseFromString(data);
EXPECT_EQ(op_def.name(), func_name_);
EXPECT_EQ(op_def.input_arg_size(), 0);
EXPECT_EQ(op_def.output_arg_size(), 1);
EXPECT_TRUE(op_def.is_stateful());
TF_DeleteBuffer(buffer);
}
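// Asserts that two TF_Functions are identical by comparing deterministic
// serializations of their FunctionDefs.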
void AssertEqual(TF_Function* f1, TF_Function* f2) {
string s1, s2;
tensorflow::FunctionDef fdef1, fdef2;
ASSERT_TRUE(GetFunctionDef(f1, &fdef1));
ASSERT_TRUE(GetFunctionDef(f2, &fdef2));
SerializeToStringDeterministic(fdef1, &s1);
SerializeToStringDeterministic(fdef2, &s2);
ASSERT_EQ(s1, s2);
}
string GetName(TF_Function* func) {
tensorflow::FunctionDef fdef;
GetFunctionDef(func, &fdef);
return fdef.signature().name();
}
TEST_F(CApiFunctionTest, GetFunctionsFromGraph) {
TF_Function* funcs[2];
EXPECT_EQ(TF_GraphNumFunctions(host_graph_), 0);
TF_GraphGetFunctions(host_graph_, nullptr, 0, s_);
ASSERT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);
TF_Function* func0;
DefineFunction("FooFunc0", &func0);
TF_GraphCopyFunction(host_graph_, func0, nullptr, s_);
ASSERT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);
EXPECT_EQ(TF_GraphNumFunctions(host_graph_), 1);
EXPECT_EQ(TF_GraphGetFunctions(host_graph_, funcs, 0, s_), 0);
ASSERT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);
EXPECT_EQ(TF_GraphGetFunctions(host_graph_, funcs, 1, s_), 1);
ASSERT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);
AssertEqual(func0, funcs[0]);
TF_DeleteFunction(funcs[0]);
EXPECT_EQ(TF_GraphGetFunctions(host_graph_, funcs, 2, s_), 1);
ASSERT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);
AssertEqual(func0, funcs[0]);
TF_DeleteFunction(funcs[0]);
TF_Function* func1;
DefineFunction("FooFunc1", &func1);
TF_GraphCopyFunction(host_graph_, func1, nullptr, s_);
ASSERT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);
EXPECT_EQ(TF_GraphNumFunctions(host_graph_), 2);
EXPECT_EQ(TF_GraphGetFunctions(host_graph_, funcs, 0, s_), 0);
ASSERT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);
EXPECT_EQ(TF_GraphGetFunctions(host_graph_, funcs, 2, s_), 2);
ASSERT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);
if (GetName(funcs[0]) == GetName(func0)) {
AssertEqual(func0, funcs[0]);
AssertEqual(func1, funcs[1]);
} else {
AssertEqual(func0, funcs[1]);
AssertEqual(func1, funcs[0]);
}
TF_DeleteFunction(funcs[0]);
TF_DeleteFunction(funcs[1]);
TF_DeleteFunction(func0);
TF_DeleteFunction(func1);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/c/c_api_function.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/c/c_api_function_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
d0fc22c9-cb54-45c7-b006-ab38399e6d11 | cpp | google/tsl | net | tsl/platform/windows/net.cc | tsl/platform/net_test.cc | #include "tsl/platform/net.h"
#include <sys/types.h>
#include <winsock2.h>
#include <cstdlib>
#include <unordered_set>
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/windows/error_windows.h"
#undef ERROR
namespace tsl {
namespace internal {
namespace {
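// Returns true if binding a TCP (or UDP) socket to *port succeeds on this
// host. When *port is 0 the OS chooses a free port, which is written back to
// *port.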
bool IsPortAvailable(int* port, bool is_tcp) {
const int protocol = is_tcp ? IPPROTO_TCP : 0;
SOCKET sock = socket(AF_INET, is_tcp ? SOCK_STREAM : SOCK_DGRAM, protocol);
struct sockaddr_in addr;
int addr_len = static_cast<int>(sizeof(addr));
int actual_port;
CHECK_GE(*port, 0);
CHECK_LE(*port, 65535);
if (sock == INVALID_SOCKET) {
LOG(ERROR) << "socket() failed: "
<< tsl::internal::WindowsWSAGetLastErrorMessage();
return false;
}
const int one = 1;
int result = setsockopt(sock, SOL_SOCKET, SO_REUSEADDR,
reinterpret_cast<const char*>(&one), sizeof(one));
if (result == SOCKET_ERROR) {
LOG(ERROR) << "setsockopt() failed: "
<< tsl::internal::WindowsWSAGetLastErrorMessage();
closesocket(sock);
return false;
}
addr.sin_family = AF_INET;
addr.sin_addr.s_addr = INADDR_ANY;
addr.sin_port = htons((uint16_t)*port);
result = bind(sock, (struct sockaddr*)&addr, sizeof(addr));
if (result == SOCKET_ERROR) {
LOG(WARNING) << "bind(port=" << *port << ") failed: "
<< tsl::internal::WindowsWSAGetLastErrorMessage();
closesocket(sock);
return false;
}
result = getsockname(sock, (struct sockaddr*)&addr, &addr_len);
if (result == SOCKET_ERROR) {
LOG(WARNING) << "getsockname() failed: "
<< tsl::internal::WindowsWSAGetLastErrorMessage();
closesocket(sock);
return false;
}
CHECK_LE(addr_len, sizeof(addr));
actual_port = ntohs(addr.sin_port);
CHECK_GT(actual_port, 0);
if (*port == 0) {
*port = actual_port;
} else {
CHECK_EQ(*port, actual_port);
}
closesocket(sock);
return true;
}
const int kNumRandomPortsToPick = 100;
const int kMaximumTrials = 1000;
}
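// Picks a port that is currently free for both TCP and UDP. The first trial
// derives a candidate from the process id, the next kNumRandomPortsToPick
// trials use random ports in [30000, 65536), and later trials let the OS
// assign one (port 0). Ports already handed out are remembered and skipped.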
int PickUnusedPortOrDie() {
WSADATA wsaData;
if (WSAStartup(MAKEWORD(2, 2), &wsaData) != NO_ERROR) {
LOG(ERROR) << "Error at WSAStartup()";
return false;
}
static std::unordered_set<int> chosen_ports;
bool is_tcp = true;
int trial = 0;
while (true) {
int port;
trial++;
CHECK_LE(trial, kMaximumTrials)
<< "Failed to pick an unused port for testing.";
if (trial == 1) {
port = GetCurrentProcessId() % (65536 - 30000) + 30000;
} else if (trial <= kNumRandomPortsToPick) {
port = rand() % (65536 - 30000) + 30000;
} else {
port = 0;
}
if (chosen_ports.find(port) != chosen_ports.end()) {
continue;
}
if (!IsPortAvailable(&port, is_tcp)) {
continue;
}
CHECK_GT(port, 0);
if (!IsPortAvailable(&port, !is_tcp)) {
is_tcp = !is_tcp;
continue;
}
chosen_ports.insert(port);
WSACleanup();
return port;
}
return 0;
}
}
} | #include "tsl/platform/net.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/test.h"
namespace tsl {
namespace internal {
TEST(Net, PickUnusedPortOrDie) {
int port0 = PickUnusedPortOrDie();
int port1 = PickUnusedPortOrDie();
CHECK_GE(port0, 0);
CHECK_LT(port0, 65536);
CHECK_GE(port1, 0);
CHECK_LT(port1, 65536);
CHECK_NE(port0, port1);
}
}
} | https://github.com/google/tsl/blob/6d708fdcdd4f40537b7fa273371215a6fa3d4423/tsl/platform/windows/net.cc | https://github.com/google/tsl/blob/6d708fdcdd4f40537b7fa273371215a6fa3d4423/tsl/platform/net_test.cc | 6d708fdcdd4f40537b7fa273371215a6fa3d4423 |
aa6cc3de-ce31-4a6b-9ff6-3389fd34736b | cpp | tensorflow/tensorflow | hlo_memory_scheduler | third_party/xla/xla/service/hlo_memory_scheduler.cc | third_party/xla/xla/service/hlo_memory_scheduler_test.cc | #include "xla/service/hlo_memory_scheduler.h"
#include <algorithm>
#include <climits>
#include <cstddef>
#include <cstdint>
#include <limits>
#include <map>
#include <memory>
#include <queue>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/status/status.h"
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/dfs_hlo_visitor_with_default.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/ir/hlo_schedule.h"
#include "xla/service/buffer_value.h"
#include "xla/service/heap_simulator/heap_simulator.h"
#include "xla/service/hlo_alias_analysis.h"
#include "xla/service/logical_buffer.h"
#include "xla/service/tuple_points_to_analysis.h"
#include "xla/shape_util.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/numbers.h"
#include "tsl/platform/statusor.h"
#include "tsl/profiler/lib/scoped_annotation.h"
namespace xla {
namespace {
using ::tsl::strings::HumanReadableNumBytes;
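// Greedy list scheduler for a single computation: keeps a ready queue of
// instructions whose predecessors have all been scheduled and repeatedly emits
// the highest-priority entry, where priority is primarily the number of buffer
// bytes freed by scheduling the instruction (see BytesFreedIfScheduled).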
class ListScheduler {
public:
static absl::StatusOr<HloInstructionSequence> Run(
HloComputation* computation,
const TuplePointsToAnalysis& points_to_analysis,
const BufferValue::SizeFunction& size_function) {
ListScheduler scheduler(computation, points_to_analysis, size_function);
return scheduler.CreateSchedule();
}
static bool IgnoreInstruction(const HloInstruction& instruction) {
return instruction.opcode() == HloOpcode::kParameter ||
instruction.opcode() == HloOpcode::kConstant;
}
private:
using Priority = std::pair<int64_t, int64_t>;
ListScheduler(HloComputation* computation,
const TuplePointsToAnalysis& points_to_analysis,
const BufferValue::SizeFunction& size_function)
: computation_(computation),
points_to_analysis_(points_to_analysis),
size_function_(size_function) {
for (auto* instruction : computation->instructions()) {
absl::flat_hash_set<const LogicalBuffer*> instr_uses;
for (auto* operand : instruction->operands()) {
points_to_analysis.GetPointsToSet(operand).ForEachElement(
[&](const ShapeIndex& /*index*/,
const PointsToSet::BufferList& buffers) {
instr_uses.insert(buffers.begin(), buffers.end());
});
}
buffer_uses_[instruction] = std::vector<const LogicalBuffer*>(
instr_uses.begin(), instr_uses.end());
}
unscheduled_use_count_.reserve(points_to_analysis.num_logical_buffers());
for (auto* instruction : computation->instructions()) {
for (auto* buffer :
points_to_analysis.GetBuffersDefinedByInstruction(instruction)) {
unscheduled_use_count_[buffer] = 0;
}
}
for (auto* instruction : computation->instructions()) {
for (const LogicalBuffer* buffer : buffer_uses_.at(instruction)) {
++unscheduled_use_count_[buffer];
}
}
for (const LogicalBuffer* live_out_buffer :
points_to_analysis.GetPointsToSet(computation->root_instruction())
.CreateFlattenedSet()) {
++unscheduled_use_count_[live_out_buffer];
}
}
static bool IgnoreBuffer(const LogicalBuffer& buffer) {
return IgnoreInstruction(*buffer.instruction());
}
struct ReadyListEntry {
HloInstruction* instruction;
int64_t bytes_defined;
std::vector<const std::pair<const LogicalBuffer* const, int64_t>*>
used_buffer_unscheduled_use_counts;
};
ReadyListEntry MakeReadyListEntry(HloInstruction* instruction) {
ReadyListEntry entry;
entry.instruction = instruction;
entry.bytes_defined = 0;
for (auto* buffer :
points_to_analysis_.GetBuffersDefinedByInstruction(instruction)) {
if (!IgnoreBuffer(*buffer)) {
entry.bytes_defined += size_function_(*buffer);
}
}
for (auto* buffer : buffer_uses_.at(instruction)) {
if (IgnoreBuffer(*buffer)) {
continue;
}
auto unscheduled_use_count_it = unscheduled_use_count_.find(buffer);
CHECK(unscheduled_use_count_it != unscheduled_use_count_.end());
entry.used_buffer_unscheduled_use_counts.push_back(
&*unscheduled_use_count_it);
}
return entry;
}
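// Net bytes freed by scheduling `entry` now: sizes of the used buffers for
// which this is the last unscheduled use, minus the bytes the instruction
// itself defines. Outfeeds with a non-empty config get maximal priority
// (scheduled early) and infeeds with a non-empty config get minimal priority
// (scheduled late).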
int64_t BytesFreedIfScheduled(const ReadyListEntry& entry) {
auto instruction = entry.instruction;
auto opcode = instruction->opcode();
if (opcode == HloOpcode::kOutfeed &&
!instruction->outfeed_config().empty()) {
return INT_MAX;
}
if (opcode == HloOpcode::kInfeed && !instruction->infeed_config().empty()) {
return INT_MIN;
}
int64_t freed_bytes = 0;
for (const auto& kv : entry.used_buffer_unscheduled_use_counts) {
auto buffer = kv->first;
auto use_count = kv->second;
if (use_count == 1) {
freed_bytes += size_function_(*buffer);
}
}
return freed_bytes - entry.bytes_defined;
}
Priority GetPriority(const ReadyListEntry& entry) {
if (ShapeUtil::IsEffectiveScalar(entry.instruction->shape())) {
return {std::numeric_limits<int64_t>::max(),
std::numeric_limits<int64_t>::max()};
}
return {BytesFreedIfScheduled(entry), entry.instruction->user_count()};
}
HloInstructionSequence CreateSchedule() {
HloInstructionSequence schedule;
absl::flat_hash_map<const HloInstruction*, int64_t> unscheduled_pred_count;
for (auto* instruction : computation_->instructions()) {
for (HloInstruction* user : instruction->users()) {
unscheduled_pred_count[user]++;
}
for (HloInstruction* succ : instruction->control_successors()) {
unscheduled_pred_count[succ]++;
}
}
std::multimap<Priority, ReadyListEntry> ready_queue;
absl::flat_hash_map<const HloInstruction*,
std::multimap<Priority, ReadyListEntry>::iterator>
ready_instructions;
auto add_to_ready_queue = [&](HloInstruction* inst) {
auto entry = MakeReadyListEntry(inst);
auto it = ready_queue.emplace(GetPriority(entry), std::move(entry));
ready_instructions[inst] = it;
};
for (auto* instruction : computation_->instructions()) {
if (instruction->operands().empty() &&
instruction->control_predecessors().empty()) {
add_to_ready_queue(instruction);
}
}
while (!ready_queue.empty()) {
auto best_it = ready_queue.end();
--best_it;
HloInstruction* best = best_it->second.instruction;
VLOG(2) << "Schedule instruction: " << best->ToShortString()
<< " Bytes freed: " << best_it->first.first;
ready_queue.erase(best_it);
ready_instructions.erase(best);
schedule.push_back(best);
scheduled_instructions_.insert(best);
bool adjust_ready_queue = false;
for (const LogicalBuffer* buffer : buffer_uses_.at(best)) {
int64_t& count = unscheduled_use_count_[buffer];
CHECK_GT(count, 0);
--count;
if (count == 1) {
adjust_ready_queue = true;
}
}
auto update_pred_count = [&](HloInstruction* inst) {
int64_t pred_count = --unscheduled_pred_count.at(inst);
CHECK_GE(pred_count, 0);
if (pred_count == 0) {
add_to_ready_queue(inst);
}
};
for (HloInstruction* user : best->users()) {
update_pred_count(user);
}
for (HloInstruction* succ : best->control_successors()) {
update_pred_count(succ);
}
if (adjust_ready_queue) {
for (HloInstruction* operand : best->operands()) {
for (HloInstruction* operand_user : operand->users()) {
auto ready_instructions_it = ready_instructions.find(operand_user);
if (ready_instructions_it == ready_instructions.end()) {
continue;
}
auto ready_queue_it = ready_instructions_it->second;
auto& entry = ready_queue_it->second;
Priority new_priority = GetPriority(entry);
if (new_priority == ready_queue_it->first) {
continue;
}
ready_instructions_it->second =
ready_queue.emplace(new_priority, std::move(entry));
ready_queue.erase(ready_queue_it);
}
}
}
}
CHECK_EQ(schedule.size(), computation_->instruction_count());
CHECK_EQ(scheduled_instructions_.size(), computation_->instruction_count());
return schedule;
}
HloComputation* computation_;
const TuplePointsToAnalysis& points_to_analysis_;
const BufferValue::SizeFunction& size_function_;
absl::flat_hash_map<const HloInstruction*, std::vector<const LogicalBuffer*>>
buffer_uses_;
absl::flat_hash_map<const LogicalBuffer*, int64_t> unscheduled_use_count_;
absl::flat_hash_set<const HloInstruction*> scheduled_instructions_;
};
int64_t SumLogicalBufferSizes(
const TuplePointsToAnalysis::BufferDefinitionVector& buffers,
const BufferValue::SizeFunction& size_function) {
int64_t size = 0;
for (const LogicalBuffer* buffer : buffers) {
size += size_function(*buffer);
}
return size;
}
absl::StatusOr<HloInstructionSequence> ScheduleComputationHelper(
HloComputation* computation,
const TuplePointsToAnalysis& points_to_analysis,
const HloAliasAnalysis& alias_analysis,
const BufferValue::SizeFunction& size_function,
const MemorySchedulerAlgorithm& algorithm,
const MemorySchedulerPostprocessor& postprocessor, int64_t* peak_memory) {
VLOG(2) << "Computation: " << computation->name();
if (algorithm) {
return algorithm(computation, points_to_analysis, alias_analysis,
size_function, postprocessor, peak_memory);
}
return DefaultMemoryScheduler(computation, points_to_analysis, alias_analysis,
size_function, postprocessor, peak_memory);
}
}
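// Schedules the computation via a depth-first post-order traversal in which
// operands are visited in decreasing order of accumulated "extra users" and
// buffer sizes, so subtrees with more downstream users and larger accumulated
// sizes are visited first in the traversal.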
absl::StatusOr<HloInstructionSequence> DFSMemoryScheduler(
HloComputation* computation,
const TuplePointsToAnalysis& points_to_analysis,
const HloAliasAnalysis& alias_analysis,
const BufferValue::SizeFunction& size_function,
const MemorySchedulerPostprocessor& postprocessor, int64_t* peak_memory) {
int64_t cumulative_total_size = 0;
int64_t total_hlos = computation->instruction_count();
struct Stats {
int64_t extra_users = 0;
int64_t total_sizes = 0;
};
absl::flat_hash_map<const HloInstruction*, Stats> stats_map;
stats_map.reserve(computation->instruction_count());
for (const HloInstruction* hlo : computation->MakeInstructionPostOrder()) {
auto& stats = stats_map[hlo];
if (ListScheduler::IgnoreInstruction(*hlo)) {
continue;
}
stats.extra_users = hlo->users().empty() ? 0 : hlo->users().size() - 1;
int64_t logical_buffer_size = SumLogicalBufferSizes(
points_to_analysis.GetBuffersDefinedByInstruction(hlo), size_function);
stats.total_sizes = logical_buffer_size;
cumulative_total_size += logical_buffer_size;
absl::flat_hash_set<const HloInstruction*> unique_operands(
hlo->operands().begin(), hlo->operands().end());
for (const HloInstruction* operand : unique_operands) {
auto& operand_stats = stats_map.at(operand);
stats.extra_users += operand_stats.extra_users;
stats.total_sizes += operand_stats.total_sizes;
}
stats.total_sizes = std::min(stats.total_sizes, cumulative_total_size);
stats.extra_users = std::min(stats.extra_users, total_hlos);
}
CHECK_EQ(stats_map.size(), computation->instruction_count());
HloInstructionSequence sequence;
FunctionVisitor visitor([&sequence](HloInstruction* hlo) {
sequence.push_back(hlo);
return absl::OkStatus();
});
visitor.ReserveVisitStates(computation->instruction_count());
TF_RETURN_IF_ERROR(computation->AcceptWithOperandOrder(
&visitor, [&stats_map](const HloInstruction* a, const HloInstruction* b) {
auto& stats_a = stats_map.at(a);
auto& stats_b = stats_map.at(b);
if (stats_a.extra_users != stats_b.extra_users) {
return stats_a.extra_users > stats_b.extra_users;
}
if (stats_a.total_sizes != stats_b.total_sizes) {
return stats_a.total_sizes > stats_b.total_sizes;
}
return a->name() < b->name();
}));
if (postprocessor) {
sequence = postprocessor(sequence);
}
CHECK_EQ(sequence.size(), computation->instruction_count());
if (peak_memory) {
TF_ASSIGN_OR_RETURN(
*peak_memory,
HeapSimulator::MinimumMemoryForComputation(
*computation, sequence, alias_analysis, size_function));
}
return sequence;
}
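// Breadth-first (Kahn-style) scheduler: an instruction becomes ready once all
// of its unique operands and control predecessors are scheduled, and ready
// instructions are emitted in FIFO order.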
absl::StatusOr<HloInstructionSequence> BFSMemoryScheduler(
HloComputation* computation,
const TuplePointsToAnalysis& points_to_analysis,
const HloAliasAnalysis& alias_analysis,
const BufferValue::SizeFunction& size_function,
const MemorySchedulerPostprocessor& postprocessor, int64_t* peak_memory) {
absl::flat_hash_map<const HloInstruction*, int64_t> inst_index;
std::vector<int64_t> inst_deps(computation->instruction_count(), 0);
std::queue<HloInstruction*> ready_queue;
auto update_queue = [&](HloInstruction* inst) {
int64_t index = inst_index.at(inst);
CHECK_GE(--inst_deps[index], 0);
if (inst_deps[index] == 0) {
ready_queue.push(inst);
}
};
for (HloInstruction* inst : computation->instructions()) {
size_t index = inst_index.size();
inst_index[inst] = index;
inst_deps[index] =
inst->unique_operands().size() + inst->control_predecessors().size();
if (inst_deps[index] == 0) {
ready_queue.push(inst);
}
}
HloInstructionSequence sequence;
while (!ready_queue.empty()) {
HloInstruction* inst = ready_queue.front();
ready_queue.pop();
for (HloInstruction* user : inst->users()) update_queue(user);
for (HloInstruction* succ : inst->control_successors()) update_queue(succ);
sequence.push_back(inst);
}
CHECK_EQ(sequence.size(), computation->instruction_count());
if (peak_memory) {
TF_ASSIGN_OR_RETURN(
*peak_memory,
HeapSimulator::MinimumMemoryForComputation(
*computation, sequence, alias_analysis, size_function));
}
return sequence;
}
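// Wraps a per-computation scheduling algorithm into a module-level one that
// schedules every non-fusion computation (restricted to `execution_threads`)
// and, if requested, reports the module's minimum heap-simulator memory.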
ModuleSchedulerAlgorithm ComputationSchedulerToModuleScheduler(
const MemorySchedulerAlgorithm& computation_scheduler,
const MemorySchedulerPostprocessor& postprocessor) {
return [computation_scheduler, postprocessor](
const HloModule* module,
const TuplePointsToAnalysis& points_to_analysis,
const HloAliasAnalysis& alias_analysis,
const LogicalBuffer::SizeFunction& size_func,
const absl::flat_hash_set<absl::string_view>& execution_threads,
int64_t* peak_memory) -> absl::StatusOr<HloSchedule> {
HloSchedule schedule(module);
for (auto* computation :
module->MakeComputationPostOrder(execution_threads)) {
if (!computation->IsFusionComputation()) {
TF_ASSIGN_OR_RETURN(HloInstructionSequence computation_sequence,
ScheduleComputationHelper(
computation, points_to_analysis, alias_analysis,
size_func, computation_scheduler, postprocessor,
/*peak_memory=*/nullptr));
schedule.set_sequence(computation, std::move(computation_sequence));
}
}
if (peak_memory) {
TF_ASSIGN_OR_RETURN(*peak_memory, HeapSimulator::MinimumMemoryForModule(
schedule, size_func));
}
return std::move(schedule);
};
}
absl::StatusOr<HloInstructionSequence> ListMemoryScheduler(
HloComputation* computation,
const TuplePointsToAnalysis& points_to_analysis,
const HloAliasAnalysis& alias_analysis,
const BufferValue::SizeFunction& size_function,
const MemorySchedulerPostprocessor& postprocessor, int64_t* peak_memory) {
TF_ASSIGN_OR_RETURN(
HloInstructionSequence sequence,
ListScheduler::Run(computation, points_to_analysis, size_function));
if (postprocessor) {
sequence = postprocessor(sequence);
}
if (peak_memory) {
TF_ASSIGN_OR_RETURN(
*peak_memory,
HeapSimulator::MinimumMemoryForComputation(
*computation, sequence, alias_analysis, size_function));
}
return sequence;
}
absl::StatusOr<HloInstructionSequence> PostOrderMemoryScheduler(
HloComputation* computation,
const TuplePointsToAnalysis& points_to_analysis,
const HloAliasAnalysis& alias_analysis,
const BufferValue::SizeFunction& size_function,
const MemorySchedulerPostprocessor& postprocessor, int64_t* peak_memory) {
HloInstructionSequence sequence(computation->MakeInstructionPostOrder());
if (postprocessor) {
sequence = postprocessor(sequence);
}
if (peak_memory) {
TF_ASSIGN_OR_RETURN(
*peak_memory,
HeapSimulator::MinimumMemoryForComputation(
*computation, sequence, alias_analysis, size_function));
}
return sequence;
}
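// Runs the list, DFS, and post-order schedulers on the computation and returns
// the sequence with the smallest peak memory as estimated by the heap
// simulator.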
absl::StatusOr<HloInstructionSequence> DefaultMemoryScheduler(
HloComputation* computation,
const TuplePointsToAnalysis& points_to_analysis,
const HloAliasAnalysis& alias_analysis,
const BufferValue::SizeFunction& size_function,
const MemorySchedulerPostprocessor& postprocessor, int64_t* peak_memory) {
int64_t list_memory;
TF_ASSIGN_OR_RETURN(
HloInstructionSequence list_sequence,
ListMemoryScheduler(computation, points_to_analysis, alias_analysis,
size_function, postprocessor, &list_memory));
VLOG(2) << "Min-memory list sequence: " << HumanReadableNumBytes(list_memory);
int64_t dfs_memory;
TF_ASSIGN_OR_RETURN(
HloInstructionSequence dfs_sequence,
DFSMemoryScheduler(computation, points_to_analysis, alias_analysis,
size_function, postprocessor, &dfs_memory));
VLOG(2) << "Min-memory dfs sequence: " << HumanReadableNumBytes(dfs_memory);
int64_t post_order_memory;
TF_ASSIGN_OR_RETURN(HloInstructionSequence post_order_sequence,
PostOrderMemoryScheduler(
computation, points_to_analysis, alias_analysis,
size_function, postprocessor, &post_order_memory));
VLOG(2) << "Min-memory post order sequence: "
<< HumanReadableNumBytes(post_order_memory);
auto min_memory = std::min({dfs_memory, post_order_memory, list_memory});
if (peak_memory) {
*peak_memory = min_memory;
}
if (min_memory == list_memory) {
VLOG(2) << "Chose min-memory list sequence: "
<< HumanReadableNumBytes(list_memory);
return list_sequence;
} else if (min_memory == dfs_memory) {
VLOG(2) << "Chose min-memory dfs sequence: "
<< HumanReadableNumBytes(dfs_memory);
return dfs_sequence;
} else {
VLOG(2) << "Chose min-memory post_order sequence: "
<< HumanReadableNumBytes(post_order_memory);
return post_order_sequence;
}
}
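// Module-level variant of DefaultMemoryScheduler: runs the list, DFS, and
// post-order module schedulers and keeps the schedule with the lowest
// simulated memory.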
absl::StatusOr<HloSchedule> DefaultModuleScheduler(
const HloModule* module, const TuplePointsToAnalysis& points_to_analysis,
const HloAliasAnalysis& alias_analysis,
const BufferValue::SizeFunction& size_function,
const absl::flat_hash_set<absl::string_view>& execution_threads,
int64_t* peak_memory) {
int64_t list_memory;
TF_ASSIGN_OR_RETURN(
HloSchedule list_sequence,
ComputationSchedulerToModuleScheduler(ListMemoryScheduler, {})(
module, points_to_analysis, alias_analysis, size_function,
execution_threads, &list_memory));
VLOG(2) << "Min-memory list sequence: " << HumanReadableNumBytes(list_memory);
int64_t dfs_memory;
TF_ASSIGN_OR_RETURN(
HloSchedule dfs_sequence,
ComputationSchedulerToModuleScheduler(DFSMemoryScheduler, {})(
module, points_to_analysis, alias_analysis, size_function,
execution_threads, &dfs_memory));
VLOG(2) << "Min-memory dfs sequence: " << HumanReadableNumBytes(dfs_memory);
int64_t post_order_memory;
TF_ASSIGN_OR_RETURN(
HloSchedule post_order_sequence,
ComputationSchedulerToModuleScheduler(PostOrderMemoryScheduler, {})(
module, points_to_analysis, alias_analysis, size_function,
execution_threads, &post_order_memory));
VLOG(2) << "Min-memory post order sequence: "
<< HumanReadableNumBytes(post_order_memory);
auto min_memory = std::min({dfs_memory, post_order_memory, list_memory});
if (peak_memory) {
*peak_memory = min_memory;
}
if (min_memory == list_memory) {
VLOG(2) << "Chose min-memory list sequence: "
<< HumanReadableNumBytes(list_memory);
return list_sequence;
} else if (min_memory == dfs_memory) {
VLOG(2) << "Chose min-memory dfs sequence: "
<< HumanReadableNumBytes(dfs_memory);
return dfs_sequence;
} else {
VLOG(2) << "Chose min-memory post_order sequence: "
<< HumanReadableNumBytes(post_order_memory);
return post_order_sequence;
}
}
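// Entry point for scheduling a module: runs points-to and alias analysis,
// applies the given algorithm (or the default one), and verifies the result.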
absl::StatusOr<HloSchedule> ScheduleModule(
const HloModule* module, const BufferValue::SizeFunction& size_function,
const ModuleSchedulerAlgorithm& algorithm,
const absl::flat_hash_set<absl::string_view>& execution_threads,
int64_t* peak_memory) {
tsl::profiler::ScopedAnnotation annotation([&] {
return absl::StrFormat("XlaMemoryScheduler:#module=%s,program_id=%d#",
module->name(), module->unique_id());
});
TF_ASSIGN_OR_RETURN(std::unique_ptr<TuplePointsToAnalysis> points_to_analysis,
TuplePointsToAnalysis::Run(module));
TF_ASSIGN_OR_RETURN(std::unique_ptr<HloAliasAnalysis> alias_analysis,
HloAliasAnalysis::Run(module));
TF_ASSIGN_OR_RETURN(HloSchedule schedule,
(algorithm ? algorithm : DefaultModuleScheduler)(
module, *points_to_analysis, *alias_analysis,
size_function, execution_threads, peak_memory));
TF_RETURN_IF_ERROR(schedule.Verify());
return std::move(schedule);
}
HloMemoryScheduler::HloMemoryScheduler(
const BufferValue::SizeFunction& size_function,
const ModuleSchedulerAlgorithm& algorithm)
: size_function_(size_function), algorithm_(algorithm) {}
absl::StatusOr<bool> HloMemoryScheduler::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
TF_ASSIGN_OR_RETURN(
HloSchedule schedule,
ScheduleModule(module, size_function_, algorithm_, execution_threads));
TF_RETURN_IF_ERROR(module->set_schedule(std::move(schedule)));
return true;
}
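// Produces a valid but unoptimized schedule by appending each computation's
// instructions in visitor order.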
absl::StatusOr<bool> HloTrivialScheduler::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
HloSchedule schedule(module);
for (HloComputation* computation :
module->MakeComputationPostOrder(execution_threads)) {
if (!computation->IsFusionComputation()) {
HloInstructionSequence& computation_sequence =
schedule.GetOrCreateSequence(computation);
FunctionVisitor visitor(
[&computation_sequence](HloInstruction* instruction) {
computation_sequence.push_back(instruction);
return absl::OkStatus();
});
visitor.ReserveVisitStates(computation->instruction_count());
TF_RETURN_IF_ERROR(computation->Accept(&visitor));
}
}
TF_RETURN_IF_ERROR(module->set_schedule(std::move(schedule)));
return true;
}
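// Clears any existing schedule from the module; returns whether one was
// present.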
absl::StatusOr<bool> HloDescheduler::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
bool changed = module->has_schedule();
module->clear_schedule();
return changed;
}
} | #include "xla/service/hlo_memory_scheduler.h"
#include <cstddef>
#include <cstdint>
#include <iterator>
#include <memory>
#include <string>
#include <string_view>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/ir/hlo_schedule.h"
#include "xla/literal_util.h"
#include "xla/service/buffer_value.h"
#include "xla/service/heap_simulator/heap_simulator.h"
#include "xla/service/hlo_alias_analysis.h"
#include "xla/service/hlo_ordering.h"
#include "xla/service/hlo_value.h"
#include "xla/service/logical_buffer.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace xla {
namespace {
class HloSchedulingTest : public HloTestBase {};
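// Measures peak heap usage of the entry computation's schedule with the heap
// simulator (no fragmentation accounting).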
int64_t PeakMemoryUseOfEntryComputation(
HloModule* module, LogicalBuffer::SizeFunction size_function) {
CHECK(module->has_entry_computation());
CHECK(module->has_schedule());
std::unique_ptr<HloAliasAnalysis> alias_analysis =
HloAliasAnalysis::Run(module).value();
const HloSchedule& schedule = module->schedule();
HloComputation* computation = module->entry_computation();
const HloInstructionSequence& sequence = schedule.sequence(computation);
return HeapSimulator::Run(
std::make_unique<NoFragmentationStatsHeap<HloValue>>(),
*computation, sequence, *alias_analysis, size_function)
.value()
.heap_size;
}
TEST_F(HloSchedulingTest, LastUseScheduledFirst) {
const Shape vec = ShapeUtil::MakeShape(xla::F32, {42});
auto builder = HloComputation::Builder(TestName());
auto param =
builder.AddInstruction(HloInstruction::CreateParameter(0, vec, "param"));
auto ab = builder.AddInstruction(
HloInstruction::CreateUnary(vec, HloOpcode::kAbs, param));
auto exp = builder.AddInstruction(
HloInstruction::CreateUnary(vec, HloOpcode::kExp, param));
auto add = builder.AddInstruction(
HloInstruction::CreateBinary(vec, HloOpcode::kAdd, ab, exp));
auto negate = builder.AddInstruction(
HloInstruction::CreateUnary(vec, HloOpcode::kNegate, exp));
auto sub = builder.AddInstruction(
HloInstruction::CreateBinary(vec, HloOpcode::kSubtract, add, negate));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
HloMemoryScheduler scheduler([](const BufferValue& buffer) {
return ShapeUtil::ByteSizeOf(buffer.shape());
});
ASSERT_FALSE(module->has_schedule());
TF_ASSERT_OK_AND_ASSIGN(bool changed, scheduler.Run(module.get()));
EXPECT_TRUE(changed);
ASSERT_TRUE(module->has_schedule());
TF_ASSERT_OK(module->schedule().Verify());
const std::vector<HloInstruction*>& sequence =
module->schedule().sequence(module->entry_computation()).instructions();
EXPECT_EQ(module->entry_computation()->instruction_count(), sequence.size());
EXPECT_EQ(param, sequence.front());
EXPECT_EQ(sub, sequence.back());
SequentialHloOrdering ordering(module->schedule());
EXPECT_TRUE(ordering.ExecutesBefore(add, negate));
HloDescheduler descheduler;
EXPECT_TRUE(module->has_schedule());
TF_ASSERT_OK_AND_ASSIGN(bool descheduler_changed,
descheduler.Run(module.get()));
EXPECT_TRUE(descheduler_changed);
EXPECT_FALSE(module->has_schedule());
}
TEST_F(HloSchedulingTest, ListSchedulerHandlesAliasing) {
const char* module_str = R"(
HloModule test_aliasing_module
ENTRY root {
param = s32[1000] parameter(0)
p0 = s32[1000] copy(param)
p1 = s32[1000] copy(param)
t = (s32[1000], s32[1000]) tuple(p0, p1)
a = s32[1000] get-tuple-element(t), index=0
b = s32[1000] get-tuple-element(t), index=1
c = s32[1000] add(a, b)
d = s32[1000] add(c, b)
e = s32[1000] add(c, c)
f = s32[1000] add(e, e)
ROOT result = (s32[1000], s32[1000], s32[1000]) tuple(d, e, f)
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_str));
auto size_fn = [](const BufferValue& buffer) {
return ShapeUtil::ByteSizeOf(buffer.shape(), 8);
};
int64_t peak_memory;
TF_ASSERT_OK_AND_ASSIGN(
HloSchedule schedule,
ScheduleModule(module.get(), size_fn,
ComputationSchedulerToModuleScheduler(ListMemoryScheduler),
{}, &peak_memory));
TF_ASSERT_OK(module->set_schedule(schedule));
const std::vector<HloInstruction*>& sequence =
schedule.sequence(module->entry_computation()).instructions();
EXPECT_EQ(module->entry_computation()->instruction_count(), sequence.size());
absl::flat_hash_map<std::string, const HloInstruction*> instructions_by_name;
for (const HloInstruction* instruction : sequence) {
instructions_by_name[instruction->name()] = instruction;
}
EXPECT_EQ(instructions_by_name.at("param"), sequence.front());
EXPECT_EQ(instructions_by_name.at("result"), sequence.back());
SequentialHloOrdering ordering(schedule);
EXPECT_TRUE(ordering.ExecutesBefore(instructions_by_name.at("d"),
instructions_by_name.at("e")));
EXPECT_EQ(PeakMemoryUseOfEntryComputation(module.get(), size_fn),
peak_memory);
}
TEST_F(HloSchedulingTest, HostSendDoneSchedule) {
const char* const module_str = R"(
HloModule module
ENTRY entry {
%p = f32[1000, 1000] parameter(0)
%token.0 = token[] after-all()
%send = (f32[1000, 1000], token[]) send(%p, %token.0),
channel_id=1, is_host_transfer=true
%n1 = f32[1000, 1000] negate(%p)
%n2 = f32[1000, 1000] negate(%n1)
%n3 = f32[1000, 1000] negate(%n2)
%send-done = token[] send-done(%send), channel_id=1, is_host_transfer=true
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_str));
auto size_fn = [](const BufferValue& buffer) {
return ShapeUtil::ByteSizeOf(buffer.shape(), 8);
};
TF_ASSERT_OK_AND_ASSIGN(HloSchedule schedule,
ScheduleModule(module.get(), size_fn,
ComputationSchedulerToModuleScheduler(
ListMemoryScheduler)));
const std::vector<HloInstruction*>& sequence =
schedule.sequence(module->entry_computation()).instructions();
EXPECT_EQ(module->entry_computation()->instruction_count(), sequence.size());
absl::flat_hash_map<std::string, const HloInstruction*> instructions_by_name;
for (const HloInstruction* instruction : sequence) {
instructions_by_name[instruction->name()] = instruction;
}
EXPECT_LT(absl::c_find(sequence, instructions_by_name.at("send-done")),
absl::c_find(sequence, instructions_by_name.at("n1")));
}
TEST_F(HloSchedulingTest, TuplesAreAccountedCorrectly) {
auto builder = HloComputation::Builder(TestName());
const Shape r1f32 = ShapeUtil::MakeShape(xla::F32, {6});
auto lit = builder.AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR1<float>({1, 1, 1, 1, 1, 1})));
auto abs_const = builder.AddInstruction(
HloInstruction::CreateUnary(r1f32, HloOpcode::kAbs, lit));
auto abs_abs1 = builder.AddInstruction(
HloInstruction::CreateUnary(r1f32, HloOpcode::kAbs, abs_const));
auto tuple = builder.AddInstruction(HloInstruction::CreateTuple(
absl::Span<HloInstruction* const>({abs_abs1})));
auto tuple_elm = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(r1f32, tuple, 0));
auto abs_abs2 = builder.AddInstruction(
HloInstruction::CreateUnary(r1f32, HloOpcode::kAbs, abs_const));
builder.AddInstruction(HloInstruction::CreateBinary(r1f32, HloOpcode::kAdd,
tuple_elm, abs_abs2));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
TF_ASSERT_OK_AND_ASSIGN(
HloSchedule schedule,
ScheduleModule(
module.get(),
[](const BufferValue& buffer) {
return ShapeUtil::ByteSizeOf(buffer.shape(), 1);
},
ComputationSchedulerToModuleScheduler(ListMemoryScheduler)));
EXPECT_EQ(module->entry_computation()->instruction_count(),
schedule.sequence(module->entry_computation()).size());
SequentialHloOrdering ordering(schedule);
EXPECT_TRUE(ordering.ExecutesBefore(abs_abs2, tuple));
}
TEST_F(HloSchedulingTest, MultiOutputFusionAccountedCorrectly) {
const Shape r1f32 = ShapeUtil::MakeShape(xla::F32, {5});
HloComputation::Builder builder(TestName());
auto c1 = builder.AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR1<float>({1, 1, 1, 1, 1})));
auto c2 = builder.AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR1<float>({1, 2, 3, 4, 5})));
auto c3 = builder.AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR1<float>({0, 2, 4, 6, 8})));
auto add = builder.AddInstruction(
HloInstruction::CreateBinary(r1f32, HloOpcode::kAdd, c1, c2));
auto mul = builder.AddInstruction(
HloInstruction::CreateBinary(r1f32, HloOpcode::kMultiply, add, c3));
auto tuple = builder.AddInstruction(HloInstruction::CreateTuple({add, mul}));
auto tuple_elm = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(r1f32, tuple, 0));
auto exp = builder.AddInstruction(
HloInstruction::CreateUnary(r1f32, HloOpcode::kExp, c3));
builder.AddInstruction(
HloInstruction::CreateBinary(r1f32, HloOpcode::kAdd, tuple_elm, exp));
auto module = CreateNewVerifiedModule();
auto* computation = module->AddEntryComputation(builder.Build());
auto fusion = computation->CreateFusionInstruction(
{tuple, mul, add}, HloInstruction::FusionKind::kLoop);
TF_ASSERT_OK_AND_ASSIGN(
HloSchedule schedule,
ScheduleModule(
module.get(),
[](const BufferValue& buffer) {
return ShapeUtil::ByteSizeOf(buffer.shape(), 2);
},
ComputationSchedulerToModuleScheduler(ListMemoryScheduler)));
EXPECT_EQ(module->entry_computation()->instruction_count(),
schedule.sequence(module->entry_computation()).size());
SequentialHloOrdering ordering(schedule);
EXPECT_TRUE(ordering.ExecutesBefore(exp, fusion));
}
TEST_F(HloSchedulingTest, TrivialScheduler) {
const char* const hlo_string = R"(
HloModule ModuleWithWhile
body {
param.b = (s32[], s32[]) parameter(0)
gte.0 = s32[] get-tuple-element(param.b), index=0
gte.1 = s32[] get-tuple-element(param.b), index=1
add = s32[] add(gte.0, gte.1)
ROOT tuple = (s32[], s32[]) tuple(gte.0, add)
}
cond {
param.c = (s32[], s32[]) parameter(0)
ROOT constant = pred[] constant(true)
}
ENTRY main {
init = (s32[], s32[]) parameter(0)
ROOT while = (s32[], s32[]) while(init), condition=cond, body=body
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
EXPECT_FALSE(module->has_schedule());
TF_ASSERT_OK(HloTrivialScheduler().Run(module.get()).status());
ASSERT_TRUE(module->has_schedule());
TF_ASSERT_OK(module->schedule().Verify());
std::unique_ptr<HloModule> clone = module->Clone();
ASSERT_TRUE(clone->has_schedule());
TF_ASSERT_OK(clone->schedule().Verify());
}
TEST_F(HloSchedulingTest, BFSScheduler) {
const char* const hlo_string = R"(
HloModule m
add {
p0 = f32[] parameter(0)
p1 = f32[] parameter(1)
ROOT add = f32[] add(p0, p1)
}
ENTRY e {
p0 = f32[1,2,1,512,256] parameter(0)
c0 = f32[] constant(0)
c1 = f32[] constant(1)
bcast1 = f32[1,2,1,512,256] broadcast(c1), dimensions={}
add1 = f32[1,2,1,512,256] add(p0, bcast1)
c2 = f32[] constant(2)
bcast2 = f32[1,2,1,512,256] broadcast(c2), dimensions={}
add2 = f32[1,2,1,512,256] add(p0, bcast2)
c3 = f32[] constant(3)
bcast3 = f32[1,2,1,512,256] broadcast(c3), dimensions={}
add3 = f32[1,2,1,512,256] add(p0, bcast3)
c4 = f32[] constant(4)
bcast4 = f32[1,2,1,512,256] broadcast(c4), dimensions={}
add4 = f32[1,2,1,512,256] add(p0, bcast4)
c5 = f32[] constant(5)
bcast5 = f32[1,2,1,512,256] broadcast(c5), dimensions={}
add5 = f32[1,2,1,512,256] add(p0, bcast5)
r1 = f32[1,2] reduce(add1, c0), dimensions={2,3,4}, to_apply=add
r2 = f32[1,2] reduce(add2, c0), dimensions={2,3,4}, to_apply=add
r3 = f32[1,2] reduce(add3, c0), dimensions={2,3,4}, to_apply=add
r4 = f32[1,2] reduce(add4, c0), dimensions={2,3,4}, to_apply=add
r5 = f32[1,2] reduce(add5, c0), dimensions={2,3,4}, to_apply=add
out0 = f32[1,2] add(r1, r2)
out1 = f32[1,2] add(r3, r4)
out2 = f32[1,2] add(out0, out1)
ROOT out3 = f32[1,2] add(out2, r5)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
HloSchedule schedule,
ScheduleModule(
module.get(),
[](const BufferValue& buffer) {
return ShapeUtil::ByteSizeOf(buffer.shape());
},
ComputationSchedulerToModuleScheduler(BFSMemoryScheduler)));
const std::vector<HloInstruction*>& sequence =
schedule.sequence(module->entry_computation()).instructions();
absl::flat_hash_map<std::string, const HloInstruction*> instructions_by_name;
for (const HloInstruction* instruction : sequence) {
instructions_by_name[instruction->name()] = instruction;
}
auto index = [&](std::string_view name) -> size_t {
const HloInstruction* instruction = instructions_by_name.at(name);
return std::distance(sequence.begin(), absl::c_find(sequence, instruction));
};
std::vector<size_t> indices = {
index("bcast1"), index("bcast2"), index("bcast3"), index("bcast4"),
index("bcast5"), index("add1"), index("add2"), index("add3"),
index("add4"), index("add5"), index("r1"), index("r2"),
index("r3"), index("r4"), index("r5"), index("out0"),
index("out1"), index("out2"), index("out3")};
EXPECT_TRUE(absl::c_is_sorted(indices));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/hlo_memory_scheduler.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/hlo_memory_scheduler_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
a9884785-43c1-4307-b823-e878e158c6f3 | cpp | tensorflow/tensorflow | se_gpu_pjrt_compiler | third_party/xla/xla/pjrt/gpu/se_gpu_pjrt_compiler.cc | third_party/xla/xla/pjrt/gpu/se_gpu_pjrt_compiler_test.cc | #include "xla/pjrt/gpu/se_gpu_pjrt_compiler.h"
#include <memory>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "xla/client/xla_computation.h"
#include "xla/pjrt/gpu/se_gpu_pjrt_client.h"
#include "xla/pjrt/pjrt_client.h"
#include "xla/pjrt/pjrt_compiler.h"
#include "xla/pjrt/pjrt_executable.h"
#include "xla/stream_executor/platform/initialize.h"
#include "tsl/platform/casts.h"
#include "tsl/platform/statusor.h"
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
#include "xla/client/local_client.h"
#include "xla/pjrt/mlir_to_hlo.h"
#include "xla/pjrt/stream_executor_executable.h"
#include "xla/pjrt/utils.h"
#include "xla/service/dump.h"
#include "xla/service/gpu/executable.pb.h"
#include "xla/service/gpu/gpu_compiler.h"
#include "xla/service/hlo_module_util.h"
#include "xla/service/hlo_proto_util.h"
#include "xla/service/local_service.h"
#include "xla/service/local_service_utils.h"
#endif
#if GOOGLE_CUDA
#include "xla/service/gpu/nvptx_compiler.h"
#include "xla/stream_executor/cuda/cuda_platform_id.h"
#elif TENSORFLOW_USE_ROCM
#include "xla/service/gpu/amdgpu_compiler.h"
#include "xla/stream_executor/rocm/rocm_platform_id.h"
#endif
namespace xla {
namespace {
bool IsGpuClient(const PjRtClient& client) {
return client.platform_id() == CudaId() || client.platform_id() == RocmId() ||
client.platform_id() == SyclId();
}
bool IsSameTopology(const PjRtTopologyDescription& topology1,
const PjRtTopologyDescription& topology2) {
const StreamExecutorGpuTopologyDescription& gpu_topology1 =
tensorflow::down_cast<const StreamExecutorGpuTopologyDescription&>(
topology1);
const StreamExecutorGpuTopologyDescription& gpu_topology2 =
tensorflow::down_cast<const StreamExecutorGpuTopologyDescription&>(
topology2);
return gpu_topology1 == gpu_topology2;
}
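// Compiling against a client requires a non-null GPU client whose topology
// matches the topology passed to Compile().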
absl::Status IsValidTopologyAndClientForCompile(
const PjRtTopologyDescription& topology, PjRtClient* client) {
if (client == nullptr) {
return absl::UnimplementedError(
"SE:GPU compiler requires non-null client.");
}
if (!IsGpuClient(*client)) {
return absl::InvalidArgumentError(
"SE:GPU compiler requires a GPU PjRtClient.");
}
TF_ASSIGN_OR_RETURN(auto client_topology, client->GetTopologyDescription());
if (!IsSameTopology(topology, *client_topology)) {
return absl::UnimplementedError(
"SE:GPU compiler requires the topology same as the one in the client.");
}
return absl::OkStatus();
}
}
absl::StatusOr<std::unique_ptr<PjRtExecutable>>
StreamExecutorGpuCompiler::Compile(CompileOptions options,
const XlaComputation& computation,
const PjRtTopologyDescription& topology,
PjRtClient* client) {
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
#if GOOGLE_CUDA
auto gpu_compiler = gpu::NVPTXCompiler();
#else
auto gpu_compiler = gpu::AMDGPUCompiler();
#endif
CompileOptions input_options = options;
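  // No explicit target config: prefer JIT compilation through the client,
  // otherwise recover the config from the topology's serialized
  // GpuTargetConfigProto attribute.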
if (!options.target_config) {
if (client != nullptr) {
TF_RETURN_IF_ERROR(IsValidTopologyAndClientForCompile(topology, client));
return client->Compile(computation, options);
}
auto attr = topology.Attributes();
if (auto it = attr.find("target_config"); it != attr.end()) {
auto target_config_str = std::get<std::string>(it->second);
stream_executor::GpuTargetConfigProto gpu_target_config_proto;
if (!gpu_target_config_proto.ParseFromString(target_config_str)) {
return FailedPrecondition("Failed to parse GpuTargetConfigProto");
}
options.target_config.emplace(
Compiler::TargetConfig(gpu_target_config_proto));
} else {
return absl::UnimplementedError(
"Compilation without client and without target_config specified is "
"not implemented");
}
}
TF_RETURN_IF_ERROR(options.ApplyAllOptionOverrides());
std::vector<const Shape*> argument_layout_pointers;
TF_RETURN_IF_ERROR(DetermineArgumentLayoutsFromCompileOptions(
computation,
[](Shape shape) { return LayoutUtil::GetWithDefaultLayout(shape); },
options.argument_layouts, &options.executable_build_options,
&argument_layout_pointers));
TF_ASSIGN_OR_RETURN(std::unique_ptr<HloModuleConfig> hlo_config,
GetHloModuleConfig(computation, argument_layout_pointers,
options.executable_build_options));
HloModuleProto hlo_module_proto = computation.proto();
TF_ASSIGN_OR_RETURN(
std::unique_ptr<HloModule> hlo_module,
HloModule::CreateFromProto(hlo_module_proto, *hlo_config));
UpdateEntryComputationLayout(
hlo_module.get(), std::bind(&Compiler::DefaultDeviceShapeRepresentation,
&gpu_compiler, std::placeholders::_1));
DumpHloModuleIfEnabled(*hlo_module, kBeforeOptimizationsDumpName);
Compiler::CompileOptions opts;
opts.target_config = options.target_config;
AotCompilationOptions aot_options(gpu_compiler.PlatformId());
aot_options.set_target_config(*options.target_config);
aot_options.set_run_backend_only(
options.executable_build_options.run_backend_only());
const int num_replicas = hlo_module->config().replica_count();
const int num_partitions = hlo_module->config().num_partitions();
const std::string name = hlo_module->name();
const std::string fingerprint = hlo_module->GetFingerprint128();
const int num_outputs = hlo_module->result_shape().IsTuple()
? hlo_module->result_shape().tuple_shapes_size()
: 1;
auto unique_module_group =
std::make_unique<HloModuleGroup>(std::move(hlo_module));
TF_ASSIGN_OR_RETURN(
std::vector<std::unique_ptr<AotCompilationResult>> aot_results,
gpu_compiler.CompileAheadOfTime(std::move(unique_module_group),
aot_options));
std::vector<std::vector<absl::string_view>> output_memory_kinds(1);
output_memory_kinds[0].resize(num_outputs,
StreamExecutorGpuHbmMemorySpace::kKind);
return std::make_unique<StreamExecutorExecutable>(
std::move(input_options), std::move(aot_results), num_replicas,
num_partitions, name, fingerprint, std::move(output_memory_kinds));
#else
return absl::InternalError(
"GPU Compilation requires the target to be built with CUDA or "
"ROCm.");
#endif
}
absl::StatusOr<std::unique_ptr<PjRtExecutable>>
StreamExecutorGpuCompiler::Compile(CompileOptions options,
mlir::ModuleOp module,
const PjRtTopologyDescription& topology,
PjRtClient* client) {
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
CompileOptions input_options = options;
XlaComputation xla_computation;
TF_RETURN_IF_ERROR(MlirToXlaComputation(
module, xla_computation,
options.parameter_is_tupled_arguments,
false,
false));
return Compile(std::move(input_options), xla_computation, topology, client);
#else
return absl::InternalError(
"GPU AOT compilation requires the target to be built with CUDA or "
"ROCm.");
#endif
}
#if TENSORFLOW_USE_ROCM
STREAM_EXECUTOR_REGISTER_MODULE_INITIALIZER(pjrt_register_se_gpu_compiler, {
PjRtRegisterCompiler(RocmName(),
std::make_unique<StreamExecutorGpuCompiler>());
});
#else
STREAM_EXECUTOR_REGISTER_MODULE_INITIALIZER(pjrt_register_se_gpu_compiler, {
PjRtRegisterCompiler(CudaName(),
std::make_unique<StreamExecutorGpuCompiler>());
});
#endif
} | #include "xla/pjrt/gpu/se_gpu_pjrt_compiler.h"
#include <memory>
#include <vector>
#include <gmock/gmock.h>
#include "absl/status/status.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Parser/Parser.h"
#include "xla/client/xla_computation.h"
#include "xla/mlir_hlo/mhlo/IR/hlo_ops.h"
#include "xla/pjrt/gpu/gpu_topology.h"
#include "xla/pjrt/gpu/se_gpu_pjrt_client.h"
#include "xla/pjrt/pjrt_client.h"
#include "xla/pjrt/pjrt_compiler.h"
#include "xla/service/hlo_parser.h"
#include "xla/test.h"
#include "xla/tests/literal_test_util.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
using ::tsl::testing::StatusIs;
constexpr absl::string_view kProgram = R"(HloModule Computation
ENTRY Computation() -> s32[] {
ROOT result = s32[] constant(2)
})";
constexpr absl::string_view mlir_str = R"mlir(
module {
func.func @main() -> tensor<i32> {
%0 = mhlo.constant dense<2> : tensor<i32>
return %0 : tensor<i32>
}
})mlir";
absl::StatusOr<xla::XlaComputation> GetXlaComputation(
absl::string_view program) {
TF_ASSIGN_OR_RETURN(auto hlo_module,
xla::ParseAndReturnUnverifiedModule(program, {}));
return XlaComputation(hlo_module->ToProto());
}
std::shared_ptr<xla::GpuTopology> GetGpuTopology(
std::vector<int> device_ids, absl::string_view platform_version,
int num_slices, int num_hosts_per_slice, int num_devices_per_host,
int core_count_per_chip) {
return std::make_shared<xla::GpuTopology>(device_ids, platform_version,
num_slices, num_hosts_per_slice,
num_devices_per_host);
}
TEST(StreamExecutorGpuCompilerTest, NoClientXla) {
StreamExecutorGpuCompiler compiler;
StreamExecutorGpuTopologyDescription topology(
CudaId(), CudaName(), GetGpuTopology({0, 1}, "Fake_device", 1, 1, 2, 10));
TF_ASSERT_OK_AND_ASSIGN(auto computation, GetXlaComputation(kProgram));
EXPECT_THAT(compiler.Compile(xla::CompileOptions(), computation, topology,
nullptr),
StatusIs(absl::StatusCode::kUnimplemented));
}
TEST(StreamExecutorGpuCompilerTest, TopologyNotSameXla) {
StreamExecutorGpuCompiler compiler;
StreamExecutorGpuTopologyDescription topology(
CudaId(), CudaName(), GetGpuTopology({0, 1}, "Fake_device", 1, 1, 2, 10));
TF_ASSERT_OK_AND_ASSIGN(auto client,
GetStreamExecutorGpuClient(GpuClientOptions()));
TF_ASSERT_OK_AND_ASSIGN(auto computation, GetXlaComputation(kProgram));
EXPECT_THAT(compiler.Compile(xla::CompileOptions(), computation, topology,
client.get()),
StatusIs(absl::StatusCode::kUnimplemented));
}
TEST(StreamExecutorGpuCompilerTest, SuccessXla) {
StreamExecutorGpuCompiler compiler;
TF_ASSERT_OK_AND_ASSIGN(auto client,
GetStreamExecutorGpuClient(GpuClientOptions()));
TF_ASSERT_OK_AND_ASSIGN(auto computation, GetXlaComputation(kProgram));
TF_ASSERT_OK_AND_ASSIGN(auto topology, client->GetTopologyDescription());
TF_ASSERT_OK_AND_ASSIGN(auto executable,
compiler.Compile(xla::CompileOptions(), computation,
*topology, client.get()));
const LoadOptions load_options;
TF_ASSERT_OK_AND_ASSIGN(auto loaded_executable,
client->Load(std::move(executable), load_options));
TF_ASSERT_OK_AND_ASSIGN(
auto result, loaded_executable->Execute({{}}, {}));
ASSERT_EQ(result.size(), 1);
std::vector<std::unique_ptr<xla::PjRtBuffer>>& result_buffers = result[0];
ASSERT_EQ(result_buffers.size(), 1);
TF_ASSERT_OK_AND_ASSIGN(std::shared_ptr<xla::Literal> result_literal,
result_buffers[0]->ToLiteralSync());
EXPECT_TRUE(
LiteralTestUtil::Equal(LiteralUtil::CreateR0(2), *result_literal));
}
TEST(StreamExecutorGpuCompilerTest, NoClientMlir) {
StreamExecutorGpuCompiler compiler;
mlir::MLIRContext context;
context.loadDialect<mlir::mhlo::MhloDialect, mlir::func::FuncDialect>();
auto mlir_module =
mlir::parseSourceString<mlir::ModuleOp>(mlir_str, &context);
StreamExecutorGpuTopologyDescription topology(
CudaId(), CudaName(), GetGpuTopology({0, 1}, "Fake_device", 1, 1, 2, 10));
EXPECT_THAT(
compiler.Compile(xla::CompileOptions(), mlir_module.get(), topology,
nullptr),
StatusIs(absl::StatusCode::kUnimplemented));
}
TEST(StreamExecutorGpuCompilerTest, TopologyNotSameMlir) {
StreamExecutorGpuCompiler compiler;
mlir::MLIRContext context;
context.loadDialect<mlir::mhlo::MhloDialect, mlir::func::FuncDialect>();
auto mlir_module =
mlir::parseSourceString<mlir::ModuleOp>(mlir_str, &context);
StreamExecutorGpuTopologyDescription topology(
CudaId(), CudaName(), GetGpuTopology({0, 1}, "Fake_device", 1, 1, 2, 10));
TF_ASSERT_OK_AND_ASSIGN(auto client,
GetStreamExecutorGpuClient(GpuClientOptions()));
EXPECT_THAT(compiler.Compile(xla::CompileOptions(), mlir_module.get(),
topology, client.get()),
StatusIs(absl::StatusCode::kUnimplemented));
}
TEST(StreamExecutorGpuCompilerTest, SuccessMlir) {
StreamExecutorGpuCompiler compiler;
mlir::MLIRContext context;
context.loadDialect<mlir::mhlo::MhloDialect, mlir::func::FuncDialect>();
auto mlir_module =
mlir::parseSourceString<mlir::ModuleOp>(mlir_str, &context);
TF_ASSERT_OK_AND_ASSIGN(auto client,
GetStreamExecutorGpuClient(GpuClientOptions()));
TF_ASSERT_OK_AND_ASSIGN(auto topology, client->GetTopologyDescription());
TF_ASSERT_OK_AND_ASSIGN(
auto executable,
compiler.Compile(xla::CompileOptions(), mlir_module.get(), *topology,
client.get()));
const LoadOptions load_options;
TF_ASSERT_OK_AND_ASSIGN(auto loaded_executable,
client->Load(std::move(executable), load_options));
TF_ASSERT_OK_AND_ASSIGN(
auto result, loaded_executable->Execute({{}}, {}));
ASSERT_EQ(result.size(), 1);
std::vector<std::unique_ptr<xla::PjRtBuffer>>& result_buffers = result[0];
ASSERT_EQ(result_buffers.size(), 1);
TF_ASSERT_OK_AND_ASSIGN(std::shared_ptr<xla::Literal> result_literal,
result_buffers[0]->ToLiteralSync());
EXPECT_TRUE(
LiteralTestUtil::Equal(LiteralUtil::CreateR0(2), *result_literal));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/pjrt/gpu/se_gpu_pjrt_compiler.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/pjrt/gpu/se_gpu_pjrt_compiler_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
5eb65c3c-b5da-4d15-bec0-f019bbe5d977 | cpp | google/tensorstore | index | python/tensorstore/index.cc | python/tensorstore/index_test.cc | #include <pybind11/pybind11.h>
#include "python/tensorstore/index.h"
#include <cstddef>
#include <string>
#include <variant>
#include <vector>
#include "python/tensorstore/sequence_parameter.h"
#include "tensorstore/index.h"
#include "tensorstore/index_space/index_vector_or_scalar.h"
#include "tensorstore/index_space/internal/numpy_indexing_spec.h"
#include "tensorstore/util/span.h"
namespace tensorstore {
namespace internal_python {
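// Materializes an optionally-implicit scalar or index vector, substituting
// implicit_value for entries equal to kImplicit.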
IndexVectorOrScalarContainer ToIndexVectorOrScalarContainer(
const OptionallyImplicitIndexVectorOrScalarContainer& x,
Index implicit_value) {
if (auto* index = std::get_if<OptionallyImplicitIndex>(&x)) {
return index->value_or(implicit_value);
}
const auto& v = std::get<SequenceParameter<OptionallyImplicitIndex>>(x);
std::vector<Index> out_v;
out_v.reserve(v.size());
for (size_t i = 0; i < v.size(); ++i) {
out_v.push_back(v[i].value_or(implicit_value));
}
return out_v;
}
internal_index_space::IndexVectorOrScalarView ToIndexVectorOrScalar(
const IndexVectorOrScalarContainer& x) {
constexpr static Index temp = 0;
if (auto* index = std::get_if<Index>(&x)) {
return *index;
} else {
const auto& v = std::get<std::vector<Index>>(x);
if (v.empty()) {
return span(&temp, 0);
}
return span(v);
}
}
std::string IndexVectorRepr(const IndexVectorOrScalarContainer& x,
bool implicit, bool subscript) {
return internal::IndexVectorRepr(ToIndexVectorOrScalar(x), implicit,
subscript);
}
}
}
namespace pybind11 {
namespace detail {
handle type_caster<tensorstore::internal_python::PythonDimensionIndex>::cast(
tensorstore::internal_python::PythonDimensionIndex x,
return_value_policy , handle ) {
return int_(x.value).release();
}
bool type_caster<tensorstore::internal_python::PythonDimensionIndex>::load(
handle src, bool convert) {
value.value = PyNumber_AsSsize_t(src.ptr(), PyExc_IndexError);
if (value.value == -1 && PyErr_Occurred()) {
PyErr_Clear();
return false;
}
return true;
}
handle type_caster<tensorstore::internal_python::OptionallyImplicitIndex>::cast(
tensorstore::internal_python::OptionallyImplicitIndex x,
return_value_policy , handle ) {
if (x.value == tensorstore::kImplicit) return none().release();
return int_(x.value).release();
}
bool type_caster<tensorstore::internal_python::OptionallyImplicitIndex>::load(
handle src, bool convert) {
if (src.is_none()) {
value.value = tensorstore::kImplicit;
return true;
}
value.value = PyNumber_AsSsize_t(src.ptr(), PyExc_IndexError);
if (value.value == -1 && PyErr_Occurred()) {
PyErr_Clear();
return false;
}
return true;
}
}
} | #include "python/tensorstore/index.h"
#include <vector>
#include <gtest/gtest.h>
#include "tensorstore/index.h"
namespace {
TEST(OptionallyImplicitIndexReprTest, Basic) {
using tensorstore::kImplicit;
using tensorstore::internal_python::OptionallyImplicitIndexRepr;
EXPECT_EQ("None", OptionallyImplicitIndexRepr(kImplicit));
EXPECT_EQ("3", OptionallyImplicitIndexRepr(3));
EXPECT_EQ("-3", OptionallyImplicitIndexRepr(-3));
}
TEST(IndexVectorReprTest, Basic) {
using tensorstore::Index;
using tensorstore::kImplicit;
using tensorstore::internal_python::IndexVectorRepr;
for (bool subscript : {false, true}) {
EXPECT_EQ("None", IndexVectorRepr(kImplicit, true, subscript));
for (bool implicit : {false, true}) {
EXPECT_EQ("1", IndexVectorRepr(1, implicit, subscript));
EXPECT_EQ("-1", IndexVectorRepr(-1, implicit, subscript));
}
}
for (bool implicit : {false, true}) {
EXPECT_EQ("[1,2,3]", IndexVectorRepr(std::vector<Index>{1, 2, 3}, implicit,
false));
EXPECT_EQ("1,2,3", IndexVectorRepr(std::vector<Index>{1, 2, 3}, implicit,
true));
EXPECT_EQ("[]", IndexVectorRepr(std::vector<Index>{}, implicit,
false));
EXPECT_EQ("()", IndexVectorRepr(std::vector<Index>{}, implicit,
true));
}
EXPECT_EQ("[1,2,None]", IndexVectorRepr(std::vector<Index>{1, 2, kImplicit},
true,
false));
EXPECT_EQ("1,2,None", IndexVectorRepr(std::vector<Index>{1, 2, kImplicit},
true,
true));
}
TEST(ToIndexVectorOrScalarContainerTest, Basic) {
using tensorstore::Index;
using tensorstore::kImplicit;
using tensorstore::internal_python::IndexVectorOrScalarContainer;
using tensorstore::internal_python::OptionallyImplicitIndex;
using tensorstore::internal_python::ToIndexVectorOrScalarContainer;
EXPECT_EQ(
IndexVectorOrScalarContainer{Index{3}},
ToIndexVectorOrScalarContainer(OptionallyImplicitIndex{3}, kImplicit));
EXPECT_EQ(IndexVectorOrScalarContainer{3},
ToIndexVectorOrScalarContainer(OptionallyImplicitIndex{3}, 4));
EXPECT_EQ(
IndexVectorOrScalarContainer{Index{3}},
ToIndexVectorOrScalarContainer(OptionallyImplicitIndex{kImplicit}, 3));
EXPECT_EQ(IndexVectorOrScalarContainer{kImplicit},
ToIndexVectorOrScalarContainer(OptionallyImplicitIndex{kImplicit},
kImplicit));
EXPECT_EQ(IndexVectorOrScalarContainer{std::vector<Index>({1, 2, 3})},
ToIndexVectorOrScalarContainer(
std::vector<OptionallyImplicitIndex>{
OptionallyImplicitIndex{1},
OptionallyImplicitIndex{2},
OptionallyImplicitIndex{3},
},
kImplicit));
EXPECT_EQ(IndexVectorOrScalarContainer{std::vector<Index>({1, 2, kImplicit})},
ToIndexVectorOrScalarContainer(
std::vector<OptionallyImplicitIndex>{
OptionallyImplicitIndex{1},
OptionallyImplicitIndex{2},
OptionallyImplicitIndex{kImplicit},
},
kImplicit));
EXPECT_EQ(IndexVectorOrScalarContainer{std::vector<Index>({1, 2, 3})},
ToIndexVectorOrScalarContainer(
std::vector<OptionallyImplicitIndex>{
OptionallyImplicitIndex{1},
OptionallyImplicitIndex{2},
OptionallyImplicitIndex{kImplicit},
},
3));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/python/tensorstore/index.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/python/tensorstore/index_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
085ce812-51f1-4c00-90f7-ec5e49d65dd9 | cpp | abseil/abseil-cpp | inline_variable | absl/base/internal/inline_variable.h | absl/base/inline_variable_test.cc | #ifndef ABSL_BASE_INTERNAL_INLINE_VARIABLE_H_
#define ABSL_BASE_INTERNAL_INLINE_VARIABLE_H_
#include <type_traits>
#include "absl/base/internal/identity.h"
#ifdef __cpp_inline_variables
#if defined(__clang__)
#define ABSL_INTERNAL_EXTERN_DECL(type, name) \
extern const ::absl::internal::type_identity_t<type> name;
#else
#define ABSL_INTERNAL_EXTERN_DECL(type, name)
#endif
#define ABSL_INTERNAL_INLINE_CONSTEXPR(type, name, init) \
ABSL_INTERNAL_EXTERN_DECL(type, name) \
inline constexpr ::absl::internal::type_identity_t<type> name = init
#else
#define ABSL_INTERNAL_INLINE_CONSTEXPR(var_type, name, init) \
template <class = void> \
struct AbslInternalInlineVariableHolder##name { \
static constexpr ::absl::internal::type_identity_t<var_type> kInstance = \
init; \
}; \
\
template <class AbslInternalDummy> \
constexpr ::absl::internal::type_identity_t<var_type> \
AbslInternalInlineVariableHolder##name<AbslInternalDummy>::kInstance; \
\
static constexpr const ::absl::internal::type_identity_t<var_type>& \
name = \
AbslInternalInlineVariableHolder##name<>::kInstance; \
static_assert(sizeof(void (*)(decltype(name))) != 0, \
"Silence unused variable warnings.")
#endif
#endif | #include <type_traits>
#include "absl/base/internal/inline_variable.h"
#include "absl/base/internal/inline_variable_testing.h"
#include "gtest/gtest.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace inline_variable_testing_internal {
namespace {
TEST(InlineVariableTest, Constexpr) {
static_assert(inline_variable_foo.value == 5, "");
static_assert(other_inline_variable_foo.value == 5, "");
static_assert(inline_variable_int == 5, "");
static_assert(other_inline_variable_int == 5, "");
}
TEST(InlineVariableTest, DefaultConstructedIdentityEquality) {
EXPECT_EQ(get_foo_a().value, 5);
EXPECT_EQ(get_foo_b().value, 5);
EXPECT_EQ(&get_foo_a(), &get_foo_b());
}
TEST(InlineVariableTest, DefaultConstructedIdentityInequality) {
EXPECT_NE(&inline_variable_foo, &other_inline_variable_foo);
}
TEST(InlineVariableTest, InitializedIdentityEquality) {
EXPECT_EQ(get_int_a(), 5);
EXPECT_EQ(get_int_b(), 5);
EXPECT_EQ(&get_int_a(), &get_int_b());
}
TEST(InlineVariableTest, InitializedIdentityInequality) {
EXPECT_NE(&inline_variable_int, &other_inline_variable_int);
}
TEST(InlineVariableTest, FunPtrType) {
static_assert(
std::is_same<void(*)(),
std::decay<decltype(inline_variable_fun_ptr)>::type>::value,
"");
}
}
}
ABSL_NAMESPACE_END
} | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/base/internal/inline_variable.h | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/base/inline_variable_test.cc | 03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4 |
f681c41e-8beb-4b1d-9b31-c9afbba8d48d | cpp | google/tsl | stringprintf | tsl/platform/stringprintf.cc | tsl/platform/stringprintf_test.cc | #include "tsl/platform/stringprintf.h"
#include <errno.h>
#include <stdarg.h>
#include <stdio.h>
namespace tsl {
namespace strings {
void Appendv(string* dst, const char* format, va_list ap) {
static const int kSpaceLength = 1024;
char space[kSpaceLength];
va_list backup_ap;
va_copy(backup_ap, ap);
int result = vsnprintf(space, kSpaceLength, format, backup_ap);
va_end(backup_ap);
if (result < kSpaceLength) {
if (result >= 0) {
dst->append(space, result);
return;
}
#ifdef _MSC_VER
va_copy(backup_ap, ap);
result = vsnprintf(nullptr, 0, format, backup_ap);
va_end(backup_ap);
#endif
if (result < 0) {
return;
}
}
int length = result + 1;
char* buf = new char[length];
va_copy(backup_ap, ap);
result = vsnprintf(buf, length, format, backup_ap);
va_end(backup_ap);
if (result >= 0 && result < length) {
dst->append(buf, result);
}
delete[] buf;
}
string Printf(const char* format, ...) {
va_list ap;
va_start(ap, format);
string result;
Appendv(&result, format, ap);
va_end(ap);
return result;
}
void Appendf(string* dst, const char* format, ...) {
va_list ap;
va_start(ap, format);
Appendv(dst, format, ap);
va_end(ap);
}
}
} | #include "tsl/platform/stringprintf.h"
#include <string>
#include "tsl/platform/test.h"
namespace tsl {
namespace strings {
namespace {
TEST(PrintfTest, Empty) {
EXPECT_EQ("", Printf("%s", string().c_str()));
EXPECT_EQ("", Printf("%s", ""));
}
TEST(PrintfTest, Misc) {
#if !defined(_MSC_VER)
EXPECT_EQ("123hello w", Printf("%3$d%2$s %1$c", 'w', "hello", 123));
#endif
}
TEST(AppendfTest, Empty) {
string value("Hello");
const char* empty = "";
Appendf(&value, "%s", empty);
EXPECT_EQ("Hello", value);
}
TEST(AppendfTest, EmptyString) {
string value("Hello");
Appendf(&value, "%s", "");
EXPECT_EQ("Hello", value);
}
TEST(AppendfTest, String) {
string value("Hello");
Appendf(&value, " %s", "World");
EXPECT_EQ("Hello World", value);
}
TEST(AppendfTest, Int) {
string value("Hello");
Appendf(&value, " %d", 123);
EXPECT_EQ("Hello 123", value);
}
TEST(PrintfTest, Multibyte) {
char* old_locale = setlocale(LC_CTYPE, nullptr);
setlocale(LC_CTYPE, "en_US.utf8");
const char kInvalidCodePoint[] = "\375\067s";
string value = Printf("%.*s", 3, kInvalidCodePoint);
EXPECT_TRUE(value.empty() || value == kInvalidCodePoint);
int n = 2048;
char* buf = new char[n + 1];
memset(buf, ' ', n - 3);
memcpy(buf + n - 3, kInvalidCodePoint, 4);
value = Printf("%.*s", n, buf);
EXPECT_TRUE(value.empty() || value == buf);
delete[] buf;
setlocale(LC_CTYPE, old_locale);
}
TEST(PrintfTest, NoMultibyte) {
char* old_locale = setlocale(LC_CTYPE, nullptr);
setlocale(LC_CTYPE, "POSIX");
string value = Printf("%.*s", 3, "\375\067s");
setlocale(LC_CTYPE, old_locale);
EXPECT_EQ("\375\067s", value);
}
TEST(PrintfTest, DontOverwriteErrno) {
errno = ECHILD;
string value = Printf("Hello, %s!", "World");
EXPECT_EQ(ECHILD, errno);
}
TEST(PrintfTest, LargeBuf) {
int n = 2048;
char* buf = new char[n + 1];
memset(buf, ' ', n);
buf[n] = 0;
string value = Printf("%s", buf);
EXPECT_EQ(buf, value);
delete[] buf;
}
}
}
} | https://github.com/google/tsl/blob/6d708fdcdd4f40537b7fa273371215a6fa3d4423/tsl/platform/stringprintf.cc | https://github.com/google/tsl/blob/6d708fdcdd4f40537b7fa273371215a6fa3d4423/tsl/platform/stringprintf_test.cc | 6d708fdcdd4f40537b7fa273371215a6fa3d4423 |
02873ad8-4e28-4ca9-854c-96b36f1384eb | cpp | google/quiche | simple_ticket_crypter | quiche/quic/tools/simple_ticket_crypter.cc | quiche/quic/tools/simple_ticket_crypter_test.cc | #include "quiche/quic/tools/simple_ticket_crypter.h"
#include <memory>
#include <utility>
#include <vector>
#include "openssl/aead.h"
#include "openssl/rand.h"
namespace quic {
namespace {
constexpr QuicTime::Delta kTicketKeyLifetime =
QuicTime::Delta::FromSeconds(60 * 60 * 24 * 7);
constexpr size_t kEpochSize = 1;
constexpr size_t kIVSize = 16;
constexpr size_t kAuthTagSize = 16;
constexpr size_t kIVOffset = kEpochSize;
constexpr size_t kMessageOffset = kIVOffset + kIVSize;
}
SimpleTicketCrypter::SimpleTicketCrypter(QuicClock* clock) : clock_(clock) {
RAND_bytes(&key_epoch_, 1);
current_key_ = NewKey();
}
SimpleTicketCrypter::~SimpleTicketCrypter() = default;
size_t SimpleTicketCrypter::MaxOverhead() {
return kEpochSize + kIVSize + kAuthTagSize;
}
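// Encrypted ticket layout: [1-byte key epoch][16-byte random IV]
// [AES-128-GCM ciphertext + 16-byte tag].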
std::vector<uint8_t> SimpleTicketCrypter::Encrypt(
absl::string_view in, absl::string_view encryption_key) {
QUICHE_DCHECK(encryption_key.empty());
MaybeRotateKeys();
std::vector<uint8_t> out(in.size() + MaxOverhead());
out[0] = key_epoch_;
RAND_bytes(out.data() + kIVOffset, kIVSize);
size_t out_len;
const EVP_AEAD_CTX* ctx = current_key_->aead_ctx.get();
if (!EVP_AEAD_CTX_seal(ctx, out.data() + kMessageOffset, &out_len,
out.size() - kMessageOffset, out.data() + kIVOffset,
kIVSize, reinterpret_cast<const uint8_t*>(in.data()),
in.size(), nullptr, 0)) {
return std::vector<uint8_t>();
}
out.resize(out_len + kMessageOffset);
return out;
}
std::vector<uint8_t> SimpleTicketCrypter::Decrypt(absl::string_view in) {
MaybeRotateKeys();
if (in.size() < kMessageOffset) {
return std::vector<uint8_t>();
}
const uint8_t* input = reinterpret_cast<const uint8_t*>(in.data());
std::vector<uint8_t> out(in.size() - kMessageOffset);
size_t out_len;
const EVP_AEAD_CTX* ctx = current_key_->aead_ctx.get();
if (input[0] != key_epoch_) {
if (input[0] == static_cast<uint8_t>(key_epoch_ - 1) && previous_key_) {
ctx = previous_key_->aead_ctx.get();
} else {
return std::vector<uint8_t>();
}
}
if (!EVP_AEAD_CTX_open(ctx, out.data(), &out_len, out.size(),
input + kIVOffset, kIVSize, input + kMessageOffset,
in.size() - kMessageOffset, nullptr, 0)) {
return std::vector<uint8_t>();
}
out.resize(out_len);
return out;
}
void SimpleTicketCrypter::Decrypt(
absl::string_view in,
std::shared_ptr<quic::ProofSource::DecryptCallback> callback) {
callback->Run(Decrypt(in));
}
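// Rotates to a freshly generated key once the current key expires, keeping the
// previous key so recently issued tickets remain decryptable.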
void SimpleTicketCrypter::MaybeRotateKeys() {
QuicTime now = clock_->ApproximateNow();
if (current_key_->expiration < now) {
previous_key_ = std::move(current_key_);
current_key_ = NewKey();
key_epoch_++;
}
}
std::unique_ptr<SimpleTicketCrypter::Key> SimpleTicketCrypter::NewKey() {
auto key = std::make_unique<SimpleTicketCrypter::Key>();
RAND_bytes(key->key, kKeySize);
EVP_AEAD_CTX_init(key->aead_ctx.get(), EVP_aead_aes_128_gcm(), key->key,
kKeySize, EVP_AEAD_DEFAULT_TAG_LENGTH, nullptr);
key->expiration = clock_->ApproximateNow() + kTicketKeyLifetime;
return key;
}
} | #include "quiche/quic/tools/simple_ticket_crypter.h"
#include <memory>
#include <vector>
#include "quiche/quic/platform/api/quic_test.h"
#include "quiche/quic/test_tools/mock_clock.h"
namespace quic {
namespace test {
namespace {
constexpr QuicTime::Delta kOneDay = QuicTime::Delta::FromSeconds(60 * 60 * 24);
}
class DecryptCallback : public quic::ProofSource::DecryptCallback {
public:
explicit DecryptCallback(std::vector<uint8_t>* out) : out_(out) {}
void Run(std::vector<uint8_t> plaintext) override { *out_ = plaintext; }
private:
std::vector<uint8_t>* out_;
};
absl::string_view StringPiece(const std::vector<uint8_t>& in) {
return absl::string_view(reinterpret_cast<const char*>(in.data()), in.size());
}
class SimpleTicketCrypterTest : public QuicTest {
public:
SimpleTicketCrypterTest() : ticket_crypter_(&mock_clock_) {}
protected:
MockClock mock_clock_;
SimpleTicketCrypter ticket_crypter_;
};
TEST_F(SimpleTicketCrypterTest, EncryptDecrypt) {
std::vector<uint8_t> plaintext = {1, 2, 3, 4, 5};
std::vector<uint8_t> ciphertext =
ticket_crypter_.Encrypt(StringPiece(plaintext), {});
EXPECT_NE(plaintext, ciphertext);
std::vector<uint8_t> out_plaintext;
ticket_crypter_.Decrypt(StringPiece(ciphertext),
std::make_unique<DecryptCallback>(&out_plaintext));
EXPECT_EQ(out_plaintext, plaintext);
}
TEST_F(SimpleTicketCrypterTest, CiphertextsDiffer) {
std::vector<uint8_t> plaintext = {1, 2, 3, 4, 5};
std::vector<uint8_t> ciphertext1 =
ticket_crypter_.Encrypt(StringPiece(plaintext), {});
std::vector<uint8_t> ciphertext2 =
ticket_crypter_.Encrypt(StringPiece(plaintext), {});
EXPECT_NE(ciphertext1, ciphertext2);
}
TEST_F(SimpleTicketCrypterTest, DecryptionFailureWithModifiedCiphertext) {
std::vector<uint8_t> plaintext = {1, 2, 3, 4, 5};
std::vector<uint8_t> ciphertext =
ticket_crypter_.Encrypt(StringPiece(plaintext), {});
EXPECT_NE(plaintext, ciphertext);
for (size_t i = 0; i < ciphertext.size(); i++) {
SCOPED_TRACE(i);
std::vector<uint8_t> munged_ciphertext = ciphertext;
munged_ciphertext[i] ^= 1;
std::vector<uint8_t> out_plaintext;
ticket_crypter_.Decrypt(StringPiece(munged_ciphertext),
std::make_unique<DecryptCallback>(&out_plaintext));
EXPECT_TRUE(out_plaintext.empty());
}
}
TEST_F(SimpleTicketCrypterTest, DecryptionFailureWithEmptyCiphertext) {
std::vector<uint8_t> out_plaintext;
ticket_crypter_.Decrypt(absl::string_view(),
std::make_unique<DecryptCallback>(&out_plaintext));
EXPECT_TRUE(out_plaintext.empty());
}
TEST_F(SimpleTicketCrypterTest, KeyRotation) {
std::vector<uint8_t> plaintext = {1, 2, 3};
std::vector<uint8_t> ciphertext =
ticket_crypter_.Encrypt(StringPiece(plaintext), {});
EXPECT_FALSE(ciphertext.empty());
mock_clock_.AdvanceTime(kOneDay * 8);
std::vector<uint8_t> out_plaintext;
ticket_crypter_.Decrypt(StringPiece(ciphertext),
std::make_unique<DecryptCallback>(&out_plaintext));
EXPECT_EQ(out_plaintext, plaintext);
mock_clock_.AdvanceTime(kOneDay * 8);
ticket_crypter_.Decrypt(StringPiece(ciphertext),
std::make_unique<DecryptCallback>(&out_plaintext));
EXPECT_TRUE(out_plaintext.empty());
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/tools/simple_ticket_crypter.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/tools/simple_ticket_crypter_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
7cf8e0a3-4cf1-437a-bcf9-712f019c16e5 | cpp | tensorflow/tensorflow | pattern_utils | tensorflow/core/grappler/utils/pattern_utils.cc | tensorflow/core/grappler/utils/pattern_utils_test.cc | #include "tensorflow/core/grappler/utils/pattern_utils.h"
#include <algorithm>
#include <memory>
#include "absl/container/flat_hash_set.h"
namespace tensorflow {
namespace grappler {
namespace utils {
const bool IsCommutativeOp(const string& op) {
std::vector<string> op_list = str_util::Split(op, '|');
static const auto* commutative_ops = new absl::flat_hash_set<string>(
{"Add", "AddV2", "Mul", "Maximum", "SquaredDifference"});
for (const string& op_ : op_list) {
if (commutative_ops->contains(op_)) return true;
}
return false;
}
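// Returns true if op2 matches op1, where op1 may be the wildcard "*" or a
// '|'-separated list of op names.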
bool IsSame(string op1, string op2) {
if (op1 == "*") return true;
std::vector<string> op1_list = str_util::Split(op1, '|');
for (const string& op_1 : op1_list) {
if (op_1 == op2) return true;
}
return false;
}
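// Recursively matches the pattern against the subgraph rooted at node_view by
// following regular fanins; for two-input commutative ops the two pattern
// children may be swapped to obtain a match.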
template <>
bool SubGraphMatcher<MatchingDirection::kFollowInputs>::DoesOpTypePatternMatch(
const OpTypePattern& pattern, MutableNodeView* node_view,
NodeViewMatch* match) {
if ((node_view->NumControllingFanins() > 0 &&
pattern.node_status != NodeStatus::kRemain) ||
(node_view->NumControlledFanouts() > 0 &&
pattern.node_status == NodeStatus::kRemove))
return false;
bool op_type_matched = false;
if (pattern.op == "*") {
op_type_matched = true;
} else {
std::vector<string> op_list = str_util::Split(pattern.op, '|');
for (const string& op : op_list) {
if (node_view->node()->op() == op) {
op_type_matched = true;
break;
}
}
}
if (op_type_matched) {
if (node_label_to_index_.find(pattern.label) ==
node_label_to_index_.end()) {
node_label_to_index_[pattern.label] = node_view->node_index();
matched_node_indices_.insert(node_view->node_index());
if (pattern.node_status == NodeStatus::kRemove) {
remove_node_indices_.insert(node_view->node_index());
}
} else if (node_label_to_index_[pattern.label] != node_view->node_index()) {
return false;
} else {
DCHECK(node_label_to_index_[pattern.label] == node_view->node_index());
}
} else {
return false;
}
match->node_view = node_view;
if (!pattern.children.empty()) {
auto graph_children = node_view->GetRegularFanins();
int num_children = graph_children.size();
if (num_children != pattern.children.size()) {
return false;
} else {
std::vector<int> pattern_child_indices(num_children);
std::iota(pattern_child_indices.begin(), pattern_child_indices.end(), 0);
string op_name = pattern.op;
if (IsCommutativeOp(op_name) && num_children == 2) {
MutableNodeView* graph_child0_node_view =
graph_view_->GetNode(graph_children[0].node_index());
MutableNodeView* graph_child1_node_view =
graph_view_->GetNode(graph_children[1].node_index());
if ((!IsSame(pattern.children[0].op, graph_child0_node_view->GetOp()) &&
IsSame(pattern.children[1].op, graph_child0_node_view->GetOp())) ||
(!IsSame(pattern.children[1].op, graph_child1_node_view->GetOp()) &&
IsSame(pattern.children[0].op, graph_child1_node_view->GetOp())))
std::swap(pattern_child_indices[0], pattern_child_indices[1]);
}
for (int i = 0; i < num_children; ++i) {
auto child_node_index = graph_children[i].node_index();
MutableNodeView* child_node_view =
graph_view_->GetNode(child_node_index);
const OpTypePattern& child_pattern =
pattern.children[pattern_child_indices[i]];
match->children.push_back(NodeViewMatch());
NodeViewMatch* child_match = &(match->children.back());
if (!DoesOpTypePatternMatch(child_pattern, child_node_view,
child_match)) {
return false;
}
}
}
}
return true;
}
template <>
bool SubGraphMatcher<MatchingDirection::kFollowInputs>::GetMatchedNodes(
const OpTypePattern& pattern,
const std::unordered_set<string>& nodes_to_preserve,
MutableNodeView* node_view, std::map<string, int>* matched_nodes_map,
std::set<int>* remove_node_indices) {
bool found_match = false;
match_ = std::make_unique<NodeViewMatch>();
if (DoesOpTypePatternMatch(pattern, node_view, match_.get())) {
if (IsSafeNodesToRemove(nodes_to_preserve)) {
found_match = true;
*matched_nodes_map = this->node_label_to_index_;
*remove_node_indices = this->remove_node_indices_;
}
} else {
found_match = false;
}
match_->Clear();
match_.reset(nullptr);
matched_node_indices_.clear();
node_label_to_index_.clear();
remove_node_indices_.clear();
return found_match;
}
}
}
} | #include "tensorflow/core/grappler/utils/pattern_utils.h"
#include "tensorflow/cc/ops/nn_ops_internal.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
namespace tensorflow {
namespace grappler {
namespace utils {
namespace {
using ::tensorflow::ops::Placeholder;
void GetMatMulBiasAddGeluGraph(GraphDef* graph,
bool add_external_dependent = false) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
auto input_shape = ops::Placeholder::Shape({8, 32});
auto weight_shape = ops::Placeholder::Shape({32, 64});
auto bias_shape = ops::Placeholder::Shape({64});
auto input = Placeholder(s.WithOpName("input"), DT_FLOAT, input_shape);
auto weight = Placeholder(s.WithOpName("weight"), DT_FLOAT, weight_shape);
auto bias = Placeholder(s.WithOpName("bias"), DT_FLOAT, bias_shape);
auto matmul = ops::MatMul(s.WithOpName("matmul"), input, weight);
auto bias_add = ops::BiasAdd(s.WithOpName("bias_add"), matmul, bias);
if (add_external_dependent) {
auto external_dependent =
ops::Identity(s.WithOpName("external_dependent"), bias_add);
}
auto one_over_square_root_two =
ops::Const(s.WithOpName("one_over_square_root_two"), {0.707f}, {});
auto bias_add_times_const = ops::Mul(s.WithOpName("bias_add_times_const"),
bias_add, one_over_square_root_two);
auto erf = ops::Erf(s.WithOpName("erf"), bias_add_times_const);
auto one = ops::Const(s.WithOpName("one"), {1.0f}, {});
auto erf_plus_one = ops::AddV2(s.WithOpName("erf_plus_one"), erf, one);
auto one_half = ops::Const(s.WithOpName("one_half"), {0.5f}, {});
auto one_half_times_erf_plus_one = ops::Mul(
s.WithOpName("one_half_times_erf_plus_one"), one_half, erf_plus_one);
auto gelu =
ops::Mul(s.WithOpName("gelu"), one_half_times_erf_plus_one, bias_add);
auto fetch = ops::Identity(s.WithOpName("fetch"), gelu);
TF_ASSERT_OK(s.ToGraphDef(graph));
}
OpTypePattern GetMatMulBiasAddGeluPattern() {
OpTypePattern pattern_syntax{"Mul", "my_gelu", NodeStatus::kReplace,
{
{"Mul", "my_one_half_times_erf_plus_one", NodeStatus::kRemove,
{
{"Const", "my_one_half", NodeStatus::kRemain},
{"AddV2", "my_erf_plus_one", NodeStatus::kRemove,
{
{"Erf", "my_erf", NodeStatus::kRemove,
{
{"Mul", "my_bias_add_times_const", NodeStatus::kRemove,
{
{"BiasAdd", "my_bias_add", NodeStatus::kRemove},
{"Const", "my_one_over_square_root_two", NodeStatus::kRemain}
}
}
}
},
{"Const", "my_one", NodeStatus::kRemain}
}
}
}
},
{"BiasAdd", "my_bias_add", NodeStatus::kRemove,
{
{"MatMul", "my_matmul", NodeStatus::kRemove},
{"*", "my_bias", NodeStatus::kRemain}
}
}
}
};
return pattern_syntax;
}
class PatternMatcherTest : public ::testing::Test {
protected:
struct NodeConfig {
NodeConfig(string name, string op, std::vector<string> inputs)
: name(std::move(name)), op(std::move(op)), inputs(std::move(inputs)) {}
string name;
string op;
std::vector<string> inputs;
};
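// Builds a GraphDef from a list of (name, op, inputs) node descriptions.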
static GraphDef CreateGraph(const std::vector<NodeConfig>& nodes) {
GraphDef graph;
for (const NodeConfig& node : nodes) {
NodeDef node_def;
node_def.set_name(node.name);
node_def.set_op(node.op);
for (const string& input : node.inputs) {
node_def.add_input(input);
}
*graph.add_node() = std::move(node_def);
}
return graph;
}
};
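// Tree-shaped graph: the pattern {E -> C, D} should match rooted at "e", and
// each matched label (minus the "my_" prefix) should resolve to the
// corresponding graph node.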
TEST_F(PatternMatcherTest, Tree) {
::tensorflow::Status status;
GraphDef graph = CreateGraph({{"e", "E", {"c", "d"}},
{"c", "C", {"b"}},
{"d", "D", {}},
{"b", "B", {"a"}},
{"a", "A", {}}});
OpTypePattern pattern{"E", "my_e", NodeStatus::kReplace,
{
{"C", "my_c", NodeStatus::kRemove},
{"D", "my_d", NodeStatus::kRemove}
}
};
MutableGraphView graph_view(&graph, &status);
TF_ASSERT_OK(status);
TF_EXPECT_OK(graph_view.SortTopologically(false, {}));
auto root_node_view = graph_view.GetNode("e");
SubGraphMatcher<MatchingDirection::kFollowInputs> graph_matcher(&graph_view);
std::map<string, int> matched_nodes_map;
std::set<int> remove_node_indices;
bool found_match = graph_matcher.GetMatchedNodes(
pattern, {}, root_node_view, &matched_nodes_map, &remove_node_indices);
EXPECT_TRUE(found_match);
EXPECT_FALSE(matched_nodes_map.empty());
EXPECT_FALSE(remove_node_indices.empty());
bool all_indices_matched = true;
for (auto it = matched_nodes_map.begin(); it != matched_nodes_map.end();
it++) {
auto label = absl::StripPrefix(it->first, "my_");
int matched_node_idx = it->second;
int expected_node_idx = graph_view.GetNode(label)->node_index();
if (matched_node_idx != expected_node_idx) {
all_indices_matched = false;
break;
}
}
EXPECT_TRUE(all_indices_matched);
}
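// Same pattern against a DAG where "b" feeds both "c" and "d": a shared node
// may appear under several pattern branches with the same label. Adding "c"
// and "d" to nodes_to_preserve afterwards must make the match fail.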
TEST_F(PatternMatcherTest, DAG) {
::tensorflow::Status status;
GraphDef graph = CreateGraph({{"e", "E", {"c", "d"}},
{"c", "C", {"b"}},
{"d", "D", {"b"}},
{"b", "B", {"a"}},
{"a", "A", {}}});
OpTypePattern pattern{"E", "my_e", NodeStatus::kReplace,
{
{"C", "my_c", NodeStatus::kRemove,
{
{"B", "my_b", NodeStatus::kRemove}
}
},
{"D", "my_d", NodeStatus::kRemove,
{
{"B", "my_b", NodeStatus::kRemove}
}
}
}
};
MutableGraphView graph_view(&graph, &status);
TF_ASSERT_OK(status);
TF_EXPECT_OK(graph_view.SortTopologically(false, {}));
auto root_node_view = graph_view.GetNode("e");
SubGraphMatcher<MatchingDirection::kFollowInputs> graph_matcher(&graph_view);
std::unordered_set<string> nodes_to_preserve = {"foo"};
std::map<string, int> matched_nodes_map;
std::set<int> remove_node_indices;
bool found_match =
graph_matcher.GetMatchedNodes(pattern, nodes_to_preserve, root_node_view,
&matched_nodes_map, &remove_node_indices);
EXPECT_TRUE(found_match);
EXPECT_FALSE(matched_nodes_map.empty());
EXPECT_FALSE(remove_node_indices.empty());
bool all_indices_matched = true;
for (auto it = matched_nodes_map.begin(); it != matched_nodes_map.end();
it++) {
auto label = absl::StripPrefix(it->first, "my_");
int matched_node_idx = it->second;
int expected_node_idx = graph_view.GetNode(label)->node_index();
if (matched_node_idx != expected_node_idx) {
all_indices_matched = false;
break;
}
}
EXPECT_TRUE(all_indices_matched);
nodes_to_preserve.insert({"c", "d"});
matched_nodes_map.clear();
remove_node_indices.clear();
found_match =
graph_matcher.GetMatchedNodes(pattern, nodes_to_preserve, root_node_view,
&matched_nodes_map, &remove_node_indices);
EXPECT_FALSE(found_match);
EXPECT_TRUE(matched_nodes_map.empty());
EXPECT_TRUE(remove_node_indices.empty());
}
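// "f" consumes "d" from outside the pattern, so "d" cannot safely be removed
// and the match must fail.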
TEST_F(PatternMatcherTest, DAGExternalDependent) {
::tensorflow::Status status;
GraphDef graph = CreateGraph({{"f", "F", {"d"}},
{"e", "E", {"c", "d"}},
{"c", "C", {"b"}},
{"d", "D", {"b"}},
{"b", "B", {"a"}},
{"a", "A", {}}});
OpTypePattern pattern{"E", "my_e", NodeStatus::kReplace,
{
{"C", "my_c", NodeStatus::kRemove,
{
{"B", "my_b", NodeStatus::kRemove}
}
},
{"D", "my_d", NodeStatus::kRemove,
{
{"B", "my_b", NodeStatus::kRemove}
}
}
}
};
MutableGraphView graph_view(&graph, &status);
TF_ASSERT_OK(status);
TF_EXPECT_OK(graph_view.SortTopologically(false, {}));
auto root_node_view = graph_view.GetNode("e");
SubGraphMatcher<MatchingDirection::kFollowInputs> graph_matcher(&graph_view);
std::map<string, int> matched_nodes_map;
std::set<int> remove_node_indices;
bool found_match = graph_matcher.GetMatchedNodes(
pattern, {}, root_node_view, &matched_nodes_map, &remove_node_indices);
EXPECT_FALSE(found_match);
EXPECT_TRUE(matched_nodes_map.empty());
EXPECT_TRUE(remove_node_indices.empty());
}
TEST_F(PatternMatcherTest, MatMulBiasAddGelu) {
::tensorflow::Status status;
GraphDef graph;
GetMatMulBiasAddGeluGraph(&graph);
OpTypePattern pattern = GetMatMulBiasAddGeluPattern();
MutableGraphView graph_view(&graph, &status);
TF_ASSERT_OK(status);
TF_EXPECT_OK(graph_view.SortTopologically(false, {}));
auto root_node_view = graph_view.GetNode("gelu");
SubGraphMatcher<MatchingDirection::kFollowInputs> graph_matcher(&graph_view);
std::map<string, int> matched_nodes_map;
std::set<int> remove_node_indices;
bool found_match = graph_matcher.GetMatchedNodes(
pattern, {}, root_node_view, &matched_nodes_map, &remove_node_indices);
EXPECT_TRUE(found_match);
EXPECT_FALSE(matched_nodes_map.empty());
EXPECT_FALSE(remove_node_indices.empty());
bool all_indices_matched = true;
for (auto it = matched_nodes_map.begin(); it != matched_nodes_map.end();
it++) {
auto label = absl::StripPrefix(it->first, "my_");
int matched_node_idx = it->second;
int expected_node_idx = graph_view.GetNode(label)->node_index();
if (matched_node_idx != expected_node_idx) {
all_indices_matched = false;
break;
}
}
EXPECT_TRUE(all_indices_matched);
}
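// With an external Identity consuming bias_add, a node tagged kRemove has a
// dependent outside the pattern, so the GELU match must fail.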
TEST_F(PatternMatcherTest, MatMulBiasAddGeluExternalDependent) {
::tensorflow::Status status;
GraphDef graph;
GetMatMulBiasAddGeluGraph(&graph, true);
OpTypePattern pattern = GetMatMulBiasAddGeluPattern();
MutableGraphView graph_view(&graph, &status);
TF_ASSERT_OK(status);
TF_EXPECT_OK(graph_view.SortTopologically(false, {}));
auto root_node_view = graph_view.GetNode("gelu");
SubGraphMatcher<MatchingDirection::kFollowInputs> graph_matcher(&graph_view);
std::map<string, int> matched_nodes_map;
std::set<int> remove_node_indices;
bool found_match = graph_matcher.GetMatchedNodes(
pattern, {}, root_node_view, &matched_nodes_map, &remove_node_indices);
EXPECT_FALSE(found_match);
EXPECT_TRUE(matched_nodes_map.empty());
EXPECT_TRUE(remove_node_indices.empty());
}
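// After a successful match the subgraph is rewritten in place: a "_FusedMatMul"
// node reuses the root's name ("gelu"), and the nodes tagged kRemove are
// deleted through the MutableGraphView mutation API.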
TEST_F(PatternMatcherTest, MatMulBiasAddGeluMutation) {
::tensorflow::Status status;
GraphDef graph;
GetMatMulBiasAddGeluGraph(&graph);
OpTypePattern pattern = GetMatMulBiasAddGeluPattern();
MutableGraphView graph_view(&graph, &status);
TF_ASSERT_OK(status);
TF_EXPECT_OK(graph_view.SortTopologically(false, {}));
auto root_node_view = graph_view.GetNode("gelu");
SubGraphMatcher<MatchingDirection::kFollowInputs> graph_matcher(&graph_view);
std::map<string, int> matched_nodes_map;
std::set<int> remove_node_indices;
bool found_match = graph_matcher.GetMatchedNodes(
pattern, {}, root_node_view, &matched_nodes_map, &remove_node_indices);
EXPECT_TRUE(found_match);
EXPECT_FALSE(matched_nodes_map.empty());
EXPECT_FALSE(remove_node_indices.empty());
int num_nodes_before = graph_view.NumNodes();
std::vector<string> remove_node_names;
for (auto const& node_idx : remove_node_indices) {
remove_node_names.push_back(graph_view.GetNode(node_idx)->GetName());
}
Mutation* mutation = graph_view.GetMutationBuilder();
NodeDef fused_node;
fused_node.set_name("gelu");
fused_node.set_op("_FusedMatMul");
fused_node.add_input(graph_view.GetNode("matmul")->node()->input(0));
fused_node.add_input(graph_view.GetNode("matmul")->node()->input(1));
fused_node.add_input(graph_view.GetNode("bias_add")->node()->input(1));
mutation->AddNode(std::move(fused_node), &status);
TF_ASSERT_OK(status);
TF_EXPECT_OK(mutation->Apply());
for (auto const& node_idx : remove_node_indices) {
mutation->RemoveNode(graph_view.GetNode(node_idx));
}
TF_EXPECT_OK(mutation->Apply());
int num_nodes_after = graph_view.NumNodes();
EXPECT_EQ(num_nodes_before - remove_node_indices.size(), num_nodes_after);
bool remove_nodes_deleted = true;
for (auto const& node_name : remove_node_names) {
if (graph_view.GetNode(node_name) != nullptr) {
remove_nodes_deleted = false;
break;
}
}
EXPECT_TRUE(remove_nodes_deleted);
bool replace_node_exist = graph_view.HasNode("gelu");
EXPECT_TRUE(replace_node_exist);
}
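// For commutative ops (Mul, Add, AddV2) the pattern should match regardless of
// the order of the root node's operands.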
TEST_F(PatternMatcherTest, CommutativeInputs) {
::tensorflow::Status status;
std::vector<string> commutative_ops = {"Mul", "Add", "AddV2"};
for (string op : commutative_ops) {
for (bool should_swap : {false, true}) {
std::vector<string> commutative_operands =
(should_swap ? std::vector<string>{"d", "c"}
: std::vector<string>{"c", "d"});
GraphDef graph = CreateGraph({{"e", op, commutative_operands},
{"c", "C", {"b"}},
{"d", "D", {"b"}},
{"b", "B", {"a"}},
{"a", "A", {}}});
OpTypePattern pattern{op, "my_e", NodeStatus::kReplace,
{
{"C", "my_c", NodeStatus::kRemove,
{
{"B", "my_b", NodeStatus::kRemove}
}
},
{"D", "my_d", NodeStatus::kRemove,
{
{"B", "my_b", NodeStatus::kRemove}
}
}
}
};
MutableGraphView graph_view(&graph, &status);
TF_ASSERT_OK(status);
TF_EXPECT_OK(graph_view.SortTopologically(false, {}));
auto root_node_view = graph_view.GetNode("e");
SubGraphMatcher<MatchingDirection::kFollowInputs> graph_matcher(
&graph_view);
std::map<string, int> matched_nodes_map;
std::set<int> remove_node_indices;
bool found_match = graph_matcher.GetMatchedNodes(
pattern, {}, root_node_view, &matched_nodes_map,
&remove_node_indices);
EXPECT_TRUE(found_match);
EXPECT_FALSE(matched_nodes_map.empty());
EXPECT_FALSE(remove_node_indices.empty());
bool all_indices_matched = true;
for (auto it = matched_nodes_map.begin(); it != matched_nodes_map.end();
it++) {
auto label = absl::StripPrefix(it->first, "my_");
int matched_node_idx = it->second;
int expected_node_idx = graph_view.GetNode(label)->node_index();
if (matched_node_idx != expected_node_idx) {
all_indices_matched = false;
break;
}
}
EXPECT_TRUE(all_indices_matched);
}
}
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/utils/pattern_utils.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/utils/pattern_utils_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
e162909f-8720-485d-a783-1593f03d6009 | cpp | google/langsvr | buffer_writer | src/buffer_writer.cc | src/buffer_writer_test.cc | #include "langsvr/buffer_writer.h"
#include <cstring>  // for memcpy used in Write()
namespace langsvr {
BufferWriter::BufferWriter() = default;
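// Appends `count` bytes from `in` to the end of the growing byte buffer.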
Result<SuccessType> BufferWriter::Write(const std::byte* in, size_t count) {
size_t at = buffer.size();
buffer.resize(at + count);
memcpy(&buffer[at], in, count);
return Success;
}
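// Views the accumulated bytes as a string; returns an empty view when nothing
// has been written yet.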
std::string_view BufferWriter::BufferString() const {
if (buffer.empty()) {
return "";
}
auto* data = reinterpret_cast<const char*>(&buffer[0]);
static_assert(sizeof(std::byte) == sizeof(char), "length needs calculation");
return std::string_view(data, buffer.size());
}
} | #include "langsvr/buffer_writer.h"
#include "gmock/gmock.h"
namespace langsvr {
namespace {
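// Converts each element of `in` to `T`; used below to compare the raw byte
// buffer against readable integer values.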
template <typename T, typename U>
std::vector<T> Cast(const std::vector<U>& in) {
std::vector<T> out;
out.resize(in.size());
for (size_t i = 0, n = in.size(); i < n; i++) {
out[i] = static_cast<T>(in[i]);
}
return out;
}
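// Writing "hello world" should leave exactly its ASCII byte values in the
// buffer, in order.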
TEST(BufferWriterTest, String) {
BufferWriter writer;
EXPECT_EQ(writer.String("hello world"), Success);
EXPECT_THAT(Cast<int>(writer.buffer),
testing::ElementsAre(104, 101, 108, 108, 111, 32, 119, 111, 114, 108, 100));
}
}
} | https://github.com/google/langsvr/blob/303c526231a90049a3e384549720f3fbd453cf66/src/buffer_writer.cc | https://github.com/google/langsvr/blob/303c526231a90049a3e384549720f3fbd453cf66/src/buffer_writer_test.cc | 303c526231a90049a3e384549720f3fbd453cf66 |