ID: 7f13d784-1dd3-4570-9e98-e4867d47a9bd
Language: cpp
Repository Name: tensorflow/tensorflow
File Name: shared_batch_scheduler
File Path in Repository: tensorflow/core/kernels/batching_util/shared_batch_scheduler.h
File Path for Unit Test: tensorflow/core/kernels/batching_util/shared_batch_scheduler_test.cc

Code:

#ifndef TENSORFLOW_CORE_KERNELS_BATCHING_UTIL_SHARED_BATCH_SCHEDULER_H_
#define TENSORFLOW_CORE_KERNELS_BATCHING_UTIL_SHARED_BATCH_SCHEDULER_H_
#include <stddef.h>
#include <atomic>
#include <cstddef>
#include <cstdint>
#include <deque>
#include <functional>
#include <list>
#include <memory>
#include <utility>
#include <variant>
#include <vector>
#include "absl/status/status.h"
#include "absl/strings/str_format.h"
#include "absl/time/clock.h"
#include "tensorflow/core/kernels/batching_util/batch_input_task.h"
#include "tensorflow/core/kernels/batching_util/batch_scheduler.h"
#include "tensorflow/core/kernels/batching_util/batch_scheduler_utils.h"
#include "tensorflow/core/kernels/batching_util/batch_stats.h"
#include "tensorflow/core/kernels/batching_util/periodic_function.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/cpu_info.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/notification.h"
#include "tensorflow/core/platform/thread_annotations.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/profiler/lib/connected_traceme.h"
#include "tensorflow/core/profiler/lib/context_types.h"
#include "tensorflow/core/profiler/lib/traceme.h"
#include "tensorflow/core/profiler/lib/traceme_encode.h"
#include "tsl/platform/criticality.h"
#include "tsl/platform/errors.h"
namespace tensorflow {
namespace serving {
namespace internal {
template <typename TaskType>
class Queue;
}  // namespace internal
}  // namespace serving
}  // namespace tensorflow
namespace tensorflow {
namespace serving {
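// A batch scheduler that shares a fixed-size pool of batch-processing threads
// across multiple queues. Each call to AddQueue() creates a logical queue with
// its own batching parameters and process-batch callback; the scheduler's
// threads service the queues in round-robin order. Destroying the handle
// returned by AddQueue() closes that queue and blocks until its pending
// batches have been processed.
//
// A minimal usage sketch (MyTask stands for an illustrative BatchTask
// subclass; it is not defined in this header):
//
//   SharedBatchScheduler<MyTask>::Options options;  // defaults are reasonable
//   std::shared_ptr<SharedBatchScheduler<MyTask>> scheduler;
//   TF_RETURN_IF_ERROR(
//       SharedBatchScheduler<MyTask>::Create(options, &scheduler));
//   std::unique_ptr<BatchScheduler<MyTask>> queue;
//   TF_RETURN_IF_ERROR(scheduler->AddQueue(
//       SharedBatchScheduler<MyTask>::QueueOptions(),
//       [](std::unique_ptr<Batch<MyTask>> batch) { /* process the batch */ },
//       &queue));
//   std::unique_ptr<MyTask> task = ...;
//   TF_RETURN_IF_ERROR(queue->Schedule(&task));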
template <typename TaskType>
class SharedBatchScheduler
: public std::enable_shared_from_this<SharedBatchScheduler<TaskType>> {
public:
using BatchTaskUniquePtr = std::unique_ptr<Batch<TaskType>>;
using ProcessBatchCallback =
std::variant<std::function<void(BatchTaskUniquePtr)>,
std::function<void(BatchTaskUniquePtr,
std::vector<std::unique_ptr<TaskType>>)>>;
struct Options {
string thread_pool_name = {"batch_threads"};
int num_batch_threads = port::MaxParallelism();
Env* env = Env::Default();
};
static Status Create(
const Options& options,
std::shared_ptr<SharedBatchScheduler<TaskType>>* scheduler);
virtual ~SharedBatchScheduler();
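// Per-queue options, passed to AddQueue(). `input_batch_size_limit` caps the
// size of an individual task; `batch_timeout_micros` bounds how long a
// non-full batch may wait; `max_enqueued_batches` caps queue depth. When
// `enable_large_batch_splitting` is true, tasks larger than the open batch's
// remaining capacity are split via `split_input_task_func`, and
// `max_execution_batch_size` caps the size of a processed batch. When
// `enable_priority_queue` is true, sheddable tasks are routed to a separate
// low-priority task queue governed by `low_priority_queue_options` and the
// mixed-priority batching policy.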
struct QueueOptions {
size_t input_batch_size_limit = 1000;
int64_t batch_timeout_micros = 0;
size_t max_enqueued_batches = 10;
bool enable_large_batch_splitting = false;
std::function<Status(std::unique_ptr<TaskType>* input_task,
int first_output_task_size, int input_batch_size_limit,
std::vector<std::unique_ptr<TaskType>>* output_tasks)>
split_input_task_func;
size_t max_execution_batch_size = 1000;
std::vector<int32> allowed_batch_sizes;
bool disable_padding = false;
string batch_padding_policy = string(kPadUpPolicy);
ModelBatchStats* model_batch_stats = nullptr;
bool enable_priority_queue = false;
struct PriorityQueueOptions {
size_t max_execution_batch_size = 0;
int64_t batch_timeout_micros = 0;
size_t input_batch_size_limit = 0;
size_t max_enqueued_batches = 0;
std::vector<int32> allowed_batch_sizes;
};
PriorityQueueOptions high_priority_queue_options;
PriorityQueueOptions low_priority_queue_options;
MixedPriorityBatchingPolicy mixed_priority_batching_policy =
MixedPriorityBatchingPolicy::kLowPriorityPaddingWithMaxBatchSize;
};
virtual Status AddQueue(const QueueOptions& options,
ProcessBatchCallback process_batch_callback,
std::unique_ptr<BatchScheduler<TaskType>>* queue);
protected:
explicit SharedBatchScheduler(const Options& options);
private:
void GetNextWorkItem_Locked(internal::Queue<TaskType>** queue_for_batch_out,
BatchTaskUniquePtr* batch_to_process_out)
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_);
void ThreadLogic();
Status AddQueueAfterRewritingOptions(
const QueueOptions& options, ProcessBatchCallback process_batch_callback,
std::unique_ptr<BatchScheduler<TaskType>>* queue);
static bool BatchExists(const BatchTaskUniquePtr& batch_to_process);
const Options options_;
mutex mu_;
using QueueList = std::list<std::unique_ptr<internal::Queue<TaskType>>>;
QueueList queues_ TF_GUARDED_BY(mu_);
typename QueueList::iterator next_queue_to_schedule_ TF_GUARDED_BY(mu_);
condition_variable schedulable_batch_cv_;
std::vector<std::unique_ptr<PeriodicFunction>> batch_threads_;
SharedBatchScheduler(const SharedBatchScheduler&) = delete;
void operator=(const SharedBatchScheduler&) = delete;
};
namespace internal {
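// A single logical queue managed by a SharedBatchScheduler. Accumulates tasks
// into batches, decides when a batch becomes schedulable, and hands ready
// batches to the scheduler's worker threads. Thread-safe.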
template <typename TaskType>
class Queue {
public:
using ProcessBatchCallbackWithoutPaddingTasks =
std::function<void(std::unique_ptr<Batch<TaskType>>)>;
using ProcessBatchCallbackWithPaddingTasks =
std::function<void(std::unique_ptr<Batch<TaskType>>,
std::vector<std::unique_ptr<TaskType>>)>;
using ProcessBatchCallback =
std::variant<ProcessBatchCallbackWithoutPaddingTasks,
ProcessBatchCallbackWithPaddingTasks>;
using SchedulableBatchCallback = std::function<void()>;
using SplitInputTaskIntoSubtasksCallback = std::function<Status(
std::unique_ptr<TaskType>* input_task, int open_batch_remaining_slot,
int max_execution_batch_size,
std::vector<std::unique_ptr<TaskType>>* output_tasks)>;
Queue(const typename SharedBatchScheduler<TaskType>::QueueOptions& options,
Env* env, ProcessBatchCallback process_batch_callback,
SchedulableBatchCallback schedulable_batch_callback);
~Queue();
Status Schedule(std::unique_ptr<TaskType>* task);
size_t NumEnqueuedTasks() const;
size_t SchedulingCapacity() const;
size_t max_task_size() const { return options_.input_batch_size_limit; }
size_t max_execution_batch_size() const { return max_execution_batch_size_; }
typename SharedBatchScheduler<TaskType>::BatchTaskUniquePtr ScheduleBatch();
std::vector<std::unique_ptr<TaskType>> GetLowPriorityTasksForPadding(
size_t batch_size);
void ProcessBatch(std::unique_ptr<Batch<TaskType>> batch,
std::vector<std::unique_ptr<TaskType>> padding_task);
bool IsEmpty() const;
void CloseAndWaitUntilEmpty();
bool closed() const TF_NO_THREAD_SAFETY_ANALYSIS { return closed_.load(); }
private:
static size_t GetMaxExecutionBatchSize(
const typename SharedBatchScheduler<TaskType>::QueueOptions& options) {
if (options.enable_large_batch_splitting) {
return options.max_execution_batch_size;
} else {
return options.input_batch_size_limit;
}
}
bool IsEmptyInternal() const TF_EXCLUSIVE_LOCKS_REQUIRED(mu_);
bool IsLowPriorityTask(std::unique_ptr<TaskType>* task);
Status ScheduleWithoutOrEagerSplitImpl(std::unique_ptr<TaskType>* task)
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_);
void StartNewBatch() TF_EXCLUSIVE_LOCKS_REQUIRED(mu_);
Status SplitInputBatchIntoSubtasks(
std::unique_ptr<TaskType>* input_task,
std::vector<std::unique_ptr<TaskType>>* output_tasks)
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_);
bool IsOpenBatchSchedulable() const TF_EXCLUSIVE_LOCKS_REQUIRED(mu_);
std::unique_ptr<Batch<TaskType>> ScheduleLowPriorityBatch()
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_);
size_t SchedulingCapacityInternal() const TF_EXCLUSIVE_LOCKS_REQUIRED(mu_);
Status ValidateBatchTaskQueueCapacity(TaskType* task) const
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_);
Status ValidateLowPriorityTaskQueueCapacity(const TaskType& task) const
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_);
size_t tail_batch_task_size() const TF_EXCLUSIVE_LOCKS_REQUIRED(mu_);
int64 num_enqueued_batches() const TF_EXCLUSIVE_LOCKS_REQUIRED(mu_);
std::deque<std::unique_ptr<Batch<TaskType>>>& GetBatches()
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_);
const std::deque<std::unique_ptr<Batch<TaskType>>>& GetBatches() const
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_);
TaskQueue<TaskType>& GetLowPriorityTaskQueue()
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_);
std::vector<std::unique_ptr<TaskType>> GetLowPriorityTasks(size_t size);
const typename SharedBatchScheduler<TaskType>::QueueOptions options_;
Env* env_;
const size_t max_execution_batch_size_;
ProcessBatchCallback process_batch_callback_;
SchedulableBatchCallback schedulable_batch_callback_;
mutable mutex mu_;
std::atomic<bool> closed_ TF_GUARDED_BY(mu_){false};
TaskQueue<TaskType> low_priority_tasks_ TF_GUARDED_BY(mu_);
std::deque<std::unique_ptr<Batch<TaskType>>> low_priority_batches_
TF_GUARDED_BY(mu_);
std::deque<std::unique_ptr<Batch<TaskType>>> high_priority_batches_
TF_GUARDED_BY(mu_);
uint64 traceme_context_id_counter_ TF_GUARDED_BY(mu_) = 0;
uint64 open_batch_start_time_micros_ TF_GUARDED_BY(mu_);
bool schedulable_batch_ TF_GUARDED_BY(mu_) = false;
int num_batches_being_processed_ TF_GUARDED_BY(mu_) = 0;
Notification* empty_notification_ TF_GUARDED_BY(mu_) = nullptr;
Queue(const Queue&) = delete;
void operator=(const Queue&) = delete;
};
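// The handle returned to AddQueue() callers. Keeps the scheduler alive via a
// shared_ptr and forwards BatchScheduler calls to the underlying Queue; its
// destructor closes the queue and waits until all of its batches have been
// processed.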
template <typename TaskType>
class QueueHandle : public BatchScheduler<TaskType> {
public:
QueueHandle(std::shared_ptr<SharedBatchScheduler<TaskType>> scheduler,
Queue<TaskType>* queue);
~QueueHandle() override;
Status Schedule(std::unique_ptr<TaskType>* task) override;
size_t NumEnqueuedTasks() const override;
size_t SchedulingCapacity() const override;
size_t max_task_size() const override { return queue_->max_task_size(); }
private:
std::shared_ptr<SharedBatchScheduler<TaskType>> scheduler_;
Queue<TaskType>* queue_;
QueueHandle(const QueueHandle&) = delete;
void operator=(const QueueHandle&) = delete;
};
}  // namespace internal
template <typename TaskType>
Status SharedBatchScheduler<TaskType>::Create(
const Options& options,
std::shared_ptr<SharedBatchScheduler<TaskType>>* scheduler) {
if (options.num_batch_threads < 1) {
return errors::InvalidArgument("num_batch_threads must be positive; was ",
options.num_batch_threads);
}
scheduler->reset(new SharedBatchScheduler<TaskType>(options));
return absl::OkStatus();
}
template <typename TaskType>
SharedBatchScheduler<TaskType>::~SharedBatchScheduler() {
for (;;) {
{
mutex_lock l(mu_);
if (queues_.empty()) {
break;
}
}
const int64_t kSleepTimeMicros = 100;
options_.env->SleepForMicroseconds(kSleepTimeMicros);
}
batch_threads_.clear();
}
template <typename TaskType>
Status SharedBatchScheduler<TaskType>::AddQueue(
const QueueOptions& options, ProcessBatchCallback process_batch_callback,
std::unique_ptr<BatchScheduler<TaskType>>* queue) {
QueueOptions rewrite_options = options;
if ((!rewrite_options.enable_large_batch_splitting) &&
rewrite_options.max_enqueued_batches == 0) {
rewrite_options.max_enqueued_batches = 1;
}
return AddQueueAfterRewritingOptions(rewrite_options, process_batch_callback,
queue);
}
template <typename TaskType>
Status SharedBatchScheduler<TaskType>::AddQueueAfterRewritingOptions(
const QueueOptions& options, ProcessBatchCallback process_batch_callback,
std::unique_ptr<BatchScheduler<TaskType>>* queue) {
if (options.input_batch_size_limit == 0) {
return errors::InvalidArgument(
"input_batch_size_limit must be positive; was ",
options.input_batch_size_limit);
}
if (options.batch_timeout_micros < 0) {
return errors::InvalidArgument(
"batch_timeout_micros must be non-negative; was ",
options.batch_timeout_micros);
}
if (options.max_enqueued_batches == 0) {
return errors::InvalidArgument(
"max_enqueued_batches must be positive; was ",
options.max_enqueued_batches);
}
if (options.enable_large_batch_splitting &&
options.split_input_task_func == nullptr) {
return errors::InvalidArgument(
"split_input_task_func must be specified when split_input_task is "
"true: ",
options.enable_large_batch_splitting);
}
if (options.enable_large_batch_splitting &&
(options.input_batch_size_limit < options.max_execution_batch_size)) {
return errors::InvalidArgument(
"When enable_large_batch_splitting is true, input_batch_size_limit "
"must be "
"greater than or equal to max_execution_batch_size.",
options.enable_large_batch_splitting, options.input_batch_size_limit,
options.max_execution_batch_size);
}
auto schedulable_batch_callback = [this] {
mutex_lock l(mu_);
schedulable_batch_cv_.notify_one();
};
auto internal_queue =
std::unique_ptr<internal::Queue<TaskType>>(new internal::Queue<TaskType>(
options, options_.env, process_batch_callback,
schedulable_batch_callback));
auto handle = std::unique_ptr<BatchScheduler<TaskType>>(
new internal::QueueHandle<TaskType>(this->shared_from_this(),
internal_queue.get()));
{
mutex_lock l(mu_);
queues_.push_back(std::move(internal_queue));
if (next_queue_to_schedule_ == queues_.end()) {
next_queue_to_schedule_ = queues_.begin();
}
}
*queue = std::move(handle);
return absl::OkStatus();
}
template <typename TaskType>
SharedBatchScheduler<TaskType>::SharedBatchScheduler(const Options& options)
: options_(options), next_queue_to_schedule_(queues_.end()) {
PeriodicFunction::Options periodic_fn_options;
periodic_fn_options.thread_name_prefix =
strings::StrCat(options.thread_pool_name, "_");
for (int i = 0; i < options.num_batch_threads; ++i) {
std::unique_ptr<PeriodicFunction> thread(new PeriodicFunction(
[this] { this->ThreadLogic(); },
/* interval_micros= */ 0, periodic_fn_options));
batch_threads_.push_back(std::move(thread));
}
}
template <typename TaskType>
bool SharedBatchScheduler<TaskType>::BatchExists(
const BatchTaskUniquePtr& batch_to_process) {
return batch_to_process != nullptr;
}
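// Scans the queues round-robin, starting at next_queue_to_schedule_, for at
// most one full pass. Returns the first schedulable batch found (or null), and
// garbage-collects queues that are closed and empty along the way.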
template <typename TaskType>
void SharedBatchScheduler<TaskType>::GetNextWorkItem_Locked(
internal::Queue<TaskType>** queue_for_batch_out,
BatchTaskUniquePtr* batch_to_process_out) {
BatchTaskUniquePtr batch_to_process;
internal::Queue<TaskType>* queue_for_batch = nullptr;
const int num_queues = queues_.size();
for (int num_queues_tried = 0;
!BatchExists(batch_to_process) && num_queues_tried < num_queues;
++num_queues_tried) {
DCHECK(next_queue_to_schedule_ != queues_.end());
const bool queue_closed = (*next_queue_to_schedule_)->closed();
batch_to_process = (*next_queue_to_schedule_)->ScheduleBatch();
if (BatchExists(batch_to_process)) {
queue_for_batch = next_queue_to_schedule_->get();
}
if (queue_closed && (*next_queue_to_schedule_)->IsEmpty() &&
!BatchExists(batch_to_process)) {
DCHECK_NE(queue_for_batch, next_queue_to_schedule_->get());
next_queue_to_schedule_ = queues_.erase(next_queue_to_schedule_);
} else {
++next_queue_to_schedule_;
}
if (next_queue_to_schedule_ == queues_.end() && !queues_.empty()) {
next_queue_to_schedule_ = queues_.begin();
}
}
*queue_for_batch_out = queue_for_batch;
*batch_to_process_out = std::move(batch_to_process);
}
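// Body of each worker thread: wait until some queue yields a batch (or all
// queues are gone), then process that batch outside the scheduler lock,
// padding it with low-priority tasks if the queue's policy calls for that.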
template <typename TaskType>
void SharedBatchScheduler<TaskType>::ThreadLogic() {
BatchTaskUniquePtr batch_to_process;
internal::Queue<TaskType>* queue_for_batch = nullptr;
{
mutex_lock l(mu_);
while (true) {
GetNextWorkItem_Locked(&queue_for_batch, &batch_to_process);
if (BatchExists(batch_to_process)) break;
const int64_t kTimeoutMillis = 1;
WaitForMilliseconds(&l, &schedulable_batch_cv_, kTimeoutMillis);
if (queues_.empty()) return;
}
}
size_t batch_size_to_schedule = batch_to_process->size();
queue_for_batch->ProcessBatch(
std::move(batch_to_process),
queue_for_batch->GetLowPriorityTasksForPadding(batch_size_to_schedule));
}
namespace internal {
template <typename TaskType>
Queue<TaskType>::Queue(
const typename SharedBatchScheduler<TaskType>::QueueOptions& options,
Env* env, ProcessBatchCallback process_batch_callback,
SchedulableBatchCallback schedulable_batch_callback)
: options_(options),
env_(env),
max_execution_batch_size_(GetMaxExecutionBatchSize(options_)),
process_batch_callback_(process_batch_callback),
schedulable_batch_callback_(schedulable_batch_callback) {
traceme_context_id_counter_ = (absl::GetCurrentTimeNanos() & 0xFFFFFFFF)
<< 32;
GetBatches().emplace_back(new Batch<TaskType>);
}
template <typename TaskType>
Queue<TaskType>::~Queue() {
mutex_lock l(mu_);
DCHECK(IsEmptyInternal());
GetBatches().back()->Close();
}
template <typename TaskType>
bool Queue<TaskType>::IsLowPriorityTask(std::unique_ptr<TaskType>* task) {
if (!options_.enable_priority_queue) {
return false;
}
if constexpr (std::is_base_of_v<BatchTask, TaskType>) {
return ((*task)->criticality() ==
tsl::criticality::Criticality::kSheddablePlus ||
(*task)->criticality() ==
tsl::criticality::Criticality::kSheddable);
}
return false;
}
template <typename TaskType>
Status Queue<TaskType>::ScheduleWithoutOrEagerSplitImpl(
std::unique_ptr<TaskType>* task) {
TF_RETURN_IF_ERROR(ValidateBatchTaskQueueCapacity((*task).get()));
std::deque<std::unique_ptr<Batch<TaskType>>>& batches = GetBatches();
const int64_t open_batch_remaining_slot =
max_execution_batch_size() - batches.back()->size();
const int64_t input_task_size = (*task)->size();
std::vector<std::unique_ptr<TaskType>> output_tasks;
if (input_task_size <= open_batch_remaining_slot ||
!options_.enable_large_batch_splitting) {
output_tasks.push_back(std::move(*task));
} else {
TF_RETURN_IF_ERROR(SplitInputBatchIntoSubtasks(task, &output_tasks));
}
for (int i = 0; i < output_tasks.size(); ++i) {
if (batches.back()->size() + output_tasks[i]->size() >
max_execution_batch_size()) {
StartNewBatch();
}
if (batches.back()->empty()) {
open_batch_start_time_micros_ = env_->NowMicros();
}
tsl::profiler::TraceMeProducer trace_me(
[&output_tasks, i] {
return profiler::TraceMeEncode("ScheduleOutputTask",
{{"size", output_tasks[i]->size()}});
},
tsl::profiler::ContextType::kSharedBatchScheduler,
batches.back()->traceme_context_id());
batches.back()->AddTask(std::move(output_tasks[i]));
}
return absl::OkStatus();
}
template <typename TaskType>
Status Queue<TaskType>::Schedule(std::unique_ptr<TaskType>* task) {
const bool large_batch_splitting = options_.enable_large_batch_splitting;
tsl::profiler::TraceMe trace_me([task, large_batch_splitting] {
return profiler::TraceMeEncode(
large_batch_splitting ? "ScheduleWithEagerSplit"
: "ScheduleWithoutSplit",
{{"batching_input_task_size", (*task)->size()}});
});
bool notify_of_schedulable_batch = false;
{
mutex_lock l(mu_);
DCHECK(!closed_);
if (IsLowPriorityTask(task)) {
TF_RETURN_IF_ERROR(ValidateLowPriorityTaskQueueCapacity(**task));
low_priority_tasks_.AddTask(std::move(*task), env_->NowMicros());
} else {
TF_RETURN_IF_ERROR(ScheduleWithoutOrEagerSplitImpl(task));
}
if (!schedulable_batch_) {
if (GetBatches().size() > 1 || IsOpenBatchSchedulable()) {
schedulable_batch_ = true;
notify_of_schedulable_batch = true;
}
}
}
if (notify_of_schedulable_batch) {
schedulable_batch_callback_();
}
return absl::OkStatus();
}
template <typename TaskType>
size_t Queue<TaskType>::NumEnqueuedTasks() const {
size_t num_enqueued_tasks = 0;
mutex_lock l(mu_);
for (const auto& batch : GetBatches()) {
num_enqueued_tasks += batch->num_tasks();
}
return num_enqueued_tasks + low_priority_tasks_.num_tasks();
}
template <typename TaskType>
size_t Queue<TaskType>::SchedulingCapacity() const {
mutex_lock l(mu_);
return SchedulingCapacityInternal();
}
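// Capacity is the room left in the open (tail) batch plus one full
// max_execution_batch_size for every additional batch the queue may still
// enqueue: (max_enqueued_batches - num_enqueued_batches) * batch size limit,
// plus (batch size limit - tail batch size).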
template <typename TaskType>
size_t Queue<TaskType>::SchedulingCapacityInternal() const {
const int64 num_new_batches_schedulable =
static_cast<int64_t>(options_.max_enqueued_batches) -
this->num_enqueued_batches();
const int64 execution_batch_size_limit = max_execution_batch_size();
const int64 open_batch_capacity =
execution_batch_size_limit - this->tail_batch_task_size();
return (num_new_batches_schedulable * execution_batch_size_limit) +
open_batch_capacity;
}
template <typename TaskType>
Status Queue<TaskType>::ValidateBatchTaskQueueCapacity(TaskType* task) const {
if (task->size() > options_.input_batch_size_limit) {
return absl::InvalidArgumentError(absl::StrFormat(
"Task size %d is larger than maximum input batch size %d", task->size(),
options_.input_batch_size_limit));
}
if (options_.enable_large_batch_splitting) {
if (task->size() > SchedulingCapacityInternal()) {
return errors::Unavailable(
"The batch scheduling queue to which this task was submitted is "
"full; task size is ",
task->size(), " but scheduling capacity is only ",
SchedulingCapacityInternal(),
" (num_enqueued_batches=", num_enqueued_batches(),
", max_enqueued_batches=", options_.max_enqueued_batches,
", open_batch_size=", tail_batch_task_size(),
", max_execution_batch_size=", max_execution_batch_size(), ")");
}
return absl::OkStatus();
}
const std::deque<std::unique_ptr<Batch<TaskType>>>& batches = GetBatches();
if (batches.back()->size() + task->size() > options_.input_batch_size_limit) {
if (batches.size() >= options_.max_enqueued_batches) {
return errors::Unavailable(
"The batch scheduling queue to which this task was submitted is "
"full; currently ",
batches.size(), " batches enqueued and max_enqueued_batches is ",
options_.max_enqueued_batches);
}
}
return absl::OkStatus();
}
template <typename TaskType>
Status Queue<TaskType>::ValidateLowPriorityTaskQueueCapacity(
const TaskType& task) const {
if (task.size() >
options_.low_priority_queue_options.max_execution_batch_size) {
return absl::UnavailableError(absl::StrFormat(
"The low priority task queue to which this task was submitted has "
"max_execution_batch_size=%d and the task size is %d",
options_.low_priority_queue_options.max_execution_batch_size,
task.size()));
}
if (low_priority_tasks_.size() + task.size() >
options_.low_priority_queue_options.max_enqueued_batches *
options_.low_priority_queue_options.max_execution_batch_size) {
return absl::UnavailableError(absl::StrFormat(
"The low priority task queue to which this task was submitted does not "
"have the capcity to handle this task; currently the low priority "
"queue has %d tasks enqueued and the submitted task size is %d while "
"max_enqueued_batches=%d and max_execution_batch_size=%d",
low_priority_tasks_.size(), task.size(),
options_.low_priority_queue_options.max_enqueued_batches,
options_.low_priority_queue_options.max_execution_batch_size));
}
return absl::OkStatus();
}
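// Called by scheduler threads to obtain the next batch to process. May first
// trim the open batch down to an allowed size per the batch padding policy;
// falls back to forming a batch from low-priority tasks when no high-priority
// batch is ready.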
template <typename TaskType>
typename SharedBatchScheduler<TaskType>::BatchTaskUniquePtr
Queue<TaskType>::ScheduleBatch() {
std::unique_ptr<Batch<TaskType>> batch_to_schedule;
{
mutex_lock l(mu_);
std::deque<std::unique_ptr<Batch<TaskType>>>& batches = GetBatches();
if (batches.size() == 1 && IsOpenBatchSchedulable()) {
Batch<TaskType>& old_batch = *batches[0];
std::vector<std::unique_ptr<TaskType>> trimmed_tasks;
MaybeBatchDown(old_batch, options_.allowed_batch_sizes,
options_.disable_padding, options_.batch_padding_policy,
options_.model_batch_stats, trimmed_tasks);
StartNewBatch();
Batch<TaskType>& new_batch = *batches[1];
for (std::unique_ptr<TaskType>& task : trimmed_tasks) {
new_batch.AddTask(std::move(task));
}
if (!new_batch.empty()) {
double position = static_cast<double>(old_batch.size()) /
(old_batch.size() + new_batch.size());
open_batch_start_time_micros_ +=
(env_->NowMicros() - open_batch_start_time_micros_) * position;
}
}
if (batches.size() >= 2) {
batch_to_schedule = std::move(batches.front());
batches.pop_front();
}
if (batch_to_schedule == nullptr) {
batch_to_schedule = ScheduleLowPriorityBatch();
}
if (batch_to_schedule == nullptr) {
schedulable_batch_ = false;
return batch_to_schedule;
}
++num_batches_being_processed_;
}
return batch_to_schedule;
}
template <typename TaskType>
std::vector<std::unique_ptr<TaskType>> Queue<TaskType>::GetLowPriorityTasks(
size_t size) {
std::vector<std::unique_ptr<TaskType>> low_priority_tasks_to_pad;
if (!options_.enable_priority_queue || size == 0)
return low_priority_tasks_to_pad;
{
mutex_lock l(mu_);
low_priority_tasks_to_pad = GetLowPriorityTaskQueue().RemoveTask(size);
}
return low_priority_tasks_to_pad;
}
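// Decides how many low-priority tasks to use as padding for a batch of
// `batch_size`: pad up to the queue's max execution batch size, or up to the
// next allowed batch size, depending on the mixed-priority batching policy.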
template <typename TaskType>
std::vector<std::unique_ptr<TaskType>>
Queue<TaskType>::GetLowPriorityTasksForPadding(size_t batch_size) {
size_t target_batch_size;
switch (options_.mixed_priority_batching_policy) {
case MixedPriorityBatchingPolicy::kLowPriorityPaddingWithMaxBatchSize:
target_batch_size = max_execution_batch_size();
break;
case MixedPriorityBatchingPolicy::
kLowPriorityPaddingWithNextAllowedBatchSize:
target_batch_size = GetNextAllowedBatchSize(
batch_size, options_.allowed_batch_sizes, options_.disable_padding);
break;
default:
target_batch_size = 0;
break;
}
if (target_batch_size <= batch_size) {
return {};
}
return GetLowPriorityTasks(target_batch_size - batch_size);
}
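// Runs the user-supplied callback on `batch` (plus any low-priority padding
// tasks, if the callback accepts them), then signals emptiness to a pending
// CloseAndWaitUntilEmpty() caller if this was the last in-flight batch.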
template <typename TaskType>
void Queue<TaskType>::ProcessBatch(
std::unique_ptr<Batch<TaskType>> batch,
std::vector<std::unique_ptr<TaskType>> padding_task) {
tsl::profiler::TraceMeConsumer trace_me(
[&] {
return profiler::TraceMeEncode(
"ProcessBatch", {{"batch_size_before_padding", batch->size()},
{"_r", 2} });
},
tsl::profiler::ContextType::kSharedBatchScheduler,
batch->traceme_context_id());
if (std::holds_alternative<ProcessBatchCallbackWithoutPaddingTasks>(
process_batch_callback_)) {
std::get<ProcessBatchCallbackWithoutPaddingTasks>(process_batch_callback_)(
std::move(batch));
} else {
std::get<ProcessBatchCallbackWithPaddingTasks>(process_batch_callback_)(
std::move(batch), std::move(padding_task));
}
{
mutex_lock l(mu_);
--num_batches_being_processed_;
if (empty_notification_ != nullptr && IsEmptyInternal()) {
empty_notification_->Notify();
}
}
}
template <typename TaskType>
bool Queue<TaskType>::IsEmpty() const {
mutex_lock l(mu_);
return IsEmptyInternal();
}
template <typename TaskType>
void Queue<TaskType>::CloseAndWaitUntilEmpty() {
Notification empty;
{
mutex_lock l(mu_);
closed_ = true;
if (IsEmptyInternal()) {
empty.Notify();
} else {
empty_notification_ = &empty;
}
}
empty.WaitForNotification();
}
template <typename TaskType>
bool Queue<TaskType>::IsEmptyInternal() const {
const std::deque<std::unique_ptr<Batch<TaskType>>>& batches = GetBatches();
return num_batches_being_processed_ == 0 && batches.size() == 1 &&
batches.back()->empty() && low_priority_tasks_.empty();
}
template <typename TaskType>
void Queue<TaskType>::StartNewBatch() {
std::deque<std::unique_ptr<Batch<TaskType>>>& batches = GetBatches();
batches.back()->Close();
batches.emplace_back(new Batch<TaskType>(++traceme_context_id_counter_));
}
template <typename TaskType>
Status Queue<TaskType>::SplitInputBatchIntoSubtasks(
std::unique_ptr<TaskType>* input_task,
std::vector<std::unique_ptr<TaskType>>* output_tasks) {
const int open_batch_remaining_slot =
max_execution_batch_size() - this->tail_batch_task_size();
return options_.split_input_task_func(
std::move(input_task), open_batch_remaining_slot,
max_execution_batch_size(), std::move(output_tasks));
}
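// A non-empty open batch becomes schedulable once the queue is closed, the
// batch is full, or its timeout has elapsed.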
template <typename TaskType>
bool Queue<TaskType>::IsOpenBatchSchedulable() const {
Batch<TaskType>* open_batch = GetBatches().back().get();
if (open_batch->empty()) {
return false;
}
return closed_ || open_batch->size() >= max_execution_batch_size() ||
env_->NowMicros() >=
open_batch_start_time_micros_ + options_.batch_timeout_micros;
}
template <typename TaskType>
std::unique_ptr<Batch<TaskType>> Queue<TaskType>::ScheduleLowPriorityBatch() {
std::unique_ptr<Batch<TaskType>> batch_to_schedule;
if (!options_.enable_priority_queue || low_priority_tasks_.empty()) {
return batch_to_schedule;
}
if (env_->NowMicros() <
*low_priority_tasks_.EarliestTaskStartTime() +
options_.low_priority_queue_options.batch_timeout_micros &&
low_priority_tasks_.size() <
options_.low_priority_queue_options.max_execution_batch_size) {
return batch_to_schedule;
}
if (!GetBatches().empty() && !GetBatches().front()->empty()) {
return batch_to_schedule;
}
batch_to_schedule = std::make_unique<Batch<TaskType>>();
for (std::unique_ptr<TaskType>& task : low_priority_tasks_.RemoveTask(
options_.low_priority_queue_options.max_execution_batch_size)) {
batch_to_schedule->AddTask(std::move(task));
}
batch_to_schedule->Close();
return batch_to_schedule;
}
template <typename TaskType>
size_t Queue<TaskType>::tail_batch_task_size() const {
return GetBatches().back()->size();
}
template <typename TaskType>
int64 Queue<TaskType>::num_enqueued_batches() const {
return GetBatches().size();
}
template <typename TaskType>
std::deque<std::unique_ptr<Batch<TaskType>>>& Queue<TaskType>::GetBatches() {
return high_priority_batches_;
}
template <typename TaskType>
const std::deque<std::unique_ptr<Batch<TaskType>>>&
Queue<TaskType>::GetBatches() const {
return high_priority_batches_;
}
template <typename TaskType>
TaskQueue<TaskType>& Queue<TaskType>::GetLowPriorityTaskQueue() {
return low_priority_tasks_;
}
template <typename TaskType>
QueueHandle<TaskType>::QueueHandle(
std::shared_ptr<SharedBatchScheduler<TaskType>> scheduler,
Queue<TaskType>* queue)
: scheduler_(scheduler), queue_(queue) {}
template <typename TaskType>
QueueHandle<TaskType>::~QueueHandle() {
queue_->CloseAndWaitUntilEmpty();
}
template <typename TaskType>
Status QueueHandle<TaskType>::Schedule(std::unique_ptr<TaskType>* task) {
return queue_->Schedule(task);
}
template <typename TaskType>
size_t QueueHandle<TaskType>::NumEnqueuedTasks() const {
return queue_->NumEnqueuedTasks();
}
template <typename TaskType>
size_t QueueHandle<TaskType>::SchedulingCapacity() const {
return queue_->SchedulingCapacity();
}
}  // namespace internal
}  // namespace serving
}  // namespace tensorflow
#endif  // TENSORFLOW_CORE_KERNELS_BATCHING_UTIL_SHARED_BATCH_SCHEDULER_H_

Unit Test - (Ground Truth):

#include "tensorflow/core/kernels/batching_util/shared_batch_scheduler.h"
#include <cstddef>
#include <memory>
#include <string>
#include <thread>
#include <tuple>
#include <utility>
#include <vector>
#include <gtest/gtest.h>
#include "absl/base/call_once.h"
#include "absl/container/fixed_array.h"
#include "absl/status/status.h"
#include "absl/time/time.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tensorflow/core/kernels/batching_util/batch_scheduler.h"
#include "tensorflow/core/kernels/batching_util/batch_scheduler_utils.h"
#include "tensorflow/core/kernels/batching_util/fake_clock_env.h"
#include "tensorflow/core/lib/core/notification.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/cpu_info.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/status_matchers.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
#include "tensorflow/core/protobuf/error_codes.pb.h"
#include "tsl/platform/criticality.h"
namespace tensorflow {
namespace serving {
namespace {
using ::testing::HasSubstr;
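// A batching task whose size is fixed at construction. Criticality controls
// whether a priority-enabled queue treats the task as low priority.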
class FakeTask : public BatchTask {
public:
explicit FakeTask(size_t size, tsl::criticality::Criticality criticality =
tsl::criticality::Criticality::kCritical)
: size_(size), criticality_(criticality) {}
~FakeTask() override = default;
size_t size() const override { return size_; }
tsl::criticality::Criticality criticality() const override {
return criticality_;
}
private:
const size_t size_;
const tsl::criticality::Criticality criticality_;
FakeTask(const FakeTask&) = delete;
void operator=(const FakeTask&) = delete;
};
class FakeTaskWithoutCriticality {
public:
explicit FakeTaskWithoutCriticality(size_t size) : size_(size) {}
~FakeTaskWithoutCriticality() = default;
size_t size() const { return size_; }
private:
const size_t size_;
FakeTaskWithoutCriticality(const FakeTaskWithoutCriticality&) = delete;
void operator=(const FakeTaskWithoutCriticality&) = delete;
};
using Queue = BatchScheduler<FakeTask>;
using Scheduler = SharedBatchScheduler<FakeTask>;
using QueueOptions = Scheduler::QueueOptions;
using SplitFunc =
std::function<Status(std::unique_ptr<FakeTask>* input_task,
int first_output_task_size, int input_batch_size_limit,
std::vector<std::unique_ptr<FakeTask>>* output_tasks)>;
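// Creates a FakeTask of size `task_size` and submits it to `scheduler`. On
// success the scheduler takes ownership of the task.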
Status ScheduleTask(size_t task_size, BatchScheduler<FakeTask>* scheduler,
tsl::criticality::Criticality criticality =
tsl::criticality::Criticality::kCritical) {
std::unique_ptr<FakeTask> task(new FakeTask(task_size, criticality));
Status status = scheduler->Schedule(&task);
CHECK_EQ(status.ok(), task == nullptr);
return status;
}
Status ScheduleTaskWithoutCriticality(
size_t task_size, BatchScheduler<FakeTaskWithoutCriticality>* scheduler) {
std::unique_ptr<FakeTaskWithoutCriticality> task(
new FakeTaskWithoutCriticality(task_size));
Status status = scheduler->Schedule(&task);
CHECK_EQ(status.ok(), task == nullptr);
return status;
}
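// Advances the fake clock in 10-microsecond steps (between `start` and `stop`)
// so that scheduler threads blocked on timeouts under the fake Env can make
// progress during test teardown.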
std::unique_ptr<Thread> CreateFakeClockAdvancerThread(
test_util::FakeClockEnv* env, Notification* start, Notification* stop) {
return std::unique_ptr<Thread>(Env::Default()->StartThread(
{}, "FakeClockAdvancerThread", [env, start, stop] {
start->WaitForNotification();
while (!stop->HasBeenNotified()) {
env->AdvanceByMicroseconds(10);
Env::Default()->SleepForMicroseconds(10);
}
}));
}
std::shared_ptr<Scheduler> CreateSharedBatchScheduler(
int num_batch_threads, Env* env = Env::Default()) {
Scheduler::Options options;
options.num_batch_threads = num_batch_threads;
options.env = env;
std::shared_ptr<Scheduler> shared_batch_scheduler;
TF_CHECK_OK(Scheduler::Create(options, &shared_batch_scheduler));
return shared_batch_scheduler;
}
std::unique_ptr<Queue> CreateQueue(
std::shared_ptr<Scheduler> scheduler, Scheduler::QueueOptions queue_options,
internal::Queue<FakeTask>::ProcessBatchCallback process_batch_callback) {
std::unique_ptr<BatchScheduler<FakeTask>> queue;
TF_CHECK_OK(
scheduler->AddQueue(queue_options, process_batch_callback, &queue));
return queue;
}
QueueOptions CreateQueueOptions(size_t max_execution_batch_size,
size_t input_batch_size_limit,
size_t batch_timeout_micros,
size_t max_enqueued_batches,
bool enable_large_batch_splitting,
SplitFunc split_func,
bool enable_priority_queue = false) {
QueueOptions queue_options;
queue_options.max_enqueued_batches = max_enqueued_batches;
queue_options.max_execution_batch_size = max_execution_batch_size;
queue_options.input_batch_size_limit = input_batch_size_limit;
queue_options.batch_timeout_micros = batch_timeout_micros;
queue_options.enable_large_batch_splitting = enable_large_batch_splitting;
queue_options.enable_priority_queue = enable_priority_queue;
if (enable_large_batch_splitting) {
queue_options.split_input_task_func = split_func;
}
return queue_options;
}
class SharedBatchSchedulerTestBase {
public:
SharedBatchSchedulerTestBase() = default;
virtual ~SharedBatchSchedulerTestBase() = default;
protected:
QueueOptions CreateQueueOptions(size_t max_execution_batch_size,
size_t input_batch_size_limit,
size_t batch_timeout_micros,
size_t max_enqueued_batches,
bool enable_priority_queue = false) {
return tensorflow::serving::CreateQueueOptions(
max_execution_batch_size, input_batch_size_limit, batch_timeout_micros,
max_enqueued_batches, enable_input_batch_split(), get_split_func(),
enable_priority_queue);
}
virtual bool enable_input_batch_split() const = 0;
SplitFunc get_split_func() const {
if (enable_input_batch_split()) {
return
[](std::unique_ptr<FakeTask>* input_task,
int open_batch_remaining_slot, int max_batch_size,
std::vector<std::unique_ptr<FakeTask>>* output_tasks) -> Status {
std::unique_ptr<FakeTask> owned_input_task = std::move(*input_task);
const int input_task_size = owned_input_task->size();
const internal::InputSplitMetadata input_split_metadata(
input_task_size, open_batch_remaining_slot, max_batch_size);
const absl::FixedArray<int> task_sizes =
input_split_metadata.task_sizes();
const int num_batches = task_sizes.size();
output_tasks->resize(num_batches);
for (int i = 0; i < num_batches; i++) {
(*output_tasks)[i] = std::make_unique<FakeTask>(task_sizes[i]);
}
return absl::OkStatus();
};
}
return nullptr;
}
};
class SharedBatchSchedulerTest : public ::testing::TestWithParam<bool>,
public SharedBatchSchedulerTestBase {
protected:
bool enable_input_batch_split() const override { return GetParam(); }
};
TEST_P(SharedBatchSchedulerTest, Basic) {
for (int num_batch_threads : {1, 2, 3}) {
for (const bool delete_scheduler_early : {false, true}) {
for (const bool delete_queue_1_early : {false, true}) {
bool queue_0_callback_called = false;
auto queue_0_callback =
[&queue_0_callback_called](std::unique_ptr<Batch<FakeTask>> batch) {
queue_0_callback_called = true;
ASSERT_TRUE(batch->IsClosed());
ASSERT_EQ(3, batch->num_tasks());
EXPECT_EQ(1, batch->task(0).size());
EXPECT_EQ(3, batch->task(1).size());
EXPECT_EQ(5, batch->task(2).size());
};
bool queue_1_callback_called = false;
auto queue_1_callback =
[&queue_1_callback_called](std::unique_ptr<Batch<FakeTask>> batch) {
queue_1_callback_called = true;
ASSERT_TRUE(batch->IsClosed());
ASSERT_EQ(2, batch->num_tasks());
EXPECT_EQ(2, batch->task(0).size());
EXPECT_EQ(4, batch->task(1).size());
};
{
auto scheduler = CreateSharedBatchScheduler(num_batch_threads);
const size_t input_batch_size_limit = 10;
const size_t batch_timeout_micros = 1 * 1000 * 1000;
const size_t max_enqueued_batches = 2;
const auto queue_options =
CreateQueueOptions(input_batch_size_limit, input_batch_size_limit,
batch_timeout_micros, max_enqueued_batches);
auto queue_0 =
CreateQueue(scheduler, queue_options, queue_0_callback);
auto queue_1 =
CreateQueue(scheduler, queue_options, queue_1_callback);
if (delete_scheduler_early) {
scheduler = nullptr;
}
TF_ASSERT_OK(ScheduleTask(1, queue_0.get()));
TF_ASSERT_OK(ScheduleTask(2, queue_1.get()));
TF_ASSERT_OK(ScheduleTask(3, queue_0.get()));
TF_ASSERT_OK(ScheduleTask(4, queue_1.get()));
if (delete_queue_1_early) {
queue_1 = nullptr;
}
TF_ASSERT_OK(ScheduleTask(5, queue_0.get()));
}
EXPECT_TRUE(queue_0_callback_called);
EXPECT_TRUE(queue_1_callback_called);
}
}
}
}
TEST_P(SharedBatchSchedulerTest,
CallbackWithTaskVectorOkWithPriorityQueueEnabled) {
bool queue_0_callback_called = false;
auto queue_0_callback = [&queue_0_callback_called](
std::unique_ptr<Batch<FakeTask>> batch,
std::vector<std::unique_ptr<FakeTask>> tasks) {
queue_0_callback_called = true;
ASSERT_TRUE(batch->IsClosed());
ASSERT_EQ(3, batch->num_tasks());
EXPECT_EQ(1, batch->task(0).size());
EXPECT_EQ(3, batch->task(1).size());
EXPECT_EQ(5, batch->task(2).size());
EXPECT_EQ(0, tasks.size());
};
bool queue_1_callback_called = false;
auto queue_1_callback = [&queue_1_callback_called](
std::unique_ptr<Batch<FakeTask>> batch,
std::vector<std::unique_ptr<FakeTask>> tasks) {
queue_1_callback_called = true;
ASSERT_TRUE(batch->IsClosed());
ASSERT_EQ(2, batch->num_tasks());
EXPECT_EQ(2, batch->task(0).size());
EXPECT_EQ(4, batch->task(1).size());
EXPECT_EQ(0, tasks.size());
};
{
std::shared_ptr<Scheduler> scheduler =
CreateSharedBatchScheduler(3);
const QueueOptions queue_options = CreateQueueOptions(
/* max_execution_batch_size= */ 10, /* input_batch_size_limit= */ 10,
/* batch_timeout_micros= */ 1 * 1000 * 1000,
/* max_enqueued_batches= */ 2, /* enable_priority_queue= */ true);
std::unique_ptr<Queue> queue_0 =
CreateQueue(scheduler, queue_options, queue_0_callback);
std::unique_ptr<Queue> queue_1 =
CreateQueue(scheduler, queue_options, queue_1_callback);
TF_ASSERT_OK(ScheduleTask(1, queue_0.get()));
TF_ASSERT_OK(ScheduleTask(2, queue_1.get()));
TF_ASSERT_OK(ScheduleTask(3, queue_0.get()));
TF_ASSERT_OK(ScheduleTask(4, queue_1.get()));
TF_ASSERT_OK(ScheduleTask(5, queue_0.get()));
}
EXPECT_TRUE(queue_0_callback_called);
EXPECT_TRUE(queue_1_callback_called);
}
TEST_P(SharedBatchSchedulerTest,
CallbackWithTaskVectorOkWithPriorityQueueDisabled) {
bool queue_0_callback_called = false;
auto queue_0_callback = [&queue_0_callback_called](
std::unique_ptr<Batch<FakeTask>> batch,
std::vector<std::unique_ptr<FakeTask>> tasks) {
queue_0_callback_called = true;
ASSERT_TRUE(batch->IsClosed());
ASSERT_EQ(3, batch->num_tasks());
EXPECT_EQ(1, batch->task(0).size());
EXPECT_EQ(3, batch->task(1).size());
EXPECT_EQ(5, batch->task(2).size());
EXPECT_EQ(0, tasks.size());
};
bool queue_1_callback_called = false;
auto queue_1_callback = [&queue_1_callback_called](
std::unique_ptr<Batch<FakeTask>> batch,
std::vector<std::unique_ptr<FakeTask>> tasks) {
queue_1_callback_called = true;
ASSERT_TRUE(batch->IsClosed());
ASSERT_EQ(2, batch->num_tasks());
EXPECT_EQ(2, batch->task(0).size());
EXPECT_EQ(4, batch->task(1).size());
EXPECT_EQ(0, tasks.size());
};
{
std::shared_ptr<Scheduler> scheduler =
CreateSharedBatchScheduler(3);
const QueueOptions queue_options = CreateQueueOptions(
/* max_execution_batch_size= */ 10, /* input_batch_size_limit= */ 10,
/* batch_timeout_micros= */ 1 * 1000 * 1000,
/* max_enqueued_batches= */ 2, /* enable_priority_queue= */ false);
std::unique_ptr<Queue> queue_0 =
CreateQueue(scheduler, queue_options, queue_0_callback);
std::unique_ptr<Queue> queue_1 =
CreateQueue(scheduler, queue_options, queue_1_callback);
TF_ASSERT_OK(ScheduleTask(1, queue_0.get()));
TF_ASSERT_OK(ScheduleTask(2, queue_1.get()));
TF_ASSERT_OK(ScheduleTask(3, queue_0.get()));
TF_ASSERT_OK(ScheduleTask(4, queue_1.get()));
TF_ASSERT_OK(ScheduleTask(5, queue_0.get()));
}
EXPECT_TRUE(queue_0_callback_called);
EXPECT_TRUE(queue_1_callback_called);
}
TEST_P(
SharedBatchSchedulerTest,
CallbackWithTaskVectorOkWithPriorityQueueEnabledWithCriticalitylessTask) {
bool queue_0_callback_called = false;
auto queue_0_callback =
[&queue_0_callback_called](
std::unique_ptr<Batch<FakeTaskWithoutCriticality>> batch,
std::vector<std::unique_ptr<FakeTaskWithoutCriticality>> tasks) {
queue_0_callback_called = true;
ASSERT_TRUE(batch->IsClosed());
ASSERT_EQ(3, batch->num_tasks());
EXPECT_EQ(1, batch->task(0).size());
EXPECT_EQ(3, batch->task(1).size());
EXPECT_EQ(5, batch->task(2).size());
EXPECT_EQ(0, tasks.size());
};
bool queue_1_callback_called = false;
auto queue_1_callback =
[&queue_1_callback_called](
std::unique_ptr<Batch<FakeTaskWithoutCriticality>> batch,
std::vector<std::unique_ptr<FakeTaskWithoutCriticality>> tasks) {
queue_1_callback_called = true;
ASSERT_TRUE(batch->IsClosed());
ASSERT_EQ(2, batch->num_tasks());
EXPECT_EQ(2, batch->task(0).size());
EXPECT_EQ(4, batch->task(1).size());
EXPECT_EQ(0, tasks.size());
};
{
SharedBatchScheduler<FakeTaskWithoutCriticality>::Options options;
options.num_batch_threads = 3;
options.env = Env::Default();
std::shared_ptr<SharedBatchScheduler<FakeTaskWithoutCriticality>>
shared_batch_scheduler;
TF_CHECK_OK(SharedBatchScheduler<FakeTaskWithoutCriticality>::Create(
options, &shared_batch_scheduler));
SharedBatchScheduler<FakeTaskWithoutCriticality>::QueueOptions
queue_options;
queue_options.input_batch_size_limit = 10;
queue_options.batch_timeout_micros = 1000 * 1000;
queue_options.max_enqueued_batches = 2;
queue_options.enable_large_batch_splitting = enable_input_batch_split();
queue_options.split_input_task_func =
[](std::unique_ptr<FakeTaskWithoutCriticality>* input_task,
int open_batch_remaining_slot, int max_batch_size,
std::vector<std::unique_ptr<FakeTaskWithoutCriticality>>*
output_tasks) -> Status {
std::unique_ptr<FakeTaskWithoutCriticality> owned_input_task =
std::move(*input_task);
const int input_task_size = owned_input_task->size();
const internal::InputSplitMetadata input_split_metadata(
input_task_size, open_batch_remaining_slot, max_batch_size);
const absl::FixedArray<int> task_sizes =
input_split_metadata.task_sizes();
const int num_batches = task_sizes.size();
output_tasks->resize(num_batches);
for (int i = 0; i < num_batches; i++) {
(*output_tasks)[i] =
std::make_unique<FakeTaskWithoutCriticality>(task_sizes[i]);
}
return absl::OkStatus();
};
queue_options.max_execution_batch_size = 10;
queue_options.enable_priority_queue = true;
std::unique_ptr<BatchScheduler<FakeTaskWithoutCriticality>> queue_0;
TF_CHECK_OK(shared_batch_scheduler->AddQueue(queue_options,
queue_0_callback, &queue_0));
std::unique_ptr<BatchScheduler<FakeTaskWithoutCriticality>> queue_1;
TF_CHECK_OK(shared_batch_scheduler->AddQueue(queue_options,
queue_1_callback, &queue_1));
TF_ASSERT_OK(ScheduleTaskWithoutCriticality(1, queue_0.get()));
TF_ASSERT_OK(ScheduleTaskWithoutCriticality(2, queue_1.get()));
TF_ASSERT_OK(ScheduleTaskWithoutCriticality(3, queue_0.get()));
TF_ASSERT_OK(ScheduleTaskWithoutCriticality(4, queue_1.get()));
TF_ASSERT_OK(ScheduleTaskWithoutCriticality(5, queue_0.get()));
}
EXPECT_TRUE(queue_0_callback_called);
EXPECT_TRUE(queue_1_callback_called);
}
TEST_P(SharedBatchSchedulerTest, ObeyBatchSizeConstraint) {
test_util::FakeClockEnv env(Env::Default());
Notification start_teardown, stop_teardown;
std::unique_ptr<Thread> teardown_thread =
CreateFakeClockAdvancerThread(&env, &start_teardown, &stop_teardown);
mutex mu;
std::vector<std::vector<size_t>> callback_data;
Notification all_batches_processed;
auto callback = [&mu, &callback_data, &all_batches_processed](
std::unique_ptr<Batch<FakeTask>> batch) {
ASSERT_TRUE(batch->IsClosed());
std::vector<size_t> batch_data;
batch_data.reserve(batch->num_tasks());
for (int i = 0; i < batch->num_tasks(); ++i) {
batch_data.push_back(batch->mutable_task(i)->size());
}
{
mutex_lock l(mu);
callback_data.push_back(batch_data);
if (callback_data.size() == 2) {
all_batches_processed.Notify();
}
}
};
{
auto scheduler = CreateSharedBatchScheduler(2, &env);
const size_t input_batch_size_limit = 10;
const size_t batch_timeout_micros = 10 * 1000;
const size_t max_enqueued_batches = 2;
auto queue = CreateQueue(
scheduler,
CreateQueueOptions(input_batch_size_limit, input_batch_size_limit,
batch_timeout_micros, max_enqueued_batches),
callback);
if (enable_input_batch_split()) {
TF_ASSERT_OK(ScheduleTask(3, queue.get()));
TF_ASSERT_OK(ScheduleTask(5, queue.get()));
TF_ASSERT_OK(ScheduleTask(3, queue.get()));
TF_ASSERT_OK(ScheduleTask(1, queue.get()));
TF_ASSERT_OK(ScheduleTask(6, queue.get()));
TF_ASSERT_OK(ScheduleTask(1, queue.get()));
} else {
TF_ASSERT_OK(ScheduleTask(3, queue.get()));
TF_ASSERT_OK(ScheduleTask(5, queue.get()));
TF_ASSERT_OK(ScheduleTask(3, queue.get()));
TF_ASSERT_OK(ScheduleTask(1, queue.get()));
TF_ASSERT_OK(ScheduleTask(6, queue.get()));
}
env.AdvanceByMicroseconds(20 * 1000);
all_batches_processed.WaitForNotification();
if (enable_input_batch_split()) {
EXPECT_THAT(
callback_data,
::testing::UnorderedElementsAreArray(std::vector<std::vector<size_t>>{
std::vector<size_t>{3, 5, 2}, std::vector<size_t>{1, 1, 6, 1}}));
} else {
EXPECT_THAT(callback_data,
::testing::UnorderedElementsAreArray(
std::vector<std::vector<size_t>>{{3, 5}, {3, 1, 6}}));
}
start_teardown.Notify();
}
stop_teardown.Notify();
}
TEST_P(SharedBatchSchedulerTest, ObeysTimeout) {
test_util::FakeClockEnv env(Env::Default());
Notification start_teardown, stop_teardown;
std::unique_ptr<Thread> teardown_thread =
CreateFakeClockAdvancerThread(&env, &start_teardown, &stop_teardown);
{
Notification first_batch_processed, second_batch_processed,
third_batch_processed;
bool notify_first_batch = false, notify_second_batch = false,
notify_third_batch = false;
auto callback = [&](std::unique_ptr<Batch<FakeTask>> batch) {
ASSERT_TRUE(batch->IsClosed());
if (notify_first_batch && (!first_batch_processed.HasBeenNotified())) {
first_batch_processed.Notify();
return;
}
if (notify_second_batch && (!second_batch_processed.HasBeenNotified())) {
second_batch_processed.Notify();
return;
}
if (notify_third_batch && (!third_batch_processed.HasBeenNotified())) {
third_batch_processed.Notify();
return;
}
EXPECT_TRUE(false) << "Unexpected condition";
};
auto scheduler = CreateSharedBatchScheduler(1, &env);
const size_t input_batch_size_limit = 4;
const size_t batch_timeout_micros = 10;
const size_t max_enqueued_batches = 2;
QueueOptions options =
CreateQueueOptions(input_batch_size_limit, input_batch_size_limit,
batch_timeout_micros, max_enqueued_batches);
auto queue = CreateQueue(scheduler, options, callback);
TF_ASSERT_OK(ScheduleTask(1, queue.get()));
env.AdvanceByMicroseconds(9);
Env::Default()->SleepForMicroseconds(10 * 1000);
EXPECT_FALSE(first_batch_processed.HasBeenNotified());
notify_first_batch = true;
env.AdvanceByMicroseconds(1);
first_batch_processed.WaitForNotification();
TF_ASSERT_OK(ScheduleTask(2, queue.get()));
Env::Default()->SleepForMicroseconds(10 * 1000);
EXPECT_FALSE(second_batch_processed.HasBeenNotified());
notify_second_batch = true;
TF_ASSERT_OK(ScheduleTask(3, queue.get()));
second_batch_processed.WaitForNotification();
env.AdvanceByMicroseconds(9);
Env::Default()->SleepForMicroseconds(10 * 1000);
EXPECT_FALSE(third_batch_processed.HasBeenNotified());
notify_third_batch = true;
env.AdvanceByMicroseconds(1);
third_batch_processed.WaitForNotification();
start_teardown.Notify();
}
stop_teardown.Notify();
}
TEST_P(SharedBatchSchedulerTest, ObeysTimeoutWithRealClock) {
Notification first_batch_processed, second_batch_processed;
auto callback = [&first_batch_processed, &second_batch_processed](
std::unique_ptr<Batch<FakeTask>> batch) {
ASSERT_TRUE(batch->IsClosed());
if (batch->size() == 1) {
first_batch_processed.Notify();
} else if (batch->size() == 2) {
second_batch_processed.Notify();
} else {
EXPECT_TRUE(false) << "Unexpected batch size";
}
};
auto scheduler = CreateSharedBatchScheduler(2);
const size_t input_batch_size_limit = 10;
const size_t batch_timeout_micros = 100 * 1000;
const size_t max_enqueued_batches = 2;
auto queue = CreateQueue(
scheduler,
CreateQueueOptions(input_batch_size_limit, input_batch_size_limit,
batch_timeout_micros, max_enqueued_batches),
callback);
TF_ASSERT_OK(ScheduleTask(1, queue.get()));
first_batch_processed.WaitForNotification();
TF_ASSERT_OK(ScheduleTask(2, queue.get()));
second_batch_processed.WaitForNotification();
}
TEST_P(SharedBatchSchedulerTest,
WithZeroTimeoutBatchesScheduledAsSoonAsThreadIsAvailable) {
test_util::FakeClockEnv env(Env::Default());
Notification start_teardown, stop_teardown;
std::unique_ptr<Thread> teardown_thread =
CreateFakeClockAdvancerThread(&env, &start_teardown, &stop_teardown);
{
Notification first_batch_processed, second_batch_processed;
auto callback = [&first_batch_processed, &second_batch_processed](
std::unique_ptr<Batch<FakeTask>> batch) {
ASSERT_TRUE(batch->IsClosed());
if (batch->size() == 1) {
first_batch_processed.Notify();
} else if (batch->size() == 2) {
second_batch_processed.Notify();
} else {
EXPECT_TRUE(false) << "Unexpected batch size";
}
};
auto scheduler = CreateSharedBatchScheduler(2, &env);
const size_t batch_size_limit = 100;
const size_t batch_timeout_micros = 0;
const size_t max_enqueued_batches = 2;
auto queue = CreateQueue(
scheduler,
CreateQueueOptions(batch_size_limit, batch_size_limit,
batch_timeout_micros, max_enqueued_batches),
callback);
TF_ASSERT_OK(ScheduleTask(1, queue.get()));
first_batch_processed.WaitForNotification();
TF_ASSERT_OK(ScheduleTask(2, queue.get()));
second_batch_processed.WaitForNotification();
start_teardown.Notify();
}
stop_teardown.Notify();
}
TEST_P(SharedBatchSchedulerTest, Fairness) {
test_util::FakeClockEnv env(Env::Default());
Notification start_teardown, stop_teardown;
std::unique_ptr<Thread> teardown_thread =
CreateFakeClockAdvancerThread(&env, &start_teardown, &stop_teardown);
{
Notification queue_0_first_batch_scheduled, queue_0_first_batch_proceed,
queue_0_second_batch_scheduled;
auto queue_0_callback = [&queue_0_first_batch_scheduled,
&queue_0_first_batch_proceed,
&queue_0_second_batch_scheduled](
std::unique_ptr<Batch<FakeTask>> batch) {
if (!queue_0_first_batch_scheduled.HasBeenNotified()) {
queue_0_first_batch_scheduled.Notify();
queue_0_first_batch_proceed.WaitForNotification();
} else if (!queue_0_second_batch_scheduled.HasBeenNotified()) {
queue_0_second_batch_scheduled.Notify();
}
};
Notification queue_1_first_batch_scheduled, queue_1_first_batch_proceed;
auto queue_1_callback =
[&queue_1_first_batch_scheduled,
&queue_1_first_batch_proceed](std::unique_ptr<Batch<FakeTask>> batch) {
queue_1_first_batch_scheduled.Notify();
queue_1_first_batch_proceed.WaitForNotification();
};
auto scheduler = CreateSharedBatchScheduler(1, &env);
size_t input_batch_size_limit = 10;
QueueOptions queue_options = CreateQueueOptions(
input_batch_size_limit, input_batch_size_limit,
/* batch_timeout_micros= */ 1, /* max_enqueued_batches= */ 100);
std::vector<std::unique_ptr<BatchScheduler<FakeTask>>> queues(2);
TF_ASSERT_OK(
scheduler->AddQueue(queue_options, queue_0_callback, &queues[0]));
TF_ASSERT_OK(
scheduler->AddQueue(queue_options, queue_1_callback, &queues[1]));
TF_ASSERT_OK(ScheduleTask(10, queues[0].get()));
env.AdvanceByMicroseconds(1);
queue_0_first_batch_scheduled.WaitForNotification();
TF_ASSERT_OK(ScheduleTask(10, queues[0].get()));
TF_ASSERT_OK(ScheduleTask(10, queues[0].get()));
TF_ASSERT_OK(ScheduleTask(1, queues[1].get()));
env.AdvanceByMicroseconds(1);
queue_0_first_batch_proceed.Notify();
queue_1_first_batch_scheduled.WaitForNotification();
Env::Default()->SleepForMicroseconds(10 * 1000);
EXPECT_FALSE(queue_0_second_batch_scheduled.HasBeenNotified());
queue_1_first_batch_proceed.Notify();
start_teardown.Notify();
}
stop_teardown.Notify();
}
TEST_P(SharedBatchSchedulerTest, ConstMethods) {
for (const int max_enqueued_batches : {1, 2, 5}) {
Notification processing, proceed;
auto callback = [&processing,
&proceed](std::unique_ptr<Batch<FakeTask>> batch) {
if (!processing.HasBeenNotified()) {
processing.Notify();
}
proceed.WaitForNotification();
};
auto scheduler = CreateSharedBatchScheduler(/* num_batch_threads= */ 1);
const size_t input_batch_size_limit = 2;
const size_t batch_timeout_micros = 0;
auto queue = CreateQueue(
scheduler,
CreateQueueOptions(input_batch_size_limit, input_batch_size_limit,
batch_timeout_micros, max_enqueued_batches),
callback);
EXPECT_EQ(2, queue->max_task_size());
EXPECT_EQ(0, queue->NumEnqueuedTasks());
EXPECT_EQ(max_enqueued_batches * 2, queue->SchedulingCapacity());
TF_ASSERT_OK(ScheduleTask(2, queue.get()));
processing.WaitForNotification();
EXPECT_EQ(0, queue->NumEnqueuedTasks());
for (int i = 0; i < max_enqueued_batches; ++i) {
EXPECT_EQ(i * 2, queue->NumEnqueuedTasks());
EXPECT_EQ((max_enqueued_batches - i) * 2, queue->SchedulingCapacity());
TF_ASSERT_OK(ScheduleTask(1, queue.get()));
EXPECT_EQ((i * 2) + 1, queue->NumEnqueuedTasks());
EXPECT_EQ((max_enqueued_batches - i) * 2 - 1,
queue->SchedulingCapacity());
TF_ASSERT_OK(ScheduleTask(1, queue.get()));
}
EXPECT_EQ(max_enqueued_batches * 2, queue->NumEnqueuedTasks());
EXPECT_EQ(0, queue->SchedulingCapacity());
EXPECT_THAT(
ScheduleTask(1, queue.get()),
testing::StatusIs(error::UNAVAILABLE,
HasSubstr("The batch scheduling queue to which this "
"task was submitted is full")));
EXPECT_EQ(max_enqueued_batches * 2, queue->NumEnqueuedTasks());
EXPECT_EQ(0, queue->SchedulingCapacity());
proceed.Notify();
}
}
TEST_P(SharedBatchSchedulerTest, OneFullQueueDoesntBlockOtherQueues) {
Notification queue_0_processing, queue_0_proceed;
auto queue_0_callback = [&queue_0_processing, &queue_0_proceed](
std::unique_ptr<Batch<FakeTask>> batch) {
if (!queue_0_processing.HasBeenNotified()) {
queue_0_processing.Notify();
queue_0_proceed.WaitForNotification();
}
};
Notification queue_1_first_batch_processed, queue_1_second_batch_processed,
queue_1_third_batch_processed;
auto queue_1_callback =
[&queue_1_first_batch_processed, &queue_1_second_batch_processed,
&queue_1_third_batch_processed](std::unique_ptr<Batch<FakeTask>> batch) {
if (batch->size() == 1) {
queue_1_first_batch_processed.Notify();
} else if (batch->size() == 2) {
queue_1_second_batch_processed.Notify();
} else if (batch->size() == 3) {
queue_1_third_batch_processed.Notify();
} else {
EXPECT_TRUE(false) << "Unexpected batch size";
}
};
auto scheduler = CreateSharedBatchScheduler(/* num_batch_threads= */ 2);
const size_t input_batch_size_limit = 10;
const size_t batch_timeout_micros = 0;
const size_t max_enqueued_batches = 2;
QueueOptions queue_options =
CreateQueueOptions(input_batch_size_limit, input_batch_size_limit,
batch_timeout_micros, max_enqueued_batches);
std::unique_ptr<BatchScheduler<FakeTask>> queue_0;
TF_ASSERT_OK(scheduler->AddQueue(queue_options, queue_0_callback, &queue_0));
std::unique_ptr<BatchScheduler<FakeTask>> queue_1;
TF_ASSERT_OK(scheduler->AddQueue(queue_options, queue_1_callback, &queue_1));
TF_ASSERT_OK(ScheduleTask(1, queue_0.get()));
queue_0_processing.WaitForNotification();
Status queue_0_status;
do {
queue_0_status = ScheduleTask(1, queue_0.get());
} while (queue_0_status.ok());
EXPECT_EQ(error::UNAVAILABLE, queue_0_status.code());
TF_ASSERT_OK(ScheduleTask(1, queue_1.get()));
queue_1_first_batch_processed.WaitForNotification();
TF_ASSERT_OK(ScheduleTask(2, queue_1.get()));
queue_1_second_batch_processed.WaitForNotification();
TF_ASSERT_OK(ScheduleTask(3, queue_1.get()));
queue_1_third_batch_processed.WaitForNotification();
queue_0_proceed.Notify();
}
TEST_P(SharedBatchSchedulerTest, QueueDestructorBlocksUntilAllTasksProcessed) {
test_util::FakeClockEnv env(Env::Default());
Notification start_teardown, stop_teardown;
std::unique_ptr<Thread> teardown_thread =
CreateFakeClockAdvancerThread(&env, &start_teardown, &stop_teardown);
{
int current_batch = 0;
Notification first_callback_started;
const int kMaxEnqueuedBatches = 3;
std::vector<Notification> callback_proceed(kMaxEnqueuedBatches);
auto callback =
[&current_batch, &first_callback_started,
&callback_proceed](std::unique_ptr<Batch<FakeTask>> batch) {
if (current_batch == 0) {
first_callback_started.Notify();
}
callback_proceed[current_batch].WaitForNotification();
++current_batch;
};
auto scheduler = CreateSharedBatchScheduler(1, &env);
const size_t batch_size_limit = 10;
const size_t batch_timeout_micros = 0;
const size_t max_enqueued_batches = 2;
QueueOptions queue_options =
CreateQueueOptions(batch_size_limit, batch_size_limit,
batch_timeout_micros, max_enqueued_batches);
auto queue = CreateQueue(scheduler, queue_options, callback);
int num_enqueued_batches = 0;
TF_ASSERT_OK(ScheduleTask(10, queue.get()));
++num_enqueued_batches;
env.AdvanceByMicroseconds(1);
first_callback_started.WaitForNotification();
for (int i = 0; i < 2; ++i) {
TF_ASSERT_OK(ScheduleTask(10, queue.get()));
++num_enqueued_batches;
}
EXPECT_EQ(kMaxEnqueuedBatches, num_enqueued_batches);
EXPECT_EQ(error::UNAVAILABLE, ScheduleTask(10, queue.get()).code());
Notification destroy_queue_thread_started, queue_destroyed;
std::unique_ptr<Thread> destroy_queue_thread(Env::Default()->StartThread(
{}, "DestroyQueueThread",
[&queue, &destroy_queue_thread_started, &queue_destroyed] {
destroy_queue_thread_started.Notify();
queue = nullptr;
queue_destroyed.Notify();
}));
destroy_queue_thread_started.WaitForNotification();
for (int i = 0; i < num_enqueued_batches; ++i) {
      Env::Default()->SleepForMicroseconds(10 * 1000 /* 10 ms */);
EXPECT_FALSE(queue_destroyed.HasBeenNotified());
callback_proceed[i].Notify();
}
start_teardown.Notify();
}
stop_teardown.Notify();
}
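// max_enqueued_batches == 0 is rejected when input-batch splitting is
// enabled; without splitting the scheduler silently rewrites it to a single
// enqueued batch, so the queue reports capacity for one full input batch.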
TEST_P(SharedBatchSchedulerTest, ZeroQueueRewrittenToOneQueue) {
auto callback = [](std::unique_ptr<Batch<FakeTask>> batch) {
};
auto scheduler = CreateSharedBatchScheduler(2);
const size_t input_batch_size_limit = 10;
const size_t batch_timeout_micros = 100 * 1000;
const size_t max_enqueued_batches = 0;
std::unique_ptr<Queue> queue;
if (enable_input_batch_split()) {
EXPECT_THAT(
scheduler->AddQueue(tensorflow::serving::CreateQueueOptions(
input_batch_size_limit, input_batch_size_limit,
batch_timeout_micros, max_enqueued_batches,
enable_input_batch_split(), get_split_func()),
callback, &queue),
testing::StatusIs(error::INVALID_ARGUMENT,
"max_enqueued_batches must be positive; was 0"));
} else {
TF_ASSERT_OK(
scheduler->AddQueue(tensorflow::serving::CreateQueueOptions(
input_batch_size_limit, input_batch_size_limit,
batch_timeout_micros, max_enqueued_batches,
enable_input_batch_split(), get_split_func()),
callback, &queue));
EXPECT_EQ(queue->SchedulingCapacity(), input_batch_size_limit);
}
}
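// With kBatchDownPolicy and allowed_batch_sizes {1, 2, 4, 8}, three size-1
// tasks are trimmed down to the largest allowed size that fits (2) when the
// timeout fires; the leftover task is emitted later as a batch of size 1
// after a further (shortened) timeout.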
TEST_P(SharedBatchSchedulerTest, BatchPaddingPolicyBatchDown) {
test_util::FakeClockEnv env(Env::Default());
Notification start_teardown, stop_teardown;
std::unique_ptr<Thread> teardown_thread =
CreateFakeClockAdvancerThread(&env, &start_teardown, &stop_teardown);
{
Notification first_batch_processed;
Notification second_batch_processed;
auto callback = [&](std::unique_ptr<Batch<FakeTask>> batch) {
if (!first_batch_processed.HasBeenNotified()) {
EXPECT_EQ(batch->size(), 2);
first_batch_processed.Notify();
return;
}
if (!second_batch_processed.HasBeenNotified()) {
EXPECT_EQ(batch->size(), 1);
second_batch_processed.Notify();
return;
}
      ADD_FAILURE() << "Batch callback must not be invoked more times than expected";
};
auto scheduler = CreateSharedBatchScheduler(1, &env);
    QueueOptions options = CreateQueueOptions(
        /*max_execution_batch_size=*/10, /*input_batch_size_limit=*/10,
        /*batch_timeout_micros=*/10, /*max_enqueued_batches=*/10);
options.allowed_batch_sizes = {1, 2, 4, 8};
options.batch_padding_policy = kBatchDownPolicy;
auto queue = CreateQueue(scheduler, options, callback);
TF_ASSERT_OK(ScheduleTask(1, queue.get()));
TF_ASSERT_OK(ScheduleTask(1, queue.get()));
TF_ASSERT_OK(ScheduleTask(1, queue.get()));
env.AdvanceByMicroseconds(options.batch_timeout_micros);
first_batch_processed.WaitForNotification();
auto new_batch_timeout_micros = options.batch_timeout_micros * 2 / 3;
env.AdvanceByMicroseconds(new_batch_timeout_micros - 1);
EXPECT_FALSE(second_batch_processed.WaitForNotificationWithTimeout(
absl::Milliseconds(10)));
env.AdvanceByMicroseconds(1);
second_batch_processed.WaitForNotification();
start_teardown.Notify();
}
stop_teardown.Notify();
}
INSTANTIATE_TEST_SUITE_P(Parameter, SharedBatchSchedulerTest,
::testing::Bool());
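// Priority tests are parameterized over (input-batch splitting enabled,
// mixed-priority batching policy).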
class SharedBatchSchedulerPriorityTest
: public ::testing::TestWithParam<
std::tuple<bool, MixedPriorityBatchingPolicy>>,
public SharedBatchSchedulerTestBase {
protected:
bool enable_input_batch_split() const override {
return std::get<0>(GetParam());
}
MixedPriorityBatchingPolicy mixed_priority_batching_policy() const {
return std::get<1>(GetParam());
}
};
TEST_P(SharedBatchSchedulerPriorityTest,
InvalidLowPriorityTaskWithPriorityQueueEnabled) {
bool queue_callback_called = false;
auto queue_callback = [&queue_callback_called](
std::unique_ptr<Batch<FakeTask>> batch,
std::vector<std::unique_ptr<FakeTask>> tasks) {
queue_callback_called = true;
};
{
std::shared_ptr<Scheduler> scheduler =
CreateSharedBatchScheduler(3);
    QueueOptions queue_options = CreateQueueOptions(
        /*max_execution_batch_size=*/100, /*input_batch_size_limit=*/100,
        /*batch_timeout_micros=*/1 * 1000 * 1000, /*max_enqueued_batches=*/2,
        /*enable_priority_queue=*/true);
queue_options.low_priority_queue_options.max_execution_batch_size = 1;
queue_options.low_priority_queue_options.batch_timeout_micros =
1 * 1000 * 1000;
queue_options.low_priority_queue_options.input_batch_size_limit = 1;
queue_options.low_priority_queue_options.max_enqueued_batches = 2;
queue_options.mixed_priority_batching_policy =
mixed_priority_batching_policy();
std::unique_ptr<Queue> queue =
CreateQueue(scheduler, queue_options, queue_callback);
EXPECT_THAT(
ScheduleTask(10, queue.get(),
tsl::criticality::Criticality::kSheddablePlus),
testing::StatusIs(
absl::StatusCode::kUnavailable,
HasSubstr(
"The low priority task queue to which this task was submitted "
"has max_execution_batch_size=1 and the task size is 10")));
}
EXPECT_FALSE(queue_callback_called);
}
TEST_P(SharedBatchSchedulerPriorityTest,
InvalidLowPriorityTaskWithQueueFullWithPriorityQueueEnabledNew) {
Notification processing, proceed;
auto queue_callback = [&processing, &proceed](
std::unique_ptr<Batch<FakeTask>> batch,
std::vector<std::unique_ptr<FakeTask>> tasks) {
if (!processing.HasBeenNotified()) {
processing.Notify();
}
proceed.WaitForNotification();
};
std::shared_ptr<Scheduler> scheduler =
CreateSharedBatchScheduler(1);
  QueueOptions queue_options = CreateQueueOptions(
      /*max_execution_batch_size=*/100, /*input_batch_size_limit=*/100,
      /*batch_timeout_micros=*/1 * 1000 * 1000, /*max_enqueued_batches=*/2,
      /*enable_priority_queue=*/true);
queue_options.low_priority_queue_options.max_execution_batch_size = 10;
queue_options.low_priority_queue_options.batch_timeout_micros =
1 * 1000 * 1000;
queue_options.low_priority_queue_options.input_batch_size_limit = 10;
queue_options.low_priority_queue_options.max_enqueued_batches = 2;
queue_options.mixed_priority_batching_policy =
mixed_priority_batching_policy();
std::unique_ptr<Queue> queue =
CreateQueue(scheduler, queue_options, queue_callback);
TF_ASSERT_OK(ScheduleTask(5, queue.get(),
tsl::criticality::Criticality::kCriticalPlus));
processing.WaitForNotification();
ASSERT_EQ(0, queue->NumEnqueuedTasks());
TF_ASSERT_OK(ScheduleTask(10, queue.get(),
tsl::criticality::Criticality::kSheddablePlus));
ASSERT_EQ(1, queue->NumEnqueuedTasks());
TF_ASSERT_OK(ScheduleTask(10, queue.get(),
tsl::criticality::Criticality::kSheddablePlus));
ASSERT_EQ(2, queue->NumEnqueuedTasks());
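  // The expected substring below, including the "capcity" misspelling,
  // matches the error message produced by the scheduler verbatim.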
EXPECT_THAT(
ScheduleTask(1, queue.get(),
tsl::criticality::Criticality::kSheddablePlus),
testing::StatusIs(
absl::StatusCode::kUnavailable,
HasSubstr("The low priority task queue to which this task was "
"submitted does not have the capcity to handle this task; "
"currently the low priority queue has 20 tasks enqueued "
"and the submitted task size is 1 while "
"max_enqueued_batches=2 and max_execution_batch_size=10")));
proceed.Notify();
}
TEST_P(SharedBatchSchedulerPriorityTest,
CallbackWithTaskVectorOkWithPriorityQueueDisabledWithPrioritySet) {
bool queue_callback_called = false;
auto queue_callback = [&queue_callback_called](
std::unique_ptr<Batch<FakeTask>> batch,
std::vector<std::unique_ptr<FakeTask>> tasks) {
queue_callback_called = true;
ASSERT_TRUE(batch->IsClosed());
ASSERT_EQ(3, batch->num_tasks());
EXPECT_EQ(1, batch->task(0).size());
EXPECT_EQ(3, batch->task(1).size());
EXPECT_EQ(5, batch->task(2).size());
EXPECT_EQ(0, tasks.size());
};
{
std::shared_ptr<Scheduler> scheduler =
CreateSharedBatchScheduler(3);
    const QueueOptions queue_options = CreateQueueOptions(
        /*max_execution_batch_size=*/10, /*input_batch_size_limit=*/10,
        /*batch_timeout_micros=*/1 * 1000 * 1000, /*max_enqueued_batches=*/2,
        /*enable_priority_queue=*/false);
std::unique_ptr<Queue> queue =
CreateQueue(scheduler, queue_options, queue_callback);
TF_ASSERT_OK(ScheduleTask(1, queue.get(),
tsl::criticality::Criticality::kCriticalPlus));
TF_ASSERT_OK(ScheduleTask(3, queue.get(),
tsl::criticality::Criticality::kCriticalPlus));
TF_ASSERT_OK(ScheduleTask(5, queue.get(),
tsl::criticality::Criticality::kSheddable));
}
EXPECT_TRUE(queue_callback_called);
}
TEST_P(SharedBatchSchedulerPriorityTest,
LowPriorityTaskOnlyAtMaxBatchSizeWithPriorityQueueEnabled) {
bool queue_callback_called = false;
auto queue_callback = [&queue_callback_called](
std::unique_ptr<Batch<FakeTask>> batch,
std::vector<std::unique_ptr<FakeTask>> tasks) {
queue_callback_called = true;
ASSERT_TRUE(batch->IsClosed());
ASSERT_EQ(3, batch->num_tasks());
EXPECT_EQ(1, batch->task(0).size());
EXPECT_EQ(3, batch->task(1).size());
EXPECT_EQ(5, batch->task(2).size());
EXPECT_TRUE(tasks.empty());
};
{
std::shared_ptr<Scheduler> scheduler =
CreateSharedBatchScheduler(3);
    QueueOptions queue_options = CreateQueueOptions(
        /*max_execution_batch_size=*/100, /*input_batch_size_limit=*/100,
        /*batch_timeout_micros=*/1 * 1000 * 1000, /*max_enqueued_batches=*/2,
        /*enable_priority_queue=*/true);
queue_options.low_priority_queue_options.max_execution_batch_size = 9;
queue_options.low_priority_queue_options.batch_timeout_micros =
1 * 1000 * 1000;
queue_options.low_priority_queue_options.input_batch_size_limit = 10;
queue_options.low_priority_queue_options.max_enqueued_batches = 2;
queue_options.mixed_priority_batching_policy =
mixed_priority_batching_policy();
std::unique_ptr<Queue> queue =
CreateQueue(scheduler, queue_options, queue_callback);
TF_ASSERT_OK(ScheduleTask(1, queue.get(),
tsl::criticality::Criticality::kSheddablePlus));
TF_ASSERT_OK(ScheduleTask(3, queue.get(),
tsl::criticality::Criticality::kSheddablePlus));
TF_ASSERT_OK(ScheduleTask(5, queue.get(),
tsl::criticality::Criticality::kSheddable));
}
EXPECT_TRUE(queue_callback_called);
}
TEST_P(SharedBatchSchedulerPriorityTest,
LowPriorityTaskOnlyAtTimeoutWithPriorityQueueEnabled) {
bool queue_callback_called = false;
auto queue_callback = [&queue_callback_called](
std::unique_ptr<Batch<FakeTask>> batch,
std::vector<std::unique_ptr<FakeTask>> tasks) {
queue_callback_called = true;
ASSERT_TRUE(batch->IsClosed());
ASSERT_EQ(3, batch->num_tasks());
EXPECT_EQ(1, batch->task(0).size());
EXPECT_EQ(3, batch->task(1).size());
EXPECT_EQ(5, batch->task(2).size());
EXPECT_TRUE(tasks.empty());
};
{
std::shared_ptr<Scheduler> scheduler =
CreateSharedBatchScheduler(3);
    QueueOptions queue_options = CreateQueueOptions(
        /*max_execution_batch_size=*/100, /*input_batch_size_limit=*/100,
        /*batch_timeout_micros=*/1 * 1000 * 1000, /*max_enqueued_batches=*/2,
        /*enable_priority_queue=*/true);
queue_options.low_priority_queue_options.max_execution_batch_size = 20;
queue_options.low_priority_queue_options.batch_timeout_micros =
1 * 1000 * 1000;
queue_options.low_priority_queue_options.input_batch_size_limit = 10;
queue_options.low_priority_queue_options.max_enqueued_batches = 2;
queue_options.mixed_priority_batching_policy =
mixed_priority_batching_policy();
std::unique_ptr<Queue> queue =
CreateQueue(scheduler, queue_options, queue_callback);
TF_ASSERT_OK(ScheduleTask(1, queue.get(),
tsl::criticality::Criticality::kSheddablePlus));
TF_ASSERT_OK(ScheduleTask(3, queue.get(),
tsl::criticality::Criticality::kSheddablePlus));
TF_ASSERT_OK(ScheduleTask(5, queue.get(),
tsl::criticality::Criticality::kSheddable));
}
EXPECT_TRUE(queue_callback_called);
}
INSTANTIATE_TEST_SUITE_P(
    Parameter, SharedBatchSchedulerPriorityTest,
    ::testing::Values(
        std::make_tuple(
            /*enable_input_batch_split=*/true,
            MixedPriorityBatchingPolicy::kLowPriorityPaddingWithMaxBatchSize),
        std::make_tuple(/*enable_input_batch_split=*/true,
                        MixedPriorityBatchingPolicy::
                            kLowPriorityPaddingWithNextAllowedBatchSize),
        std::make_tuple(
            /*enable_input_batch_split=*/false,
            MixedPriorityBatchingPolicy::kLowPriorityPaddingWithMaxBatchSize),
        std::make_tuple(/*enable_input_batch_split=*/false,
                        MixedPriorityBatchingPolicy::
                            kLowPriorityPaddingWithNextAllowedBatchSize),
        std::make_tuple(/*enable_input_batch_split=*/true,
                        MixedPriorityBatchingPolicy::kPriorityIsolation),
        std::make_tuple(/*enable_input_batch_split=*/false,
                        MixedPriorityBatchingPolicy::kPriorityIsolation)));
using SharedBatchSchedulerPriorityPolicyTest = SharedBatchSchedulerTest;
TEST_P(SharedBatchSchedulerPriorityPolicyTest,
HighPriorityBatchPaddedUptoMaxBatchSize) {
bool queue_callback_called = false;
auto queue_callback = [&queue_callback_called](
std::unique_ptr<Batch<FakeTask>> batch,
std::vector<std::unique_ptr<FakeTask>> tasks) {
if (queue_callback_called) return;
queue_callback_called = true;
ASSERT_TRUE(batch->IsClosed());
ASSERT_EQ(2, batch->num_tasks());
EXPECT_EQ(1, batch->task(0).size());
EXPECT_EQ(3, batch->task(1).size());
EXPECT_EQ(2, tasks.size());
EXPECT_EQ(3, tasks[0]->size());
EXPECT_EQ(3, tasks[1]->size());
};
{
std::shared_ptr<Scheduler> scheduler =
CreateSharedBatchScheduler(3);
    QueueOptions queue_options = CreateQueueOptions(
        /*max_execution_batch_size=*/10, /*input_batch_size_limit=*/10,
        /*batch_timeout_micros=*/1 * 1000 * 1000, /*max_enqueued_batches=*/2,
        /*enable_priority_queue=*/true);
queue_options.low_priority_queue_options.max_execution_batch_size = 10;
queue_options.low_priority_queue_options.batch_timeout_micros =
1 * 1000 * 1000;
queue_options.low_priority_queue_options.input_batch_size_limit = 10;
queue_options.low_priority_queue_options.max_enqueued_batches = 2;
queue_options.mixed_priority_batching_policy =
MixedPriorityBatchingPolicy::kLowPriorityPaddingWithMaxBatchSize;
std::unique_ptr<Queue> queue =
CreateQueue(scheduler, queue_options, queue_callback);
TF_ASSERT_OK(ScheduleTask(1, queue.get(),
tsl::criticality::Criticality::kCriticalPlus));
TF_ASSERT_OK(ScheduleTask(3, queue.get(),
tsl::criticality::Criticality::kCriticalPlus));
TF_ASSERT_OK(ScheduleTask(3, queue.get(),
tsl::criticality::Criticality::kSheddable));
TF_ASSERT_OK(ScheduleTask(3, queue.get(),
tsl::criticality::Criticality::kSheddable));
TF_ASSERT_OK(ScheduleTask(3, queue.get(),
tsl::criticality::Criticality::kSheddable));
}
EXPECT_TRUE(queue_callback_called);
}
TEST_P(SharedBatchSchedulerPriorityPolicyTest,
HighPriorityBatchPaddedUptoMaxAvailableBatchSize) {
bool queue_callback_called = false;
auto queue_callback = [&queue_callback_called](
std::unique_ptr<Batch<FakeTask>> batch,
std::vector<std::unique_ptr<FakeTask>> tasks) {
queue_callback_called = true;
ASSERT_TRUE(batch->IsClosed());
ASSERT_EQ(2, batch->num_tasks());
EXPECT_EQ(1, batch->task(0).size());
EXPECT_EQ(3, batch->task(1).size());
EXPECT_EQ(1, tasks.size());
EXPECT_EQ(3, tasks[0]->size());
};
{
std::shared_ptr<Scheduler> scheduler =
CreateSharedBatchScheduler(3);
    QueueOptions queue_options = CreateQueueOptions(
        /*max_execution_batch_size=*/10, /*input_batch_size_limit=*/10,
        /*batch_timeout_micros=*/1 * 1000 * 1000, /*max_enqueued_batches=*/2,
        /*enable_priority_queue=*/true);
queue_options.low_priority_queue_options.max_execution_batch_size = 10;
queue_options.low_priority_queue_options.batch_timeout_micros =
1 * 1000 * 1000;
queue_options.low_priority_queue_options.input_batch_size_limit = 10;
queue_options.low_priority_queue_options.max_enqueued_batches = 2;
queue_options.mixed_priority_batching_policy =
MixedPriorityBatchingPolicy::kLowPriorityPaddingWithMaxBatchSize;
std::unique_ptr<Queue> queue =
CreateQueue(scheduler, queue_options, queue_callback);
TF_ASSERT_OK(ScheduleTask(1, queue.get(),
tsl::criticality::Criticality::kCriticalPlus));
TF_ASSERT_OK(ScheduleTask(3, queue.get(),
tsl::criticality::Criticality::kCriticalPlus));
TF_ASSERT_OK(ScheduleTask(3, queue.get(),
tsl::criticality::Criticality::kSheddable));
}
EXPECT_TRUE(queue_callback_called);
}
TEST_P(SharedBatchSchedulerPriorityPolicyTest,
HighPriorityBatchPaddedUptoNextAllowedBatchSize) {
bool queue_callback_called = false;
auto queue_callback = [&queue_callback_called](
std::unique_ptr<Batch<FakeTask>> batch,
std::vector<std::unique_ptr<FakeTask>> tasks) {
if (queue_callback_called) return;
queue_callback_called = true;
ASSERT_TRUE(batch->IsClosed());
ASSERT_EQ(2, batch->num_tasks());
EXPECT_EQ(1, batch->task(0).size());
EXPECT_EQ(3, batch->task(1).size());
EXPECT_EQ(2, tasks.size());
EXPECT_EQ(2, tasks[0]->size());
EXPECT_EQ(2, tasks[1]->size());
};
{
std::shared_ptr<Scheduler> scheduler =
CreateSharedBatchScheduler(3);
    QueueOptions queue_options = CreateQueueOptions(
        /*max_execution_batch_size=*/10, /*input_batch_size_limit=*/10,
        /*batch_timeout_micros=*/1 * 1000 * 1000, /*max_enqueued_batches=*/2,
        /*enable_priority_queue=*/true);
queue_options.allowed_batch_sizes = {2, 8, 16};
queue_options.low_priority_queue_options.max_execution_batch_size = 10;
queue_options.low_priority_queue_options.batch_timeout_micros =
1 * 1000 * 1000;
queue_options.low_priority_queue_options.input_batch_size_limit = 10;
queue_options.low_priority_queue_options.max_enqueued_batches = 2;
queue_options.mixed_priority_batching_policy = MixedPriorityBatchingPolicy::
kLowPriorityPaddingWithNextAllowedBatchSize;
std::unique_ptr<Queue> queue =
CreateQueue(scheduler, queue_options, queue_callback);
TF_ASSERT_OK(ScheduleTask(1, queue.get(),
tsl::criticality::Criticality::kCriticalPlus));
TF_ASSERT_OK(ScheduleTask(3, queue.get(),
tsl::criticality::Criticality::kCriticalPlus));
TF_ASSERT_OK(ScheduleTask(2, queue.get(),
tsl::criticality::Criticality::kSheddable));
TF_ASSERT_OK(ScheduleTask(2, queue.get(),
tsl::criticality::Criticality::kSheddable));
TF_ASSERT_OK(ScheduleTask(2, queue.get(),
tsl::criticality::Criticality::kSheddable));
}
EXPECT_TRUE(queue_callback_called);
}
TEST_P(SharedBatchSchedulerPriorityPolicyTest,
HighPriorityBatchNotPaddedWhenAllowedBatchSizeMissing) {
bool queue_callback_called = false;
auto queue_callback = [&queue_callback_called](
std::unique_ptr<Batch<FakeTask>> batch,
std::vector<std::unique_ptr<FakeTask>> tasks) {
if (queue_callback_called) return;
queue_callback_called = true;
ASSERT_TRUE(batch->IsClosed());
ASSERT_EQ(2, batch->num_tasks());
EXPECT_EQ(1, batch->task(0).size());
EXPECT_EQ(3, batch->task(1).size());
EXPECT_EQ(0, tasks.size());
};
{
std::shared_ptr<Scheduler> scheduler =
CreateSharedBatchScheduler(3);
    QueueOptions queue_options = CreateQueueOptions(
        /*max_execution_batch_size=*/10, /*input_batch_size_limit=*/10,
        /*batch_timeout_micros=*/1 * 1000 * 1000, /*max_enqueued_batches=*/2,
        /*enable_priority_queue=*/true);
queue_options.low_priority_queue_options.max_execution_batch_size = 10;
queue_options.low_priority_queue_options.batch_timeout_micros =
1 * 1000 * 1000;
queue_options.low_priority_queue_options.input_batch_size_limit = 10;
queue_options.low_priority_queue_options.max_enqueued_batches = 2;
queue_options.mixed_priority_batching_policy = MixedPriorityBatchingPolicy::
kLowPriorityPaddingWithNextAllowedBatchSize;
std::unique_ptr<Queue> queue =
CreateQueue(scheduler, queue_options, queue_callback);
TF_ASSERT_OK(ScheduleTask(1, queue.get(),
tsl::criticality::Criticality::kCriticalPlus));
TF_ASSERT_OK(ScheduleTask(3, queue.get(),
tsl::criticality::Criticality::kCriticalPlus));
TF_ASSERT_OK(ScheduleTask(2, queue.get(),
tsl::criticality::Criticality::kSheddable));
TF_ASSERT_OK(ScheduleTask(2, queue.get(),
tsl::criticality::Criticality::kSheddable));
TF_ASSERT_OK(ScheduleTask(2, queue.get(),
tsl::criticality::Criticality::kSheddable));
}
EXPECT_TRUE(queue_callback_called);
}
TEST_P(SharedBatchSchedulerPriorityPolicyTest,
HighPriorityBatchNotPaddedWithLowPriorityTasks) {
int queue_callback_counter = 0;
auto queue_callback = [&queue_callback_counter](
std::unique_ptr<Batch<FakeTask>> batch,
std::vector<std::unique_ptr<FakeTask>> tasks) {
if (queue_callback_counter++ == 0) {
ASSERT_TRUE(batch->IsClosed());
ASSERT_EQ(2, batch->num_tasks());
EXPECT_EQ(1, batch->task(0).size());
EXPECT_EQ(3, batch->task(1).size());
return;
}
ASSERT_TRUE(batch->IsClosed());
ASSERT_EQ(2, batch->num_tasks());
EXPECT_EQ(3, batch->task(0).size());
EXPECT_EQ(3, batch->task(1).size());
};
{
std::shared_ptr<Scheduler> scheduler =
CreateSharedBatchScheduler(3);
    QueueOptions queue_options = CreateQueueOptions(
        /*max_execution_batch_size=*/10, /*input_batch_size_limit=*/10,
        /*batch_timeout_micros=*/1 * 1000 * 1000, /*max_enqueued_batches=*/2,
        /*enable_priority_queue=*/true);
queue_options.low_priority_queue_options.max_execution_batch_size = 10;
queue_options.low_priority_queue_options.batch_timeout_micros =
1 * 1000 * 1000;
queue_options.low_priority_queue_options.input_batch_size_limit = 10;
queue_options.low_priority_queue_options.max_enqueued_batches = 2;
queue_options.mixed_priority_batching_policy =
MixedPriorityBatchingPolicy::kPriorityIsolation;
std::unique_ptr<Queue> queue =
CreateQueue(scheduler, queue_options, queue_callback);
TF_ASSERT_OK(ScheduleTask(1, queue.get(),
tsl::criticality::Criticality::kCriticalPlus));
TF_ASSERT_OK(ScheduleTask(3, queue.get(),
tsl::criticality::Criticality::kCriticalPlus));
TF_ASSERT_OK(ScheduleTask(3, queue.get(),
tsl::criticality::Criticality::kSheddable));
TF_ASSERT_OK(ScheduleTask(3, queue.get(),
tsl::criticality::Criticality::kSheddable));
}
EXPECT_EQ(queue_callback_counter, 2);
}
INSTANTIATE_TEST_SUITE_P(Parameter, SharedBatchSchedulerPriorityPolicyTest,
::testing::Bool());
#ifdef PLATFORM_GOOGLE
static std::vector<std::unique_ptr<Queue>>* queues =
new std::vector<std::unique_ptr<Queue>>();
static std::vector<std::string>* queue_labels = new std::vector<std::string>();
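// Benchmark fixtures: two queues with identical batching limits that differ
// only in whether input-batch splitting is enabled. The split function below
// busy-waits for about one microsecond per task to simulate splitting work.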
void CreateQueues() {
auto split_func_for_size_one_task =
[](std::unique_ptr<FakeTask>* input_task, int open_batch_remaining_slot,
int max_batch_size,
std::vector<std::unique_ptr<FakeTask>>* output_tasks) -> Status {
output_tasks->push_back(std::move(*input_task));
Notification notify;
std::thread busy_waiter([&] {
while (!notify.HasBeenNotified()) {
}
});
std::thread notifier([&] {
Env::Default()->SleepForMicroseconds(1);
notify.Notify();
});
busy_waiter.join();
notifier.join();
return absl::OkStatus();
};
internal::Queue<FakeTask>::ProcessBatchCallback process_batch_callback =
[](std::unique_ptr<Batch<FakeTask>> task) {
};
const size_t max_execution_batch_size = 64;
const size_t input_batch_size_limit = 128;
const size_t batch_timeout_micros = 10;
  queues->push_back(CreateQueue(
      CreateSharedBatchScheduler(/*num_batch_threads=*/5),
      CreateQueueOptions(max_execution_batch_size, input_batch_size_limit,
                         batch_timeout_micros,
                         /*max_enqueued_batches=*/INT_MAX,
                         /*enable_large_batch_splitting=*/true,
                         split_func_for_size_one_task),
      process_batch_callback));
queue_labels->push_back(std::string("EagerSplit"));
  queues->push_back(CreateQueue(
      CreateSharedBatchScheduler(/*num_batch_threads=*/5),
      CreateQueueOptions(max_execution_batch_size, input_batch_size_limit,
                         batch_timeout_micros,
                         /*max_enqueued_batches=*/INT_MAX,
                         /*enable_large_batch_splitting=*/false,
                         /*split_func=*/nullptr),
      process_batch_callback));
queue_labels->push_back(std::string("NoSplit"));
}
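// Measures Queue::Schedule() throughput: each benchmark iteration enqueues
// state.range(0) size-1 tasks on the queue selected by state.range(1).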
void BM_QueueSchedule(::testing::benchmark::State& state) {
static absl::once_flag once;
absl::call_once(once, []() { CreateQueues(); });
const int queue_index = state.range(1);
Queue* queue = (*queues)[queue_index].get();
const string label = strings::StrCat(state.threads(), "-Threads",
(*queue_labels)[queue_index]);
state.SetLabel(label);
for (auto s : state) {
for (int i = 0; i < state.range(0); i++) {
auto batch_task = std::make_unique<FakeTask>(1);
auto status = queue->Schedule(&batch_task);
tensorflow::testing::DoNotOptimize(status);
}
}
}
BENCHMARK(BM_QueueSchedule)->Apply([](benchmark::internal::Benchmark* b) {
b->ThreadRange(1,
port::NumSchedulableCPUs() * tensorflow::port::CPUIDNumSMT());
  // CreateQueues() registers exactly two queues (indices 0 and 1).
  for (int queue_index : {0, 1}) {
b->ArgPair(10000, queue_index);
}
});
#endif
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/batching_util/shared_batch_scheduler.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/batching_util/shared_batch_scheduler_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
ea0cdbc9-2758-4931-92c9-7bad4053a615 | cpp | google/cel-cpp | container_membership_functions | runtime/standard/container_membership_functions.cc | runtime/standard/container_membership_functions_test.cc | #include "runtime/standard/container_membership_functions.h"
#include <array>
#include <cstdint>
#include <utility>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "base/builtins.h"
#include "base/function_adapter.h"
#include "common/value.h"
#include "internal/number.h"
#include "internal/status_macros.h"
#include "runtime/function_registry.h"
#include "runtime/register_function_helper.h"
#include "runtime/runtime_options.h"
namespace cel {
namespace {
using ::cel::internal::Number;
static constexpr std::array<absl::string_view, 3> in_operators = {
cel::builtin::kIn,
cel::builtin::kInFunction,
cel::builtin::kInDeprecated,
};
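// Homogeneous equality helpers: each specialization returns true only when
// the Value holds the matching kind and its contents compare equal.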
template <class T>
bool ValueEquals(const Value& value, T other);
template <>
bool ValueEquals(const Value& value, bool other) {
if (auto bool_value = As<BoolValue>(value); bool_value) {
return bool_value->NativeValue() == other;
}
return false;
}
template <>
bool ValueEquals(const Value& value, int64_t other) {
if (auto int_value = As<IntValue>(value); int_value) {
return int_value->NativeValue() == other;
}
return false;
}
template <>
bool ValueEquals(const Value& value, uint64_t other) {
if (auto uint_value = As<UintValue>(value); uint_value) {
return uint_value->NativeValue() == other;
}
return false;
}
template <>
bool ValueEquals(const Value& value, double other) {
if (auto double_value = As<DoubleValue>(value); double_value) {
return double_value->NativeValue() == other;
}
return false;
}
template <>
bool ValueEquals(const Value& value, const StringValue& other) {
if (auto string_value = As<StringValue>(value); string_value) {
return string_value->Equals(other);
}
return false;
}
template <>
bool ValueEquals(const Value& value, const BytesValue& other) {
if (auto bytes_value = As<BytesValue>(value); bytes_value) {
return bytes_value->Equals(other);
}
return false;
}
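// Membership test used when heterogeneous equality is disabled: linearly
// scans the list and compares each element against `value` with the
// type-specific ValueEquals specialization.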
template <typename T>
absl::StatusOr<bool> In(ValueManager& value_factory, T value,
const ListValue& list) {
CEL_ASSIGN_OR_RETURN(auto size, list.Size());
Value element;
for (int i = 0; i < size; i++) {
CEL_RETURN_IF_ERROR(list.Get(value_factory, i, element));
if (ValueEquals<T>(element, value)) {
return true;
}
}
return false;
}
absl::StatusOr<Value> HeterogeneousEqualityIn(ValueManager& value_factory,
const Value& value,
const ListValue& list) {
return list.Contains(value_factory, value);
}
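// Registers the list 'in' overloads for every operator spelling. Under
// heterogeneous equality a single Value-typed overload delegates to
// ListValue::Contains; otherwise one overload per supported primitive type
// is registered.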
absl::Status RegisterListMembershipFunctions(FunctionRegistry& registry,
const RuntimeOptions& options) {
for (absl::string_view op : in_operators) {
if (options.enable_heterogeneous_equality) {
CEL_RETURN_IF_ERROR(
(RegisterHelper<BinaryFunctionAdapter<
absl::StatusOr<Value>, const Value&, const ListValue&>>::
RegisterGlobalOverload(op, &HeterogeneousEqualityIn, registry)));
} else {
CEL_RETURN_IF_ERROR(
(RegisterHelper<BinaryFunctionAdapter<absl::StatusOr<bool>, bool,
const ListValue&>>::
RegisterGlobalOverload(op, In<bool>, registry)));
CEL_RETURN_IF_ERROR(
(RegisterHelper<BinaryFunctionAdapter<absl::StatusOr<bool>, int64_t,
const ListValue&>>::
RegisterGlobalOverload(op, In<int64_t>, registry)));
CEL_RETURN_IF_ERROR(
(RegisterHelper<BinaryFunctionAdapter<absl::StatusOr<bool>, uint64_t,
const ListValue&>>::
RegisterGlobalOverload(op, In<uint64_t>, registry)));
CEL_RETURN_IF_ERROR(
(RegisterHelper<BinaryFunctionAdapter<absl::StatusOr<bool>, double,
const ListValue&>>::
RegisterGlobalOverload(op, In<double>, registry)));
CEL_RETURN_IF_ERROR(
(RegisterHelper<BinaryFunctionAdapter<
absl::StatusOr<bool>, const StringValue&, const ListValue&>>::
RegisterGlobalOverload(op, In<const StringValue&>, registry)));
CEL_RETURN_IF_ERROR(
(RegisterHelper<BinaryFunctionAdapter<
absl::StatusOr<bool>, const BytesValue&, const ListValue&>>::
RegisterGlobalOverload(op, In<const BytesValue&>, registry)));
}
}
return absl::OkStatus();
}
absl::Status RegisterMapMembershipFunctions(FunctionRegistry& registry,
const RuntimeOptions& options) {
const bool enable_heterogeneous_equality =
options.enable_heterogeneous_equality;
auto boolKeyInSet = [enable_heterogeneous_equality](
ValueManager& factory, bool key,
const MapValue& map_value) -> absl::StatusOr<Value> {
auto result = map_value.Has(factory, factory.CreateBoolValue(key));
if (result.ok()) {
return std::move(*result);
}
if (enable_heterogeneous_equality) {
return factory.CreateBoolValue(false);
}
return factory.CreateErrorValue(result.status());
};
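  // For int keys under heterogeneous equality, a miss is retried with the
  // uint representation when the key converts losslessly, and lookup errors
  // degrade to `false` instead of an error value.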
auto intKeyInSet = [enable_heterogeneous_equality](
ValueManager& factory, int64_t key,
const MapValue& map_value) -> absl::StatusOr<Value> {
Value int_key = factory.CreateIntValue(key);
auto result = map_value.Has(factory, int_key);
if (enable_heterogeneous_equality) {
if (result.ok() && (*result).Is<BoolValue>() &&
result->GetBool().NativeValue()) {
return std::move(*result);
}
Number number = Number::FromInt64(key);
if (number.LosslessConvertibleToUint()) {
const auto& result =
map_value.Has(factory, factory.CreateUintValue(number.AsUint()));
if (result.ok() && (*result).Is<BoolValue>() &&
result->GetBool().NativeValue()) {
return std::move(*result);
}
}
return factory.CreateBoolValue(false);
}
if (!result.ok()) {
return factory.CreateErrorValue(result.status());
}
return std::move(*result);
};
auto stringKeyInSet =
[enable_heterogeneous_equality](
ValueManager& factory, const StringValue& key,
const MapValue& map_value) -> absl::StatusOr<Value> {
auto result = map_value.Has(factory, key);
if (result.ok()) {
return std::move(*result);
}
if (enable_heterogeneous_equality) {
return factory.CreateBoolValue(false);
}
return factory.CreateErrorValue(result.status());
};
auto uintKeyInSet = [enable_heterogeneous_equality](
ValueManager& factory, uint64_t key,
const MapValue& map_value) -> absl::StatusOr<Value> {
Value uint_key = factory.CreateUintValue(key);
const auto& result = map_value.Has(factory, uint_key);
if (enable_heterogeneous_equality) {
if (result.ok() && (*result).Is<BoolValue>() &&
result->GetBool().NativeValue()) {
return std::move(*result);
}
Number number = Number::FromUint64(key);
if (number.LosslessConvertibleToInt()) {
const auto& result =
map_value.Has(factory, factory.CreateIntValue(number.AsInt()));
if (result.ok() && (*result).Is<BoolValue>() &&
result->GetBool().NativeValue()) {
return std::move(*result);
}
}
return factory.CreateBoolValue(false);
}
if (!result.ok()) {
return factory.CreateErrorValue(result.status());
}
return std::move(*result);
};
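  // Double keys are only registered under heterogeneous equality: the key is
  // tried as an int and then as a uint whenever the conversion is lossless.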
auto doubleKeyInSet = [](ValueManager& factory, double key,
const MapValue& map_value) -> absl::StatusOr<Value> {
Number number = Number::FromDouble(key);
if (number.LosslessConvertibleToInt()) {
const auto& result =
map_value.Has(factory, factory.CreateIntValue(number.AsInt()));
if (result.ok() && (*result).Is<BoolValue>() &&
result->GetBool().NativeValue()) {
return std::move(*result);
}
}
if (number.LosslessConvertibleToUint()) {
const auto& result =
map_value.Has(factory, factory.CreateUintValue(number.AsUint()));
if (result.ok() && (*result).Is<BoolValue>() &&
result->GetBool().NativeValue()) {
return std::move(*result);
}
}
return factory.CreateBoolValue(false);
};
for (auto op : in_operators) {
auto status = RegisterHelper<BinaryFunctionAdapter<
absl::StatusOr<Value>, const StringValue&,
const MapValue&>>::RegisterGlobalOverload(op, stringKeyInSet, registry);
if (!status.ok()) return status;
status = RegisterHelper<
BinaryFunctionAdapter<absl::StatusOr<Value>, bool, const MapValue&>>::
RegisterGlobalOverload(op, boolKeyInSet, registry);
if (!status.ok()) return status;
status = RegisterHelper<BinaryFunctionAdapter<absl::StatusOr<Value>,
int64_t, const MapValue&>>::
RegisterGlobalOverload(op, intKeyInSet, registry);
if (!status.ok()) return status;
status = RegisterHelper<BinaryFunctionAdapter<absl::StatusOr<Value>,
uint64_t, const MapValue&>>::
RegisterGlobalOverload(op, uintKeyInSet, registry);
if (!status.ok()) return status;
if (enable_heterogeneous_equality) {
status = RegisterHelper<BinaryFunctionAdapter<absl::StatusOr<Value>,
double, const MapValue&>>::
RegisterGlobalOverload(op, doubleKeyInSet, registry);
if (!status.ok()) return status;
}
}
return absl::OkStatus();
}
}
absl::Status RegisterContainerMembershipFunctions(
FunctionRegistry& registry, const RuntimeOptions& options) {
if (options.enable_list_contains) {
CEL_RETURN_IF_ERROR(RegisterListMembershipFunctions(registry, options));
}
return RegisterMapMembershipFunctions(registry, options);
}
} | #include "runtime/standard/container_membership_functions.h"
#include <array>
#include <vector>
#include "absl/strings/string_view.h"
#include "base/builtins.h"
#include "base/function_descriptor.h"
#include "base/kind.h"
#include "internal/testing.h"
#include "runtime/function_registry.h"
#include "runtime/runtime_options.h"
namespace cel {
namespace {
using ::testing::UnorderedElementsAre;
MATCHER_P3(MatchesDescriptor, name, receiver, expected_kinds, "") {
const FunctionDescriptor& descriptor = *arg;
const std::vector<Kind>& types = expected_kinds;
return descriptor.name() == name && descriptor.receiver_style() == receiver &&
descriptor.types() == types;
}
static constexpr std::array<absl::string_view, 3> kInOperators = {
builtin::kIn, builtin::kInDeprecated, builtin::kInFunction};
TEST(RegisterContainerMembershipFunctions, RegistersHomogeneousInOperator) {
FunctionRegistry registry;
RuntimeOptions options;
options.enable_heterogeneous_equality = false;
ASSERT_OK(RegisterContainerMembershipFunctions(registry, options));
auto overloads = registry.ListFunctions();
for (absl::string_view operator_name : kInOperators) {
EXPECT_THAT(
overloads[operator_name],
UnorderedElementsAre(
MatchesDescriptor(operator_name, false,
std::vector<Kind>{Kind::kInt, Kind::kList}),
MatchesDescriptor(operator_name, false,
std::vector<Kind>{Kind::kUint, Kind::kList}),
MatchesDescriptor(operator_name, false,
std::vector<Kind>{Kind::kDouble, Kind::kList}),
MatchesDescriptor(operator_name, false,
std::vector<Kind>{Kind::kString, Kind::kList}),
MatchesDescriptor(operator_name, false,
std::vector<Kind>{Kind::kBytes, Kind::kList}),
MatchesDescriptor(operator_name, false,
std::vector<Kind>{Kind::kBool, Kind::kList}),
MatchesDescriptor(operator_name, false,
std::vector<Kind>{Kind::kInt, Kind::kMap}),
MatchesDescriptor(operator_name, false,
std::vector<Kind>{Kind::kUint, Kind::kMap}),
MatchesDescriptor(operator_name, false,
std::vector<Kind>{Kind::kString, Kind::kMap}),
MatchesDescriptor(operator_name, false,
std::vector<Kind>{Kind::kBool, Kind::kMap})));
}
}
TEST(RegisterContainerMembershipFunctions, RegistersHeterogeneousInOperation) {
FunctionRegistry registry;
RuntimeOptions options;
options.enable_heterogeneous_equality = true;
ASSERT_OK(RegisterContainerMembershipFunctions(registry, options));
auto overloads = registry.ListFunctions();
for (absl::string_view operator_name : kInOperators) {
EXPECT_THAT(
overloads[operator_name],
UnorderedElementsAre(
MatchesDescriptor(operator_name, false,
std::vector<Kind>{Kind::kAny, Kind::kList}),
MatchesDescriptor(operator_name, false,
std::vector<Kind>{Kind::kInt, Kind::kMap}),
MatchesDescriptor(operator_name, false,
std::vector<Kind>{Kind::kUint, Kind::kMap}),
MatchesDescriptor(operator_name, false,
std::vector<Kind>{Kind::kDouble, Kind::kMap}),
MatchesDescriptor(operator_name, false,
std::vector<Kind>{Kind::kString, Kind::kMap}),
MatchesDescriptor(operator_name, false,
std::vector<Kind>{Kind::kBool, Kind::kMap})));
}
}
TEST(RegisterContainerMembershipFunctions, RegistersInOperatorListsDisabled) {
FunctionRegistry registry;
RuntimeOptions options;
options.enable_list_contains = false;
ASSERT_OK(RegisterContainerMembershipFunctions(registry, options));
auto overloads = registry.ListFunctions();
for (absl::string_view operator_name : kInOperators) {
EXPECT_THAT(
overloads[operator_name],
UnorderedElementsAre(
MatchesDescriptor(operator_name, false,
std::vector<Kind>{Kind::kInt, Kind::kMap}),
MatchesDescriptor(operator_name, false,
std::vector<Kind>{Kind::kUint, Kind::kMap}),
MatchesDescriptor(operator_name, false,
std::vector<Kind>{Kind::kDouble, Kind::kMap}),
MatchesDescriptor(operator_name, false,
std::vector<Kind>{Kind::kString, Kind::kMap}),
MatchesDescriptor(operator_name, false,
std::vector<Kind>{Kind::kBool, Kind::kMap})));
}
}
}
} | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/runtime/standard/container_membership_functions.cc | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/runtime/standard/container_membership_functions_test.cc | 4552db5798fb0853b131b783d8875794334fae7f |
75ea6ca2-c38e-4fd8-ab84-f4778b0ea94e | cpp | tensorflow/tensorflow | all_reduce_splitter | third_party/xla/xla/service/gpu/transforms/all_reduce_splitter.cc | third_party/xla/xla/service/gpu/transforms/all_reduce_splitter_test.cc | #include "xla/service/gpu/transforms/all_reduce_splitter.h"
#include <cstdint>
#include <optional>
#include <string>
#include <variant>
#include <vector>
#include "absl/cleanup/cleanup.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/strings/substitute.h"
#include "xla/hlo/ir/collective_device_list.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_query.h"
#include "xla/service/collective_opt_utils.h"
#include "xla/service/hlo_module_config.h"
#include "xla/shape.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
struct ARReplicaGroups {
std::vector<ReplicaGroup> first_ar_replica_groups;
std::vector<ReplicaGroup> second_ar_replica_groups;
};
struct AllReduceRewriteSpec {
int split_dim;
int group_size;
HloAllReduceInstruction* all_reduce;
HloDynamicSliceInstruction* dynamic_slice;
ARReplicaGroups replica_groups;
std::string ToString() {
return absl::Substitute(
"{\n split_dim=$0\n group_size=$1\n all_reduce=$2\n "
"dynamic_slice=$3\n}\n",
split_dim, group_size, all_reduce->ToString(),
dynamic_slice->ToString());
}
};
struct RewriteInfeasibleReason {
const HloInstruction* ar;
std::string message;
};
struct ReplicaGroups {
std::vector<ReplicaGroup> replica_groups;
template <typename H>
friend H AbslHashValue(H h, const ReplicaGroups& rg) {
return H::combine(std::move(h), rg.replica_groups.size());
}
friend bool operator==(const ReplicaGroups& item,
const ReplicaGroups& other) {
if (item.replica_groups.size() != other.replica_groups.size()) {
return false;
}
    for (int i = 0; i < item.replica_groups.size(); i++) {
      const ReplicaGroup& item_replica_group = item.replica_groups[i];
      const ReplicaGroup& other_replica_group = other.replica_groups[i];
      if (item_replica_group.replica_ids_size() !=
          other_replica_group.replica_ids_size()) {
        return false;
      }
      for (int j = 0; j < item_replica_group.replica_ids_size(); j++) {
        if (item_replica_group.replica_ids(j) !=
            other_replica_group.replica_ids(j)) {
          return false;
        }
      }
    }
return true;
}
};
using ARReplicaGroupMap =
absl::flat_hash_map<ReplicaGroups,
std::vector<const HloAllReduceInstruction*>>;
using RewriteDecision =
std::variant<AllReduceRewriteSpec, RewriteInfeasibleReason>;
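// Infers the dimension an all-reduce + dynamic-slice pair splits on by
// comparing shapes; exactly one dimension may differ, otherwise there is no
// usable split dim.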
std::optional<int> GetSplitDim(const HloAllReduceInstruction& ar,
const HloDynamicSliceInstruction& ds) {
int split_dim = -1;
int num_dims = 0;
for (int64_t dim = 0; dim < ar.shape().rank(); ++dim) {
if (ar.shape().dimensions(dim) != ds.shape().dimensions(dim)) {
num_dims++;
split_dim = dim;
}
}
if (num_dims != 1) {
VLOG(2) << "No support for multiple nor 0 split dims.";
return std::nullopt;
}
return split_dim;
}
std::optional<int> GetProcessGroupSize(const HloAllReduceInstruction& ar,
const HloDynamicSliceInstruction& ds) {
CHECK(ds.operand(0) == &ar) << "Irrelevant AR + DS pair.";
std::optional<int> split_dim = GetSplitDim(ar, ds);
if (!split_dim.has_value()) {
return std::nullopt;
}
return ar.shape().dimensions(*split_dim) /
ds.dynamic_slice_sizes()[*split_dim];
}
ARReplicaGroupMap GetReplicaGroupsMap(HloComputation& computation) {
ARReplicaGroupMap map;
hlo_query::ForEachInstructionWithOpcode(
computation, HloOpcode::kAllReduce,
[&map](const HloInstruction* instruction) {
const HloAllReduceInstruction* ar =
Cast<HloAllReduceInstruction>(instruction);
auto rgs = ReplicaGroups{ar->replica_groups()};
map[rgs].push_back(ar);
});
return map;
}
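// Builds the two orthogonal sets of replica groups used after the split.
// For example, with num_partitions = 8 and group_size = 4 this yields
// first = {{0,1,2,3},{4,5,6,7}} and second = {{0,4},{1,5},{2,6},{3,7}},
// matching the FileCheck expectations in the accompanying test.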
ARReplicaGroups GetNewReplicaGroups(int group_size, int num_partitions) {
CHECK_EQ(num_partitions % group_size, 0);
std::vector<ReplicaGroup> first_ar_rgs, second_ar_rgs;
int num_units = num_partitions / group_size;
first_ar_rgs.reserve(num_units);
second_ar_rgs.reserve(group_size);
for (int u = 0; u < group_size * num_units; u += group_size) {
ReplicaGroup& group = first_ar_rgs.emplace_back();
for (int r = u; r < u + group_size; r++) {
group.add_replica_ids(r);
}
}
for (int g = 0; g < group_size; g++) {
ReplicaGroup& group = second_ar_rgs.emplace_back();
for (int r = g; r < group_size * num_units; r += group_size) {
group.add_replica_ids(r);
}
}
return {
first_ar_rgs,
second_ar_rgs,
};
}
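// Checks whether the AR + dynamic-slice pair is a logical reduce-scatter by
// temporarily rewiring the dynamic slice onto an all-reduce with the
// first-stage replica groups and asking MatchReduceScatter; the cleanup
// restores the original graph regardless of the outcome.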
bool IsLogicalReduceScatter(const HloModule& module,
const AllReduceRewriteSpec& spec,
HloComputation& computation) {
HloAllReduceInstruction& ar = *spec.all_reduce;
CHECK_EQ(ar.user_count(), 1);
CHECK_EQ(module.config().replica_count(), 1);
HloInstruction* first_ar =
computation.AddInstruction(HloInstruction::CreateAllReduce(
ar.shape(), ar.operands(), ar.to_apply(),
CollectiveDeviceList(spec.replica_groups.first_ar_replica_groups),
ar.constrain_layout(), hlo_query::NextChannelId(module),
ar.use_global_device_ids()));
HloInstruction* ds = ar.users()[0];
auto* old_operand = ds->mutable_operand(0);
if (!ds->ReplaceOperandWith(0, first_ar).ok()) {
return false;
}
absl::Cleanup _ = [&] {
CHECK_OK(ds->ReplaceOperandWith(0, old_operand));
CHECK_OK(computation.RemoveInstruction(first_ar));
};
return MatchReduceScatter(Cast<HloAllReduceInstruction>(first_ar),
module.config().num_partitions(),
module.config().replica_count(),
                            /*allow_multiple_split_dims=*/false,
                            /*allow_intervening_reshape=*/true)
.has_value();
}
bool IsProfitableToSplit(const ARReplicaGroupMap& replica_map,
const AllReduceRewriteSpec& spec) {
auto new_rgs = spec.replica_groups;
bool first_replica_exists =
replica_map.contains(ReplicaGroups{new_rgs.first_ar_replica_groups});
bool second_replica_exists =
replica_map.contains(ReplicaGroups{new_rgs.second_ar_replica_groups});
return first_replica_exists || second_replica_exists;
}
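// Validates every precondition for the rewrite (SPMD compilation, global
// device ids, a single dynamic-slice user, a divisible group size, a logical
// reduce-scatter shape, profitability) and returns either a concrete rewrite
// spec or the reason the rewrite is infeasible.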
RewriteDecision CanRewrite(const HloModule& module,
const ARReplicaGroupMap& replica_map,
HloComputation& computation,
HloInstruction& instruction) {
const HloModuleConfig& config = module.config();
if (config.use_auto_spmd_partitioning() || !config.use_spmd_partitioning() ||
config.replica_count() != 1) {
return RewriteInfeasibleReason{
&instruction,
"Supporting only SPMD partitioning scheme.",
};
}
if (instruction.opcode() != HloOpcode::kAllReduce) {
return RewriteInfeasibleReason{
&instruction,
"Cannot rewrite an AllReduce, since it's not AllReduce.",
};
}
auto* ar = Cast<HloAllReduceInstruction>(&instruction);
if (!ar->use_global_device_ids()) {
return RewriteInfeasibleReason{
&instruction,
"Only global ids are supported currently.",
};
}
if (ar->user_count() != 1 ||
ar->users().front()->opcode() != HloOpcode::kDynamicSlice) {
return RewriteInfeasibleReason{
&instruction,
"Cannot rewrite AllReduce if it is not a logical reduce scatter.",
};
}
auto* ds = Cast<HloDynamicSliceInstruction>(ar->users().front());
if (ds->user_count() > 1) {
return RewriteInfeasibleReason{
&instruction,
"Exactly one user of dynamic slice is required for a rewrite.",
};
}
int num_partitions = config.num_partitions();
std::vector<ReplicaGroup> rgs = ar->replica_groups();
if (rgs.size() != 1 || rgs.front().replica_ids_size() != num_partitions) {
return RewriteInfeasibleReason{
&instruction,
absl::StrCat("Cannot determine a valid split with num_partitions: ",
num_partitions),
};
}
std::optional<int> split_dim = GetSplitDim(*ar, *ds);
if (!split_dim.has_value()) {
return RewriteInfeasibleReason{
&instruction,
"Cannot get a split dim.",
};
}
std::optional<int> group_size = GetProcessGroupSize(*ar, *ds);
if (!group_size.has_value()) {
return RewriteInfeasibleReason{
&instruction,
"Cannot determine a group size.",
};
}
if (num_partitions == group_size) {
return RewriteInfeasibleReason{
&instruction,
"Nothing to rewrite",
};
}
if (num_partitions % *group_size != 0) {
return RewriteInfeasibleReason{
&instruction,
"Group size does not evenly divide the number of partitions",
};
}
auto spec = AllReduceRewriteSpec{
*split_dim,
*group_size,
ar,
ds,
GetNewReplicaGroups(*group_size, num_partitions),
};
if (!IsLogicalReduceScatter(module, spec, computation)) {
return RewriteInfeasibleReason{
&instruction,
"Not a logical reduce scatter.",
};
}
if (!IsProfitableToSplit(replica_map, spec)) {
return RewriteInfeasibleReason{
&instruction,
"Splitting is not profitable.",
};
}
return spec;
}
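// Applies the rewrite: the original all-reduce becomes an all-reduce over
// the first (intra-group) replica groups, and a second all-reduce over the
// orthogonal groups is inserted after the dynamic slice, replacing its uses.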
absl::StatusOr<bool> SplitAllReduce(const HloModuleConfig& config,
AllReduceRewriteSpec spec,
HloComputation& computation) {
int64_t next_channel_id =
hlo_query::NextChannelId(*spec.all_reduce->GetModule());
VLOG(1) << "AR splitting spec: " << spec.ToString();
int num_partitions = config.num_partitions();
int group_size = spec.group_size;
CHECK_EQ(num_partitions % group_size, 0);
HloAllReduceInstruction& ar = *spec.all_reduce;
HloDynamicSliceInstruction& ds = *spec.dynamic_slice;
const auto& [first_ar_replica_groups, second_ar_replica_groups] =
spec.replica_groups;
int channel_id = next_channel_id++;
HloInstruction* first_ar =
computation.AddInstruction(HloInstruction::CreateAllReduce(
ar.shape(), ar.operands(), ar.to_apply(),
CollectiveDeviceList(first_ar_replica_groups), ar.constrain_layout(),
channel_id, ar.use_global_device_ids()));
channel_id = next_channel_id++;
HloInstruction* second_ar =
computation.AddInstruction(HloInstruction::CreateAllReduce(
ds.shape(), {&ds}, ar.to_apply(),
CollectiveDeviceList(second_ar_replica_groups), ar.constrain_layout(),
channel_id, ar.use_global_device_ids()));
TF_RETURN_IF_ERROR(computation.ReplaceInstruction(&ar, first_ar));
if (ds.IsRoot()) {
computation.set_root_instruction(second_ar);
}
TF_RETURN_IF_ERROR(ds.ReplaceAllUsesWith(second_ar));
return true;
}
absl::StatusOr<bool> SplitAllReduce(const HloModule& module,
const ARReplicaGroupMap& replica_map,
HloComputation& computation,
HloInstruction& instruction) {
RewriteDecision spec =
CanRewrite(module, replica_map, computation, instruction);
if (std::holds_alternative<RewriteInfeasibleReason>(spec)) {
auto reason = std::get<RewriteInfeasibleReason>(spec);
VLOG(1) << "Cannot process {" << reason.ar->ToString()
<< "} due to : " << reason.message;
return false;
}
return SplitAllReduce(module.config(), std::get<AllReduceRewriteSpec>(spec),
computation);
}
}
absl::StatusOr<bool> AllReduceSplitter::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
bool changed = false;
for (auto* computation : module->computations(execution_threads)) {
ARReplicaGroupMap replica_map = GetReplicaGroupsMap(*computation);
for (HloInstruction* instr : computation->MakeInstructionPostOrder()) {
TF_ASSIGN_OR_RETURN(bool rewritten, SplitAllReduce(*module, replica_map,
*computation, *instr));
changed |= rewritten;
}
}
return changed;
}
} | #include "xla/service/gpu/transforms/all_reduce_splitter.h"
#include <cstddef>
#include <cstdint>
#include <memory>
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/algorithm/container.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/pass/hlo_pass_pipeline.h"
#include "xla/service/gpu/transforms/reduce_scatter_creator.h"
#include "xla/service/hlo_module_config.h"
#include "xla/tests/filecheck.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
namespace {
using ::tsl::testing::IsOkAndHolds;
class AllReduceSplitterTest : public HloTestBase {
public:
absl::StatusOr<std::unique_ptr<HloModule>> PrepareModule(
absl::string_view hlo_module, int64_t num_replicas,
int64_t num_partitions) {
    HloModuleConfig config = GetModuleConfigForTest(
        /*replica_count=*/num_replicas, /*num_partitions=*/num_partitions);
config.set_use_spmd_partitioning(num_partitions > 1);
return ParseAndReturnVerifiedModule(hlo_module, config);
}
size_t AllReduceCount(const HloModule &module) {
return CollectiveCount(module, HloOpcode::kAllReduce);
}
private:
size_t CollectiveCount(const HloModule &module, HloOpcode opcode) {
return absl::c_count_if(
module.entry_computation()->instructions(),
[&opcode](HloInstruction *instr) { return instr->opcode() == opcode; });
}
};
class AllReduceSplitterFilecheckTest : public AllReduceSplitterTest {
public:
absl::Status FileCheck(const std::string &hlo_text,
absl::string_view pattern) {
TF_ASSIGN_OR_RETURN(bool matched, RunFileCheck(hlo_text, pattern));
if (!matched) {
return absl::InternalError("Filecheck failed.");
}
return absl::OkStatus();
}
};
TEST_F(
AllReduceSplitterFilecheckTest,
MatchBasicPatternIfDynamicSliceIsRootAndThereExistsAllReduceWithSameReplicaGroups) {
absl::string_view hlo_string = R"(
HloModule m
sum {
a = bf16[] parameter(0)
b = bf16[] parameter(1)
ROOT _ = bf16[] add(a,b)
}
ENTRY main {
p = bf16[2,4096,4096] parameter(0)
first.ar = bf16[2,4096,4096] all-reduce(p), replica_groups={{0,1,2,3},{4,5,6,7}}, to_apply=sum, use_global_device_ids=true, channel_id=1
zero = bf16[] constant(0)
reduce = bf16[4096] reduce(first.ar, zero), dimensions={0,1}, to_apply=sum
all-reduce = bf16[4096] all-reduce(reduce), replica_groups={{0,1,2,3,4,5,6,7}}, to_apply=sum, use_global_device_ids=true, channel_id=2
table = s32[8]{0} constant({0,1,2,3,0,1,2,3})
pid = u32[] partition-id()
id = s32[1] dynamic-slice(table, pid), dynamic_slice_sizes={1}
reshape = s32[] reshape(id)
slice_size = s32[] constant(1024)
offset = s32[] multiply(reshape, slice_size)
ROOT _ = bf16[1024] dynamic-slice(all-reduce, offset), dynamic_slice_sizes={1024}
}
)";
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<HloModule> module,
      PrepareModule(hlo_string, /*num_replicas=*/1, /*num_partitions=*/8));
EXPECT_THAT(AllReduceSplitter().Run(module.get()), IsOkAndHolds(true));
TF_EXPECT_OK(FileCheck(module->ToString(), R"(
CHECK-DAG: %[[P0:.*]] = bf16[2,4096,4096]{2,1,0} parameter(0)
CHECK: %[[AR0:.*]] = bf16[2,4096,4096]{2,1,0} all-reduce(bf16[2,4096,4096]{2,1,0} %[[P0]])
CHECK-SAME: replica_groups={[[DESIRED_RGS:.*]]}
CHECK-DAG: %[[ZERO:.*]] = bf16[] constant(0)
CHECK-DAG: %[[LOCAL_REDUCE:.*]] = bf16[4096]{0} reduce(bf16[2,4096,4096]{2,1,0} %[[AR0]], bf16[] %[[ZERO]])
CHECK: %[[AR1:.*]] = bf16[4096]{0} all-reduce(bf16[4096]{0} %[[LOCAL_REDUCE]])
CHECK-SAME: replica_groups={[[DESIRED_RGS]]}
CHECK: %[[DS:.*]] = bf16[1024]{0} dynamic-slice(bf16[4096]{0} %[[AR1]], s32[] %[[_:.*]])
CHECK-SAME: dynamic_slice_sizes={1024}
CHECK-NEXT: ROOT %[[AR2:.*]] = bf16[1024]{0} all-reduce(bf16[1024]{0} %[[DS]])
CHECK-SAME: replica_groups={{[{]}}{0,4},{1,5},{2,6},{3,7}{{[}]}}
)"));
}
TEST_F(
AllReduceSplitterTest,
DoesNotMatchMatchBasicPatternIfDynamicSliceIsRootAndThereIsNoAllReduceWithSameReplicaGroups) {
absl::string_view hlo_string = R"(
HloModule m
sum {
a = bf16[] parameter(0)
b = bf16[] parameter(1)
ROOT _ = bf16[] add(a,b)
}
ENTRY main {
p = bf16[2,4096,4096] parameter(0)
zero = bf16[] constant(0)
reduce = bf16[4096] reduce(p, zero), dimensions={0,1}, to_apply=sum
all-reduce = bf16[4096] all-reduce(reduce), replica_groups={{0,1,2,3,4,5,6,7}}, to_apply=sum, use_global_device_ids=true, channel_id=2
table = s32[8]{0} constant({0,1,2,3,0,1,2,3})
pid = u32[] partition-id()
id = s32[1] dynamic-slice(table, pid), dynamic_slice_sizes={1}
reshape = s32[] reshape(id)
slice_size = s32[] constant(1024)
offset = s32[] multiply(reshape, slice_size)
ROOT _ = bf16[1024] dynamic-slice(all-reduce, offset), dynamic_slice_sizes={1024}
}
)";
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<HloModule> module,
      PrepareModule(hlo_string, /*num_replicas=*/1, /*num_partitions=*/8));
EXPECT_THAT(AllReduceSplitter().Run(module.get()), IsOkAndHolds(false));
EXPECT_EQ(AllReduceCount(*module), 1);
}
TEST_F(
AllReduceSplitterFilecheckTest,
MatchBasicPatternIfDynamicSliceIsNotRootAndThereExistsAllReduceWithSameReplicaGroups) {
absl::string_view hlo_string = R"(
HloModule m
sum {
a = bf16[] parameter(0)
b = bf16[] parameter(1)
ROOT _ = bf16[] add(a,b)
}
ENTRY main {
p = bf16[2,4096,4096] parameter(0)
zero = bf16[] constant(0)
first.ar = bf16[2,4096,4096] all-reduce(p), replica_groups={{0,1,2,3},{4,5,6,7}}, to_apply=sum, use_global_device_ids=true, channel_id=1
reduce = bf16[4096] reduce(p, zero), dimensions={0,1}, to_apply=sum
all-reduce = bf16[4096] all-reduce(reduce), replica_groups={{0,1,2,3,4,5,6,7}}, to_apply=sum, use_global_device_ids=true, channel_id=1
table = s32[8]{0} constant({0,1,2,3,0,1,2,3})
pid = u32[] partition-id()
id = s32[1] dynamic-slice(table, pid), dynamic_slice_sizes={1}
reshape = s32[] reshape(id)
slice_size = s32[] constant(1024)
offset = s32[] multiply(reshape, slice_size)
dynamic_slice = bf16[1024] dynamic-slice(all-reduce, offset), dynamic_slice_sizes={1024}
broadcast = bf16[1024,1024] broadcast(dynamic_slice), dimensions={0}
ROOT _ = tuple(broadcast, first.ar)
}
)";
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<HloModule> module,
      PrepareModule(hlo_string, /*num_replicas=*/1, /*num_partitions=*/8));
EXPECT_THAT(AllReduceSplitter().Run(module.get()), IsOkAndHolds(true));
TF_EXPECT_OK(FileCheck(module->ToString(), R"(
CHECK-DAG: %[[P0:.*]] = bf16[2,4096,4096]{2,1,0} parameter(0)
CHECK-DAG: %[[ZERO:.*]] = bf16[] constant(0)
CHECK-DAG: %[[LOCAL_REDUCE:.*]] = bf16[4096]{0} reduce(bf16[2,4096,4096]{2,1,0} %[[P0]], bf16[] %[[ZERO]])
CHECK: %[[AR0:.*]] = bf16[4096]{0} all-reduce(bf16[4096]{0} %[[LOCAL_REDUCE]])
CHECK-SAME: replica_groups={[[DESIRED_RGS:.*]]}
CHECK: %[[DS:.*]] = bf16[1024]{0} dynamic-slice(bf16[4096]{0} %[[AR0]], s32[] %[[_:.*]])
CHECK-SAME: dynamic_slice_sizes={1024}
CHECK-NEXT: %[[AR1:.*]] = bf16[1024]{0} all-reduce(bf16[1024]{0} %[[DS]])
CHECK-SAME: replica_groups={{[{]}}{0,4},{1,5},{2,6},{3,7}{{[}]}}
CHECK: %[[EXISTING_AR:.*]] = bf16[2,4096,4096]{2,1,0} all-reduce(bf16[2,4096,4096]{2,1,0} %[[P0]])
CHECK-SAME: replica_groups={[[DESIRED_RGS]]}
CHECK: ROOT
CHECK-NOT: %[[AR1]]
CHECK-SAME: %[[EXISTING_AR]]
)"));
}
TEST_F(
AllReduceSplitterTest,
DoesNotMatchBasicPatternIfDynamicSliceIsNotRootAndThereIsNoAllReduceWithSameReplicaGroups) {
absl::string_view hlo_string = R"(
HloModule m
sum {
a = bf16[] parameter(0)
b = bf16[] parameter(1)
ROOT _ = bf16[] add(a,b)
}
ENTRY main {
p = bf16[2,4096,4096] parameter(0)
p.1 = bf16[2,4096,4096] parameter(1)
zero = bf16[] constant(0)
reduce = bf16[4096] reduce(p, zero), dimensions={0,1}, to_apply=sum
all-reduce = bf16[4096] all-reduce(reduce), replica_groups={{0,1,2,3,4,5,6,7}}, to_apply=sum, use_global_device_ids=true, channel_id=1
table = s32[8]{0} constant({0,1,2,3,0,1,2,3})
pid = u32[] partition-id()
id = s32[1] dynamic-slice(table, pid), dynamic_slice_sizes={1}
reshape = s32[] reshape(id)
slice_size = s32[] constant(1024)
offset = s32[] multiply(reshape, slice_size)
dynamic_slice = bf16[1024] dynamic-slice(all-reduce, offset), dynamic_slice_sizes={1024}
broadcast = bf16[1024,1024] broadcast(dynamic_slice), dimensions={0}
add = bf16[2,4096,4096] add(p,p.1)
ROOT _ = tuple(broadcast, add)
}
)";
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<HloModule> module,
      PrepareModule(hlo_string, /*num_replicas=*/1, /*num_partitions=*/8));
EXPECT_THAT(AllReduceSplitter().Run(module.get()), IsOkAndHolds(false));
EXPECT_EQ(AllReduceCount(*module), 1);
}
TEST_F(AllReduceSplitterTest,
DoesNotMatchBasicPatternIfDynamicSliceIsFullySharded) {
absl::string_view hlo_string = R"(
HloModule m
sum {
a = bf16[] parameter(0)
b = bf16[] parameter(1)
ROOT _ = bf16[] add(a,b)
}
ENTRY main {
p = bf16[2,4096,4096] parameter(0)
first.ar = bf16[2,4096,4096] all-reduce(p), replica_groups={{0,1,2,3,4,5,6,7}}, to_apply=sum, use_global_device_ids=true, channel_id=1
zero = bf16[] constant(0)
reduce = bf16[4096] reduce(first.ar, zero), dimensions={0,1}, to_apply=sum
all-reduce = bf16[4096] all-reduce(reduce), replica_groups={{0,1,2,3,4,5,6,7}}, to_apply=sum, use_global_device_ids=true, channel_id=2
table = s32[8]{0} constant({0,1,2,3,0,1,2,3})
pid = u32[] partition-id()
id = s32[1] dynamic-slice(table, pid), dynamic_slice_sizes={1}
reshape = s32[] reshape(id)
slice_size = s32[] constant(512)
offset = s32[] multiply(reshape, slice_size)
ROOT _ = bf16[512] dynamic-slice(all-reduce, offset), dynamic_slice_sizes={512}
}
)";
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<HloModule> module,
      PrepareModule(hlo_string, /*num_replicas=*/1, /*num_partitions=*/8));
EXPECT_THAT(AllReduceSplitter().Run(module.get()), IsOkAndHolds(false));
EXPECT_EQ(AllReduceCount(*module), 2);
}
TEST_F(AllReduceSplitterTest,
DoesNotMatchBasicPatternIfItIsNotCompiledWithSPMDPartitioning) {
absl::string_view hlo_string = R"(
HloModule m
sum {
a = bf16[] parameter(0)
b = bf16[] parameter(1)
ROOT _ = bf16[] add(a,b)
}
ENTRY main {
p = bf16[2,4096,4096] parameter(0)
first.ar = bf16[2,4096,4096] all-reduce(p), replica_groups={{0,1,2,3},{4,5,6,7}}, to_apply=sum, use_global_device_ids=true, channel_id=1
zero = bf16[] constant(0)
reduce = bf16[4096] reduce(first.ar, zero), dimensions={0,1}, to_apply=sum
all-reduce = bf16[4096] all-reduce(reduce), replica_groups={{0,1,2,3,4,5,6,7}}, to_apply=sum, use_global_device_ids=true, channel_id=2
table = s32[8]{0} constant({0,1,2,3,0,1,2,3})
pid = u32[] partition-id()
id = s32[1] dynamic-slice(table, pid), dynamic_slice_sizes={1}
reshape = s32[] reshape(id)
slice_size = s32[] constant(1024)
offset = s32[] multiply(reshape, slice_size)
ROOT _ = bf16[1024] dynamic-slice(all-reduce, offset), dynamic_slice_sizes={1024}
}
)";
HloModuleConfig config =
GetModuleConfigForTest(1, 8);
config.set_use_spmd_partitioning(false);
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string, config));
EXPECT_THAT(AllReduceSplitter().Run(module.get()), IsOkAndHolds(false));
  EXPECT_EQ(AllReduceCount(*module), 2);
}
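// Negative case: neither all-reduce sets use_global_device_ids, so the pass
// cannot reason about partition-level groups and bails out.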
TEST_F(AllReduceSplitterTest,
DoesNotMatchBasicPatternIfUseGlobalDeviceIdsIsFalse) {
absl::string_view hlo_string = R"(
HloModule m
sum {
a = bf16[] parameter(0)
b = bf16[] parameter(1)
ROOT _ = bf16[] add(a,b)
}
ENTRY main {
p = bf16[2,4096,4096] parameter(0)
first.ar = bf16[2,4096,4096] all-reduce(p), replica_groups={{0,1,2,3},{4,5,6,7}}, to_apply=sum, channel_id=1
zero = bf16[] constant(0)
reduce = bf16[4096] reduce(first.ar, zero), dimensions={0,1}, to_apply=sum
all-reduce = bf16[4096] all-reduce(reduce), replica_groups={{0,1,2,3,4,5,6,7}}, to_apply=sum, channel_id=2
table = s32[8]{0} constant({0,1,2,3,0,1,2,3})
pid = u32[] partition-id()
id = s32[1] dynamic-slice(table, pid), dynamic_slice_sizes={1}
reshape = s32[] reshape(id)
slice_size = s32[] constant(1024)
offset = s32[] multiply(reshape, slice_size)
ROOT _ = bf16[1024] dynamic-slice(all-reduce, offset), dynamic_slice_sizes={1024}
}
)";
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<HloModule> module,
PrepareModule(hlo_string, 1, 8));
EXPECT_THAT(AllReduceSplitter().Run(module.get()), IsOkAndHolds(false));
EXPECT_EQ(AllReduceCount(*module), 2);
}
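// Negative case: the second all-reduce runs over per-subgroup replica groups
// instead of all partitions, so it is not the cross-partition all-reduce the
// splitter looks for.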
TEST_F(AllReduceSplitterTest,
DoesNotMatchBasicPatternIfIsNotCrossAllPartitionsAllReduce) {
absl::string_view hlo_string = R"(
HloModule m
sum {
a = bf16[] parameter(0)
b = bf16[] parameter(1)
ROOT _ = bf16[] add(a,b)
}
ENTRY main {
p = bf16[2,4096,4096] parameter(0)
first.ar = bf16[2,4096,4096] all-reduce(p), replica_groups={{0,1,2,3},{4,5,6,7}}, to_apply=sum, use_global_device_ids=true, channel_id=1
zero = bf16[] constant(0)
reduce = bf16[4096] reduce(first.ar, zero), dimensions={0,1}, to_apply=sum
all-reduce = bf16[4096] all-reduce(reduce), replica_groups={{0,1,2,3},{4,5,6,7}}, to_apply=sum, use_global_device_ids=true, channel_id=2
table = s32[8]{0} constant({0,1,2,3,0,1,2,3})
pid = u32[] partition-id()
id = s32[1] dynamic-slice(table, pid), dynamic_slice_sizes={1}
reshape = s32[] reshape(id)
slice_size = s32[] constant(1024)
offset = s32[] multiply(reshape, slice_size)
ROOT _ = bf16[1024] dynamic-slice(all-reduce, offset), dynamic_slice_sizes={1024}
}
)";
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<HloModule> module,
PrepareModule(hlo_string, 1, 8));
EXPECT_THAT(AllReduceSplitter().Run(module.get()), IsOkAndHolds(false));
EXPECT_EQ(AllReduceCount(*module), 2);
}
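// Positive case: running AllReduceSplitter followed by ReduceScatterCreator
// rewrites the root dynamic-slice into a reduce-scatter over the subgroup
// replica groups plus a final all-reduce across the orthogonal groups.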
TEST_F(
AllReduceSplitterFilecheckTest,
PipelineMatchesBasicPatternWithDynamicSliceAsRootAndRewritesToReduceScatter) {
absl::string_view hlo_string = R"(
HloModule m
sum {
a = bf16[] parameter(0)
b = bf16[] parameter(1)
ROOT _ = bf16[] add(a,b)
}
ENTRY main {
p = bf16[2,4096,4096] parameter(0)
first.ar = bf16[2,4096,4096] all-reduce(p), replica_groups={{0,1,2,3},{4,5,6,7}}, to_apply=sum, use_global_device_ids=true, channel_id=1
zero = bf16[] constant(0)
reduce = bf16[4096] reduce(first.ar, zero), dimensions={0,1}, to_apply=sum
all-reduce = bf16[4096] all-reduce(reduce), replica_groups={{0,1,2,3,4,5,6,7}}, to_apply=sum, use_global_device_ids=true, channel_id=2
table = s32[8]{0} constant({0,1,2,3,0,1,2,3})
pid = u32[] partition-id()
id = s32[1] dynamic-slice(table, pid), dynamic_slice_sizes={1}
reshape = s32[] reshape(id)
slice_size = s32[] constant(1024)
offset = s32[] multiply(reshape, slice_size)
ROOT _ = bf16[1024] dynamic-slice(all-reduce, offset), dynamic_slice_sizes={1024}
}
)";
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<HloModule> module,
PrepareModule(hlo_string, 1, 8));
HloPassPipeline pipeline("all-reduce-splitter-rewrite");
pipeline.AddPass<AllReduceSplitter>();
pipeline.AddPass<ReduceScatterCreator>();
EXPECT_THAT(pipeline.Run(module.get()), IsOkAndHolds(true));
TF_EXPECT_OK(FileCheck(module->ToString(), R"(
CHECK-DAG: %[[P0:.*]] = bf16[2,4096,4096]{2,1,0} parameter(0)
CHECK: %[[AR0:.*]] = bf16[2,4096,4096]{2,1,0} all-reduce(bf16[2,4096,4096]{2,1,0} %[[P0]])
CHECK-SAME: replica_groups={[[DESIRED_RGS:.*]]}
CHECK-DAG: %[[ZERO:.*]] = bf16[] constant(0)
CHECK-DAG: %[[LOCAL_REDUCE:.*]] = bf16[4096]{0} reduce(bf16[2,4096,4096]{2,1,0} %[[AR0]], bf16[] %[[ZERO]])
CHECK: %[[REDUCE_SCATTER:.*]] = bf16[1024]{0} reduce-scatter(bf16[4096]{0} %[[LOCAL_REDUCE]])
CHECK-SAME: replica_groups={[[DESIRED_RGS]]}
CHECK-NEXT: ROOT %[[AR2:.*]] = bf16[1024]{0} all-reduce(bf16[1024]{0} %[[REDUCE_SCATTER]])
CHECK-SAME: replica_groups={{[{]}}{0,4},{1,5},{2,6},{3,7}{{[}]}}
)"));
}
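// Positive case, non-root variant: the split and rewrite still happen for the
// dynamic-slice feeding the broadcast, while the pre-existing unrelated
// all-reduce stays in place (verified via the CHECK-NOT on the new AR).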
TEST_F(
AllReduceSplitterFilecheckTest,
PipelineMatchesBasicPatternWithDynamicSliceNotAsRootAndRewritesToReduceScatter) {
absl::string_view hlo_string = R"(
HloModule m
sum {
a = bf16[] parameter(0)
b = bf16[] parameter(1)
ROOT _ = bf16[] add(a,b)
}
ENTRY main {
p = bf16[2,4096,4096] parameter(0)
zero = bf16[] constant(0)
first.ar = bf16[2,4096,4096] all-reduce(p), replica_groups={{0,1,2,3},{4,5,6,7}}, to_apply=sum, use_global_device_ids=true, channel_id=1
reduce = bf16[4096] reduce(p, zero), dimensions={0,1}, to_apply=sum
all-reduce = bf16[4096] all-reduce(reduce), replica_groups={{0,1,2,3,4,5,6,7}}, to_apply=sum, use_global_device_ids=true, channel_id=1
table = s32[8]{0} constant({0,1,2,3,0,1,2,3})
pid = u32[] partition-id()
id = s32[1] dynamic-slice(table, pid), dynamic_slice_sizes={1}
reshape = s32[] reshape(id)
slice_size = s32[] constant(1024)
offset = s32[] multiply(reshape, slice_size)
dynamic_slice = bf16[1024] dynamic-slice(all-reduce, offset), dynamic_slice_sizes={1024}
broadcast = bf16[1024,1024] broadcast(dynamic_slice), dimensions={0}
ROOT _ = tuple(broadcast, first.ar)
}
)";
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<HloModule> module,
PrepareModule(hlo_string, 1, 8));
HloPassPipeline pipeline("all-reduce-splitter-rewrite");
pipeline.AddPass<AllReduceSplitter>();
pipeline.AddPass<ReduceScatterCreator>();
EXPECT_THAT(pipeline.Run(module.get()), IsOkAndHolds(true));
TF_EXPECT_OK(FileCheck(module->ToString(), R"(
CHECK-DAG: %[[P0:.*]] = bf16[2,4096,4096]{2,1,0} parameter(0)
CHECK-DAG: %[[ZERO:.*]] = bf16[] constant(0)
CHECK-DAG: %[[LOCAL_REDUCE:.*]] = bf16[4096]{0} reduce(bf16[2,4096,4096]{2,1,0} %[[P0]], bf16[] %[[ZERO]])
CHECK: %[[REDUCE_SCATTER:.*]] = bf16[1024]{0} reduce-scatter(bf16[4096]{0} %[[LOCAL_REDUCE]])
CHECK-NEXT: %[[AR1:.*]] = bf16[1024]{0} all-reduce(bf16[1024]{0} %[[REDUCE_SCATTER]])
CHECK-SAME: replica_groups={{[{]}}{0,4},{1,5},{2,6},{3,7}{{[}]}}
CHECK: %[[EXISTING_AR:.*]] = bf16[2,4096,4096]{2,1,0} all-reduce(bf16[2,4096,4096]{2,1,0} %[[P0]])
CHECK: ROOT
CHECK-NOT: %[[AR1]]
CHECK-SAME: %[[EXISTING_AR]]
)"));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/all_reduce_splitter.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/all_reduce_splitter_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
2e519ae5-e075-4766-9cc9-255206de4de0 | cpp | google/quiche | quiche_text_utils | quiche/common/quiche_text_utils.cc | quiche/common/quiche_text_utils_test.cc | #include "quiche/common/quiche_text_utils.h"
#include <algorithm>
#include <optional>
#include <string>
#include "absl/strings/escaping.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
namespace quiche {
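// Base64-encodes `data` and strips any trailing '=' padding, yielding the
// unpadded encoding used in QUIC contexts.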
void QuicheTextUtils::Base64Encode(const uint8_t* data, size_t data_len,
std::string* output) {
absl::Base64Escape(std::string(reinterpret_cast<const char*>(data), data_len),
output);
size_t len = output->size();
if (len >= 2) {
if ((*output)[len - 1] == '=') {
len--;
if ((*output)[len - 1] == '=') {
len--;
}
output->resize(len);
}
}
}
std::optional<std::string> QuicheTextUtils::Base64Decode(
absl::string_view input) {
std::string output;
if (!absl::Base64Unescape(input, &output)) {
return std::nullopt;
}
return output;
}
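// Renders a classic hex dump: a 0x%04x offset, sixteen bytes per line grouped
// in pairs, and an ASCII column where non-printable bytes appear as '.'.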
std::string QuicheTextUtils::HexDump(absl::string_view binary_data) {
const int kBytesPerLine = 16;
int offset = 0;
const char* p = binary_data.data();
int bytes_remaining = binary_data.size();
std::string output;
while (bytes_remaining > 0) {
const int line_bytes = std::min(bytes_remaining, kBytesPerLine);
absl::StrAppendFormat(&output, "0x%04x: ", offset);
for (int i = 0; i < kBytesPerLine; ++i) {
if (i < line_bytes) {
absl::StrAppendFormat(&output, "%02x",
static_cast<unsigned char>(p[i]));
} else {
absl::StrAppend(&output, " ");
}
if (i % 2) {
absl::StrAppend(&output, " ");
}
}
absl::StrAppend(&output, " ");
for (int i = 0; i < line_bytes; ++i) {
output += absl::ascii_isgraph(p[i]) ? p[i] : '.';
}
bytes_remaining -= line_bytes;
offset += line_bytes;
p += line_bytes;
absl::StrAppend(&output, "\n");
}
return output;
}
} | #include "quiche/common/quiche_text_utils.h"
#include <string>
#include "absl/strings/escaping.h"
#include "quiche/common/platform/api/quiche_test.h"
namespace quiche {
namespace test {
TEST(QuicheTestUtilsTest, StringPieceCaseHash) {
const auto hasher = StringPieceCaseHash();
EXPECT_EQ(hasher("content-length"), hasher("Content-Length"));
EXPECT_EQ(hasher("Content-Length"), hasher("CONTENT-LENGTH"));
EXPECT_EQ(hasher("CoNteNT-lEngTH"), hasher("content-length"));
EXPECT_NE(hasher("content-length"), hasher("content_length"));
EXPECT_NE(hasher("Türkiye"), hasher("TÜRKİYE"));
EXPECT_EQ(
hasher("This is a string that is too long for inlining and requires a "
"heap allocation. Apparently PowerPC has 128 byte cache lines. "
"Since our inline array is sized according to a cache line, we "
"need this string to be longer than 128 bytes."),
hasher("This Is A String That Is Too Long For Inlining And Requires A "
"Heap Allocation. Apparently PowerPC Has 128 Byte Cache Lines. "
"Since Our Inline Array Is Sized According To A Cache Line, We "
"Need This String To Be Longer Than 128 Bytes."));
}
TEST(QuicheTextUtilsTest, ToLower) {
EXPECT_EQ("lower", quiche::QuicheTextUtils::ToLower("LOWER"));
EXPECT_EQ("lower", quiche::QuicheTextUtils::ToLower("lower"));
EXPECT_EQ("lower", quiche::QuicheTextUtils::ToLower("lOwEr"));
EXPECT_EQ("123", quiche::QuicheTextUtils::ToLower("123"));
EXPECT_EQ("", quiche::QuicheTextUtils::ToLower(""));
}
TEST(QuicheTextUtilsTest, RemoveLeadingAndTrailingWhitespace) {
for (auto* const input : {"text", " text", " text", "text ", "text ",
" text ", " text ", "\r\n\ttext", "text\n\r\t"}) {
absl::string_view piece(input);
quiche::QuicheTextUtils::RemoveLeadingAndTrailingWhitespace(&piece);
EXPECT_EQ("text", piece);
}
}
TEST(QuicheTextUtilsTest, HexDump) {
std::string empty;
ASSERT_TRUE(absl::HexStringToBytes("", &empty));
EXPECT_EQ("", quiche::QuicheTextUtils::HexDump(empty));
char packet[] = {
0x48, 0x65, 0x6c, 0x6c, 0x6f, 0x2c, 0x20, 0x51, 0x55, 0x49, 0x43, 0x21,
0x20, 0x54, 0x68, 0x69, 0x73, 0x20, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67,
0x20, 0x73, 0x68, 0x6f, 0x75, 0x6c, 0x64, 0x20, 0x62, 0x65, 0x20, 0x6c,
0x6f, 0x6e, 0x67, 0x20, 0x65, 0x6e, 0x6f, 0x75, 0x67, 0x68, 0x20, 0x74,
0x6f, 0x20, 0x73, 0x70, 0x61, 0x6e, 0x20, 0x6d, 0x75, 0x6c, 0x74, 0x69,
0x70, 0x6c, 0x65, 0x20, 0x6c, 0x69, 0x6e, 0x65, 0x73, 0x20, 0x6f, 0x66,
0x20, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x2e, 0x01, 0x02, 0x03, 0x00,
};
EXPECT_EQ(
quiche::QuicheTextUtils::HexDump(packet),
"0x0000: 4865 6c6c 6f2c 2051 5549 4321 2054 6869 Hello,.QUIC!.Thi\n"
"0x0010: 7320 7374 7269 6e67 2073 686f 756c 6420 s.string.should.\n"
"0x0020: 6265 206c 6f6e 6720 656e 6f75 6768 2074 be.long.enough.t\n"
"0x0030: 6f20 7370 616e 206d 756c 7469 706c 6520 o.span.multiple.\n"
"0x0040: 6c69 6e65 7320 6f66 206f 7574 7075 742e lines.of.output.\n"
"0x0050: 0102 03 ...\n");
std::string printable_and_unprintable_chars;
ASSERT_TRUE(
absl::HexStringToBytes("20217e7f", &printable_and_unprintable_chars));
EXPECT_EQ("0x0000: 2021 7e7f .!~.\n",
quiche::QuicheTextUtils::HexDump(printable_and_unprintable_chars));
std::string large_chars;
ASSERT_TRUE(absl::HexStringToBytes("90aaff", &large_chars));
EXPECT_EQ("0x0000: 90aa ff ...\n",
quiche::QuicheTextUtils::HexDump(large_chars));
}
TEST(QuicheTextUtilsTest, Base64Encode) {
std::string output;
std::string input = "Hello";
quiche::QuicheTextUtils::Base64Encode(
reinterpret_cast<const uint8_t*>(input.data()), input.length(), &output);
EXPECT_EQ("SGVsbG8", output);
input =
"Hello, QUIC! This string should be long enough to span"
"multiple lines of output\n";
quiche::QuicheTextUtils::Base64Encode(
reinterpret_cast<const uint8_t*>(input.data()), input.length(), &output);
EXPECT_EQ(
"SGVsbG8sIFFVSUMhIFRoaXMgc3RyaW5nIHNob3VsZCBiZSBsb25n"
"IGVub3VnaCB0byBzcGFubXVsdGlwbGUgbGluZXMgb2Ygb3V0cHV0Cg",
output);
}
TEST(QuicheTextUtilsTest, ContainsUpperCase) {
EXPECT_FALSE(quiche::QuicheTextUtils::ContainsUpperCase("abc"));
EXPECT_FALSE(quiche::QuicheTextUtils::ContainsUpperCase(""));
EXPECT_FALSE(quiche::QuicheTextUtils::ContainsUpperCase("123"));
EXPECT_TRUE(quiche::QuicheTextUtils::ContainsUpperCase("ABC"));
EXPECT_TRUE(quiche::QuicheTextUtils::ContainsUpperCase("aBc"));
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/common/quiche_text_utils.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/common/quiche_text_utils_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
ac4662b7-8415-4b1d-9b99-97c649832c2d | cpp | tensorflow/tensorflow | mini_benchmark | tensorflow/lite/experimental/acceleration/mini_benchmark/mini_benchmark.cc | tensorflow/lite/experimental/acceleration/mini_benchmark/mini_benchmark_test.cc | #include "tensorflow/lite/experimental/acceleration/mini_benchmark/mini_benchmark.h"
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/status/statusor.h"
#include "absl/synchronization/mutex.h"
#include "flatbuffers/flatbuffers.h"
namespace tflite {
namespace acceleration {
namespace {
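// Fallback returned when no real implementation is registered: every call is
// a no-op and NumRemainingAccelerationTests() reports -1 ("unknown").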
class NoopMiniBenchmark : public MiniBenchmark {
public:
ComputeSettingsT GetBestAcceleration() override { return ComputeSettingsT(); }
void TriggerMiniBenchmark() override {}
void SetEventTimeoutForTesting(int64_t) override {}
std::vector<MiniBenchmarkEventT> MarkAndGetEventsToLog() override {
return {};
}
int NumRemainingAccelerationTests() override { return -1; }
};
}
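// Looks up the implementation registered under the name "Impl"; if none is
// linked into the binary, falls back to the no-op benchmark above.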
std::unique_ptr<MiniBenchmark> CreateMiniBenchmark(
const MinibenchmarkSettings& settings, const std::string& model_namespace,
const std::string& model_id) {
absl::StatusOr<std::unique_ptr<MiniBenchmark>> s_or_mb =
MinibenchmarkImplementationRegistry::CreateByName(
"Impl", settings, model_namespace, model_id);
if (!s_or_mb.ok()) {
return std::unique_ptr<MiniBenchmark>(new NoopMiniBenchmark());
} else {
return std::move(*s_or_mb);
}
}
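// Registry plumbing: implementations self-register by name (typically at
// static-initialization time via the Register helper) and are created on
// demand under a mutex.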
void MinibenchmarkImplementationRegistry::RegisterImpl(
const std::string& name, CreatorFunction creator_function) {
absl::MutexLock lock(&mutex_);
factories_[name] = creator_function;
}
std::unique_ptr<MiniBenchmark> MinibenchmarkImplementationRegistry::CreateImpl(
const std::string& name, const MinibenchmarkSettings& settings,
const std::string& model_namespace, const std::string& model_id) {
absl::MutexLock lock(&mutex_);
auto it = factories_.find(name);
return (it != factories_.end())
? it->second(settings, model_namespace, model_id)
: nullptr;
}
MinibenchmarkImplementationRegistry*
MinibenchmarkImplementationRegistry::GetSingleton() {
static auto* instance = new MinibenchmarkImplementationRegistry();
return instance;
}
std::unique_ptr<MiniBenchmark>
MinibenchmarkImplementationRegistry::CreateByName(
const std::string& name, const MinibenchmarkSettings& settings,
const std::string& model_namespace, const std::string& model_id) {
auto* const instance = MinibenchmarkImplementationRegistry::GetSingleton();
return instance->CreateImpl(name, settings, model_namespace, model_id);
}
MinibenchmarkImplementationRegistry::Register::Register(
const std::string& name, CreatorFunction creator_function) {
auto* const instance = MinibenchmarkImplementationRegistry::GetSingleton();
instance->RegisterImpl(name, creator_function);
}
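// A minimal registration sketch (illustrative only; the factory name and
// signature below are assumptions, not part of the actual implementation):
//
//   std::unique_ptr<MiniBenchmark> CreateMyImpl(
//       const MinibenchmarkSettings& settings,
//       const std::string& model_namespace, const std::string& model_id);
//
//   static MinibenchmarkImplementationRegistry::Register my_impl_registration(
//       "Impl", CreateMyImpl);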
}
} | #include "tensorflow/lite/experimental/acceleration/mini_benchmark/mini_benchmark.h"
#include <unistd.h>
#include <iostream>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include <gtest/gtest.h>
#include "absl/time/clock.h"
#include "absl/time/time.h"
#include "flatbuffers/flatbuffers.h"
#include "tensorflow/lite/acceleration/configuration/configuration.pb.h"
#include "tensorflow/lite/acceleration/configuration/configuration_generated.h"
#include "tensorflow/lite/acceleration/configuration/proto_to_flatbuffer.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/embedded_mobilenet_float_validation_model.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/embedded_nnapi_sl_fake_impl.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/mini_benchmark_test_helper.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/nnapi_sl_fake_impl.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/status_codes.h"
#include "tensorflow/lite/nnapi/sl/include/SupportLibrary.h"
namespace tflite {
namespace acceleration {
namespace {
TEST(BasicMiniBenchmarkTest, EmptySettings) {
proto::MinibenchmarkSettings settings_proto;
flatbuffers::FlatBufferBuilder empty_settings_buffer_;
const MinibenchmarkSettings* empty_settings =
ConvertFromProto(settings_proto, &empty_settings_buffer_);
std::unique_ptr<MiniBenchmark> mb(
CreateMiniBenchmark(*empty_settings, "ns", "id"));
mb->TriggerMiniBenchmark();
const ComputeSettingsT acceleration = mb->GetBestAcceleration();
EXPECT_EQ(nullptr, acceleration.tflite_settings);
EXPECT_TRUE(mb->MarkAndGetEventsToLog().empty());
EXPECT_EQ(-1, mb->NumRemainingAccelerationTests());
}
class MiniBenchmarkTest : public ::testing::Test {
protected:
void SetUp() override {
MiniBenchmarkTestHelper helper;
should_perform_test_ = helper.should_perform_test();
if (should_perform_test_) {
mobilenet_model_path_ = MiniBenchmarkTestHelper::DumpToTempFile(
"mobilenet_float_with_validation.tflite",
g_tflite_acceleration_embedded_mobilenet_float_validation_model,
g_tflite_acceleration_embedded_mobilenet_float_validation_model_len);
}
}
void SetupBenchmark(proto::Delegate delegate, const std::string& model_path,
bool reset_storage = true,
const nnapi::NnApiSupportLibrary* nnapi_sl = nullptr) {
proto::MinibenchmarkSettings settings;
proto::TFLiteSettings* tflite_settings = settings.add_settings_to_test();
tflite_settings->set_delegate(delegate);
if ((delegate == proto::Delegate::NNAPI) && nnapi_sl) {
std::cerr << "Using NNAPI SL\n";
tflite_settings->mutable_nnapi_settings()->set_support_library_handle(
reinterpret_cast<int64_t>(nnapi_sl->getFL5()));
}
proto::ModelFile* file = settings.mutable_model_file();
file->set_filename(model_path);
proto::BenchmarkStoragePaths* paths = settings.mutable_storage_paths();
paths->set_storage_file_path(::testing::TempDir() + "/storage.fb");
if (reset_storage) {
(void)unlink(paths->storage_file_path().c_str());
(void)unlink((paths->storage_file_path() + ".extra.fb").c_str());
}
paths->set_data_directory_path(::testing::TempDir());
if (delegate != proto::Delegate::NONE) {
proto::TFLiteSettings* cpu_tflite_settings =
settings.add_settings_to_test();
cpu_tflite_settings->set_disable_default_delegates(false);
}
settings_ = ConvertFromProto(settings, &settings_buffer_);
mb_ = CreateMiniBenchmark(*settings_, ns_, model_id_);
}
void TriggerBenchmark(proto::Delegate delegate, const std::string& model_path,
bool reset_storage = true,
const nnapi::NnApiSupportLibrary* nnapi_sl = nullptr) {
SetupBenchmark(delegate, model_path, reset_storage, nnapi_sl);
mb_->TriggerMiniBenchmark();
}
void WaitForValidationCompletion(
absl::Duration timeout = absl::Seconds(300)) {
absl::Time deadline = absl::Now() + timeout;
while (absl::Now() < deadline) {
if (mb_->NumRemainingAccelerationTests() == 0) return;
absl::SleepFor(absl::Milliseconds(200));
}
ASSERT_NE(0, mb_->NumRemainingAccelerationTests());
}
const std::string ns_ = "org.tensorflow.lite.mini_benchmark.test";
const std::string model_id_ = "test_minibenchmark_model";
bool should_perform_test_ = true;
std::unique_ptr<MiniBenchmark> mb_;
std::string mobilenet_model_path_;
flatbuffers::FlatBufferBuilder settings_buffer_;
const MinibenchmarkSettings* settings_;
};
TEST_F(MiniBenchmarkTest, OnlyCPUSettings) {
if (!should_perform_test_) return;
SetupBenchmark(proto::Delegate::NONE, mobilenet_model_path_);
EXPECT_EQ(-1, mb_->NumRemainingAccelerationTests());
ComputeSettingsT acceleration = mb_->GetBestAcceleration();
EXPECT_EQ(nullptr, acceleration.tflite_settings);
EXPECT_EQ(1, mb_->NumRemainingAccelerationTests());
mb_->TriggerMiniBenchmark();
WaitForValidationCompletion();
acceleration = mb_->GetBestAcceleration();
EXPECT_EQ(nullptr, acceleration.tflite_settings);
}
TEST_F(MiniBenchmarkTest, RunSuccessfully) {
if (!should_perform_test_) return;
TriggerBenchmark(proto::Delegate::XNNPACK, mobilenet_model_path_);
WaitForValidationCompletion();
mb_->MarkAndGetEventsToLog();
const ComputeSettingsT acceleration1 = mb_->GetBestAcceleration();
const ComputeSettingsT acceleration2 = mb_->GetBestAcceleration();
EXPECT_EQ(acceleration1, acceleration2);
#ifndef ADDRESS_SANITIZER
ASSERT_NE(nullptr, acceleration1.tflite_settings);
EXPECT_EQ(tflite::Delegate_XNNPACK, acceleration1.tflite_settings->delegate);
#endif
EXPECT_EQ(model_id_, acceleration1.model_identifier_for_statistics);
EXPECT_EQ(ns_, acceleration1.model_namespace_for_statistics);
auto events = mb_->MarkAndGetEventsToLog();
EXPECT_EQ(1, events.size());
const auto& decision = events.front().best_acceleration_decision;
EXPECT_NE(nullptr, decision);
#ifndef ADDRESS_SANITIZER
EXPECT_EQ(tflite::Delegate_XNNPACK,
decision->min_latency_event->tflite_settings->delegate);
#endif
}
TEST_F(MiniBenchmarkTest, BestAccelerationEventIsMarkedLoggedAfterRestart) {
if (!should_perform_test_) return;
TriggerBenchmark(proto::Delegate::XNNPACK, mobilenet_model_path_);
WaitForValidationCompletion();
mb_->MarkAndGetEventsToLog();
mb_->GetBestAcceleration();
TriggerBenchmark(proto::Delegate::XNNPACK, mobilenet_model_path_,
false);
EXPECT_EQ(0, mb_->NumRemainingAccelerationTests());
const ComputeSettingsT acceleration = mb_->GetBestAcceleration();
#ifndef ADDRESS_SANITIZER
ASSERT_NE(nullptr, acceleration.tflite_settings);
EXPECT_EQ(tflite::Delegate_XNNPACK, acceleration.tflite_settings->delegate);
#endif
EXPECT_EQ(model_id_, acceleration.model_identifier_for_statistics);
EXPECT_EQ(ns_, acceleration.model_namespace_for_statistics);
auto events = mb_->MarkAndGetEventsToLog();
EXPECT_EQ(1, events.size());
}
TEST_F(MiniBenchmarkTest,
BestAccelerationEventIsNotReMarkedLoggedAfterRestart) {
if (!should_perform_test_) return;
TriggerBenchmark(proto::Delegate::XNNPACK, mobilenet_model_path_);
WaitForValidationCompletion();
mb_->GetBestAcceleration();
mb_->MarkAndGetEventsToLog();
TriggerBenchmark(proto::Delegate::XNNPACK, mobilenet_model_path_,
false);
mb_->GetBestAcceleration();
EXPECT_TRUE(mb_->MarkAndGetEventsToLog().empty());
}
TEST_F(MiniBenchmarkTest, DelegatePluginNotSupported) {
if (!should_perform_test_) return;
TriggerBenchmark(proto::Delegate::HEXAGON, mobilenet_model_path_);
WaitForValidationCompletion();
const ComputeSettingsT acceleration = mb_->GetBestAcceleration();
EXPECT_EQ(nullptr, acceleration.tflite_settings);
EXPECT_EQ(model_id_, acceleration.model_identifier_for_statistics);
EXPECT_EQ(ns_, acceleration.model_namespace_for_statistics);
const auto events = mb_->MarkAndGetEventsToLog();
bool is_found = false;
for (const auto& event : events) {
const auto& t = event.benchmark_event;
if (t == nullptr) continue;
if (t->event_type == tflite::BenchmarkEventType_ERROR &&
t->error->mini_benchmark_error_code ==
tflite::acceleration::kMinibenchmarkDelegateNotSupported) {
is_found = true;
break;
}
}
EXPECT_TRUE(is_found);
}
#ifdef __ANDROID__
TEST_F(MiniBenchmarkTest, UseNnApiSl) {
if (!should_perform_test_) return;
std::string nnapi_sl_path_ = MiniBenchmarkTestHelper::DumpToTempFile(
"libnnapi_fake.so", g_nnapi_sl_fake_impl, g_nnapi_sl_fake_impl_len);
std::unique_ptr<const ::tflite::nnapi::NnApiSupportLibrary> nnapi_sl =
::tflite::nnapi::loadNnApiSupportLibrary(nnapi_sl_path_);
ASSERT_TRUE(nnapi_sl);
TriggerBenchmark(proto::Delegate::NNAPI, mobilenet_model_path_,
true, nnapi_sl.get());
WaitForValidationCompletion();
EXPECT_TRUE(tflite::acceleration::WasNnApiSlInvoked());
}
#endif
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/acceleration/mini_benchmark/mini_benchmark.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/acceleration/mini_benchmark/mini_benchmark_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
f478bb32-0f86-4fc7-a0f3-26f1314617d9 | cpp | tensorflow/tensorflow | scope | tensorflow/cc/framework/scope.cc | tensorflow/cc/framework/scope_test.cc | #include <algorithm>
#include <vector>
#include "tensorflow/cc/framework/scope_internal.h"
#include "tensorflow/core/common_runtime/shape_refiner.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/graph/node_builder.h"
#include "tensorflow/core/lib/strings/str_util.h"
namespace tensorflow {
Scope::Scope(Impl* impl) : impl_(impl) {}
Scope::Scope(const Scope& other) : impl_(new Impl(*other.impl())) {}
Scope::~Scope() {}
Scope& Scope::operator=(const Scope& other) {
impl_.reset(new Impl(*other.impl_));
return *this;
}
namespace {
const char kScopeSeparator[] = "/";
const char kSuffixSeparator[] = "_";
}
Scope::Impl::Impl(Graph* graph, Status* status, NameMap* name_map,
ShapeRefiner* refiner, bool disable_shape_inference)
: graph_(graph),
status_(status),
name_map_(name_map),
refiner_(refiner),
scope_used_(nullptr),
colocation_constraints_(),
disable_shape_inference_(disable_shape_inference) {}
Scope::Impl::Impl(const std::shared_ptr<Graph>& graph,
const std::shared_ptr<Status>& status,
const std::shared_ptr<NameMap>& name_map,
const std::shared_ptr<ShapeRefiner>& refiner)
: graph_(graph),
status_(status),
name_map_(name_map),
refiner_(refiner),
scope_used_(nullptr),
colocation_constraints_(),
disable_shape_inference_(refiner_ == nullptr) {}
Scope Scope::NewRootScope() {
Graph* graph = new Graph(OpRegistry::Global());
ShapeRefiner* refiner =
new ShapeRefiner(graph->versions(), graph->op_registry());
return Scope(new Impl(graph, new Status, new Impl::NameMap, refiner,
false));
}
Scope Scope::DisabledShapeInferenceScope() {
Graph* graph = new Graph(OpRegistry::Global());
ShapeRefiner* refiner =
new ShapeRefiner(graph->versions(), graph->op_registry());
return Scope(new Impl(graph, new Status, new Impl::NameMap, refiner,
true));
}
Scope::Impl::Impl(const Scope& other, Tags::ScopeName, const string& name,
bool copy_names)
: graph_(other.impl()->graph_),
status_(other.impl()->status_),
name_map_(copy_names ? other.impl()->name_map_
: std::shared_ptr<NameMap>(new NameMap)),
refiner_(other.impl()->refiner_),
scope_used_(nullptr),
control_deps_(other.impl()->control_deps_),
name_(name),
op_name_(""),
exit_on_error_(other.impl()->exit_on_error_),
kernel_label_(other.impl()->kernel_label_),
device_(other.impl()->device_),
assigned_device_(other.impl()->assigned_device_),
xla_cluster_(other.impl()->xla_cluster_),
colocation_constraints_(other.impl()->colocation_constraints_),
disable_shape_inference_(other.impl()->disable_shape_inference_) {}
Scope::Impl::Impl(const Scope& other, Tags::OpName, const string& name,
const string& op_name)
: graph_(other.impl()->graph_),
status_(other.impl()->status_),
name_map_(other.impl()->name_map_),
refiner_(other.impl()->refiner_),
scope_used_(other.impl()->scope_used_),
control_deps_(other.impl()->control_deps_),
name_(name),
op_name_(op_name),
exit_on_error_(other.impl()->exit_on_error_),
kernel_label_(other.impl()->kernel_label_),
device_(other.impl()->device_),
assigned_device_(other.impl()->assigned_device_),
xla_cluster_(other.impl()->xla_cluster_),
colocation_constraints_(other.impl()->colocation_constraints_),
disable_shape_inference_(other.impl()->disable_shape_inference_) {}
Scope::Impl::Impl(const Scope& other, Tags::ControlDeps,
std::vector<Operation> control_deps, bool clear_control_deps)
: graph_(other.impl()->graph_),
status_(other.impl()->status_),
name_map_(other.impl()->name_map_),
refiner_(other.impl()->refiner_),
scope_used_(other.impl()->scope_used_),
control_deps_(
clear_control_deps
? std::vector<Operation>()
: (control_deps.insert(control_deps.begin(),
other.impl()->control_deps_.begin(),
other.impl()->control_deps_.end()),
control_deps)),
name_(other.impl()->name_),
op_name_(other.impl()->op_name_),
exit_on_error_(other.impl()->exit_on_error_),
kernel_label_(other.impl()->kernel_label_),
device_(other.impl()->device_),
assigned_device_(other.impl()->assigned_device_),
xla_cluster_(other.impl()->xla_cluster_),
colocation_constraints_(other.impl()->colocation_constraints_),
disable_shape_inference_(other.impl()->disable_shape_inference_) {}
Scope::Impl::Impl(const Scope& other, Tags::Device, const string& device)
: graph_(other.impl()->graph_),
status_(other.impl()->status_),
name_map_(other.impl()->name_map_),
refiner_(other.impl()->refiner_),
scope_used_(other.impl()->scope_used_),
control_deps_(other.impl()->control_deps_),
name_(other.impl()->name_),
op_name_(other.impl()->op_name_),
exit_on_error_(other.impl()->exit_on_error_),
kernel_label_(other.impl()->kernel_label_),
device_(device),
assigned_device_(other.impl()->assigned_device_),
xla_cluster_(other.impl()->xla_cluster_),
colocation_constraints_(other.impl()->colocation_constraints_),
disable_shape_inference_(other.impl()->disable_shape_inference_) {}
Scope::Impl::Impl(const Scope& other, Tags::SingleUseScope,
const string& op_name)
: graph_(other.impl()->graph_),
status_(other.impl()->status_),
name_map_(other.impl()->name_map_),
refiner_(other.impl()->refiner_),
scope_used_(new bool(false)),
control_deps_(other.impl()->control_deps_),
name_(other.impl()->name_),
op_name_(op_name),
exit_on_error_(other.impl()->exit_on_error_),
kernel_label_(other.impl()->kernel_label_),
device_(other.impl()->device_),
assigned_device_(other.impl()->assigned_device_),
xla_cluster_(other.impl()->xla_cluster_),
colocation_constraints_(other.impl()->colocation_constraints_),
disable_shape_inference_(other.impl()->disable_shape_inference_) {}
Scope::Impl::Impl(const Scope& other, Tags::ExitOnError)
: graph_(other.impl()->graph_),
status_(other.impl()->status_),
name_map_(other.impl()->name_map_),
refiner_(other.impl()->refiner_),
scope_used_(other.impl()->scope_used_),
control_deps_(other.impl()->control_deps_),
name_(other.impl()->name_),
op_name_(other.impl()->op_name_),
exit_on_error_(true),
kernel_label_(other.impl()->kernel_label_),
device_(other.impl()->device_),
assigned_device_(other.impl()->assigned_device_),
xla_cluster_(other.impl()->xla_cluster_),
colocation_constraints_(other.impl()->colocation_constraints_),
disable_shape_inference_(other.impl()->disable_shape_inference_) {}
Scope::Impl::Impl(const Scope& other, Tags::KernelLabel,
const string& kernel_label)
: graph_(other.impl()->graph_),
status_(other.impl()->status_),
name_map_(other.impl()->name_map_),
refiner_(other.impl()->refiner_),
scope_used_(other.impl()->scope_used_),
control_deps_(other.impl()->control_deps_),
name_(other.impl()->name_),
op_name_(other.impl()->op_name_),
exit_on_error_(other.impl()->exit_on_error_),
kernel_label_(kernel_label),
device_(other.impl()->device_),
assigned_device_(other.impl()->assigned_device_),
xla_cluster_(other.impl()->xla_cluster_),
colocation_constraints_(other.impl()->colocation_constraints_),
disable_shape_inference_(other.impl()->disable_shape_inference_) {}
Scope::Impl::Impl(const Scope& other, Tags::Colocate,
const Operation& colocate_with_op, bool clear_colocations)
: graph_(other.impl()->graph_),
status_(other.impl()->status_),
name_map_(other.impl()->name_map_),
refiner_(other.impl()->refiner_),
scope_used_(other.impl()->scope_used_),
control_deps_(other.impl()->control_deps_),
name_(other.impl()->name_),
op_name_(other.impl()->op_name_),
exit_on_error_(other.impl()->exit_on_error_),
kernel_label_(other.impl()->kernel_label_),
device_(other.impl()->device_),
assigned_device_(other.impl()->assigned_device_),
xla_cluster_(other.impl()->xla_cluster_),
colocation_constraints_(
clear_colocations
? std::unordered_set<string>()
: other.impl()->GetColocationConstraints(colocate_with_op)),
disable_shape_inference_(other.impl()->disable_shape_inference_) {}
Scope::Impl::Impl(const Scope& other, Tags::AssignedDevice,
const string& assigned_device)
: graph_(other.impl()->graph_),
status_(other.impl()->status_),
name_map_(other.impl()->name_map_),
refiner_(other.impl()->refiner_),
scope_used_(other.impl()->scope_used_),
control_deps_(other.impl()->control_deps_),
name_(other.impl()->name_),
op_name_(other.impl()->op_name_),
exit_on_error_(other.impl()->exit_on_error_),
kernel_label_(other.impl()->kernel_label_),
device_(other.impl()->device_),
assigned_device_(assigned_device),
xla_cluster_(other.impl()->xla_cluster_),
colocation_constraints_(other.impl()->colocation_constraints_),
disable_shape_inference_(other.impl()->disable_shape_inference_) {}
Scope::Impl::Impl(const Scope& other, Tags::XlaCluster,
const string& xla_cluster)
: graph_(other.impl()->graph_),
status_(other.impl()->status_),
name_map_(other.impl()->name_map_),
refiner_(other.impl()->refiner_),
scope_used_(other.impl()->scope_used_),
control_deps_(other.impl()->control_deps_),
name_(other.impl()->name_),
op_name_(other.impl()->op_name_),
exit_on_error_(other.impl()->exit_on_error_),
kernel_label_(other.impl()->kernel_label_),
device_(other.impl()->device_),
assigned_device_(other.impl()->assigned_device_),
xla_cluster_(xla_cluster),
colocation_constraints_(other.impl()->colocation_constraints_),
disable_shape_inference_(other.impl()->disable_shape_inference_) {}
std::unordered_set<string> Scope::Impl::GetColocationConstraints(
const Operation& colocate_with_op) const {
std::unordered_set<string> current_constraints(colocation_constraints_);
const AttrSlice attrs = colocate_with_op.node()->attrs();
std::vector<string> node_constraints;
if (TryGetNodeAttr(attrs, kColocationAttrName, &node_constraints)) {
for (const string& entry : node_constraints) {
StringPiece s(entry);
if (absl::ConsumePrefix(&s, kColocationGroupPrefix)) {
current_constraints.emplace(s);
}
}
} else {
current_constraints.insert(colocate_with_op.node()->name());
}
return current_constraints;
}
bool Scope::ok() const { return impl()->status_->ok(); }
Graph* Scope::graph() const { return impl()->graph_.get(); }
std::shared_ptr<Graph> Scope::graph_as_shared_ptr() const {
return impl()->graph_;
}
Status Scope::status() const { return *impl()->status_; }
const std::vector<Operation>& Scope::control_deps() const {
return impl()->control_deps_;
}
void Scope::UpdateStatus(const Status& s) const {
impl()->status_->Update(s);
if (impl()->exit_on_error_ && !ok()) {
LOG(FATAL) << *impl()->status_;
}
}
Status Scope::ToGraphDef(GraphDef* gdef, bool include_debug_info) const {
if (!ok()) {
return *impl()->status_;
}
graph()->ToGraphDef(gdef, true, include_debug_info);
return absl::OkStatus();
}
Status Scope::ToGraph(Graph* g, GraphConstructorOptions opts) const {
if (ok()) {
GraphDef graph_def;
graph()->ToGraphDef(&graph_def);
UpdateStatus(ConvertGraphDefToGraph(opts, std::move(graph_def), g));
}
return *impl()->status_;
}
void Scope::UpdateBuilder(NodeBuilder* builder) const {
std::vector<Node*> control_inputs;
for (const auto& op : impl()->control_deps_) {
control_inputs.push_back(op.node());
}
builder->ControlInputs(control_inputs);
if (!impl()->kernel_label_.empty()) {
builder->Attr("_kernel", impl()->kernel_label_);
}
if (!impl()->colocation_constraints_.empty()) {
std::vector<string> constraints(impl()->colocation_constraints_.begin(),
impl()->colocation_constraints_.end());
std::sort(constraints.begin(), constraints.end());
std::transform(constraints.begin(), constraints.end(), constraints.begin(),
[](const string& s) {
return strings::StrCat(kColocationGroupPrefix, s);
});
builder->Attr(kColocationAttrName, constraints);
}
if (!impl()->device_.empty()) {
builder->Device(impl()->device_);
}
if (!impl()->assigned_device_.empty()) {
builder->AssignedDevice(impl()->assigned_device_);
}
if (!impl()->xla_cluster_.empty()) {
builder->XlaCluster(impl()->xla_cluster_);
}
}
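// Returns `prefix` if unused, otherwise appends "_<n>" with the smallest n
// that makes the name unique; a single-use scope may hand out its name exactly
// once and reports AlreadyExists after that.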
string Scope::Impl::GetUniqueName(const string& prefix,
bool check_single_use) const {
if (check_single_use && single_use_scope()) {
if (*scope_used_) {
*status_ =
errors::AlreadyExists(prefix, " already exists in the current scope");
return "";
}
*scope_used_ = true;
return prefix;
}
auto entry = name_map_->find(prefix);
if (entry == name_map_->end()) {
name_map_->insert({prefix, 0});
return prefix;
}
string unique_name;
do {
unique_name = strings::StrCat(prefix, kSuffixSeparator, ++entry->second);
} while (name_map_->find(unique_name) != name_map_->end());
name_map_->insert({unique_name, 0});
return unique_name;
}
string Scope::Impl::GetNameForOp(const string& default_name) const {
const string unique_name =
GetUniqueName(default_name, true );
const string sep =
name_.empty() || unique_name.empty() ? "" : kScopeSeparator;
return strings::StrCat(name_, sep, unique_name);
}
string Scope::GetUniqueNameForOp(const string& default_name) const {
if (impl()->single_use_scope()) {
if (impl()->op_name_.empty() || *impl()->scope_used_) {
*impl()->status_ =
errors::InvalidArgument("Cannot get a unique name in this scope");
return "";
}
*impl()->scope_used_ = true;
return impl()->op_name_;
}
return impl()->op_name_.empty() ? impl()->GetNameForOp(default_name)
: impl()->GetNameForOp(impl()->op_name_);
}
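// An empty child name returns a scope that shares the parent's name map; a
// non-empty name is uniquified against the parent and gets a fresh map so the
// child's ops are numbered independently.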
Scope Scope::NewSubScope(const string& child_scope_name) const {
if (child_scope_name.empty()) {
return Scope(new Impl(*this, Impl::Tags::ScopeName(), impl()->name_,
true ));
}
const string unique_name =
impl()->GetUniqueName(child_scope_name, false );
const string sep =
impl()->name_.empty() || unique_name.empty() ? "" : kScopeSeparator;
return Scope(new Impl(*this, Impl::Tags::ScopeName(),
strings::StrCat(impl()->name_, sep, unique_name),
false ));
}
Scope Scope::WithOpNameImpl(const string& op_name) const {
if (impl()->single_use_scope()) {
UpdateStatus(errors::InvalidArgument("Cannot set op name ", op_name,
" on this scope"));
return *this;
}
return Scope(new Impl(*this, Impl::Tags::OpName(), impl()->name_, op_name));
}
Scope Scope::WithControlDependencies(
const absl::Span<const Operation> control_deps) const {
return Scope(
new Impl(*this, Impl::Tags::ControlDeps(),
std::vector<Operation>(control_deps.begin(), control_deps.end()),
false));
}
Scope Scope::WithControlDependencies(const Output& control_dep) const {
return Scope(new Impl(*this, Impl::Tags::ControlDeps(),
std::vector<Operation>(1, control_dep.op()),
false));
}
Scope Scope::WithNoControlDependencies() const {
return Scope(new Impl(*this, Impl::Tags::ControlDeps(),
std::vector<Operation>(),
true));
}
Scope Scope::WithDevice(const string& device) const {
return Scope(new Impl(*this, Impl::Tags::Device(), device));
}
Scope Scope::WithAssignedDevice(const string& assigned_device) const {
return Scope(new Impl(*this, Impl::Tags::AssignedDevice(), assigned_device));
}
Scope Scope::WithXlaCluster(const string& xla_cluster) const {
return Scope(new Impl(*this, Impl::Tags::XlaCluster(), xla_cluster));
}
Scope Scope::ColocateWith(const Operation& op) const {
return Scope(new Impl(*this, Impl::Tags::Colocate(), op,
false));
}
Scope Scope::ClearColocation() const {
return Scope(new Impl(*this, Impl::Tags::Colocate(), Operation(),
true));
}
Scope Scope::ExitOnError() const {
return Scope(new Impl(*this, Impl::Tags::ExitOnError()));
}
Scope Scope::WithKernelLabel(const string& kernel_label) const {
return Scope(new Impl(*this, Impl::Tags::KernelLabel(), kernel_label));
}
CompositeOpScopes Scope::GetCompositeOpScopes(
const string& composite_op_name) const {
if (impl()->op_name_.empty() && composite_op_name.empty()) {
UpdateStatus(errors::InvalidArgument(
"Cannot create composite op scopes with empty name"));
return {*this, *this};
}
if (!impl()->single_use_scope()) {
Scope child = NewSubScope(impl()->op_name_.empty() ? composite_op_name
: impl()->op_name_);
const string child_op_sep = impl()->name_.empty() ? "" : kSuffixSeparator;
const string child_name =
strings::StrCat(impl()->name_, child_op_sep, child.impl()->name_);
return {child,
Scope(new Impl(child, Impl::Tags::SingleUseScope(), child_name))};
} else {
return {Scope(new Impl(*this, Impl::Tags::ScopeName(), impl()->op_name_,
true )),
*this};
}
}
Status Scope::DoShapeInference(Node* node) const {
if (impl_->disable_shape_inference_) return absl::OkStatus();
return impl_->refiner_->AddNode(node);
}
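// Bridges a pre-existing Graph into the C++ API: the name map is seeded with
// every node name and every scope prefix already in the graph so that newly
// added ops cannot collide with them.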
class InternalScope {
public:
static Scope NewScope(Graph* graph, Status* status, ShapeRefiner* refiner) {
Scope::Impl::NameMap* name_map = new Scope::Impl::NameMap;
for (const Node* node : graph->nodes()) {
const string& name = node->name();
(*name_map)[name] = 0;
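      // idx starts at size_t(-1) so that idx + 1 wraps to 0 on the first
      // find() call.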
size_t idx = -1;
while ((idx = name.find(kScopeSeparator, idx + 1)) != string::npos) {
(*name_map)[name.substr(0, idx)] = 0;
}
}
return Scope(new Scope::Impl(
std::shared_ptr<Graph>(graph, [](Graph*) {}),
std::shared_ptr<Status>(status, [](Status*) {}),
std::shared_ptr<Scope::Impl::NameMap>(name_map),
std::shared_ptr<ShapeRefiner>(refiner, [](ShapeRefiner*) {})));
}
};
Scope NewInternalScope(Graph* graph, Status* status, ShapeRefiner* refiner) {
return InternalScope::NewScope(graph, status, refiner);
}
Status CreateOutputWithScope(string op_name,
absl::Span<const ::tensorflow::Input> inputs,
const Scope& scope, Output* output) {
TF_RETURN_IF_ERROR(scope.status());
const auto unique_name = scope.GetUniqueNameForOp(op_name);
auto builder = ::tensorflow::NodeBuilder(unique_name, op_name);
for (const auto& input : inputs) {
TF_RETURN_IF_ERROR(scope.status());
builder = builder.Input(input.node());
}
::tensorflow::Node* ret;
scope.UpdateBuilder(&builder);
TF_RETURN_IF_ERROR(scope.status());
scope.UpdateStatus(builder.Finalize(scope.graph(), &ret));
TF_RETURN_IF_ERROR(scope.status());
*output = Output(ret, 0);
return absl::OkStatus();
}
} | #include "tensorflow/cc/framework/scope.h"
#include "tensorflow/cc/ops/array_ops.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
TEST(ScopeTest, BasicNames) {
Scope root = Scope::NewRootScope();
EXPECT_EQ(root.GetUniqueNameForOp("add"), "add");
EXPECT_EQ(root.GetUniqueNameForOp("add"), "add_1");
EXPECT_EQ(root.GetUniqueNameForOp("add"), "add_2");
EXPECT_EQ(root.GetUniqueNameForOp("mul"), "mul");
}
TEST(ScopeTest, OpAndScopeNameCollision) {
Scope root = Scope::NewRootScope();
EXPECT_EQ(root.GetUniqueNameForOp("foo"), "foo");
EXPECT_EQ(root.GetUniqueNameForOp("foo"), "foo_1");
EXPECT_EQ(root.GetUniqueNameForOp("foo_1"), "foo_1_1");
EXPECT_EQ(root.GetUniqueNameForOp("foo_2"), "foo_2");
EXPECT_EQ(root.GetUniqueNameForOp("foo"), "foo_3");
EXPECT_EQ(root.GetUniqueNameForOp("foo_2"), "foo_2_1");
}
TEST(ScopeTest, HierarchicalNames) {
Scope root = Scope::NewRootScope();
Scope child = root.NewSubScope("child");
EXPECT_EQ(child.GetUniqueNameForOp("add"), "child/add");
EXPECT_EQ(child.GetUniqueNameForOp("add"), "child/add_1");
EXPECT_EQ(child.GetUniqueNameForOp("mul"), "child/mul");
Scope child_1 = root.NewSubScope("child");
EXPECT_EQ(child_1.GetUniqueNameForOp("add"), "child_1/add");
EXPECT_EQ(child_1.GetUniqueNameForOp("add"), "child_1/add_1");
EXPECT_EQ(child_1.GetUniqueNameForOp("mul"), "child_1/mul");
Scope c_c = root.NewSubScope("c").NewSubScope("c");
EXPECT_EQ(c_c.GetUniqueNameForOp("add"), "c/c/add");
Scope c_1 = root.NewSubScope("c");
Scope c_1_c = c_1.NewSubScope("c");
EXPECT_EQ(c_1_c.GetUniqueNameForOp("add"), "c_1/c/add");
Scope c_1_c_1 = c_1.NewSubScope("c");
EXPECT_EQ(c_1_c_1.GetUniqueNameForOp("add"), "c_1/c_1/add");
EXPECT_EQ(root.NewSubScope("").NewSubScope("").GetUniqueNameForOp("d"), "d");
EXPECT_EQ(root.NewSubScope("").GetUniqueNameForOp("d"), "d_1");
EXPECT_EQ(root.GetUniqueNameForOp("d"), "d_2");
}
TEST(ScopeTest, ScopeAndOpNames) {
Scope root = Scope::NewRootScope();
Scope child = root.NewSubScope("child");
EXPECT_EQ(child.GetUniqueNameForOp("add"), "child/add");
EXPECT_EQ(root.GetUniqueNameForOp("child"), "child_1");
EXPECT_EQ(root.NewSubScope("child").GetUniqueNameForOp("p"), "child_2/p");
}
namespace {
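// Helpers that emulate nested composite ops: each opens a composite-op scope,
// names a few child ops in it, and names the final op via the single-use
// `last` scope.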
string LastOp(const Scope& scope) { return scope.GetUniqueNameForOp("Last"); }
std::vector<string> AnotherCompositeOp(const Scope& scope) {
auto cop_scopes = scope.GetCompositeOpScopes("another_cop");
const string c1 = cop_scopes.child.GetUniqueNameForOp("c1");
const string c2 = cop_scopes.child.GetUniqueNameForOp("mul");
return {c1, c2, LastOp(cop_scopes.last)};
}
std::vector<string> LinearOp(const Scope& scope) {
auto cop_scopes = scope.GetCompositeOpScopes("linear");
Scope linear = cop_scopes.child;
const string mul_op_name = linear.GetUniqueNameForOp("mul");
const string bias_add_op_name = linear.GetUniqueNameForOp("bias_add");
auto cop_names = AnotherCompositeOp(cop_scopes.last);
return {mul_op_name, bias_add_op_name, cop_names[0], cop_names[1],
cop_names[2]};
}
}
TEST(ScopeTest, CompositeOp) {
Scope root = Scope::NewRootScope();
const auto names1 = LinearOp(root);
EXPECT_EQ(names1[0], "linear/mul");
EXPECT_EQ(names1[1], "linear/bias_add");
EXPECT_EQ(names1[2], "linear/c1");
EXPECT_EQ(names1[3], "linear/mul_1");
EXPECT_EQ(names1[4], "linear");
EXPECT_EQ(root.GetUniqueNameForOp("linear"), "linear_1");
const auto names2 = LinearOp(root);
EXPECT_EQ(names2[0], "linear_2/mul");
EXPECT_EQ(names2[1], "linear_2/bias_add");
EXPECT_EQ(names2[2], "linear_2/c1");
EXPECT_EQ(names2[3], "linear_2/mul_1");
EXPECT_EQ(names2[4], "linear_2");
const auto names3 = LinearOp(root.WithOpName("c"));
EXPECT_EQ(names3[0], "c/mul");
EXPECT_EQ(names3[1], "c/bias_add");
EXPECT_EQ(names3[2], "c/c1");
EXPECT_EQ(names3[3], "c/mul_1");
EXPECT_EQ(names3[4], "c");
}
TEST(ScopeTest, SingleUseScope) {
Scope root = Scope::NewRootScope();
auto cop_scopes = root.GetCompositeOpScopes("cop");
EXPECT_EQ(cop_scopes.last.GetUniqueNameForOp("foo"), "cop");
cop_scopes.last.GetUniqueNameForOp("foo");
EXPECT_FALSE(cop_scopes.last.ok());
}
TEST(ScopeTest, ControlDeps) {
Scope root = Scope::NewRootScope();
auto c1 = Operation();
auto c2 = Operation();
Scope c = root.WithControlDependencies({c1, c2});
EXPECT_EQ(c.control_deps().size(), 2);
Scope c_c = c.WithControlDependencies({Operation()});
EXPECT_EQ(c_c.control_deps().size(), 3);
}
TEST(ScopeTest, CreateOutput) {
Scope root = Scope::NewRootScope();
Output a = ops::Placeholder(root.WithOpName("a"), DT_FLOAT);
Output add;
ASSERT_TRUE(
CreateOutputWithScope("Add", {a, a}, root.WithOpName("add"), &add).ok());
EXPECT_EQ(add.node()->name(), "add");
EXPECT_EQ(add.node()->type_string(), "Add");
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/cc/framework/scope.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/cc/framework/scope_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
cb2f98ee-8a1a-4cd8-a8f6-ce452937a3e9 | cpp | google/quiche | load_balancer_config | quiche/quic/load_balancer/load_balancer_config.cc | quiche/quic/load_balancer/load_balancer_config_test.cc | #include "quiche/quic/load_balancer/load_balancer_config.h"
#include <cstdint>
#include <cstring>
#include <optional>
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "openssl/aes.h"
#include "quiche/quic/core/quic_connection_id.h"
#include "quiche/quic/load_balancer/load_balancer_server_id.h"
#include "quiche/quic/platform/api/quic_bug_tracker.h"
namespace quic {
namespace {
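// Shared validation for both factory functions: the config id must be below
// kNumLoadBalancerConfigs, the server id must be non-empty, the nonce length
// must lie in [kLoadBalancerMinNonceLen, kLoadBalancerMaxNonceLen], and the
// whole plaintext must still fit in a length-prefixed connection ID.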
bool CommonValidation(const uint8_t config_id, const uint8_t server_id_len,
const uint8_t nonce_len) {
if (config_id >= kNumLoadBalancerConfigs || server_id_len == 0 ||
nonce_len < kLoadBalancerMinNonceLen ||
nonce_len > kLoadBalancerMaxNonceLen ||
server_id_len >
(kQuicMaxConnectionIdWithLengthPrefixLength - nonce_len - 1)) {
QUIC_BUG(quic_bug_433862549_01)
<< "Invalid LoadBalancerConfig "
<< "Config ID " << static_cast<int>(config_id) << " Server ID Length "
<< static_cast<int>(server_id_len) << " Nonce Length "
<< static_cast<int>(nonce_len);
return false;
}
return true;
}
std::optional<AES_KEY> BuildKey(absl::string_view key, bool encrypt) {
if (key.empty()) {
return std::optional<AES_KEY>();
}
AES_KEY raw_key;
if (encrypt) {
if (AES_set_encrypt_key(reinterpret_cast<const uint8_t *>(key.data()),
key.size() * 8, &raw_key) < 0) {
return std::optional<AES_KEY>();
}
} else if (AES_set_decrypt_key(reinterpret_cast<const uint8_t *>(key.data()),
key.size() * 8, &raw_key) < 0) {
return std::optional<AES_KEY>();
}
return raw_key;
}
}
std::optional<LoadBalancerConfig> LoadBalancerConfig::Create(
const uint8_t config_id, const uint8_t server_id_len,
const uint8_t nonce_len, const absl::string_view key) {
if (key.size() != kLoadBalancerKeyLen) {
QUIC_BUG(quic_bug_433862549_02)
<< "Invalid LoadBalancerConfig Key Length: " << key.size();
return std::optional<LoadBalancerConfig>();
}
if (!CommonValidation(config_id, server_id_len, nonce_len)) {
return std::optional<LoadBalancerConfig>();
}
auto new_config =
LoadBalancerConfig(config_id, server_id_len, nonce_len, key);
if (!new_config.IsEncrypted()) {
QUIC_BUG(quic_bug_433862549_03) << "Something went wrong in initializing "
"the load balancing key.";
return std::optional<LoadBalancerConfig>();
}
return new_config;
}
std::optional<LoadBalancerConfig> LoadBalancerConfig::CreateUnencrypted(
const uint8_t config_id, const uint8_t server_id_len,
const uint8_t nonce_len) {
return CommonValidation(config_id, server_id_len, nonce_len)
? LoadBalancerConfig(config_id, server_id_len, nonce_len, "")
: std::optional<LoadBalancerConfig>();
}
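// Decrypts a four-pass (Feistel-style) connection ID in place: the passes run
// in reverse order, and decryption may stop one pass early once the full
// server id can be recovered from the left half alone.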
bool LoadBalancerConfig::FourPassDecrypt(
absl::Span<const uint8_t> ciphertext,
LoadBalancerServerId& server_id) const {
if (ciphertext.size() < plaintext_len()) {
QUIC_BUG(quic_bug_599862571_02)
<< "Called FourPassDecrypt with a short Connection ID";
return false;
}
if (!key_.has_value()) {
return false;
}
uint8_t* left = server_id.mutable_data();
uint8_t right[kLoadBalancerBlockSize];
uint8_t half_len;
bool is_length_odd =
InitializeFourPass(ciphertext.data(), left, right, &half_len);
uint8_t end_index = (server_id_len_ > nonce_len_) ? 1 : 2;
for (uint8_t index = kNumLoadBalancerCryptoPasses; index >= end_index;
--index) {
EncryptionPass(index, half_len, is_length_odd, left, right);
}
if (server_id_len_ < half_len ||
(server_id_len_ == half_len && !is_length_odd)) {
return true;
}
if (is_length_odd) {
right[0] |= *(left + --half_len);
}
memcpy(server_id.mutable_data() + half_len, right, server_id_len_ - half_len);
return true;
}
QuicConnectionId LoadBalancerConfig::FourPassEncrypt(
absl::Span<uint8_t> plaintext) const {
if (plaintext.size() < total_len()) {
QUIC_BUG(quic_bug_599862571_03)
<< "Called FourPassEncrypt with a short Connection ID";
return QuicConnectionId();
}
if (!key_.has_value()) {
return QuicConnectionId();
}
uint8_t left[kLoadBalancerBlockSize];
uint8_t right[kLoadBalancerBlockSize];
uint8_t half_len;
bool is_length_odd =
InitializeFourPass(plaintext.data() + 1, left, right, &half_len);
for (uint8_t index = 1; index <= kNumLoadBalancerCryptoPasses; ++index) {
EncryptionPass(index, half_len, is_length_odd, left, right);
}
if (is_length_odd) {
right[0] |= left[--half_len];
}
memcpy(plaintext.data() + 1, left, half_len);
memcpy(plaintext.data() + half_len + 1, right, plaintext_len() - half_len);
return QuicConnectionId(reinterpret_cast<char*>(plaintext.data()),
total_len());
}
bool LoadBalancerConfig::BlockEncrypt(
const uint8_t plaintext[kLoadBalancerBlockSize],
uint8_t ciphertext[kLoadBalancerBlockSize]) const {
if (!key_.has_value()) {
return false;
}
AES_encrypt(plaintext, ciphertext, &*key_);
return true;
}
bool LoadBalancerConfig::BlockDecrypt(
const uint8_t ciphertext[kLoadBalancerBlockSize],
uint8_t plaintext[kLoadBalancerBlockSize]) const {
if (!block_decrypt_key_.has_value()) {
return false;
}
AES_decrypt(ciphertext, plaintext, &*block_decrypt_key_);
return true;
}
LoadBalancerConfig::LoadBalancerConfig(const uint8_t config_id,
const uint8_t server_id_len,
const uint8_t nonce_len,
const absl::string_view key)
: config_id_(config_id),
server_id_len_(server_id_len),
nonce_len_(nonce_len),
key_(BuildKey(key, true)),
block_decrypt_key_((server_id_len + nonce_len == kLoadBalancerBlockSize)
? BuildKey(key, false)
: std::optional<AES_KEY>()) {}
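// Splits the plaintext into left/right halves padded into AES blocks; for odd
// plaintext lengths the two halves share a middle byte, so the spare nibbles
// are masked off and `is_length_odd` tells later passes to re-merge them.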
bool LoadBalancerConfig::InitializeFourPass(const uint8_t* input, uint8_t* left,
uint8_t* right,
uint8_t* half_len) const {
*half_len = plaintext_len() / 2;
bool is_length_odd;
if (plaintext_len() % 2 == 1) {
++(*half_len);
is_length_odd = true;
} else {
is_length_odd = false;
}
memset(left, 0, kLoadBalancerBlockSize);
memset(right, 0, kLoadBalancerBlockSize);
left[kLoadBalancerBlockSize - 2] = plaintext_len();
right[kLoadBalancerBlockSize - 2] = plaintext_len();
memcpy(left, input, *half_len);
memcpy(right, input + (plaintext_len() / 2), *half_len);
if (is_length_odd) {
left[*half_len - 1] &= 0xf0;
right[0] &= 0x0f;
}
return is_length_odd;
}
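// One Feistel-style pass: encrypt one half (with the plaintext length and the
// pass index folded into the final block bytes) and XOR the result into the
// other half, re-masking the shared nibble when the length is odd.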
void LoadBalancerConfig::EncryptionPass(uint8_t index, uint8_t half_len,
bool is_length_odd, uint8_t* left,
uint8_t* right) const {
uint8_t ciphertext[kLoadBalancerBlockSize];
if (index % 2 == 0) {
right[kLoadBalancerBlockSize - 1] = index;
AES_encrypt(right, ciphertext, &*key_);
for (int i = 0; i < half_len; ++i) {
left[i] ^= ciphertext[i];
}
if (is_length_odd) {
left[half_len - 1] &= 0xf0;
}
return;
}
left[kLoadBalancerBlockSize - 1] = index;
AES_encrypt(left, ciphertext, &*key_);
for (int i = 0; i < half_len; ++i) {
right[i] ^= ciphertext[i];
}
if (is_length_odd) {
right[0] &= 0x0f;
}
}
} | #include "quiche/quic/load_balancer/load_balancer_config.h"
#include <array>
#include <cstdint>
#include <cstring>
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "quiche/quic/core/quic_connection_id.h"
#include "quiche/quic/load_balancer/load_balancer_server_id.h"
#include "quiche/quic/platform/api/quic_expect_bug.h"
#include "quiche/quic/platform/api/quic_test.h"
namespace quic {
namespace test {
class LoadBalancerConfigPeer {
public:
static bool InitializeFourPass(LoadBalancerConfig& config,
const uint8_t* input, uint8_t* left,
uint8_t* right, uint8_t* half_len) {
return config.InitializeFourPass(input, left, right, half_len);
}
static void EncryptionPass(LoadBalancerConfig& config, uint8_t index,
uint8_t half_len, bool is_length_odd,
uint8_t* left, uint8_t* right) {
config.EncryptionPass(index, half_len, is_length_odd, left, right);
}
};
namespace {
constexpr char raw_key[] = {
0xfd, 0xf7, 0x26, 0xa9, 0x89, 0x3e, 0xc0, 0x5c,
0x06, 0x32, 0xd3, 0x95, 0x66, 0x80, 0xba, 0xf0,
};
class LoadBalancerConfigTest : public QuicTest {};
TEST_F(LoadBalancerConfigTest, InvalidParams) {
EXPECT_QUIC_BUG(
EXPECT_FALSE(LoadBalancerConfig::CreateUnencrypted(7, 4, 10).has_value()),
"Invalid LoadBalancerConfig Config ID 7 Server ID Length 4 "
"Nonce Length 10");
EXPECT_QUIC_BUG(EXPECT_FALSE(LoadBalancerConfig::Create(
2, 0, 10, absl::string_view(raw_key, 16))
.has_value()),
"Invalid LoadBalancerConfig Config ID 2 Server ID Length 0 "
"Nonce Length 10");
EXPECT_QUIC_BUG(
EXPECT_FALSE(LoadBalancerConfig::CreateUnencrypted(6, 16, 4).has_value()),
"Invalid LoadBalancerConfig Config ID 6 Server ID Length 16 "
"Nonce Length 4");
EXPECT_QUIC_BUG(
EXPECT_FALSE(LoadBalancerConfig::CreateUnencrypted(6, 4, 2).has_value()),
"Invalid LoadBalancerConfig Config ID 6 Server ID Length 4 "
"Nonce Length 2");
EXPECT_QUIC_BUG(
EXPECT_FALSE(LoadBalancerConfig::CreateUnencrypted(6, 1, 17).has_value()),
"Invalid LoadBalancerConfig Config ID 6 Server ID Length 1 "
"Nonce Length 17");
EXPECT_QUIC_BUG(
EXPECT_FALSE(LoadBalancerConfig::Create(2, 3, 4, "").has_value()),
"Invalid LoadBalancerConfig Key Length: 0");
EXPECT_QUIC_BUG(EXPECT_FALSE(LoadBalancerConfig::Create(
2, 3, 4, absl::string_view(raw_key, 10))
.has_value()),
"Invalid LoadBalancerConfig Key Length: 10");
EXPECT_QUIC_BUG(EXPECT_FALSE(LoadBalancerConfig::Create(
0, 3, 4, absl::string_view(raw_key, 17))
.has_value()),
"Invalid LoadBalancerConfig Key Length: 17");
}
TEST_F(LoadBalancerConfigTest, ValidParams) {
auto config = LoadBalancerConfig::CreateUnencrypted(0, 3, 4);
EXPECT_TRUE(config.has_value());
EXPECT_EQ(config->config_id(), 0);
EXPECT_EQ(config->server_id_len(), 3);
EXPECT_EQ(config->nonce_len(), 4);
EXPECT_EQ(config->plaintext_len(), 7);
EXPECT_EQ(config->total_len(), 8);
EXPECT_FALSE(config->IsEncrypted());
auto config2 =
LoadBalancerConfig::Create(2, 6, 7, absl::string_view(raw_key, 16));
EXPECT_TRUE(config.has_value());
EXPECT_EQ(config2->config_id(), 2);
EXPECT_EQ(config2->server_id_len(), 6);
EXPECT_EQ(config2->nonce_len(), 7);
EXPECT_EQ(config2->plaintext_len(), 13);
EXPECT_EQ(config2->total_len(), 14);
EXPECT_TRUE(config2->IsEncrypted());
}
TEST_F(LoadBalancerConfigTest, TestEncryptionPassExample) {
auto config =
LoadBalancerConfig::Create(0, 3, 4, absl::string_view(raw_key, 16));
EXPECT_TRUE(config.has_value());
EXPECT_TRUE(config->IsEncrypted());
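  // Run the four crypto passes one at a time, checking each intermediate
  // left/right state against precomputed vectors.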
uint8_t input[] = {0x07, 0x31, 0x44, 0x1a, 0x9c, 0x69, 0xc2, 0x75};
std::array<uint8_t, kLoadBalancerBlockSize> left, right;
uint8_t half_len;
bool is_length_odd = LoadBalancerConfigPeer::InitializeFourPass(
*config, input + 1, left.data(), right.data(), &half_len);
EXPECT_TRUE(is_length_odd);
std::array<std::array<uint8_t, kLoadBalancerBlockSize>,
kNumLoadBalancerCryptoPasses + 1>
expected_left = {{
{0x31, 0x44, 0x1a, 0x90, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x07, 0x00},
{0x31, 0x44, 0x1a, 0x90, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x07, 0x01},
{0xd4, 0xa0, 0x48, 0x50, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x07, 0x01},
{0xd4, 0xa0, 0x48, 0x50, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x07, 0x03},
{0x67, 0x94, 0x7d, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x07, 0x03},
}};
std::array<std::array<uint8_t, kLoadBalancerBlockSize>,
kNumLoadBalancerCryptoPasses + 1>
expected_right = {{
{0x0c, 0x69, 0xc2, 0x75, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x07, 0x00},
{0x0e, 0x3c, 0x1f, 0xf9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x07, 0x00},
{0x0e, 0x3c, 0x1f, 0xf9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x07, 0x02},
{0x09, 0xbe, 0x05, 0x4a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x07, 0x02},
{0x09, 0xbe, 0x05, 0x4a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x07, 0x04},
}};
EXPECT_EQ(left, expected_left[0]);
EXPECT_EQ(right, expected_right[0]);
for (int i = 1; i <= kNumLoadBalancerCryptoPasses; ++i) {
LoadBalancerConfigPeer::EncryptionPass(*config, i, half_len, is_length_odd,
left.data(), right.data());
EXPECT_EQ(left, expected_left[i]);
EXPECT_EQ(right, expected_right[i]);
}
}
TEST_F(LoadBalancerConfigTest, EncryptionPassesAreReversible) {
auto config =
LoadBalancerConfig::Create(0, 3, 4, absl::string_view(raw_key, 16));
std::array<uint8_t, kLoadBalancerBlockSize> start_left = {
0x31, 0x44, 0x1a, 0x90, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, 0x00,
};
std::array<uint8_t, kLoadBalancerBlockSize> start_right = {
0x0c, 0x69, 0xc2, 0x75, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, 0x00,
};
std::array<uint8_t, kLoadBalancerBlockSize> left = start_left,
right = start_right;
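  // Each pass modifies only one half, so replaying the sequence 1, 2, 2, 1
  // cancels out and should restore the starting halves.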
LoadBalancerConfigPeer::EncryptionPass(*config, 1, 4, true, left.data(),
right.data());
LoadBalancerConfigPeer::EncryptionPass(*config, 2, 4, true, left.data(),
right.data());
LoadBalancerConfigPeer::EncryptionPass(*config, 2, 4, true, left.data(),
right.data());
LoadBalancerConfigPeer::EncryptionPass(*config, 1, 4, true, left.data(),
right.data());
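  // Byte 15 carries the pass index, so clear it before comparing payloads.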
left[15] = 0;
right[15] = 0;
EXPECT_EQ(left, start_left);
EXPECT_EQ(right, start_right);
}
TEST_F(LoadBalancerConfigTest, InvalidBlockEncryption) {
uint8_t pt[kLoadBalancerBlockSize + 1], ct[kLoadBalancerBlockSize];
auto pt_config = LoadBalancerConfig::CreateUnencrypted(0, 8, 8);
ASSERT_TRUE(pt_config.has_value());
EXPECT_FALSE(pt_config->BlockEncrypt(pt, ct));
EXPECT_FALSE(pt_config->BlockDecrypt(ct, pt));
EXPECT_TRUE(pt_config->FourPassEncrypt(absl::Span<uint8_t>(pt, sizeof(pt)))
.IsEmpty());
LoadBalancerServerId answer;
EXPECT_FALSE(pt_config->FourPassDecrypt(
absl::Span<uint8_t>(pt, sizeof(pt) - 1), answer));
auto small_cid_config =
LoadBalancerConfig::Create(0, 3, 4, absl::string_view(raw_key, 16));
ASSERT_TRUE(small_cid_config.has_value());
EXPECT_TRUE(small_cid_config->BlockEncrypt(pt, ct));
EXPECT_FALSE(small_cid_config->BlockDecrypt(ct, pt));
auto block_config =
LoadBalancerConfig::Create(0, 8, 8, absl::string_view(raw_key, 16));
ASSERT_TRUE(block_config.has_value());
EXPECT_TRUE(block_config->BlockEncrypt(pt, ct));
EXPECT_TRUE(block_config->BlockDecrypt(ct, pt));
}
TEST_F(LoadBalancerConfigTest, BlockEncryptionExample) {
const uint8_t ptext[] = {0xed, 0x79, 0x3a, 0x51, 0xd4, 0x9b, 0x8f, 0x5f,
0xee, 0x08, 0x0d, 0xbf, 0x48, 0xc0, 0xd1, 0xe5};
const uint8_t ctext[] = {0x4d, 0xd2, 0xd0, 0x5a, 0x7b, 0x0d, 0xe9, 0xb2,
0xb9, 0x90, 0x7a, 0xfb, 0x5e, 0xcf, 0x8c, 0xc3};
const char key[] = {0x8f, 0x95, 0xf0, 0x92, 0x45, 0x76, 0x5f, 0x80,
0x25, 0x69, 0x34, 0xe5, 0x0c, 0x66, 0x20, 0x7f};
uint8_t result[sizeof(ptext)];
auto config = LoadBalancerConfig::Create(0, 8, 8, absl::string_view(key, 16));
EXPECT_TRUE(config->BlockEncrypt(ptext, result));
EXPECT_EQ(memcmp(result, ctext, sizeof(ctext)), 0);
EXPECT_TRUE(config->BlockDecrypt(ctext, result));
EXPECT_EQ(memcmp(result, ptext, sizeof(ptext)), 0);
}
TEST_F(LoadBalancerConfigTest, ConfigIsCopyable) {
const uint8_t ptext[] = {0xed, 0x79, 0x3a, 0x51, 0xd4, 0x9b, 0x8f, 0x5f,
0xee, 0x08, 0x0d, 0xbf, 0x48, 0xc0, 0xd1, 0xe5};
const uint8_t ctext[] = {0x4d, 0xd2, 0xd0, 0x5a, 0x7b, 0x0d, 0xe9, 0xb2,
0xb9, 0x90, 0x7a, 0xfb, 0x5e, 0xcf, 0x8c, 0xc3};
const char key[] = {0x8f, 0x95, 0xf0, 0x92, 0x45, 0x76, 0x5f, 0x80,
0x25, 0x69, 0x34, 0xe5, 0x0c, 0x66, 0x20, 0x7f};
uint8_t result[sizeof(ptext)];
auto config = LoadBalancerConfig::Create(0, 8, 8, absl::string_view(key, 16));
auto config2 = config;
EXPECT_TRUE(config->BlockEncrypt(ptext, result));
EXPECT_EQ(memcmp(result, ctext, sizeof(ctext)), 0);
EXPECT_TRUE(config2->BlockEncrypt(ptext, result));
EXPECT_EQ(memcmp(result, ctext, sizeof(ctext)), 0);
}
TEST_F(LoadBalancerConfigTest, FourPassInputTooShort) {
auto config =
LoadBalancerConfig::Create(0, 3, 4, absl::string_view(raw_key, 16));
uint8_t input[] = {0x0d, 0xd2, 0xd0, 0x5a, 0x7b, 0x0d, 0xe9};
LoadBalancerServerId answer;
bool decrypt_result;
EXPECT_QUIC_BUG(
decrypt_result = config->FourPassDecrypt(
absl::Span<const uint8_t>(input, sizeof(input) - 1), answer),
"Called FourPassDecrypt with a short Connection ID");
EXPECT_FALSE(decrypt_result);
QuicConnectionId encrypt_result;
EXPECT_QUIC_BUG(encrypt_result = config->FourPassEncrypt(
absl::Span<uint8_t>(input, sizeof(input))),
"Called FourPassEncrypt with a short Connection ID");
EXPECT_TRUE(encrypt_result.IsEmpty());
}
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/load_balancer/load_balancer_config.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/load_balancer/load_balancer_config_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
93519a1a-1102-481f-8561-353d6512ea43 | cpp | tensorflow/tensorflow | random_standard_normal_custom | tensorflow/lite/kernels/random_standard_normal_custom.cc | tensorflow/lite/kernels/random_standard_normal_custom_test.cc | #include <cmath>
#include <algorithm>
#include <random>
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace ops {
namespace custom {
namespace random_standard_normal {
struct OpData {
std::default_random_engine rng;
};
namespace {
constexpr int kShapeTensor = 0;
constexpr int kOutputTensor = 0;
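// Fills the output buffer with output_size draws from a standard normal
// distribution, using the op instance's random engine.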
template <typename T>
TfLiteStatus RandomStandardNormalSample(std::default_random_engine& rng,
T* output, size_t output_size) {
std::normal_distribution<T> dist;
std::generate(output, output + output_size, [&]() { return dist(rng); });
return kTfLiteOk;
}
}
void* Init(TfLiteContext* context, const char* buffer, size_t length) {
return new OpData();
}
void Free(TfLiteContext* context, void* buffer) {
delete reinterpret_cast<OpData*>(buffer);
}
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE(context, NumInputs(node) == 1);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
const TfLiteTensor* shape = GetInput(context, node, kShapeTensor);
TF_LITE_ENSURE_EQ(context, shape->type, kTfLiteInt32);
TF_LITE_ENSURE_EQ(context, NumDimensions(shape), 1);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
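  // A non-constant shape tensor means the output shape is only known at
  // Eval time, so mark the output dynamic and resize it there.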
if (!IsConstantOrPersistentTensor(shape)) {
SetTensorToDynamic(output);
return kTfLiteOk;
}
TfLiteIntArray* output_shape;
TF_LITE_ENSURE_OK(context,
GetOutputShapeFromInput(context, shape, &output_shape));
return context->ResizeTensor(context, output, output_shape);
}
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
OpData* params = reinterpret_cast<OpData*>(node->user_data);
TF_LITE_ENSURE(context, params != nullptr);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
if (IsDynamicTensor(output)) {
const TfLiteTensor* shape = GetInput(context, node, kShapeTensor);
TfLiteIntArray* output_shape;
TF_LITE_ENSURE_OK(context,
GetOutputShapeFromInput(context, shape, &output_shape));
TF_LITE_ENSURE_OK(context,
context->ResizeTensor(context, output, output_shape));
}
const size_t output_size = NumElements(output);
switch (output->type) {
case kTfLiteFloat32:
RandomStandardNormalSample<float>(
params->rng, GetTensorData<float>(output), output_size);
break;
case kTfLiteFloat64:
RandomStandardNormalSample<double>(
params->rng, GetTensorData<double>(output), output_size);
break;
default:
TF_LITE_KERNEL_LOG(
context, "Unsupported output datatype for RandomStandardNormal: %s",
TfLiteTypeGetName(output->type));
return kTfLiteError;
}
return kTfLiteOk;
}
}
TfLiteRegistration* Register_RANDOM_STANDARD_NORMAL() {
static TfLiteRegistration r = {
random_standard_normal::Init, random_standard_normal::Free,
random_standard_normal::Prepare, random_standard_normal::Eval};
return &r;
}
}
}
} | #include <initializer_list>
#include <cmath>
#include <limits>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/kernels/custom_ops_register.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
#include "tensorflow/lite/testing/util.h"
namespace tflite {
namespace {
template <typename T>
TensorType GetTTEnum();
template <>
TensorType GetTTEnum<float>() {
return TensorType_FLOAT32;
}
template <>
TensorType GetTTEnum<double>() {
return TensorType_FLOAT64;
}
class RandomStandardNormalOpModel : public SingleOpModel {
public:
RandomStandardNormalOpModel(const std::initializer_list<int>& input,
TensorData output, bool dynamic_input) {
if (dynamic_input) {
input_ = AddInput({TensorType_INT32, {3}});
} else {
input_ = AddConstInput(TensorType_INT32, input,
{static_cast<int>(input.size())});
}
output_ = AddOutput(output);
SetCustomOp("RandomStandardNormal", {},
ops::custom::Register_RANDOM_STANDARD_NORMAL);
BuildInterpreter({GetShape(input_)});
if (dynamic_input) {
PopulateTensor<int32_t>(input_, std::vector<int32_t>(input));
}
}
int input_;
int output_;
int input() { return input_; }
int output() { return output_; }
template <typename T>
std::vector<T> GetOutput() {
return ExtractVector<T>(output_);
}
};
}
}
template <typename InputType>
struct RandomStandardNormalTest : public ::testing::Test {
using Type = InputType;
};
using TestTypes = ::testing::Types<float, double>;
TYPED_TEST_SUITE(RandomStandardNormalTest, TestTypes);
TYPED_TEST(RandomStandardNormalTest, TestOutput) {
using Type = typename TestFixture::Type;
for (const auto dynamic : {false, true}) {
tflite::RandomStandardNormalOpModel m(
{1000, 50, 5}, {tflite::GetTTEnum<Type>(), {}}, dynamic);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
auto output = m.GetOutput<Type>();
EXPECT_EQ(output.size(), 1000 * 50 * 5);
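    // Over 250,000 samples, require the empirical mean within 0.05 of 0 and
    // the empirical variance within 0.05 of 1.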
double sum = 0;
for (auto r : output) {
sum += r;
}
double avg = sum / output.size();
ASSERT_LT(std::abs(avg), 0.05);
double sum_squared = 0;
for (auto r : output) {
sum_squared += std::pow(r - avg, 2);
}
double var = sum_squared / output.size();
EXPECT_LT(std::abs(1 - var), 0.05);
}
}
TYPED_TEST(RandomStandardNormalTest, TestOutputDistributionRange) {
using Type = typename TestFixture::Type;
tflite::RandomStandardNormalOpModel m({1000, 50, 5},
{tflite::GetTTEnum<Type>(), {}}, false);
const std::vector<Type> output_data(1000 * 50 * 5,
std::numeric_limits<Type>::infinity());
m.PopulateTensor(m.output(), output_data);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
auto output = m.GetOutput<Type>();
EXPECT_EQ(output.size(), 1000 * 50 * 5);
double sum = 0;
for (auto r : output) {
sum += r;
}
double avg = sum / output.size();
ASSERT_LT(std::abs(avg), 0.05);
double sum_squared = 0;
for (auto r : output) {
sum_squared += std::pow(r - avg, 2);
}
double var = sum_squared / output.size();
EXPECT_LT(std::abs(1 - var), 0.05);
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/random_standard_normal_custom.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/random_standard_normal_custom_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
aefc07d7-fcd2-4f1e-9cd4-616943a4cc7b | cpp | google/tsl | setround | tsl/platform/setround.cc | tsl/platform/setround_test.cc | #include "tsl/platform/setround.h"
#include "tsl/platform/logging.h"
namespace tsl {
namespace port {
#if defined(TF_BROKEN_CFENV)
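// On toolchains with a broken <cfenv> the rounding mode cannot be changed:
// only FE_TONEAREST is accepted, and restoring the mode is a no-op.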
ScopedSetRound::ScopedSetRound(const int mode) : original_mode_(mode) {
DCHECK_EQ(mode, FE_TONEAREST);
}
ScopedSetRound::~ScopedSetRound() {}
#else
ScopedSetRound::ScopedSetRound(const int mode) {
original_mode_ = std::fegetround();
if (original_mode_ < 0) {
original_mode_ = FE_TONEAREST;
}
std::fesetround(mode);
}
ScopedSetRound::~ScopedSetRound() { std::fesetround(original_mode_); }
#endif
}
} | #include "tsl/platform/setround.h"
#include <cmath>
#include "tsl/platform/test.h"
#if !defined(__clang__) || !defined(__OPTIMIZE__)
namespace tsl {
namespace {
void CheckDownward() {
EXPECT_EQ(12, std::nearbyint(12.0));
EXPECT_EQ(12, std::nearbyint(12.1));
EXPECT_EQ(-13, std::nearbyint(-12.1));
EXPECT_EQ(12, std::nearbyint(12.5));
EXPECT_EQ(12, std::nearbyint(12.9));
EXPECT_EQ(-13, std::nearbyint(-12.9));
EXPECT_EQ(13, std::nearbyint(13.0));
}
void CheckToNearest() {
EXPECT_EQ(12, std::nearbyint(12.0));
EXPECT_EQ(12, std::nearbyint(12.1));
EXPECT_EQ(-12, std::nearbyint(-12.1));
EXPECT_EQ(12, std::nearbyint(12.5));
EXPECT_EQ(13, std::nearbyint(12.9));
EXPECT_EQ(-13, std::nearbyint(-12.9));
EXPECT_EQ(13, std::nearbyint(13.0));
}
void CheckTowardZero() {
EXPECT_EQ(12, std::nearbyint(12.0));
EXPECT_EQ(12, std::nearbyint(12.1));
EXPECT_EQ(-12, std::nearbyint(-12.1));
EXPECT_EQ(12, std::nearbyint(12.5));
EXPECT_EQ(12, std::nearbyint(12.9));
EXPECT_EQ(-12, std::nearbyint(-12.9));
EXPECT_EQ(13, std::nearbyint(13.0));
}
void CheckUpward() {
EXPECT_EQ(12, std::nearbyint(12.0));
EXPECT_EQ(13, std::nearbyint(12.1));
EXPECT_EQ(-12, std::nearbyint(-12.1));
EXPECT_EQ(13, std::nearbyint(12.5));
EXPECT_EQ(13, std::nearbyint(12.9));
EXPECT_EQ(-12, std::nearbyint(-12.9));
EXPECT_EQ(13, std::nearbyint(13.0));
}
TEST(SetScopedSetRound, Downward) {
port::ScopedSetRound round(FE_DOWNWARD);
CheckDownward();
}
TEST(SetScopedSetRound, ToNearest) {
port::ScopedSetRound round(FE_TONEAREST);
CheckToNearest();
}
TEST(SetScopedSetRound, TowardZero) {
port::ScopedSetRound round(FE_TOWARDZERO);
CheckTowardZero();
}
TEST(SetScopedSetRound, Upward) {
port::ScopedSetRound round(FE_UPWARD);
CheckUpward();
}
TEST(SetScopedSetRound, Scoped) {
std::fesetround(FE_TONEAREST);
CheckToNearest();
{
port::ScopedSetRound round(FE_UPWARD);
CheckUpward();
}
CheckToNearest();
}
}
}
#endif | https://github.com/google/tsl/blob/6d708fdcdd4f40537b7fa273371215a6fa3d4423/tsl/platform/setround.cc | https://github.com/google/tsl/blob/6d708fdcdd4f40537b7fa273371215a6fa3d4423/tsl/platform/setround_test.cc | 6d708fdcdd4f40537b7fa273371215a6fa3d4423 |
df71feca-e632-41c2-9e0a-72b8077361c8 | cpp | tensorflow/tensorflow | map_defun_op | tensorflow/core/kernels/data/map_defun_op.cc | tensorflow/core/kernels/data/map_defun_op_test.cc | #include "tensorflow/core/kernels/data/map_defun_op.h"
#include "tensorflow/core/data/dataset_utils.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_util.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/threadpool.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/util/batch_util.h"
#include "tensorflow/core/util/reffed_status_callback.h"
namespace tensorflow {
namespace data {
constexpr const char* const MapDefunOp::kArguments;
constexpr const char* const MapDefunOp::kCapturedInputs;
constexpr const char* const MapDefunOp::kTarguments;
constexpr const char* const MapDefunOp::kTcaptured;
constexpr const char* const MapDefunOp::kOutputTypes;
constexpr const char* const MapDefunOp::kOutputShapes;
constexpr const char* const MapDefunOp::kFunc;
constexpr const char* const MapDefunOp::kMaxIntraOpParallelism;
constexpr char kOutput[] = "output";
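// State shared by every per-element function invocation of one MapDefun
// call; allocated in ComputeAsync and deleted by the final status callback.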
struct MapDefunOp::ComputeOptions {
OpInputList args;
const std::vector<TensorShape> arg_shapes;
OpInputList captured_inputs;
const int64_t batch_size;
std::function<void(std::function<void()>)> runner;
std::vector<PartialTensorShape> output_shapes TF_GUARDED_BY(mu);
OpOutputList output TF_GUARDED_BY(mu);
mutex mu;
ComputeOptions(OpKernelContext* ctx, OpInputList args,
OpInputList captured_inputs,
std::vector<TensorShape> arg_shapes, int64_t batch_size,
const std::vector<PartialTensorShape>& output_shapes_attr,
int max_parallelism)
: args(args),
arg_shapes(std::move(arg_shapes)),
captured_inputs(captured_inputs),
batch_size(batch_size),
output_shapes(output_shapes_attr) {
if (max_parallelism >= 1) {
runner = RunnerWithMaxParallelism(*ctx->runner(), max_parallelism);
}
}
};
class MapDefunOp::MapFunctionCallFrame : public CallFrameInterface {
public:
MapFunctionCallFrame(ComputeOptions* compute_opts, OpKernel* kernel,
size_t iter)
: compute_opts_(compute_opts),
kernel_(kernel),
iter_(iter),
sliced_args_(compute_opts_->args.size()) {}
~MapFunctionCallFrame() override = default;
size_t num_args() const override {
return compute_opts_->args.size() + compute_opts_->captured_inputs.size();
}
size_t num_retvals() const override {
return static_cast<size_t>(kernel_->num_outputs());
}
Status GetArg(int index, const Tensor** val) override {
if (index < 0 || index >= compute_opts_->args.size() +
compute_opts_->captured_inputs.size()) {
return errors::InvalidArgument("Mismatch in number of function inputs.");
}
if (index >= compute_opts_->args.size()) {
*val =
&compute_opts_->captured_inputs[index - compute_opts_->args.size()];
return absl::OkStatus();
}
mutex_lock l(mu_);
bool result = sliced_args_[index].CopyFrom(
compute_opts_->args[index].Slice(iter_, iter_ + 1),
compute_opts_->arg_shapes.at(index));
if (!result) {
return errors::Internal("GetArg failed.");
} else if (!sliced_args_[index].IsAligned()) {
sliced_args_[index] = tensor::DeepCopy(sliced_args_[index]);
}
*val = &sliced_args_[index];
return absl::OkStatus();
}
Status SetRetval(int index, const Tensor& val) override {
if (index < 0 || index >= kernel_->num_outputs()) {
return errors::InvalidArgument("Mismatch in number of function outputs.");
}
if (val.dtype() != kernel_->output_type(index)) {
return errors::InvalidArgument(
"Mismatch in function return type and expected output type for "
"output: ",
index);
}
Tensor* out;
{
mutex_lock l(compute_opts_->mu);
if (!compute_opts_->output_shapes.at(index).IsCompatibleWith(
val.shape())) {
return errors::InvalidArgument(
"Mismatch in function retval shape, ", val.shape(),
", and expected output shape, ",
compute_opts_->output_shapes.at(index).DebugString(), ".");
}
if (!compute_opts_->output_shapes.at(index).IsFullyDefined()) {
compute_opts_->output_shapes.at(index) = val.shape();
TensorShape actual_shape = val.shape();
actual_shape.InsertDim(0, compute_opts_->batch_size);
TF_RETURN_IF_ERROR(
compute_opts_->output.allocate(index, actual_shape, &out));
} else {
out = (compute_opts_->output)[index];
}
}
return batch_util::CopyElementToSlice(val, out, iter_);
}
private:
ComputeOptions* const compute_opts_;
const OpKernel* kernel_;
const size_t iter_;
mutex mu_;
std::vector<Tensor> sliced_args_ TF_GUARDED_BY(mu_);
};
MapDefunOp::MapDefunOp(OpKernelConstruction* ctx) : AsyncOpKernel(ctx) {
auto func_lib = ctx->function_library();
OP_REQUIRES(ctx, func_lib != nullptr,
errors::Internal("No function library."));
const NameAttrList* func;
OP_REQUIRES_OK(ctx, ctx->GetAttr(kFunc, &func));
OP_REQUIRES_OK(ctx,
func_lib->Instantiate(func->name(), AttrSlice(&func->attr()),
&func_handle_));
OP_REQUIRES_OK(ctx, ctx->GetAttr(kOutputShapes, &output_shapes_));
OP_REQUIRES_OK(
ctx, ctx->GetAttr(kMaxIntraOpParallelism, &max_intra_op_parallelism_));
OP_REQUIRES(ctx, ctx->num_inputs() >= 0,
errors::InvalidArgument("Must have at least one input."));
OP_REQUIRES(ctx, ctx->num_outputs() >= 0,
errors::InvalidArgument("Must have at least one output."));
OP_REQUIRES(ctx, ctx->num_outputs() == output_shapes_.size(),
errors::InvalidArgument(
"Length of output_shapes and output_types must match."));
}
void MapDefunOp::ComputeAsync(OpKernelContext* ctx, DoneCallback done) {
ComputeOptions* compute_opts = nullptr;
OP_REQUIRES_OK_ASYNC(ctx, SetupArgs(ctx, &compute_opts), done);
Status s = SetupOutputs(ctx, compute_opts);
if (!s.ok()) delete compute_opts;
OP_REQUIRES_OK_ASYNC(ctx, s, done);
FunctionLibraryRuntime::Options opts;
SetRunOptions(ctx, &opts, compute_opts, false);
StatusCallback callback = std::bind(
[](OpKernelContext* ctx, ComputeOptions* compute_opts, DoneCallback& done,
const Status& status) {
delete compute_opts;
ctx->SetStatus(status);
done();
},
ctx, compute_opts, std::move(done), std::placeholders::_1);
auto* refcounted = new ReffedStatusCallback(std::move(callback));
CancellationManager* parent_mgr = ctx->cancellation_manager();
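  // Launch one function invocation per batch element, each with its own
  // cancellation manager chained to the parent.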
for (size_t i = 0; i < static_cast<size_t>(compute_opts->batch_size); ++i) {
CancellationManager* c_mgr = new CancellationManager(parent_mgr);
opts.cancellation_manager = c_mgr;
auto* call_frame = new MapFunctionCallFrame(compute_opts, this, i);
refcounted->Ref();
ctx->function_library()->Run(
opts, func_handle_, call_frame,
[call_frame, refcounted, c_mgr](const Status& func_status) {
delete c_mgr;
delete call_frame;
refcounted->UpdateStatus(func_status);
refcounted->Unref();
});
}
refcounted->Unref();
}
void MapDefunOp::SetRunOptions(OpKernelContext* ctx,
FunctionLibraryRuntime::Options* opts,
ComputeOptions* compute_opts,
bool always_collect_stats) {
opts->rendezvous = ctx->rendezvous();
if (always_collect_stats) {
opts->stats_collector = ctx->stats_collector();
}
if (max_intra_op_parallelism_ >= 1) {
opts->runner = &compute_opts->runner;
} else {
opts->runner = ctx->runner();
}
opts->run_all_kernels_inline = ctx->run_all_kernels_inline();
}
Status MapDefunOp::SetupArgs(OpKernelContext* ctx,
ComputeOptions** compute_opts) {
OpInputList arguments;
TF_RETURN_IF_ERROR(ctx->input_list(kArguments, &arguments));
OpInputList captured_inputs;
TF_RETURN_IF_ERROR(ctx->input_list(kCapturedInputs, &captured_inputs));
int64_t batch_size = arguments[0].dims() > 0 ? arguments[0].dim_size(0) : -1;
for (size_t i = 0; i < arguments.size(); ++i) {
if (arguments[i].dims() == 0) {
return errors::InvalidArgument(
"All inputs must have rank at least 1. Input ", i,
" has a rank of 0.");
} else if (arguments[i].dim_size(0) != batch_size) {
return errors::InvalidArgument(
"All inputs must have the same dimension 0. Input ", i,
" has leading dimension ", ctx->input(i).dim_size(0),
", while all previous inputs have leading dimension ", batch_size);
}
}
std::vector<TensorShape> arg_shapes;
arg_shapes.reserve(arguments.size());
for (size_t i = 0; i < arguments.size(); ++i) {
arg_shapes.push_back(arguments[i].shape());
arg_shapes.at(i).RemoveDim(0);
}
*compute_opts =
new ComputeOptions(ctx, arguments, captured_inputs, std::move(arg_shapes),
batch_size, output_shapes_, max_intra_op_parallelism_);
return absl::OkStatus();
}
Status MapDefunOp::SetupOutputs(OpKernelContext* ctx, ComputeOptions* opts) {
mutex_lock l(opts->mu);
TF_RETURN_IF_ERROR(ctx->output_list(kOutput, &opts->output));
for (size_t i = 0; i < output_types().size(); ++i) {
if (output_shapes_.at(i).IsFullyDefined()) {
Tensor* out = nullptr;
TensorShape output_shape;
output_shapes_.at(i).AsTensorShape(&output_shape);
output_shape.InsertDim(0, opts->batch_size);
TF_RETURN_IF_ERROR(opts->output.allocate(i, output_shape, &out));
}
}
return absl::OkStatus();
}
namespace {
REGISTER_KERNEL_BUILDER(Name("MapDefun").Device(DEVICE_CPU), MapDefunOp);
}
}
} | #include "tensorflow/core/kernels/data/map_defun_op.h"
#include "tensorflow/core/data/dataset_test_base.h"
namespace tensorflow {
namespace data {
namespace {
constexpr char kNodeName[] = "map_defun";
constexpr char kOpName[] = "MapDefun";
class MapDefunOpParams : public DatasetParams {
public:
MapDefunOpParams(std::vector<Tensor> arguments,
std::vector<Tensor> captured_inputs,
DataTypeVector type_arguments, DataTypeVector type_captured,
DataTypeVector output_dtypes,
std::vector<PartialTensorShape> output_shapes,
FunctionDefHelper::AttrValueWrapper func,
std::vector<FunctionDef> func_lib,
int max_intra_op_parallelism, string node_name)
: DatasetParams(std::move(output_dtypes), std::move(output_shapes),
std::move(node_name)),
arguments_(std::move(arguments)),
captured_inputs_(std::move(captured_inputs)),
type_arguments_(std::move(type_arguments)),
type_captured_(std::move(type_captured)),
func_(std::move(func)),
func_lib_(std::move(func_lib)),
max_intra_op_parallelism_(max_intra_op_parallelism) {}
std::vector<Tensor> GetInputTensors() const override {
std::vector<Tensor> input_tensors = arguments_;
input_tensors.insert(input_tensors.end(), captured_inputs_.begin(),
captured_inputs_.end());
return input_tensors;
}
Status GetInputNames(std::vector<string>* input_names) const override {
input_names->clear();
input_names->reserve(arguments_.size() + captured_inputs_.size());
for (int i = 0; i < arguments_.size(); ++i) {
input_names->emplace_back(
strings::StrCat(MapDefunOp::kArguments, "_", i));
}
for (int i = 0; i < captured_inputs_.size(); ++i) {
input_names->emplace_back(
strings::StrCat(MapDefunOp::kCapturedInputs, "_", i));
}
return absl::OkStatus();
}
Status GetAttributes(AttributeVector* attr_vector) const override {
*attr_vector = {
{MapDefunOp::kTarguments, type_arguments_},
{MapDefunOp::kTcaptured, type_captured_},
{MapDefunOp::kOutputShapes, output_shapes_},
{MapDefunOp::kOutputTypes, output_dtypes_},
{MapDefunOp::kFunc, func_},
{MapDefunOp::kMaxIntraOpParallelism, max_intra_op_parallelism_}};
return absl::OkStatus();
}
std::vector<FunctionDef> func_lib() const override { return func_lib_; }
string dataset_type() const override { return "MapDef"; }
private:
std::vector<Tensor> arguments_;
std::vector<Tensor> captured_inputs_;
DataTypeVector type_arguments_;
DataTypeVector type_captured_;
FunctionDefHelper::AttrValueWrapper func_;
std::vector<FunctionDef> func_lib_;
int max_intra_op_parallelism_;
};
class MapDefunOpTest : public DatasetOpsTestBase {
protected:
Status CreateMapDefunOpKernel(const MapDefunOpParams& params,
std::unique_ptr<OpKernel>* map_defun_kernel) {
    std::vector<string> input_names;
    TF_RETURN_IF_ERROR(params.GetInputNames(&input_names));
    AttributeVector attributes;
    TF_RETURN_IF_ERROR(params.GetAttributes(&attributes));
    NodeDef node_def =
        test::function::NDef(kNodeName, kOpName, input_names, attributes);
TF_RETURN_IF_ERROR(CreateOpKernel(node_def, map_defun_kernel));
return absl::OkStatus();
}
Status CreateMapDefunContext(
OpKernel* const op_kernel,
absl::InlinedVector<TensorValue, 4UL>* const inputs,
std::unique_ptr<OpKernelContext>* context) {
TF_RETURN_IF_ERROR(CheckOpKernelInput(*op_kernel, *inputs));
TF_RETURN_IF_ERROR(CreateOpKernelContext(op_kernel, inputs, context));
return absl::OkStatus();
}
};
struct TestCase {
MapDefunOpParams map_defun_op_params;
std::vector<Tensor> expected_outputs;
};
TestCase TestCase1() {
return {
MapDefunOpParams(
{CreateTensor<int64_t>(TensorShape({3, 2}),
{0, 1, 2, 3, 4, 5})},
{},
{DT_INT64},
{},
{DT_INT64},
{PartialTensorShape({2})},
{FunctionDefHelper::FunctionRef("XTimesTwo", {{"T", DT_INT64}})},
{test::function::XTimesTwo()},
2, kNodeName),
{CreateTensor<int64_t>(TensorShape({3, 2}), {0, 2, 4, 6, 8, 10})}};
}
TestCase TestCase2() {
return {
MapDefunOpParams(
{CreateTensor<int64_t>(TensorShape({3, 2}),
{0, 1, 2, 3, 4, 5}),
CreateTensor<int64_t>(TensorShape({3, 2}),
{0, 10, 20, 30, 40, 50})},
{},
{DT_INT64, DT_INT64},
{},
{DT_INT64},
{PartialTensorShape({2})},
{FunctionDefHelper::FunctionRef("XAddY", {{"T", DT_INT64}})},
{test::function::XAddY()},
2, kNodeName),
{CreateTensor<int64_t>(TensorShape({3, 2}), {0, 11, 22, 33, 44, 55})}};
}
TestCase TestCase3() {
return {
MapDefunOpParams(
{CreateTensor<int64_t>(TensorShape({3, 2}),
{0, 1, 2, 3, 4, 5})},
{CreateTensor<int64_t>(TensorShape({2}), {10, 100})},
{DT_INT64},
{DT_INT64},
{DT_INT64},
{PartialTensorShape({2})},
{FunctionDefHelper::FunctionRef("XAddY", {{"T", DT_INT64}})},
{test::function::XAddY()},
2, kNodeName),
{CreateTensor<int64_t>(TensorShape({3, 2}),
{10, 101, 12, 103, 14, 105})}};
}
TestCase InvalidOutputTypes() {
return {
MapDefunOpParams(
{CreateTensor<int64_t>(TensorShape({3, 2}),
{0, 1, 2, 3, 4, 5})},
{CreateTensor<int64_t>(TensorShape({2}), {10, 100})},
{DT_INT64},
{DT_INT64},
{DT_FLOAT},
{PartialTensorShape({2})},
{FunctionDefHelper::FunctionRef("XAddY", {{"T", DT_INT64}})},
{test::function::XAddY()},
2, kNodeName),
{}};
}
TestCase InvalidOutputShapes() {
return {
MapDefunOpParams(
{CreateTensor<int64_t>(TensorShape({3, 2}),
{0, 1, 2, 3, 4, 5})},
{CreateTensor<int64_t>(TensorShape({2}), {10, 100})},
{DT_INT64},
{DT_INT64},
{DT_INT64},
{PartialTensorShape({2, 2})},
{FunctionDefHelper::FunctionRef("XAddY", {{"T", DT_INT64}})},
{test::function::XAddY()},
2, kNodeName),
{}};
}
TestCase InvalidInputs() {
return {
MapDefunOpParams(
{CreateTensor<int64_t>(TensorShape({3, 2}),
{0, 1, 2, 3, 4, 5}),
CreateTensor<int64_t>(TensorShape({2, 2}),
{0, 1, 2, 3})},
{},
{DT_INT64, DT_INT64},
{},
{DT_INT64},
{PartialTensorShape({2})},
{FunctionDefHelper::FunctionRef("XAddY", {{"T", DT_INT64}})},
{test::function::XAddY()},
2, kNodeName),
{}};
}
class ParameterizedMapDefunOpTest
: public MapDefunOpTest,
public ::testing::WithParamInterface<TestCase> {};
TEST_P(ParameterizedMapDefunOpTest, NormalTests) {
TestCase test_case = GetParam();
TF_ASSERT_OK(InitializeRuntime(test_case.map_defun_op_params));
auto input_tensors = test_case.map_defun_op_params.GetInputTensors();
absl::InlinedVector<TensorValue, 4UL> input_values;
for (auto& input : input_tensors) {
input_values.push_back(TensorValue(&input));
}
std::unique_ptr<OpKernel> map_defun_kernel;
TF_ASSERT_OK(
CreateMapDefunOpKernel(test_case.map_defun_op_params, &map_defun_kernel));
std::unique_ptr<OpKernelContext> context;
TF_ASSERT_OK(
CreateMapDefunContext(map_defun_kernel.get(), &input_values, &context));
TF_ASSERT_OK(RunOpKernel(map_defun_kernel.get(), context.get()));
EXPECT_EQ(context->num_outputs(), test_case.expected_outputs.size());
for (int i = 0; i < context->num_outputs(); ++i) {
TF_EXPECT_OK(ExpectEqual(*context->mutable_output(i),
test_case.expected_outputs[i]));
}
}
INSTANTIATE_TEST_SUITE_P(MapDefunOpTest, ParameterizedMapDefunOpTest,
::testing::ValuesIn(std::vector<TestCase>(
{TestCase1(), TestCase2(), TestCase3()})));
TEST_F(MapDefunOpTest, InvalidArguments) {
std::vector<TestCase> test_cases = {InvalidOutputTypes(),
InvalidOutputShapes(), InvalidInputs()};
for (auto& test_case : test_cases) {
TF_ASSERT_OK(InitializeRuntime(test_case.map_defun_op_params));
auto input_tensors = test_case.map_defun_op_params.GetInputTensors();
absl::InlinedVector<TensorValue, 4UL> input_values;
for (auto& input : input_tensors) {
input_values.push_back(TensorValue(&input));
}
std::unique_ptr<OpKernel> map_defun_kernel;
TF_ASSERT_OK(CreateMapDefunOpKernel(test_case.map_defun_op_params,
&map_defun_kernel));
std::unique_ptr<OpKernelContext> context;
TF_ASSERT_OK(
CreateMapDefunContext(map_defun_kernel.get(), &input_values, &context));
EXPECT_EQ(RunOpKernel(map_defun_kernel.get(), context.get()).code(),
absl::StatusCode::kInvalidArgument);
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/data/map_defun_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/data/map_defun_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
3700447d-c783-4304-81d0-298c510d58bf | cpp | google/quiche | quic_packets | quiche/quic/core/quic_packets.cc | quiche/quic/core/quic_packets_test.cc | #include "quiche/quic/core/quic_packets.h"
#include <algorithm>
#include <memory>
#include <ostream>
#include <string>
#include <utility>
#include "absl/strings/escaping.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "quiche/quic/core/quic_connection_id.h"
#include "quiche/quic/core/quic_types.h"
#include "quiche/quic/core/quic_utils.h"
#include "quiche/quic/core/quic_versions.h"
#include "quiche/quic/platform/api/quic_flags.h"
namespace quic {
QuicConnectionId GetServerConnectionIdAsRecipient(
const QuicPacketHeader& header, Perspective perspective) {
if (perspective == Perspective::IS_SERVER) {
return header.destination_connection_id;
}
return header.source_connection_id;
}
QuicConnectionId GetClientConnectionIdAsRecipient(
const QuicPacketHeader& header, Perspective perspective) {
if (perspective == Perspective::IS_CLIENT) {
return header.destination_connection_id;
}
return header.source_connection_id;
}
QuicConnectionId GetServerConnectionIdAsSender(const QuicPacketHeader& header,
Perspective perspective) {
if (perspective == Perspective::IS_CLIENT) {
return header.destination_connection_id;
}
return header.source_connection_id;
}
QuicConnectionIdIncluded GetServerConnectionIdIncludedAsSender(
const QuicPacketHeader& header, Perspective perspective) {
if (perspective == Perspective::IS_CLIENT) {
return header.destination_connection_id_included;
}
return header.source_connection_id_included;
}
QuicConnectionId GetClientConnectionIdAsSender(const QuicPacketHeader& header,
Perspective perspective) {
if (perspective == Perspective::IS_CLIENT) {
return header.source_connection_id;
}
return header.destination_connection_id;
}
QuicConnectionIdIncluded GetClientConnectionIdIncludedAsSender(
const QuicPacketHeader& header, Perspective perspective) {
if (perspective == Perspective::IS_CLIENT) {
return header.source_connection_id_included;
}
return header.destination_connection_id_included;
}
uint8_t GetIncludedConnectionIdLength(
QuicConnectionId connection_id,
QuicConnectionIdIncluded connection_id_included) {
QUICHE_DCHECK(connection_id_included == CONNECTION_ID_PRESENT ||
connection_id_included == CONNECTION_ID_ABSENT);
return connection_id_included == CONNECTION_ID_PRESENT
? connection_id.length()
: 0;
}
uint8_t GetIncludedDestinationConnectionIdLength(
const QuicPacketHeader& header) {
return GetIncludedConnectionIdLength(
header.destination_connection_id,
header.destination_connection_id_included);
}
uint8_t GetIncludedSourceConnectionIdLength(const QuicPacketHeader& header) {
return GetIncludedConnectionIdLength(header.source_connection_id,
header.source_connection_id_included);
}
size_t GetPacketHeaderSize(QuicTransportVersion version,
const QuicPacketHeader& header) {
return GetPacketHeaderSize(
version, GetIncludedDestinationConnectionIdLength(header),
GetIncludedSourceConnectionIdLength(header), header.version_flag,
header.nonce != nullptr, header.packet_number_length,
header.retry_token_length_length, header.retry_token.length(),
header.length_length);
}
size_t GetPacketHeaderSize(
QuicTransportVersion version, uint8_t destination_connection_id_length,
uint8_t source_connection_id_length, bool include_version,
bool include_diversification_nonce,
QuicPacketNumberLength packet_number_length,
quiche::QuicheVariableLengthIntegerLength retry_token_length_length,
QuicByteCount retry_token_length,
quiche::QuicheVariableLengthIntegerLength length_length) {
if (include_version) {
size_t size = kPacketHeaderTypeSize + kConnectionIdLengthSize +
destination_connection_id_length +
source_connection_id_length + packet_number_length +
kQuicVersionSize;
if (include_diversification_nonce) {
size += kDiversificationNonceSize;
}
if (VersionHasLengthPrefixedConnectionIds(version)) {
size += kConnectionIdLengthSize;
}
QUICHE_DCHECK(
QuicVersionHasLongHeaderLengths(version) ||
retry_token_length_length + retry_token_length + length_length == 0);
if (QuicVersionHasLongHeaderLengths(version)) {
size += retry_token_length_length + retry_token_length + length_length;
}
return size;
}
return kPacketHeaderTypeSize + destination_connection_id_length +
packet_number_length;
}
size_t GetStartOfEncryptedData(QuicTransportVersion version,
const QuicPacketHeader& header) {
return GetPacketHeaderSize(version, header);
}
size_t GetStartOfEncryptedData(
QuicTransportVersion version, uint8_t destination_connection_id_length,
uint8_t source_connection_id_length, bool include_version,
bool include_diversification_nonce,
QuicPacketNumberLength packet_number_length,
quiche::QuicheVariableLengthIntegerLength retry_token_length_length,
QuicByteCount retry_token_length,
quiche::QuicheVariableLengthIntegerLength length_length) {
return GetPacketHeaderSize(
version, destination_connection_id_length, source_connection_id_length,
include_version, include_diversification_nonce, packet_number_length,
retry_token_length_length, retry_token_length, length_length);
}
QuicPacketHeader::QuicPacketHeader()
: destination_connection_id(EmptyQuicConnectionId()),
destination_connection_id_included(CONNECTION_ID_PRESENT),
source_connection_id(EmptyQuicConnectionId()),
source_connection_id_included(CONNECTION_ID_ABSENT),
reset_flag(false),
version_flag(false),
has_possible_stateless_reset_token(false),
packet_number_length(PACKET_4BYTE_PACKET_NUMBER),
type_byte(0),
version(UnsupportedQuicVersion()),
nonce(nullptr),
form(GOOGLE_QUIC_PACKET),
long_packet_type(INITIAL),
possible_stateless_reset_token({}),
retry_token_length_length(quiche::VARIABLE_LENGTH_INTEGER_LENGTH_0),
retry_token(absl::string_view()),
length_length(quiche::VARIABLE_LENGTH_INTEGER_LENGTH_0),
remaining_packet_length(0) {}
QuicPacketHeader::QuicPacketHeader(const QuicPacketHeader& other) = default;
QuicPacketHeader::~QuicPacketHeader() {}
QuicPacketHeader& QuicPacketHeader::operator=(const QuicPacketHeader& other) =
default;
QuicPublicResetPacket::QuicPublicResetPacket()
: connection_id(EmptyQuicConnectionId()), nonce_proof(0) {}
QuicPublicResetPacket::QuicPublicResetPacket(QuicConnectionId connection_id)
: connection_id(connection_id), nonce_proof(0) {}
QuicVersionNegotiationPacket::QuicVersionNegotiationPacket()
: connection_id(EmptyQuicConnectionId()) {}
QuicVersionNegotiationPacket::QuicVersionNegotiationPacket(
QuicConnectionId connection_id)
: connection_id(connection_id) {}
QuicVersionNegotiationPacket::QuicVersionNegotiationPacket(
const QuicVersionNegotiationPacket& other) = default;
QuicVersionNegotiationPacket::~QuicVersionNegotiationPacket() {}
QuicIetfStatelessResetPacket::QuicIetfStatelessResetPacket()
: stateless_reset_token({}) {}
QuicIetfStatelessResetPacket::QuicIetfStatelessResetPacket(
const QuicPacketHeader& header, StatelessResetToken token)
: header(header), stateless_reset_token(token) {}
QuicIetfStatelessResetPacket::QuicIetfStatelessResetPacket(
const QuicIetfStatelessResetPacket& other) = default;
QuicIetfStatelessResetPacket::~QuicIetfStatelessResetPacket() {}
std::ostream& operator<<(std::ostream& os, const QuicPacketHeader& header) {
os << "{ destination_connection_id: " << header.destination_connection_id
<< " ("
<< (header.destination_connection_id_included == CONNECTION_ID_PRESENT
? "present"
: "absent")
<< "), source_connection_id: " << header.source_connection_id << " ("
<< (header.source_connection_id_included == CONNECTION_ID_PRESENT
? "present"
: "absent")
<< "), packet_number_length: "
<< static_cast<int>(header.packet_number_length)
<< ", reset_flag: " << header.reset_flag
<< ", version_flag: " << header.version_flag;
if (header.version_flag) {
os << ", version: " << ParsedQuicVersionToString(header.version);
if (header.long_packet_type != INVALID_PACKET_TYPE) {
os << ", long_packet_type: "
<< QuicUtils::QuicLongHeaderTypetoString(header.long_packet_type);
}
if (header.retry_token_length_length !=
quiche::VARIABLE_LENGTH_INTEGER_LENGTH_0) {
os << ", retry_token_length_length: "
<< static_cast<int>(header.retry_token_length_length);
}
if (header.retry_token.length() != 0) {
os << ", retry_token_length: " << header.retry_token.length();
}
if (header.length_length != quiche::VARIABLE_LENGTH_INTEGER_LENGTH_0) {
os << ", length_length: " << static_cast<int>(header.length_length);
}
if (header.remaining_packet_length != 0) {
os << ", remaining_packet_length: " << header.remaining_packet_length;
}
}
if (header.nonce != nullptr) {
os << ", diversification_nonce: "
<< absl::BytesToHexString(
absl::string_view(header.nonce->data(), header.nonce->size()));
}
os << ", packet_number: " << header.packet_number << " }\n";
return os;
}
QuicData::QuicData(const char* buffer, size_t length)
: buffer_(buffer), length_(length), owns_buffer_(false) {}
QuicData::QuicData(const char* buffer, size_t length, bool owns_buffer)
: buffer_(buffer), length_(length), owns_buffer_(owns_buffer) {}
QuicData::QuicData(absl::string_view packet_data)
: buffer_(packet_data.data()),
length_(packet_data.length()),
owns_buffer_(false) {}
QuicData::~QuicData() {
if (owns_buffer_) {
delete[] const_cast<char*>(buffer_);
}
}
QuicPacket::QuicPacket(
char* buffer, size_t length, bool owns_buffer,
uint8_t destination_connection_id_length,
uint8_t source_connection_id_length, bool includes_version,
bool includes_diversification_nonce,
QuicPacketNumberLength packet_number_length,
quiche::QuicheVariableLengthIntegerLength retry_token_length_length,
QuicByteCount retry_token_length,
quiche::QuicheVariableLengthIntegerLength length_length)
: QuicData(buffer, length, owns_buffer),
buffer_(buffer),
destination_connection_id_length_(destination_connection_id_length),
source_connection_id_length_(source_connection_id_length),
includes_version_(includes_version),
includes_diversification_nonce_(includes_diversification_nonce),
packet_number_length_(packet_number_length),
retry_token_length_length_(retry_token_length_length),
retry_token_length_(retry_token_length),
length_length_(length_length) {}
QuicPacket::QuicPacket(QuicTransportVersion , char* buffer,
size_t length, bool owns_buffer,
const QuicPacketHeader& header)
: QuicPacket(buffer, length, owns_buffer,
GetIncludedDestinationConnectionIdLength(header),
GetIncludedSourceConnectionIdLength(header),
header.version_flag, header.nonce != nullptr,
header.packet_number_length, header.retry_token_length_length,
header.retry_token.length(), header.length_length) {}
QuicEncryptedPacket::QuicEncryptedPacket(const char* buffer, size_t length)
: QuicData(buffer, length) {}
QuicEncryptedPacket::QuicEncryptedPacket(const char* buffer, size_t length,
bool owns_buffer)
: QuicData(buffer, length, owns_buffer) {}
QuicEncryptedPacket::QuicEncryptedPacket(absl::string_view data)
: QuicData(data) {}
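// Deep-copies the packet contents; the clone owns its buffer.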
std::unique_ptr<QuicEncryptedPacket> QuicEncryptedPacket::Clone() const {
char* buffer = new char[this->length()];
std::copy(this->data(), this->data() + this->length(), buffer);
return std::make_unique<QuicEncryptedPacket>(buffer, this->length(), true);
}
std::ostream& operator<<(std::ostream& os, const QuicEncryptedPacket& s) {
os << s.length() << "-byte data";
return os;
}
QuicReceivedPacket::QuicReceivedPacket(const char* buffer, size_t length,
QuicTime receipt_time)
: QuicReceivedPacket(buffer, length, receipt_time,
false ) {}
QuicReceivedPacket::QuicReceivedPacket(const char* buffer, size_t length,
QuicTime receipt_time, bool owns_buffer)
: QuicReceivedPacket(buffer, length, receipt_time, owns_buffer, 0 ,
true ) {}
QuicReceivedPacket::QuicReceivedPacket(const char* buffer, size_t length,
QuicTime receipt_time, bool owns_buffer,
int ttl, bool ttl_valid)
: quic::QuicReceivedPacket(buffer, length, receipt_time, owns_buffer, ttl,
ttl_valid, nullptr ,
0 ,
false , ECN_NOT_ECT) {}
QuicReceivedPacket::QuicReceivedPacket(const char* buffer, size_t length,
QuicTime receipt_time, bool owns_buffer,
int ttl, bool ttl_valid,
char* packet_headers,
size_t headers_length,
bool owns_header_buffer)
: quic::QuicReceivedPacket(buffer, length, receipt_time, owns_buffer, ttl,
ttl_valid, packet_headers, headers_length,
owns_header_buffer, ECN_NOT_ECT) {}
QuicReceivedPacket::QuicReceivedPacket(
const char* buffer, size_t length, QuicTime receipt_time, bool owns_buffer,
int ttl, bool ttl_valid, char* packet_headers, size_t headers_length,
bool owns_header_buffer, QuicEcnCodepoint ecn_codepoint)
: QuicEncryptedPacket(buffer, length, owns_buffer),
receipt_time_(receipt_time),
ttl_(ttl_valid ? ttl : -1),
packet_headers_(packet_headers),
headers_length_(headers_length),
owns_header_buffer_(owns_header_buffer),
ecn_codepoint_(ecn_codepoint) {}
QuicReceivedPacket::~QuicReceivedPacket() {
if (owns_header_buffer_) {
delete[] static_cast<char*>(packet_headers_);
}
}
std::unique_ptr<QuicReceivedPacket> QuicReceivedPacket::Clone() const {
char* buffer = new char[this->length()];
memcpy(buffer, this->data(), this->length());
if (this->packet_headers()) {
char* headers_buffer = new char[this->headers_length()];
memcpy(headers_buffer, this->packet_headers(), this->headers_length());
return std::make_unique<QuicReceivedPacket>(
buffer, this->length(), receipt_time(), true, ttl(), ttl() >= 0,
headers_buffer, this->headers_length(), true, this->ecn_codepoint());
}
return std::make_unique<QuicReceivedPacket>(
buffer, this->length(), receipt_time(), true, ttl(), ttl() >= 0, nullptr,
0, false, this->ecn_codepoint());
}
std::ostream& operator<<(std::ostream& os, const QuicReceivedPacket& s) {
os << s.length() << "-byte data";
return os;
}
absl::string_view QuicPacket::AssociatedData(
QuicTransportVersion version) const {
return absl::string_view(
data(),
GetStartOfEncryptedData(version, destination_connection_id_length_,
source_connection_id_length_, includes_version_,
includes_diversification_nonce_,
packet_number_length_, retry_token_length_length_,
retry_token_length_, length_length_));
}
absl::string_view QuicPacket::Plaintext(QuicTransportVersion version) const {
const size_t start_of_encrypted_data = GetStartOfEncryptedData(
version, destination_connection_id_length_, source_connection_id_length_,
includes_version_, includes_diversification_nonce_, packet_number_length_,
retry_token_length_length_, retry_token_length_, length_length_);
return absl::string_view(data() + start_of_encrypted_data,
length() - start_of_encrypted_data);
}
SerializedPacket::SerializedPacket(QuicPacketNumber packet_number,
QuicPacketNumberLength packet_number_length,
const char* encrypted_buffer,
QuicPacketLength encrypted_length,
bool has_ack, bool has_stop_waiting)
: encrypted_buffer(encrypted_buffer),
encrypted_length(encrypted_length),
has_crypto_handshake(NOT_HANDSHAKE),
packet_number(packet_number),
packet_number_length(packet_number_length),
encryption_level(ENCRYPTION_INITIAL),
has_ack(has_ack),
has_stop_waiting(has_stop_waiting),
transmission_type(NOT_RETRANSMISSION),
has_ack_frame_copy(false),
has_ack_frequency(false),
has_message(false),
fate(SEND_TO_WRITER) {}
SerializedPacket::SerializedPacket(SerializedPacket&& other)
: has_crypto_handshake(other.has_crypto_handshake),
packet_number(other.packet_number),
packet_number_length(other.packet_number_length),
encryption_level(other.encryption_level),
has_ack(other.has_ack),
has_stop_waiting(other.has_stop_waiting),
has_ack_ecn(other.has_ack_ecn),
transmission_type(other.transmission_type),
largest_acked(other.largest_acked),
has_ack_frame_copy(other.has_ack_frame_copy),
has_ack_frequency(other.has_ack_frequency),
has_message(other.has_message),
fate(other.fate),
peer_address(other.peer_address),
bytes_not_retransmitted(other.bytes_not_retransmitted),
initial_header(other.initial_header) {
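  // Transfer ownership of the encrypted buffer and frame lists from other,
  // leaving it safe to destroy.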
if (this != &other) {
if (release_encrypted_buffer && encrypted_buffer != nullptr) {
release_encrypted_buffer(encrypted_buffer);
}
encrypted_buffer = other.encrypted_buffer;
encrypted_length = other.encrypted_length;
release_encrypted_buffer = std::move(other.release_encrypted_buffer);
other.release_encrypted_buffer = nullptr;
retransmittable_frames.swap(other.retransmittable_frames);
nonretransmittable_frames.swap(other.nonretransmittable_frames);
}
}
SerializedPacket::~SerializedPacket() {
if (release_encrypted_buffer && encrypted_buffer != nullptr) {
release_encrypted_buffer(encrypted_buffer);
}
if (!retransmittable_frames.empty()) {
DeleteFrames(&retransmittable_frames);
}
for (auto& frame : nonretransmittable_frames) {
if (!has_ack_frame_copy && frame.type == ACK_FRAME) {
continue;
}
DeleteFrame(&frame);
}
}
SerializedPacket* CopySerializedPacket(const SerializedPacket& serialized,
quiche::QuicheBufferAllocator* allocator,
bool copy_buffer) {
SerializedPacket* copy = new SerializedPacket(
serialized.packet_number, serialized.packet_number_length,
serialized.encrypted_buffer, serialized.encrypted_length,
serialized.has_ack, serialized.has_stop_waiting);
copy->has_crypto_handshake = serialized.has_crypto_handshake;
copy->encryption_level = serialized.encryption_level;
copy->transmission_type = serialized.transmission_type;
copy->largest_acked = serialized.largest_acked;
copy->has_ack_frequency = serialized.has_ack_frequency;
copy->has_message = serialized.has_message;
copy->fate = serialized.fate;
copy->peer_address = serialized.peer_address;
copy->bytes_not_retransmitted = serialized.bytes_not_retransmitted;
copy->initial_header = serialized.initial_header;
copy->has_ack_ecn = serialized.has_ack_ecn;
if (copy_buffer) {
copy->encrypted_buffer = CopyBuffer(serialized);
copy->release_encrypted_buffer = [](const char* p) { delete[] p; };
}
copy->retransmittable_frames =
CopyQuicFrames(allocator, serialized.retransmittable_frames);
QUICHE_DCHECK(copy->nonretransmittable_frames.empty());
for (const auto& frame : serialized.nonretransmittable_frames) {
if (frame.type == ACK_FRAME) {
copy->has_ack_frame_copy = true;
}
copy->nonretransmittable_frames.push_back(CopyQuicFrame(allocator, frame));
}
return copy;
}
char* CopyBuffer(const SerializedPacket& packet) {
return CopyBuffer(packet.encrypted_buffer, packet.encrypted_length);
}
char* CopyBuffer(const char* encrypted_buffer,
QuicPacketLength encrypted_length) {
char* dst_buffer = new char[encrypted_length];
memcpy(dst_buffer, encrypted_buffer, encrypted_length);
return dst_buffer;
}
ReceivedPacketInfo::ReceivedPacketInfo(const QuicSocketAddress& self_address,
const QuicSocketAddress& peer_address,
const QuicReceivedPacket& packet)
: self_address(self_address),
peer_address(peer_address),
packet(packet),
form(GOOGLE_QUIC_PACKET),
long_packet_type(INVALID_PACKET_TYPE),
version_flag(false),
use_length_prefix(false),
version_label(0),
version(ParsedQuicVersion::Unsupported()),
destination_connection_id(EmptyQuicConnectionId()),
source_connection_id(EmptyQuicConnectionId()) {}
ReceivedPacketInfo::~ReceivedPacketInfo() {}
std::string ReceivedPacketInfo::ToString() const {
std::string output =
absl::StrCat("{ self_address: ", self_address.ToString(),
", peer_address: ", peer_address.ToString(),
", packet_length: ", packet.length(),
", header_format: ", form, ", version_flag: ", version_flag);
if (version_flag) {
absl::StrAppend(&output, ", version: ", ParsedQuicVersionToString(version));
}
absl::StrAppend(
&output,
", destination_connection_id: ", destination_connection_id.ToString(),
", source_connection_id: ", source_connection_id.ToString(), " }\n");
return output;
}
std::ostream& operator<<(std::ostream& os,
const ReceivedPacketInfo& packet_info) {
os << packet_info.ToString();
return os;
}
bool QuicPacketHeader::operator==(const QuicPacketHeader& other) const {
return destination_connection_id == other.destination_connection_id &&
destination_connection_id_included ==
other.destination_connection_id_included &&
source_connection_id == other.source_connection_id &&
source_connection_id_included == other.source_connection_id_included &&
reset_flag == other.reset_flag && version_flag == other.version_flag &&
has_possible_stateless_reset_token ==
other.has_possible_stateless_reset_token &&
packet_number_length == other.packet_number_length &&
type_byte == other.type_byte && version == other.version &&
nonce == other.nonce &&
((!packet_number.IsInitialized() &&
!other.packet_number.IsInitialized()) ||
(packet_number.IsInitialized() &&
other.packet_number.IsInitialized() &&
packet_number == other.packet_number)) &&
form == other.form && long_packet_type == other.long_packet_type &&
possible_stateless_reset_token ==
other.possible_stateless_reset_token &&
retry_token_length_length == other.retry_token_length_length &&
retry_token == other.retry_token &&
length_length == other.length_length &&
remaining_packet_length == other.remaining_packet_length;
}
bool QuicPacketHeader::operator!=(const QuicPacketHeader& other) const {
return !operator==(other);
}
} | #include "quiche/quic/core/quic_packets.h"
#include <memory>
#include <string>
#include "absl/memory/memory.h"
#include "quiche/quic/core/quic_time.h"
#include "quiche/quic/core/quic_types.h"
#include "quiche/quic/platform/api/quic_flags.h"
#include "quiche/quic/platform/api/quic_test.h"
#include "quiche/quic/test_tools/quic_test_utils.h"
#include "quiche/common/test_tools/quiche_test_utils.h"
namespace quic {
namespace test {
namespace {
QuicPacketHeader CreateFakePacketHeader() {
QuicPacketHeader header;
header.destination_connection_id = TestConnectionId(1);
header.destination_connection_id_included = CONNECTION_ID_PRESENT;
header.source_connection_id = TestConnectionId(2);
header.source_connection_id_included = CONNECTION_ID_ABSENT;
return header;
}
class QuicPacketsTest : public QuicTest {};
TEST_F(QuicPacketsTest, GetServerConnectionIdAsRecipient) {
QuicPacketHeader header = CreateFakePacketHeader();
EXPECT_EQ(TestConnectionId(1),
GetServerConnectionIdAsRecipient(header, Perspective::IS_SERVER));
EXPECT_EQ(TestConnectionId(2),
GetServerConnectionIdAsRecipient(header, Perspective::IS_CLIENT));
}
TEST_F(QuicPacketsTest, GetServerConnectionIdAsSender) {
QuicPacketHeader header = CreateFakePacketHeader();
EXPECT_EQ(TestConnectionId(2),
GetServerConnectionIdAsSender(header, Perspective::IS_SERVER));
EXPECT_EQ(TestConnectionId(1),
GetServerConnectionIdAsSender(header, Perspective::IS_CLIENT));
}
TEST_F(QuicPacketsTest, GetServerConnectionIdIncludedAsSender) {
QuicPacketHeader header = CreateFakePacketHeader();
EXPECT_EQ(CONNECTION_ID_ABSENT, GetServerConnectionIdIncludedAsSender(
header, Perspective::IS_SERVER));
EXPECT_EQ(CONNECTION_ID_PRESENT, GetServerConnectionIdIncludedAsSender(
header, Perspective::IS_CLIENT));
}
TEST_F(QuicPacketsTest, GetClientConnectionIdIncludedAsSender) {
QuicPacketHeader header = CreateFakePacketHeader();
EXPECT_EQ(CONNECTION_ID_PRESENT, GetClientConnectionIdIncludedAsSender(
header, Perspective::IS_SERVER));
EXPECT_EQ(CONNECTION_ID_ABSENT, GetClientConnectionIdIncludedAsSender(
header, Perspective::IS_CLIENT));
}
TEST_F(QuicPacketsTest, GetClientConnectionIdAsRecipient) {
QuicPacketHeader header = CreateFakePacketHeader();
EXPECT_EQ(TestConnectionId(2),
GetClientConnectionIdAsRecipient(header, Perspective::IS_SERVER));
EXPECT_EQ(TestConnectionId(1),
GetClientConnectionIdAsRecipient(header, Perspective::IS_CLIENT));
}
TEST_F(QuicPacketsTest, GetClientConnectionIdAsSender) {
QuicPacketHeader header = CreateFakePacketHeader();
EXPECT_EQ(TestConnectionId(1),
GetClientConnectionIdAsSender(header, Perspective::IS_SERVER));
EXPECT_EQ(TestConnectionId(2),
GetClientConnectionIdAsSender(header, Perspective::IS_CLIENT));
}
TEST_F(QuicPacketsTest, CopyQuicPacketHeader) {
QuicPacketHeader header;
QuicPacketHeader header2 = CreateFakePacketHeader();
EXPECT_NE(header, header2);
QuicPacketHeader header3(header2);
EXPECT_EQ(header2, header3);
}
TEST_F(QuicPacketsTest, CopySerializedPacket) {
std::string buffer(1000, 'a');
quiche::SimpleBufferAllocator allocator;
SerializedPacket packet(QuicPacketNumber(1), PACKET_1BYTE_PACKET_NUMBER,
buffer.data(), buffer.length(), false,
false);
packet.retransmittable_frames.push_back(QuicFrame(QuicWindowUpdateFrame()));
packet.retransmittable_frames.push_back(QuicFrame(QuicStreamFrame()));
QuicAckFrame ack_frame(InitAckFrame(1));
packet.nonretransmittable_frames.push_back(QuicFrame(&ack_frame));
packet.nonretransmittable_frames.push_back(QuicFrame(QuicPaddingFrame(-1)));
std::unique_ptr<SerializedPacket> copy = absl::WrapUnique<SerializedPacket>(
CopySerializedPacket(packet, &allocator, true));
EXPECT_EQ(quic::QuicPacketNumber(1), copy->packet_number);
EXPECT_EQ(PACKET_1BYTE_PACKET_NUMBER, copy->packet_number_length);
ASSERT_EQ(2u, copy->retransmittable_frames.size());
EXPECT_EQ(WINDOW_UPDATE_FRAME, copy->retransmittable_frames[0].type);
EXPECT_EQ(STREAM_FRAME, copy->retransmittable_frames[1].type);
ASSERT_EQ(2u, copy->nonretransmittable_frames.size());
EXPECT_EQ(ACK_FRAME, copy->nonretransmittable_frames[0].type);
EXPECT_EQ(PADDING_FRAME, copy->nonretransmittable_frames[1].type);
EXPECT_EQ(1000u, copy->encrypted_length);
quiche::test::CompareCharArraysWithHexError(
"encrypted_buffer", copy->encrypted_buffer, copy->encrypted_length,
packet.encrypted_buffer, packet.encrypted_length);
std::unique_ptr<SerializedPacket> copy2 = absl::WrapUnique<SerializedPacket>(
CopySerializedPacket(packet, &allocator, false));
EXPECT_EQ(packet.encrypted_buffer, copy2->encrypted_buffer);
EXPECT_EQ(1000u, copy2->encrypted_length);
}
TEST_F(QuicPacketsTest, CloneReceivedPacket) {
char header[4] = "bar";
QuicReceivedPacket packet("foo", 3, QuicTime::Zero(), false, 0, true, header,
sizeof(header) - 1, false,
QuicEcnCodepoint::ECN_ECT1);
std::unique_ptr<QuicReceivedPacket> copy = packet.Clone();
EXPECT_EQ(packet.ecn_codepoint(), copy->ecn_codepoint());
}
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/quic_packets.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/quic_packets_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
8239efd6-6647-44aa-ab01-c4a65b5b5ec6 | cpp | google/glog | signalhandler | src/signalhandler.cc | src/signalhandler_unittest.cc | #include <algorithm>
#include <csignal>
#include <cstring>
#include <ctime>
#include <mutex>
#include <sstream>
#include <thread>
#include "config.h"
#include "glog/logging.h"
#include "glog/platform.h"
#include "stacktrace.h"
#include "symbolize.h"
#include "utilities.h"
#ifdef HAVE_UCONTEXT_H
# include <ucontext.h>
#endif
#ifdef HAVE_SYS_UCONTEXT_H
# include <sys/ucontext.h>
#endif
#ifdef HAVE_UNISTD_H
# include <unistd.h>
#endif
namespace google {
namespace {
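// The fatal signals the failure handler is installed for, with printable
// names for the dump.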
const struct {
int number;
const char* name;
} kFailureSignals[] = {
{SIGSEGV, "SIGSEGV"}, {SIGILL, "SIGILL"},
{SIGFPE, "SIGFPE"}, {SIGABRT, "SIGABRT"},
#if !defined(GLOG_OS_WINDOWS)
{SIGBUS, "SIGBUS"},
#endif
{SIGTERM, "SIGTERM"},
};
static bool kFailureSignalHandlerInstalled = false;
#if !defined(GLOG_OS_WINDOWS)
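// Extracts the program counter from the ucontext handed to the signal
// handler; returns nullptr when the platform does not provide
// PC_FROM_UCONTEXT.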
void* GetPC(void* ucontext_in_void) {
# if (defined(HAVE_UCONTEXT_H) || defined(HAVE_SYS_UCONTEXT_H)) && \
defined(PC_FROM_UCONTEXT)
if (ucontext_in_void != nullptr) {
ucontext_t* context = reinterpret_cast<ucontext_t*>(ucontext_in_void);
return (void*)context->PC_FROM_UCONTEXT;
}
# else
(void)ucontext_in_void;
# endif
return nullptr;
}
#endif
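// Formats text into a fixed, caller-supplied buffer with no heap allocation,
// so it is safe to use from inside a signal handler.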
class MinimalFormatter {
public:
MinimalFormatter(char* buffer, size_t size)
: buffer_(buffer), cursor_(buffer), end_(buffer + size) {}
std::size_t num_bytes_written() const {
return static_cast<std::size_t>(cursor_ - buffer_);
}
void AppendString(const char* str) {
ptrdiff_t i = 0;
while (str[i] != '\0' && cursor_ + i < end_) {
cursor_[i] = str[i];
++i;
}
cursor_ += i;
}
void AppendUint64(uint64 number, unsigned radix) {
unsigned i = 0;
while (cursor_ + i < end_) {
const uint64 tmp = number % radix;
number /= radix;
cursor_[i] = static_cast<char>(tmp < 10 ? '0' + tmp : 'a' + tmp - 10);
++i;
if (number == 0) {
break;
}
}
std::reverse(cursor_, cursor_ + i);
cursor_ += i;
}
void AppendHexWithPadding(uint64 number, int width) {
char* start = cursor_;
AppendString("0x");
AppendUint64(number, 16);
if (cursor_ < start + width) {
const int64 delta = start + width - cursor_;
std::copy(start, cursor_, start + delta);
std::fill(start, start + delta, ' ');
cursor_ = start + width;
}
}
private:
char* buffer_;
char* cursor_;
const char* const end_;
};
void WriteToStderr(const char* data, size_t size) {
if (write(fileno(stderr), data, size) < 0) {
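// There is nothing sensible to do on failure here; the check only consumes
// the result of write() to silence warn_unused_result.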
}
}
void (*g_failure_writer)(const char* data, size_t size) = WriteToStderr;
void DumpTimeInfo() {
time_t time_in_sec = time(nullptr);
char buf[256];
MinimalFormatter formatter(buf, sizeof(buf));
formatter.AppendString("*** Aborted at ");
formatter.AppendUint64(static_cast<uint64>(time_in_sec), 10);
formatter.AppendString(" (unix time)");
formatter.AppendString(" try \"date -d @");
formatter.AppendUint64(static_cast<uint64>(time_in_sec), 10);
formatter.AppendString("\" if you are using GNU date ***\n");
g_failure_writer(buf, formatter.num_bytes_written());
}
#if defined(HAVE_STACKTRACE) && defined(HAVE_SIGACTION)
void DumpSignalInfo(int signal_number, siginfo_t* siginfo) {
const char* signal_name = nullptr;
for (auto kFailureSignal : kFailureSignals) {
if (signal_number == kFailureSignal.number) {
signal_name = kFailureSignal.name;
}
}
char buf[256];
MinimalFormatter formatter(buf, sizeof(buf));
formatter.AppendString("*** ");
if (signal_name) {
formatter.AppendString(signal_name);
} else {
formatter.AppendString("Signal ");
formatter.AppendUint64(static_cast<uint64>(signal_number), 10);
}
formatter.AppendString(" (@0x");
formatter.AppendUint64(reinterpret_cast<uintptr_t>(siginfo->si_addr), 16);
formatter.AppendString(")");
formatter.AppendString(" received by PID ");
formatter.AppendUint64(static_cast<uint64>(getpid()), 10);
formatter.AppendString(" (TID ");
std::ostringstream oss;
oss << std::showbase << std::hex << std::this_thread::get_id();
formatter.AppendString(oss.str().c_str());
formatter.AppendString(") ");
# ifdef GLOG_OS_LINUX
formatter.AppendString("from PID ");
formatter.AppendUint64(static_cast<uint64>(siginfo->si_pid), 10);
formatter.AppendString("; ");
# endif
formatter.AppendString("stack trace: ***\n");
g_failure_writer(buf, formatter.num_bytes_written());
}
#endif
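// Prints one stack frame as "<prefix>@ <address> <symbol>", symbolizing the
// address when symbolization support was compiled in.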
void DumpStackFrameInfo(const char* prefix, void* pc) {
const char* symbol = "(unknown)";
#if defined(HAVE_SYMBOLIZE)
char symbolized[1024];
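// Use pc - 1: frames taken from return addresses point one past the call
// instruction, so backing up one byte attributes the frame to the call site.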
if (Symbolize(reinterpret_cast<char*>(pc) - 1, symbolized,
sizeof(symbolized))) {
symbol = symbolized;
}
#else
# pragma message( \
"Symbolize functionality is not available for target platform: stack dump will contain empty frames.")
#endif
char buf[1024];
MinimalFormatter formatter(buf, sizeof(buf));
formatter.AppendString(prefix);
formatter.AppendString("@ ");
const int width = 2 * sizeof(void*) + 2;
formatter.AppendHexWithPadding(reinterpret_cast<uintptr_t>(pc), width);
formatter.AppendString(" ");
formatter.AppendString(symbol);
formatter.AppendString("\n");
g_failure_writer(buf, formatter.num_bytes_written());
}
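// Restores the default disposition for the signal and re-raises it, so the
// process terminates with the expected signal status (and core dump, if any).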
void InvokeDefaultSignalHandler(int signal_number) {
#ifdef HAVE_SIGACTION
struct sigaction sig_action;
memset(&sig_action, 0, sizeof(sig_action));
sigemptyset(&sig_action.sa_mask);
sig_action.sa_handler = SIG_DFL;
sigaction(signal_number, &sig_action, nullptr);
kill(getpid(), signal_number);
#elif defined(GLOG_OS_WINDOWS)
signal(signal_number, SIG_DFL);
raise(signal_number);
#endif
}
static std::once_flag signaled;
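// Handler body, executed at most once (see the std::call_once below): dumps
// the time, faulting PC, signal details, and stack trace, flushes log files
// without locking, then re-raises the signal.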
static void HandleSignal(int signal_number
#if !defined(GLOG_OS_WINDOWS)
,
siginfo_t* signal_info, void* ucontext
#endif
) {
DumpTimeInfo();
#if !defined(GLOG_OS_WINDOWS)
void* pc = GetPC(ucontext);
DumpStackFrameInfo("PC: ", pc);
#endif
#ifdef HAVE_STACKTRACE
void* stack[32];
const int depth = GetStackTrace(stack, ARRAYSIZE(stack), 1);
# ifdef HAVE_SIGACTION
DumpSignalInfo(signal_number, signal_info);
# elif !defined(GLOG_OS_WINDOWS)
(void)signal_info;
# endif
for (int i = 0; i < depth; ++i) {
DumpStackFrameInfo(" ", stack[i]);
}
#elif !defined(GLOG_OS_WINDOWS)
(void)signal_info;
#endif
FlushLogFilesUnsafe(GLOG_INFO);
InvokeDefaultSignalHandler(signal_number);
}
#if defined(GLOG_OS_WINDOWS)
void FailureSignalHandler(int signal_number)
#else
void FailureSignalHandler(int signal_number, siginfo_t* signal_info,
void* ucontext)
#endif
{
std::call_once(signaled, &HandleSignal, signal_number
#if !defined(GLOG_OS_WINDOWS)
,
signal_info, ucontext
#endif
);
}
}
bool IsFailureSignalHandlerInstalled() {
#ifdef HAVE_SIGACTION
struct sigaction sig_action;
memset(&sig_action, 0, sizeof(sig_action));
sigemptyset(&sig_action.sa_mask);
sigaction(SIGABRT, nullptr, &sig_action);
if (sig_action.sa_sigaction == &FailureSignalHandler) {
return true;
}
#elif defined(GLOG_OS_WINDOWS)
return kFailureSignalHandlerInstalled;
#endif
return false;
}
void InstallFailureSignalHandler() {
#ifdef HAVE_SIGACTION
struct sigaction sig_action;
memset(&sig_action, 0, sizeof(sig_action));
sigemptyset(&sig_action.sa_mask);
sig_action.sa_flags |= SA_SIGINFO;
sig_action.sa_sigaction = &FailureSignalHandler;
for (auto kFailureSignal : kFailureSignals) {
CHECK_ERR(sigaction(kFailureSignal.number, &sig_action, nullptr));
}
kFailureSignalHandlerInstalled = true;
#elif defined(GLOG_OS_WINDOWS)
for (size_t i = 0; i < ARRAYSIZE(kFailureSignals); ++i) {
CHECK_NE(signal(kFailureSignals[i].number, &FailureSignalHandler), SIG_ERR);
}
kFailureSignalHandlerInstalled = true;
#endif
}
void InstallFailureWriter(void (*writer)(const char* data, size_t size)) {
#if defined(HAVE_SIGACTION) || defined(GLOG_OS_WINDOWS)
g_failure_writer = writer;
#endif
}
} | #include <csignal>
#include <cstdio>
#include <cstdlib>
#include <sstream>
#include <string>
#include <thread>
#include "config.h"
#include "glog/logging.h"
#include "stacktrace.h"
#include "symbolize.h"
#if defined(HAVE_UNISTD_H)
# include <unistd.h>
#endif
#ifdef GLOG_USE_GFLAGS
# include <gflags/gflags.h>
using namespace GFLAGS_NAMESPACE;
#endif
#if defined(_MSC_VER)
# include <io.h>
#endif
using namespace google;
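// Dereferences a null pointer from a secondary thread so that the handler's
// thread-ID reporting can be exercised.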
static void DieInThread(int* a) {
std::ostringstream oss;
oss << std::showbase << std::hex << std::this_thread::get_id();
fprintf(stderr, "%s is dying\n", oss.str().c_str());
int b = 1 / *a;
fprintf(stderr, "We should have died: b=%d\n", b);
}
static void WriteToStdout(const char* data, size_t size) {
if (write(fileno(stdout), data, size) < 0) {
}
}
int main(int argc, char** argv) {
#if defined(HAVE_STACKTRACE) && defined(HAVE_SYMBOLIZE)
InitGoogleLogging(argv[0]);
# ifdef GLOG_USE_GFLAGS
ParseCommandLineFlags(&argc, &argv, true);
# endif
InstallFailureSignalHandler();
const std::string command = argc > 1 ? argv[1] : "none";
if (command == "segv") {
LOG(INFO) << "create the log file";
LOG(INFO) << "a message before segv";
int* a = (int*)0xDEAD;
*a = 0;
} else if (command == "loop") {
fprintf(stderr, "looping\n");
while (true)
;
} else if (command == "die_in_thread") {
std::thread t{&DieInThread, nullptr};
t.join();
} else if (command == "dump_to_stdout") {
InstallFailureWriter(WriteToStdout);
abort();
} else if (command == "installed") {
fprintf(stderr, "signal handler installed: %s\n",
IsFailureSignalHandlerInstalled() ? "true" : "false");
} else {
puts("OK");
}
#endif
return 0;
} | https://github.com/google/glog/blob/de309c08c05382fee0792380de7df1bd65332da2/src/signalhandler.cc | https://github.com/google/glog/blob/de309c08c05382fee0792380de7df1bd65332da2/src/signalhandler_unittest.cc | de309c08c05382fee0792380de7df1bd65332da2 |
147250cd-0c15-4f1e-8224-6ffd1c121a75 | cpp | tensorflow/tensorflow | shape | tensorflow/lite/delegates/gpu/common/shape.cc | tensorflow/lite/delegates/gpu/common/shape_test.cc | #include "tensorflow/lite/delegates/gpu/common/shape.h"
#include <stdint.h>
#include <string>
#include <vector>
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
namespace tflite {
namespace gpu {
namespace {
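// Helper functors passed to DispatchByLayout, letting the per-layout template
// functions be invoked with a Layout value known only at runtime.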
struct GetAxisByIndexFunc {
template <Layout T>
Axis operator()() const {
return GetAxis<T>(index);
}
int32_t index;
};
struct GetIndexByAxisFunc {
template <Layout T>
int operator()() const {
return GetAxisIndex<T>(axis);
}
Axis axis;
};
struct NumAxisFunc {
template <Layout T>
int operator()() const {
return Size<T>();
}
};
}
std::string ToString(Axis axis) {
switch (axis) {
case Axis::BATCH:
return "batch";
case Axis::CHANNELS:
return "channels";
case Axis::INPUT_CHANNELS:
return "input_channels";
case Axis::OUTPUT_CHANNELS:
return "output_channels";
case Axis::HEIGHT:
return "height";
case Axis::WIDTH:
return "width";
case Axis::VALUE:
return "value";
case Axis::DEPTH:
return "depth";
case Axis::UNKNOWN:
return "unknown";
}
return "undefined";
}
std::string ToString(Layout layout) {
switch (layout) {
case Layout::SCALAR:
return "scalar";
case Layout::LINEAR:
return "linear";
case Layout::HW:
return "hw";
case Layout::HWD:
return "hwd";
case Layout::CHW:
return "chw";
case Layout::HWC:
return "hwc";
case Layout::HWDC:
return "hwdc";
case Layout::OHWI:
return "ohwi";
case Layout::IHWO:
return "ihwo";
case Layout::OIHW:
return "oihw";
case Layout::IOHW:
return "iohw";
case Layout::BHWC:
return "bhwc";
case Layout::BHWDC:
return "bhwdc";
case Layout::OHWDI:
return "ohwdi";
case Layout::HWIO:
return "hwio";
case Layout::UNKNOWN:
return "unknown";
}
return "undefined";
}
Axis GetAxis(Layout layout, int32_t index) {
return DispatchByLayout(layout, GetAxisByIndexFunc{index});
}
int GetAxisIndex(Layout layout, Axis axis) {
return DispatchByLayout(layout, GetIndexByAxisFunc{axis});
}
bool HasAxis(Layout layout, Axis axis) {
return GetAxisIndex(layout, axis) >= 0;
}
int Size(Layout layout) { return DispatchByLayout(layout, NumAxisFunc()); }
std::string ToString(const Shape& s) {
return absl::StrCat("{", ToString(s.layout), ", {",
absl::StrJoin(s.dimensions, ", "), "}}");
}
}
} | #include "tensorflow/lite/delegates/gpu/common/shape.h"
#include <stdint.h>
#include <vector>
#include <gtest/gtest.h>
namespace tflite {
namespace gpu {
namespace {
TEST(OIHW, Smoke) {
OIHW oihw_shape;
oihw_shape.i = 1;
ASSERT_TRUE(oihw_shape.set<Axis::OUTPUT_CHANNELS>(2));
ASSERT_TRUE(oihw_shape.set(Axis::HEIGHT, 3));
ASSERT_TRUE(oihw_shape.set(3, 4));
ASSERT_FALSE(oihw_shape.set(5, 10));
ASSERT_FALSE(oihw_shape.set(Axis::CHANNELS, 10));
ASSERT_FALSE(oihw_shape.set<Axis::CHANNELS>(10));
EXPECT_EQ(1, oihw_shape.get(Axis::INPUT_CHANNELS));
EXPECT_EQ(2, oihw_shape.o);
EXPECT_EQ(3, oihw_shape.get(2));
EXPECT_EQ(4, oihw_shape.get<Axis::WIDTH>());
EXPECT_EQ(-1, oihw_shape.get(5));
EXPECT_EQ(-1, oihw_shape.get(Axis::CHANNELS));
EXPECT_EQ(-1, oihw_shape.get<Axis::CHANNELS>());
ASSERT_EQ(4, oihw_shape.size());
std::vector<Axis> expected = {Axis::OUTPUT_CHANNELS, Axis::INPUT_CHANNELS,
Axis::HEIGHT, Axis::WIDTH};
for (int i = 0; i < oihw_shape.size(); ++i) {
Axis axis = oihw_shape.axis(i);
ASSERT_EQ(expected[i], axis);
ASSERT_EQ(i, oihw_shape.index(axis));
}
OHWI ohwi;
ASSERT_TRUE(ohwi.CopyAllDefinedAxis(oihw_shape));
EXPECT_EQ(ohwi.o, oihw_shape.o);
EXPECT_EQ(ohwi.i, oihw_shape.i);
EXPECT_EQ(ohwi.h, oihw_shape.h);
EXPECT_EQ(ohwi.w, oihw_shape.w);
ohwi = OHWI(10, 20, 30, 40);
ASSERT_TRUE(oihw_shape.CopyAllGivenAxis(ohwi));
EXPECT_EQ(ohwi.o, oihw_shape.o);
EXPECT_EQ(ohwi.i, oihw_shape.i);
EXPECT_EQ(ohwi.h, oihw_shape.h);
EXPECT_EQ(ohwi.w, oihw_shape.w);
EXPECT_TRUE(ohwi.has(Axis::WIDTH));
EXPECT_FALSE(ohwi.has(Axis::DEPTH));
}
TEST(Layout, Smoke) {
EXPECT_EQ(4, Size<Layout::OIHW>());
EXPECT_EQ(4, Size(Layout::OIHW));
std::vector<Axis> expected = {Axis::OUTPUT_CHANNELS, Axis::INPUT_CHANNELS,
Axis::HEIGHT, Axis::WIDTH};
for (int i = 0; i < Size<Layout::OIHW>(); ++i) {
Axis axis = GetAxis<Layout::OIHW>(i);
ASSERT_EQ(expected[i], axis);
ASSERT_EQ(axis, GetAxis(Layout::OIHW, i));
ASSERT_EQ(i, GetAxisIndex<Layout::OIHW>(axis));
ASSERT_EQ(i, GetAxisIndex(Layout::OIHW, axis));
}
EXPECT_EQ(Axis::UNKNOWN, GetAxis(Layout::OIHW, 5));
EXPECT_EQ(-1, GetAxisIndex<Layout::OIHW>(Axis::CHANNELS));
EXPECT_EQ(-1, GetAxisIndex(Layout::OIHW, Axis::CHANNELS));
EXPECT_TRUE(HasAxis<Layout::OHWDI>(Axis::DEPTH));
EXPECT_FALSE(HasAxis<Layout::OHWDI>(Axis::CHANNELS));
}
TEST(Shape, Smoke) {
Shape s(Layout::OIHW, {1, 2, 3, 4});
EXPECT_TRUE(s.set(Axis::HEIGHT, 10));
EXPECT_TRUE(s.set<Axis::WIDTH>(20));
EXPECT_FALSE(s.set(Axis::BATCH, 10));
EXPECT_FALSE(s.set<Axis::BATCH>(20));
ASSERT_EQ(10, s.get<Axis::HEIGHT>());
ASSERT_EQ(20, s.get(Axis::WIDTH));
EXPECT_EQ(20, s.dimensions[3]);
EXPECT_TRUE(s.has(Axis::HEIGHT));
EXPECT_FALSE(s.has(Axis::DEPTH));
OIHW oihw(1, 2, 10, 20);
Shape s2 = oihw.ToShape();
EXPECT_EQ(s2.layout, oihw.layout);
EXPECT_EQ(s.layout, s2.layout);
EXPECT_EQ(s.dimensions, s2.dimensions);
OHWI ohwi;
ASSERT_TRUE(ohwi.Adopt(s2));
EXPECT_EQ(1, ohwi.o);
EXPECT_EQ(2, ohwi.i);
EXPECT_EQ(10, ohwi.h);
EXPECT_EQ(20, ohwi.w);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/common/shape.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/common/shape_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
c40de239-0441-4f1d-97f9-7210d0c66dc3 | cpp | google/quiche | structured_headers | quiche/common/structured_headers.cc | quiche/common/structured_headers_test.cc | #include "quiche/common/structured_headers.h"
#include <cmath>
#include <cstddef>
#include <cstdint>
#include <optional>
#include <sstream>
#include <string>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_set.h"
#include "absl/strings/ascii.h"
#include "absl/strings/escaping.h"
#include "absl/strings/numbers.h"
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "quiche/common/platform/api/quiche_logging.h"
namespace quiche {
namespace structured_headers {
namespace {
#define DIGIT "0123456789"
#define LCALPHA "abcdefghijklmnopqrstuvwxyz"
#define UCALPHA "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
#define TCHAR DIGIT LCALPHA UCALPHA "!#$%&'*+-.^_`|~"
constexpr char kTokenChars09[] = DIGIT UCALPHA LCALPHA "_-.:%*/";
constexpr char kTokenChars[] = TCHAR ":/";
constexpr char kKeyChars09[] = DIGIT LCALPHA "_-";
constexpr char kKeyChars[] = DIGIT LCALPHA "_-.*";
constexpr char kSP[] = " ";
constexpr char kOWS[] = " \t";
#undef DIGIT
#undef LCALPHA
#undef UCALPHA
constexpr int64_t kMaxInteger = 999'999'999'999'999L;
constexpr int64_t kMinInteger = -999'999'999'999'999L;
constexpr double kTooLargeDecimal = 1e12 - 0.0005;
void StripLeft(absl::string_view& s, absl::string_view remove) {
size_t i = s.find_first_not_of(remove);
if (i == absl::string_view::npos) {
i = s.size();
}
s.remove_prefix(i);
}
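// Parser for structured field values, supporting both the final RFC 8941
// syntax (kFinal) and the earlier draft-09 syntax (kDraft09).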
class StructuredHeaderParser {
public:
enum DraftVersion {
kDraft09,
kFinal,
};
explicit StructuredHeaderParser(absl::string_view str, DraftVersion version)
: input_(str), version_(version) {
SkipWhitespaces();
}
StructuredHeaderParser(const StructuredHeaderParser&) = delete;
StructuredHeaderParser& operator=(const StructuredHeaderParser&) = delete;
bool FinishParsing() {
SkipWhitespaces();
return input_.empty();
}
std::optional<ListOfLists> ReadListOfLists() {
QUICHE_CHECK_EQ(version_, kDraft09);
ListOfLists result;
while (true) {
std::vector<Item> inner_list;
while (true) {
std::optional<Item> item(ReadBareItem());
if (!item) return std::nullopt;
inner_list.push_back(std::move(*item));
SkipWhitespaces();
if (!ConsumeChar(';')) break;
SkipWhitespaces();
}
result.push_back(std::move(inner_list));
SkipWhitespaces();
if (!ConsumeChar(',')) break;
SkipWhitespaces();
}
return result;
}
std::optional<List> ReadList() {
QUICHE_CHECK_EQ(version_, kFinal);
List members;
while (!input_.empty()) {
std::optional<ParameterizedMember> member(ReadItemOrInnerList());
if (!member) return std::nullopt;
members.push_back(std::move(*member));
SkipOWS();
if (input_.empty()) break;
if (!ConsumeChar(',')) return std::nullopt;
SkipOWS();
if (input_.empty()) return std::nullopt;
}
return members;
}
std::optional<ParameterizedItem> ReadItem() {
std::optional<Item> item = ReadBareItem();
if (!item) return std::nullopt;
std::optional<Parameters> parameters = ReadParameters();
if (!parameters) return std::nullopt;
return ParameterizedItem(std::move(*item), std::move(*parameters));
}
std::optional<Item> ReadBareItem() {
if (input_.empty()) {
QUICHE_DVLOG(1) << "ReadBareItem: unexpected EOF";
return std::nullopt;
}
switch (input_.front()) {
case '"':
return ReadString();
case '*':
if (version_ == kDraft09) return ReadByteSequence();
return ReadToken();
case ':':
if (version_ == kFinal) return ReadByteSequence();
return std::nullopt;
case '?':
return ReadBoolean();
default:
if (input_.front() == '-' || absl::ascii_isdigit(input_.front()))
return ReadNumber();
if (absl::ascii_isalpha(input_.front())) return ReadToken();
return std::nullopt;
}
}
std::optional<Dictionary> ReadDictionary() {
QUICHE_CHECK_EQ(version_, kFinal);
Dictionary members;
while (!input_.empty()) {
std::optional<std::string> key(ReadKey());
if (!key) return std::nullopt;
std::optional<ParameterizedMember> member;
if (ConsumeChar('=')) {
member = ReadItemOrInnerList();
if (!member) return std::nullopt;
} else {
std::optional<Parameters> parameters = ReadParameters();
if (!parameters) return std::nullopt;
member = ParameterizedMember{Item(true), std::move(*parameters)};
}
members[*key] = std::move(*member);
SkipOWS();
if (input_.empty()) break;
if (!ConsumeChar(',')) return std::nullopt;
SkipOWS();
if (input_.empty()) return std::nullopt;
}
return members;
}
std::optional<ParameterisedList> ReadParameterisedList() {
QUICHE_CHECK_EQ(version_, kDraft09);
ParameterisedList items;
while (true) {
std::optional<ParameterisedIdentifier> item =
ReadParameterisedIdentifier();
if (!item) return std::nullopt;
items.push_back(std::move(*item));
SkipWhitespaces();
if (!ConsumeChar(',')) return items;
SkipWhitespaces();
}
}
private:
std::optional<ParameterisedIdentifier> ReadParameterisedIdentifier() {
QUICHE_CHECK_EQ(version_, kDraft09);
std::optional<Item> primary_identifier = ReadToken();
if (!primary_identifier) return std::nullopt;
ParameterisedIdentifier::Parameters parameters;
SkipWhitespaces();
while (ConsumeChar(';')) {
SkipWhitespaces();
std::optional<std::string> name = ReadKey();
if (!name) return std::nullopt;
Item value;
if (ConsumeChar('=')) {
auto item = ReadBareItem();
if (!item) return std::nullopt;
value = std::move(*item);
}
if (!parameters.emplace(*name, std::move(value)).second) {
QUICHE_DVLOG(1) << "ReadParameterisedIdentifier: duplicated parameter: "
<< *name;
return std::nullopt;
}
SkipWhitespaces();
}
return ParameterisedIdentifier(std::move(*primary_identifier),
std::move(parameters));
}
std::optional<ParameterizedMember> ReadItemOrInnerList() {
QUICHE_CHECK_EQ(version_, kFinal);
bool member_is_inner_list = (!input_.empty() && input_.front() == '(');
if (member_is_inner_list) {
return ReadInnerList();
} else {
auto item = ReadItem();
if (!item) return std::nullopt;
return ParameterizedMember(std::move(item->item),
std::move(item->params));
}
}
std::optional<Parameters> ReadParameters() {
Parameters parameters;
absl::flat_hash_set<std::string> keys;
while (ConsumeChar(';')) {
SkipWhitespaces();
std::optional<std::string> name = ReadKey();
if (!name) return std::nullopt;
bool is_duplicate_key = !keys.insert(*name).second;
Item value{true};
if (ConsumeChar('=')) {
auto item = ReadBareItem();
if (!item) return std::nullopt;
value = std::move(*item);
}
if (is_duplicate_key) {
for (auto& param : parameters) {
if (param.first == *name) {
param.second = std::move(value);
break;
}
}
} else {
parameters.emplace_back(std::move(*name), std::move(value));
}
}
return parameters;
}
std::optional<ParameterizedMember> ReadInnerList() {
QUICHE_CHECK_EQ(version_, kFinal);
if (!ConsumeChar('(')) return std::nullopt;
std::vector<ParameterizedItem> inner_list;
while (true) {
SkipWhitespaces();
if (ConsumeChar(')')) {
std::optional<Parameters> parameters = ReadParameters();
if (!parameters) return std::nullopt;
return ParameterizedMember(std::move(inner_list), true,
std::move(*parameters));
}
auto item = ReadItem();
if (!item) return std::nullopt;
inner_list.push_back(std::move(*item));
if (input_.empty() || (input_.front() != ' ' && input_.front() != ')'))
return std::nullopt;
}
QUICHE_NOTREACHED();
return std::nullopt;
}
std::optional<std::string> ReadKey() {
if (version_ == kDraft09) {
if (input_.empty() || !absl::ascii_islower(input_.front())) {
LogParseError("ReadKey", "lcalpha");
return std::nullopt;
}
} else {
if (input_.empty() ||
(!absl::ascii_islower(input_.front()) && input_.front() != '*')) {
LogParseError("ReadKey", "lcalpha | *");
return std::nullopt;
}
}
const char* allowed_chars =
(version_ == kDraft09 ? kKeyChars09 : kKeyChars);
size_t len = input_.find_first_not_of(allowed_chars);
if (len == absl::string_view::npos) len = input_.size();
std::string key(input_.substr(0, len));
input_.remove_prefix(len);
return key;
}
std::optional<Item> ReadToken() {
if (input_.empty() ||
!(absl::ascii_isalpha(input_.front()) || input_.front() == '*')) {
LogParseError("ReadToken", "ALPHA");
return std::nullopt;
}
size_t len = input_.find_first_not_of(version_ == kDraft09 ? kTokenChars09
: kTokenChars);
if (len == absl::string_view::npos) len = input_.size();
std::string token(input_.substr(0, len));
input_.remove_prefix(len);
return Item(std::move(token), Item::kTokenType);
}
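// Parses an Integer or Decimal. Under the final RFC grammar, integers are
// limited to 15 digits and decimals to 12 integral plus 3 fractional digits.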
std::optional<Item> ReadNumber() {
bool is_negative = ConsumeChar('-');
bool is_decimal = false;
size_t decimal_position = 0;
size_t i = 0;
for (; i < input_.size(); ++i) {
if (i > 0 && input_[i] == '.' && !is_decimal) {
is_decimal = true;
decimal_position = i;
continue;
}
if (!absl::ascii_isdigit(input_[i])) break;
}
if (i == 0) {
LogParseError("ReadNumber", "DIGIT");
return std::nullopt;
}
if (!is_decimal) {
if (version_ == kFinal && i > 15) {
LogParseError("ReadNumber", "integer too long");
return std::nullopt;
}
} else {
if (version_ != kFinal && i > 16) {
LogParseError("ReadNumber", "float too long");
return std::nullopt;
}
if (version_ == kFinal && decimal_position > 12) {
LogParseError("ReadNumber", "decimal too long");
return std::nullopt;
}
if (i - decimal_position > (version_ == kFinal ? 4 : 7)) {
LogParseError("ReadNumber", "too many digits after decimal");
return std::nullopt;
}
if (i == decimal_position) {
LogParseError("ReadNumber", "no digits after decimal");
return std::nullopt;
}
}
std::string output_number_string(input_.substr(0, i));
input_.remove_prefix(i);
if (is_decimal) {
double f;
if (!absl::SimpleAtod(output_number_string, &f)) return std::nullopt;
return Item(is_negative ? -f : f);
} else {
int64_t n;
if (!absl::SimpleAtoi(output_number_string, &n)) return std::nullopt;
QUICHE_CHECK(version_ != kFinal ||
(n <= kMaxInteger && n >= kMinInteger));
return Item(is_negative ? -n : n);
}
}
std::optional<Item> ReadString() {
std::string s;
if (!ConsumeChar('"')) {
LogParseError("ReadString", "'\"'");
return std::nullopt;
}
while (!ConsumeChar('"')) {
size_t i = 0;
for (; i < input_.size(); ++i) {
if (!absl::ascii_isprint(input_[i])) {
QUICHE_DVLOG(1) << "ReadString: non printable-ASCII character";
return std::nullopt;
}
if (input_[i] == '"' || input_[i] == '\\') break;
}
if (i == input_.size()) {
QUICHE_DVLOG(1) << "ReadString: missing closing '\"'";
return std::nullopt;
}
s.append(std::string(input_.substr(0, i)));
input_.remove_prefix(i);
if (ConsumeChar('\\')) {
if (input_.empty()) {
QUICHE_DVLOG(1) << "ReadString: backslash at string end";
return std::nullopt;
}
if (input_[0] != '"' && input_[0] != '\\') {
QUICHE_DVLOG(1) << "ReadString: invalid escape";
return std::nullopt;
}
s.push_back(input_.front());
input_.remove_prefix(1);
}
}
return s;
}
std::optional<Item> ReadByteSequence() {
char delimiter = (version_ == kDraft09 ? '*' : ':');
if (!ConsumeChar(delimiter)) {
LogParseError("ReadByteSequence", "delimiter");
return std::nullopt;
}
size_t len = input_.find(delimiter);
if (len == absl::string_view::npos) {
QUICHE_DVLOG(1) << "ReadByteSequence: missing closing delimiter";
return std::nullopt;
}
std::string base64(input_.substr(0, len));
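// Restore any '=' padding that serializers are permitted to omit before
// base64-decoding.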
base64.resize((base64.size() + 3) / 4 * 4, '=');
std::string binary;
if (!absl::Base64Unescape(base64, &binary)) {
QUICHE_DVLOG(1) << "ReadByteSequence: failed to decode base64: "
<< base64;
return std::nullopt;
}
input_.remove_prefix(len);
ConsumeChar(delimiter);
return Item(std::move(binary), Item::kByteSequenceType);
}
std::optional<Item> ReadBoolean() {
if (!ConsumeChar('?')) {
LogParseError("ReadBoolean", "'?'");
return std::nullopt;
}
if (ConsumeChar('1')) {
return Item(true);
}
if (ConsumeChar('0')) {
return Item(false);
}
return std::nullopt;
}
void SkipWhitespaces() {
if (version_ == kDraft09) {
StripLeft(input_, kOWS);
} else {
StripLeft(input_, kSP);
}
}
void SkipOWS() { StripLeft(input_, kOWS); }
bool ConsumeChar(char expected) {
if (!input_.empty() && input_.front() == expected) {
input_.remove_prefix(1);
return true;
}
return false;
}
void LogParseError(const char* func, const char* expected) {
QUICHE_DVLOG(1) << func << ": " << expected << " expected, got "
<< (input_.empty()
? "EOS"
: "'" + std::string(input_.substr(0, 1)) + "'");
}
absl::string_view input_;
DraftVersion version_;
};
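// Serializes parsed structured values back into their canonical string form.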
class StructuredHeaderSerializer {
public:
StructuredHeaderSerializer() = default;
~StructuredHeaderSerializer() = default;
StructuredHeaderSerializer(const StructuredHeaderSerializer&) = delete;
StructuredHeaderSerializer& operator=(const StructuredHeaderSerializer&) =
delete;
std::string Output() { return output_.str(); }
bool WriteList(const List& value) {
bool first = true;
for (const auto& member : value) {
if (!first) output_ << ", ";
if (!WriteParameterizedMember(member)) return false;
first = false;
}
return true;
}
bool WriteItem(const ParameterizedItem& value) {
if (!WriteBareItem(value.item)) return false;
return WriteParameters(value.params);
}
bool WriteBareItem(const Item& value) {
if (value.is_string()) {
output_ << "\"";
for (const char& c : value.GetString()) {
if (!absl::ascii_isprint(c)) return false;
if (c == '\\' || c == '\"') output_ << "\\";
output_ << c;
}
output_ << "\"";
return true;
}
if (value.is_token()) {
if (!IsValidToken(value.GetString())) {
return false;
}
output_ << value.GetString();
return true;
}
if (value.is_byte_sequence()) {
output_ << ":";
output_ << absl::Base64Escape(value.GetString());
output_ << ":";
return true;
}
if (value.is_integer()) {
if (value.GetInteger() > kMaxInteger || value.GetInteger() < kMinInteger)
return false;
output_ << value.GetInteger();
return true;
}
if (value.is_decimal()) {
double decimal_value = value.GetDecimal();
if (!std::isfinite(decimal_value) ||
fabs(decimal_value) >= kTooLargeDecimal)
return false;
if (decimal_value < 0) output_ << "-";
decimal_value = fabs(decimal_value);
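// Round to three decimal places, breaking exact .0005 ties toward the even
// final digit (round half to even), as the canonical form requires.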
double remainder = fmod(decimal_value, 0.002);
if (remainder == 0.0005) {
decimal_value -= 0.0005;
} else if (remainder == 0.0015) {
decimal_value += 0.0005;
} else {
decimal_value = round(decimal_value * 1000.0) / 1000.0;
}
char buffer[17];
absl::SNPrintF(buffer, std::size(buffer), "%#.3f", decimal_value);
absl::string_view formatted_number(buffer);
auto truncate_index = formatted_number.find_last_not_of('0');
if (formatted_number[truncate_index] == '.') truncate_index++;
output_ << formatted_number.substr(0, truncate_index + 1);
return true;
}
if (value.is_boolean()) {
output_ << (value.GetBoolean() ? "?1" : "?0");
return true;
}
return false;
}
bool WriteDictionary(const Dictionary& value) {
bool first = true;
for (const auto& [dict_key, dict_value] : value) {
if (!first) output_ << ", ";
if (!WriteKey(dict_key)) return false;
first = false;
if (!dict_value.member_is_inner_list && !dict_value.member.empty() &&
dict_value.member.front().item.is_boolean() &&
dict_value.member.front().item.GetBoolean()) {
if (!WriteParameters(dict_value.params)) return false;
} else {
output_ << "=";
if (!WriteParameterizedMember(dict_value)) return false;
}
}
return true;
}
private:
bool WriteParameterizedMember(const ParameterizedMember& value) {
if (value.member_is_inner_list) {
if (!WriteInnerList(value.member)) return false;
} else {
QUICHE_CHECK_EQ(value.member.size(), 1UL);
if (!WriteItem(value.member[0])) return false;
}
return WriteParameters(value.params);
}
bool WriteInnerList(const std::vector<ParameterizedItem>& value) {
output_ << "(";
bool first = true;
for (const ParameterizedItem& member : value) {
if (!first) output_ << " ";
if (!WriteItem(member)) return false;
first = false;
}
output_ << ")";
return true;
}
bool WriteParameters(const Parameters& value) {
for (const auto& param_name_and_value : value) {
const std::string& param_name = param_name_and_value.first;
const Item& param_value = param_name_and_value.second;
output_ << ";";
if (!WriteKey(param_name)) return false;
if (!param_value.is_null()) {
if (param_value.is_boolean() && param_value.GetBoolean()) continue;
output_ << "=";
if (!WriteBareItem(param_value)) return false;
}
}
return true;
}
bool WriteKey(const std::string& value) {
if (value.empty()) return false;
if (value.find_first_not_of(kKeyChars) != std::string::npos) return false;
if (!absl::ascii_islower(value[0]) && value[0] != '*') return false;
output_ << value;
return true;
}
std::ostringstream output_;
};
}
absl::string_view ItemTypeToString(Item::ItemType type) {
switch (type) {
case Item::kNullType:
return "null";
case Item::kIntegerType:
return "integer";
case Item::kDecimalType:
return "decimal";
case Item::kStringType:
return "string";
case Item::kTokenType:
return "token";
case Item::kByteSequenceType:
return "byte sequence";
case Item::kBooleanType:
return "boolean";
}
return "[invalid type]";
}
bool IsValidToken(absl::string_view str) {
if (str.empty() ||
!(absl::ascii_isalpha(str.front()) || str.front() == '*')) {
return false;
}
if (str.find_first_not_of(kTokenChars) != std::string::npos) {
return false;
}
return true;
}
Item::Item() {}
Item::Item(std::string value, Item::ItemType type) {
switch (type) {
case kStringType:
value_.emplace<kStringType>(std::move(value));
break;
case kTokenType:
value_.emplace<kTokenType>(std::move(value));
break;
case kByteSequenceType:
value_.emplace<kByteSequenceType>(std::move(value));
break;
default:
QUICHE_CHECK(false);
break;
}
}
Item::Item(const char* value, Item::ItemType type)
: Item(std::string(value), type) {}
Item::Item(int64_t value) : value_(value) {}
Item::Item(double value) : value_(value) {}
Item::Item(bool value) : value_(value) {}
bool operator==(const Item& lhs, const Item& rhs) {
return lhs.value_ == rhs.value_;
}
ParameterizedItem::ParameterizedItem() = default;
ParameterizedItem::ParameterizedItem(const ParameterizedItem&) = default;
ParameterizedItem& ParameterizedItem::operator=(const ParameterizedItem&) =
default;
ParameterizedItem::ParameterizedItem(Item id, Parameters ps)
: item(std::move(id)), params(std::move(ps)) {}
ParameterizedItem::~ParameterizedItem() = default;
ParameterizedMember::ParameterizedMember() = default;
ParameterizedMember::ParameterizedMember(const ParameterizedMember&) = default;
ParameterizedMember& ParameterizedMember::operator=(
const ParameterizedMember&) = default;
ParameterizedMember::ParameterizedMember(std::vector<ParameterizedItem> id,
bool member_is_inner_list,
Parameters ps)
: member(std::move(id)),
member_is_inner_list(member_is_inner_list),
params(std::move(ps)) {}
ParameterizedMember::ParameterizedMember(std::vector<ParameterizedItem> id,
Parameters ps)
: member(std::move(id)),
member_is_inner_list(true),
params(std::move(ps)) {}
ParameterizedMember::ParameterizedMember(Item id, Parameters ps)
: member({{std::move(id), {}}}),
member_is_inner_list(false),
params(std::move(ps)) {}
ParameterizedMember::~ParameterizedMember() = default;
ParameterisedIdentifier::ParameterisedIdentifier() = default;
ParameterisedIdentifier::ParameterisedIdentifier(
const ParameterisedIdentifier&) = default;
ParameterisedIdentifier& ParameterisedIdentifier::operator=(
const ParameterisedIdentifier&) = default;
ParameterisedIdentifier::ParameterisedIdentifier(Item id, Parameters ps)
: identifier(std::move(id)), params(std::move(ps)) {}
ParameterisedIdentifier::~ParameterisedIdentifier() = default;
Dictionary::Dictionary() = default;
Dictionary::Dictionary(const Dictionary&) = default;
Dictionary::Dictionary(Dictionary&&) = default;
Dictionary::Dictionary(std::vector<DictionaryMember> members)
: members_(std::move(members)) {}
Dictionary::~Dictionary() = default;
Dictionary::iterator Dictionary::begin() { return members_.begin(); }
Dictionary::const_iterator Dictionary::begin() const {
return members_.begin();
}
Dictionary::iterator Dictionary::end() { return members_.end(); }
Dictionary::const_iterator Dictionary::end() const { return members_.end(); }
ParameterizedMember& Dictionary::operator[](std::size_t idx) {
return members_[idx].second;
}
const ParameterizedMember& Dictionary::operator[](std::size_t idx) const {
return members_[idx].second;
}
ParameterizedMember& Dictionary::at(std::size_t idx) { return (*this)[idx]; }
const ParameterizedMember& Dictionary::at(std::size_t idx) const {
return (*this)[idx];
}
ParameterizedMember& Dictionary::operator[](absl::string_view key) {
auto it = find(key);
if (it != end()) return it->second;
return members_.emplace_back(key, ParameterizedMember()).second;
}
ParameterizedMember& Dictionary::at(absl::string_view key) {
auto it = find(key);
QUICHE_CHECK(it != end()) << "Provided key not found in dictionary";
return it->second;
}
const ParameterizedMember& Dictionary::at(absl::string_view key) const {
auto it = find(key);
QUICHE_CHECK(it != end()) << "Provided key not found in dictionary";
return it->second;
}
Dictionary::const_iterator Dictionary::find(absl::string_view key) const {
return absl::c_find_if(
members_, [key](const auto& member) { return member.first == key; });
}
Dictionary::iterator Dictionary::find(absl::string_view key) {
return absl::c_find_if(
members_, [key](const auto& member) { return member.first == key; });
}
bool Dictionary::empty() const { return members_.empty(); }
std::size_t Dictionary::size() const { return members_.size(); }
bool Dictionary::contains(absl::string_view key) const {
return find(key) != end();
}
void Dictionary::clear() { members_.clear(); }
std::optional<ParameterizedItem> ParseItem(absl::string_view str) {
StructuredHeaderParser parser(str, StructuredHeaderParser::kFinal);
std::optional<ParameterizedItem> item = parser.ReadItem();
if (item && parser.FinishParsing()) return item;
return std::nullopt;
}
std::optional<Item> ParseBareItem(absl::string_view str) {
StructuredHeaderParser parser(str, StructuredHeaderParser::kFinal);
std::optional<Item> item = parser.ReadBareItem();
if (item && parser.FinishParsing()) return item;
return std::nullopt;
}
std::optional<ParameterisedList> ParseParameterisedList(absl::string_view str) {
StructuredHeaderParser parser(str, StructuredHeaderParser::kDraft09);
std::optional<ParameterisedList> param_list = parser.ReadParameterisedList();
if (param_list && parser.FinishParsing()) return param_list;
return std::nullopt;
}
std::optional<ListOfLists> ParseListOfLists(absl::string_view str) {
StructuredHeaderParser parser(str, StructuredHeaderParser::kDraft09);
std::optional<ListOfLists> list_of_lists = parser.ReadListOfLists();
if (list_of_lists && parser.FinishParsing()) return list_of_lists;
return std::nullopt;
}
std::optional<List> ParseList(absl::string_view str) {
StructuredHeaderParser parser(str, StructuredHeaderParser::kFinal);
std::optional<List> list = parser.ReadList();
if (list && parser.FinishParsing()) return list;
return std::nullopt;
}
std::optional<Dictionary> ParseDictionary(absl::string_view str) {
StructuredHeaderParser parser(str, StructuredHeaderParser::kFinal);
std::optional<Dictionary> dictionary = parser.ReadDictionary();
if (dictionary && parser.FinishParsing()) return dictionary;
return std::nullopt;
}
std::optional<std::string> SerializeItem(const Item& value) {
StructuredHeaderSerializer s;
if (s.WriteItem(ParameterizedItem(value, {}))) return s.Output();
return std::nullopt;
}
std::optional<std::string> SerializeItem(const ParameterizedItem& value) {
StructuredHeaderSerializer s;
if (s.WriteItem(value)) return s.Output();
return std::nullopt;
}
std::optional<std::string> SerializeList(const List& value) {
StructuredHeaderSerializer s;
if (s.WriteList(value)) return s.Output();
return std::nullopt;
}
std::optional<std::string> SerializeDictionary(const Dictionary& value) {
StructuredHeaderSerializer s;
if (s.WriteDictionary(value)) return s.Output();
return std::nullopt;
}
}
} | #include "quiche/common/structured_headers.h"
#include <math.h>
#include <limits>
#include <optional>
#include <string>
#include <utility>
#include "quiche/common/platform/api/quiche_test.h"
namespace quiche {
namespace structured_headers {
namespace {
Item Token(std::string value) { return Item(value, Item::kTokenType); }
Item Integer(int64_t value) { return Item(value); }
std::pair<std::string, Item> NullParam(std::string key) {
return std::make_pair(key, Item());
}
std::pair<std::string, Item> BooleanParam(std::string key, bool value) {
return std::make_pair(key, Item(value));
}
std::pair<std::string, Item> DoubleParam(std::string key, double value) {
return std::make_pair(key, Item(value));
}
std::pair<std::string, Item> Param(std::string key, int64_t value) {
return std::make_pair(key, Item(value));
}
std::pair<std::string, Item> Param(std::string key, std::string value) {
return std::make_pair(key, Item(value));
}
std::pair<std::string, Item> ByteSequenceParam(std::string key,
std::string value) {
return std::make_pair(key, Item(value, Item::kByteSequenceType));
}
std::pair<std::string, Item> TokenParam(std::string key, std::string value) {
return std::make_pair(key, Token(value));
}
const struct ItemTestCase {
const char* name;
const char* raw;
const std::optional<Item> expected;
const char* canonical;
} item_test_cases[] = {
{"bad token - item", "abc$@%!", std::nullopt, nullptr},
{"leading whitespace", " foo", Token("foo"), "foo"},
{"trailing whitespace", "foo ", Token("foo"), "foo"},
{"leading asterisk", "*foo", Token("*foo"), nullptr},
{"long integer", "999999999999999", Integer(999999999999999L), nullptr},
{"long negative integer", "-999999999999999", Integer(-999999999999999L),
nullptr},
{"too long integer", "1000000000000000", std::nullopt, nullptr},
{"negative too long integer", "-1000000000000000", std::nullopt, nullptr},
{"integral decimal", "1.0", Item(1.0), nullptr},
{"basic string", "\"foo\"", Item("foo"), nullptr},
{"non-ascii string", "\"f\xC3\xBC\xC3\xBC\"", std::nullopt, nullptr},
{"valid quoting containing \\n", "\"\\\\n\"", Item("\\n"), nullptr},
{"valid quoting containing \\t", "\"\\\\t\"", Item("\\t"), nullptr},
{"valid quoting containing \\x", "\"\\\\x61\"", Item("\\x61"), nullptr},
{"c-style hex escape in string", "\"\\x61\"", std::nullopt, nullptr},
{"valid quoting containing \\u", "\"\\\\u0061\"", Item("\\u0061"), nullptr},
{"c-style unicode escape in string", "\"\\u0061\"", std::nullopt, nullptr},
};
const ItemTestCase sh09_item_test_cases[] = {
{"large integer", "9223372036854775807", Integer(9223372036854775807L),
nullptr},
{"large negative integer", "-9223372036854775807",
Integer(-9223372036854775807L), nullptr},
{"too large integer", "9223372036854775808", std::nullopt, nullptr},
{"too large negative integer", "-9223372036854775808", std::nullopt,
nullptr},
{"basic binary", "*aGVsbG8=*", Item("hello", Item::kByteSequenceType),
nullptr},
{"empty binary", "**", Item("", Item::kByteSequenceType), nullptr},
{"bad paddding", "*aGVsbG8*", Item("hello", Item::kByteSequenceType),
"*aGVsbG8=*"},
{"bad end delimiter", "*aGVsbG8=", std::nullopt, nullptr},
{"extra whitespace", "*aGVsb G8=*", std::nullopt, nullptr},
{"extra chars", "*aGVsbG!8=*", std::nullopt, nullptr},
{"suffix chars", "*aGVsbG8=!*", std::nullopt, nullptr},
{"non-zero pad bits", "*iZ==*", Item("\x89", Item::kByteSequenceType),
"*iQ==*"},
{"non-ASCII binary", "*/+Ah*", Item("\xFF\xE0!", Item::kByteSequenceType),
nullptr},
{"base64url binary", "*_-Ah*", std::nullopt, nullptr},
{"token with leading asterisk", "*foo", std::nullopt, nullptr},
};
const struct ParameterizedItemTestCase {
const char* name;
const char* raw;
const std::optional<ParameterizedItem>
expected;
const char* canonical;
} parameterized_item_test_cases[] = {
{"single parameter item",
"text/html;q=1.0",
{{Token("text/html"), {DoubleParam("q", 1)}}},
nullptr},
{"missing parameter value item",
"text/html;a;q=1.0",
{{Token("text/html"), {BooleanParam("a", true), DoubleParam("q", 1)}}},
nullptr},
{"missing terminal parameter value item",
"text/html;q=1.0;a",
{{Token("text/html"), {DoubleParam("q", 1), BooleanParam("a", true)}}},
nullptr},
{"duplicate parameter keys with different value",
"text/html;a=1;b=2;a=3.0",
{{Token("text/html"), {DoubleParam("a", 3), Param("b", 2L)}}},
"text/html;a=3.0;b=2"},
{"multiple duplicate parameter keys at different position",
"text/html;c=1;a=2;b;b=3.0;a",
{{Token("text/html"),
{Param("c", 1L), BooleanParam("a", true), DoubleParam("b", 3)}}},
"text/html;c=1;a;b=3.0"},
{"duplicate parameter keys with missing value",
"text/html;a;a=1",
{{Token("text/html"), {Param("a", 1L)}}},
"text/html;a=1"},
{"whitespace before = parameterised item", "text/html, text/plain;q =0.5",
std::nullopt, nullptr},
{"whitespace after = parameterised item", "text/html, text/plain;q= 0.5",
std::nullopt, nullptr},
{"whitespace before ; parameterised item", "text/html, text/plain ;q=0.5",
std::nullopt, nullptr},
{"whitespace after ; parameterised item",
"text/plain; q=0.5",
{{Token("text/plain"), {DoubleParam("q", 0.5)}}},
"text/plain;q=0.5"},
{"extra whitespace parameterised item",
"text/plain; q=0.5; charset=utf-8",
{{Token("text/plain"),
{DoubleParam("q", 0.5), TokenParam("charset", "utf-8")}}},
"text/plain;q=0.5;charset=utf-8"},
};
const struct ListTestCase {
const char* name;
const char* raw;
const std::optional<List> expected;
const char* canonical;
} list_test_cases[] = {
{"extra whitespace list of lists",
"(1 42)",
{{{{{Integer(1L), {}}, {Integer(42L), {}}}, {}}}},
"(1 42)"},
{"basic parameterised list",
"abc_123;a=1;b=2; cdef_456, ghi;q=\"9\";r=\"+w\"",
{{{Token("abc_123"),
{Param("a", 1), Param("b", 2), BooleanParam("cdef_456", true)}},
{Token("ghi"), {Param("q", "9"), Param("r", "+w")}}}},
"abc_123;a=1;b=2;cdef_456, ghi;q=\"9\";r=\"+w\""},
{"parameterised basic list of lists",
"(1;a=1.0 2), (42 43)",
{{{{{Integer(1L), {DoubleParam("a", 1.0)}}, {Integer(2L), {}}}, {}},
{{{Integer(42L), {}}, {Integer(43L), {}}}, {}}}},
nullptr},
{"parameters on inner members",
"(1;a=1.0 2;b=c), (42;d=?0 43;e=:Zmdo:)",
{{{{{Integer(1L), {DoubleParam("a", 1.0)}},
{Integer(2L), {TokenParam("b", "c")}}},
{}},
{{{Integer(42L), {BooleanParam("d", false)}},
{Integer(43L), {ByteSequenceParam("e", "fgh")}}},
{}}}},
nullptr},
{"parameters on inner lists",
"(1 2);a=1.0, (42 43);b=?0",
{{{{{Integer(1L), {}}, {Integer(2L), {}}}, {DoubleParam("a", 1.0)}},
{{{Integer(42L), {}}, {Integer(43L), {}}}, {BooleanParam("b", false)}}}},
nullptr},
{"default true values for parameters on inner list members",
"(1;a 2), (42 43;b)",
{{{{{Integer(1L), {BooleanParam("a", true)}}, {Integer(2L), {}}}, {}},
{{{Integer(42L), {}}, {Integer(43L), {BooleanParam("b", true)}}}, {}}}},
nullptr},
{"default true values for parameters on inner lists",
"(1 2);a, (42 43);b",
{{{{{Integer(1L), {}}, {Integer(2L), {}}}, {BooleanParam("a", true)}},
{{{Integer(42L), {}}, {Integer(43L), {}}}, {BooleanParam("b", true)}}}},
nullptr},
{"extra whitespace before semicolon in parameters on inner list member",
"(a;b ;c b)", std::nullopt, nullptr},
{"extra whitespace between parameters on inner list member",
"(a;b; c b)",
{{{{{Token("a"), {BooleanParam("b", true), BooleanParam("c", true)}},
{Token("b"), {}}},
{}}}},
"(a;b;c b)"},
{"extra whitespace before semicolon in parameters on inner list",
"(a b);c ;d, (e)", std::nullopt, nullptr},
{"extra whitespace between parameters on inner list",
"(a b);c; d, (e)",
{{{{{Token("a"), {}}, {Token("b"), {}}},
{BooleanParam("c", true), BooleanParam("d", true)}},
{{{Token("e"), {}}}, {}}}},
"(a b);c;d, (e)"},
};
const struct DictionaryTestCase {
const char* name;
const char* raw;
const std::optional<Dictionary>
expected;
const char* canonical;
} dictionary_test_cases[] = {
{"basic dictionary",
"en=\"Applepie\", da=:aGVsbG8=:",
{Dictionary{{{"en", {Item("Applepie"), {}}},
{"da", {Item("hello", Item::kByteSequenceType), {}}}}}},
nullptr},
{"tab separated dictionary",
"a=1\t,\tb=2",
{Dictionary{{{"a", {Integer(1L), {}}}, {"b", {Integer(2L), {}}}}}},
"a=1, b=2"},
{"missing value with params dictionary",
"a=1, b;foo=9, c=3",
{Dictionary{{{"a", {Integer(1L), {}}},
{"b", {Item(true), {Param("foo", 9)}}},
{"c", {Integer(3L), {}}}}}},
nullptr},
{"parameterised inner list member dict",
"a=(\"1\";b=1;c=?0 \"2\");d=\"e\"",
{Dictionary{{{"a",
{{{Item("1"), {Param("b", 1), BooleanParam("c", false)}},
{Item("2"), {}}},
{Param("d", "e")}}}}}},
nullptr},
{"explicit true value with parameter",
"a=?1;b=1",
{Dictionary{{{"a", {Item(true), {Param("b", 1)}}}}}},
"a;b=1"},
{"implicit true value with parameter",
"a;b=1",
{Dictionary{{{"a", {Item(true), {Param("b", 1)}}}}}},
nullptr},
{"implicit true value with implicitly-valued parameter",
"a;b",
{Dictionary{{{"a", {Item(true), {BooleanParam("b", true)}}}}}},
nullptr},
};
}
TEST(StructuredHeaderTest, ParseBareItem) {
for (const auto& c : item_test_cases) {
SCOPED_TRACE(c.name);
std::optional<Item> result = ParseBareItem(c.raw);
EXPECT_EQ(result, c.expected);
}
}
TEST(StructuredHeaderTest, ParseItem) {
for (const auto& c : parameterized_item_test_cases) {
SCOPED_TRACE(c.name);
std::optional<ParameterizedItem> result = ParseItem(c.raw);
EXPECT_EQ(result, c.expected);
}
}
TEST(StructuredHeaderTest, ParseSH09Item) {
for (const auto& c : sh09_item_test_cases) {
SCOPED_TRACE(c.name);
std::optional<ListOfLists> result = ParseListOfLists(c.raw);
if (c.expected.has_value()) {
EXPECT_TRUE(result.has_value());
EXPECT_EQ(result->size(), 1UL);
EXPECT_EQ((*result)[0].size(), 1UL);
EXPECT_EQ((*result)[0][0], c.expected);
} else {
EXPECT_FALSE(result.has_value());
}
}
}
TEST(StructuredHeaderTest, SH09HighPrecisionFloats) {
std::optional<ListOfLists> result =
ParseListOfLists("1.03125;-1.03125;12345678901234.5;-12345678901234.5");
ASSERT_TRUE(result.has_value());
EXPECT_EQ(*result,
(ListOfLists{{Item(1.03125), Item(-1.03125), Item(12345678901234.5),
Item(-12345678901234.5)}}));
result = ParseListOfLists("123456789012345.0");
EXPECT_FALSE(result.has_value());
result = ParseListOfLists("-123456789012345.0");
EXPECT_FALSE(result.has_value());
}
TEST(StructuredHeaderTest, ParseListOfLists) {
static const struct TestCase {
const char* name;
const char* raw;
ListOfLists expected;
} cases[] = {
{"basic list of lists",
"1;2, 42;43",
{{Integer(1L), Integer(2L)}, {Integer(42L), Integer(43L)}}},
{"empty list of lists", "", {}},
{"single item list of lists", "42", {{Integer(42L)}}},
{"no whitespace list of lists", "1,42", {{Integer(1L)}, {Integer(42L)}}},
{"no inner whitespace list of lists",
"1;2, 42;43",
{{Integer(1L), Integer(2L)}, {Integer(42L), Integer(43L)}}},
{"extra whitespace list of lists",
"1 , 42",
{{Integer(1L)}, {Integer(42L)}}},
{"extra inner whitespace list of lists",
"1 ; 2,42 ; 43",
{{Integer(1L), Integer(2L)}, {Integer(42L), Integer(43L)}}},
{"trailing comma list of lists", "1;2, 42,", {}},
{"trailing semicolon list of lists", "1;2, 42;43;", {}},
{"leading comma list of lists", ",1;2, 42", {}},
{"leading semicolon list of lists", ";1;2, 42;43", {}},
{"empty item list of lists", "1,,42", {}},
{"empty inner item list of lists", "1;;2,42", {}},
};
for (const auto& c : cases) {
SCOPED_TRACE(c.name);
std::optional<ListOfLists> result = ParseListOfLists(c.raw);
if (!c.expected.empty()) {
EXPECT_TRUE(result.has_value());
EXPECT_EQ(*result, c.expected);
} else {
EXPECT_FALSE(result.has_value());
}
}
}
TEST(StructuredHeaderTest, ParseParameterisedList) {
static const struct TestCase {
const char* name;
const char* raw;
ParameterisedList expected;
} cases[] = {
{"basic param-list",
"abc_123;a=1;b=2; cdef_456, ghi;q=\"9\";r=\"w\"",
{
{Token("abc_123"),
{Param("a", 1), Param("b", 2), NullParam("cdef_456")}},
{Token("ghi"), {Param("q", "9"), Param("r", "w")}},
}},
{"empty param-list", "", {}},
{"single item param-list",
"text/html;q=1",
{{Token("text/html"), {Param("q", 1)}}}},
{"empty param-list", "", {}},
{"no whitespace param-list",
"text/html,text/plain;q=1",
{{Token("text/html"), {}}, {Token("text/plain"), {Param("q", 1)}}}},
{"whitespace before = param-list", "text/html, text/plain;q =1", {}},
{"whitespace after = param-list", "text/html, text/plain;q= 1", {}},
{"extra whitespace param-list",
"text/html , text/plain ; q=1",
{{Token("text/html"), {}}, {Token("text/plain"), {Param("q", 1)}}}},
{"duplicate key", "abc;a=1;b=2;a=1", {}},
{"numeric key", "abc;a=1;1b=2;c=1", {}},
{"uppercase key", "abc;a=1;B=2;c=1", {}},
{"bad key", "abc;a=1;b!=2;c=1", {}},
{"another bad key", "abc;a=1;b==2;c=1", {}},
{"empty key name", "abc;a=1;=2;c=1", {}},
{"empty parameter", "abc;a=1;;c=1", {}},
{"empty list item", "abc;a=1,,def;b=1", {}},
{"extra semicolon", "abc;a=1;b=1;", {}},
{"extra comma", "abc;a=1,def;b=1,", {}},
{"leading semicolon", ";abc;a=1", {}},
{"leading comma", ",abc;a=1", {}},
};
for (const auto& c : cases) {
SCOPED_TRACE(c.name);
std::optional<ParameterisedList> result = ParseParameterisedList(c.raw);
if (c.expected.empty()) {
EXPECT_FALSE(result.has_value());
continue;
}
EXPECT_TRUE(result.has_value());
EXPECT_EQ(result->size(), c.expected.size());
if (result->size() == c.expected.size()) {
for (size_t i = 0; i < c.expected.size(); ++i) {
EXPECT_EQ((*result)[i], c.expected[i]);
}
}
}
}
TEST(StructuredHeaderTest, ParseList) {
for (const auto& c : list_test_cases) {
SCOPED_TRACE(c.name);
std::optional<List> result = ParseList(c.raw);
EXPECT_EQ(result, c.expected);
}
}
TEST(StructuredHeaderTest, ParseDictionary) {
for (const auto& c : dictionary_test_cases) {
SCOPED_TRACE(c.name);
std::optional<Dictionary> result = ParseDictionary(c.raw);
EXPECT_EQ(result, c.expected);
}
}
TEST(StructuredHeaderTest, SerializeItem) {
for (const auto& c : item_test_cases) {
SCOPED_TRACE(c.name);
if (c.expected) {
std::optional<std::string> result = SerializeItem(*c.expected);
EXPECT_TRUE(result.has_value());
EXPECT_EQ(result.value(), std::string(c.canonical ? c.canonical : c.raw));
}
}
}
TEST(StructuredHeaderTest, SerializeParameterizedItem) {
for (const auto& c : parameterized_item_test_cases) {
SCOPED_TRACE(c.name);
if (c.expected) {
std::optional<std::string> result = SerializeItem(*c.expected);
EXPECT_TRUE(result.has_value());
EXPECT_EQ(result.value(), std::string(c.canonical ? c.canonical : c.raw));
}
}
}
TEST(StructuredHeaderTest, UnserializableItems) {
EXPECT_FALSE(SerializeItem(Item()).has_value());
}
TEST(StructuredHeaderTest, UnserializableTokens) {
static const struct UnserializableString {
const char* name;
const char* value;
} bad_tokens[] = {
{"empty token", ""},
{"contains high ascii", "a\xff"},
{"contains nonprintable character", "a\x7f"},
{"contains C0", "a\x01"},
{"UTF-8 encoded", "a\xc3\xa9"},
{"contains TAB", "a\t"},
{"contains LF", "a\n"},
{"contains CR", "a\r"},
{"contains SP", "a "},
{"begins with digit", "9token"},
{"begins with hyphen", "-token"},
{"begins with LF", "\ntoken"},
{"begins with SP", " token"},
{"begins with colon", ":token"},
{"begins with percent", "%token"},
{"begins with period", ".token"},
{"begins with slash", "/token"},
};
for (const auto& bad_token : bad_tokens) {
SCOPED_TRACE(bad_token.name);
std::optional<std::string> serialization =
SerializeItem(Token(bad_token.value));
EXPECT_FALSE(serialization.has_value()) << *serialization;
}
}
TEST(StructuredHeaderTest, UnserializableKeys) {
static const struct UnserializableString {
const char* name;
const char* value;
} bad_keys[] = {
{"empty key", ""},
{"contains high ascii", "a\xff"},
{"contains nonprintable character", "a\x7f"},
{"contains C0", "a\x01"},
{"UTF-8 encoded", "a\xc3\xa9"},
{"contains TAB", "a\t"},
{"contains LF", "a\n"},
{"contains CR", "a\r"},
{"contains SP", "a "},
{"begins with uppercase", "Atoken"},
{"begins with digit", "9token"},
{"begins with hyphen", "-token"},
{"begins with LF", "\ntoken"},
{"begins with SP", " token"},
{"begins with colon", ":token"},
{"begins with percent", "%token"},
{"begins with period", ".token"},
{"begins with slash", "/token"},
};
for (const auto& bad_key : bad_keys) {
SCOPED_TRACE(bad_key.name);
std::optional<std::string> serialization =
SerializeItem(ParameterizedItem("a", {{bad_key.value, "a"}}));
EXPECT_FALSE(serialization.has_value()) << *serialization;
}
}
TEST(StructuredHeaderTest, UnserializableStrings) {
static const struct UnserializableString {
const char* name;
const char* value;
} bad_strings[] = {
{"contains high ascii", "a\xff"},
{"contains nonprintable character", "a\x7f"},
{"UTF-8 encoded", "a\xc3\xa9"},
{"contains TAB", "a\t"},
{"contains LF", "a\n"},
{"contains CR", "a\r"},
{"contains C0", "a\x01"},
};
for (const auto& bad_string : bad_strings) {
SCOPED_TRACE(bad_string.name);
std::optional<std::string> serialization =
SerializeItem(Item(bad_string.value));
EXPECT_FALSE(serialization.has_value()) << *serialization;
}
}
TEST(StructuredHeaderTest, UnserializableIntegers) {
EXPECT_FALSE(SerializeItem(Integer(1e15L)).has_value());
EXPECT_FALSE(SerializeItem(Integer(-1e15L)).has_value());
}
TEST(StructuredHeaderTest, UnserializableDecimals) {
for (double value :
{std::numeric_limits<double>::quiet_NaN(),
std::numeric_limits<double>::infinity(),
-std::numeric_limits<double>::infinity(), 1e12, 1e12 - 0.0001,
1e12 - 0.0005, -1e12, -1e12 + 0.0001, -1e12 + 0.0005}) {
    std::optional<std::string> serialization = SerializeItem(Item(value));
    EXPECT_FALSE(serialization.has_value());
}
}
TEST(StructuredHeaderTest, SerializeUnparseableDecimals) {
struct UnparseableDecimal {
const char* name;
double value;
const char* canonical;
} float_test_cases[] = {
{"negative 0", -0.0, "0.0"},
{"0.0001", 0.0001, "0.0"},
{"0.0000001", 0.0000001, "0.0"},
{"1.0001", 1.0001, "1.0"},
{"1.0009", 1.0009, "1.001"},
{"round positive odd decimal", 0.0015, "0.002"},
{"round positive even decimal", 0.0025, "0.002"},
{"round negative odd decimal", -0.0015, "-0.002"},
{"round negative even decimal", -0.0025, "-0.002"},
{"round decimal up to integer part", 9.9995, "10.0"},
{"subnormal numbers", std::numeric_limits<double>::denorm_min(), "0.0"},
{"round up to 10 digits", 1e9 - 0.0000001, "1000000000.0"},
{"round up to 11 digits", 1e10 - 0.000001, "10000000000.0"},
{"round up to 12 digits", 1e11 - 0.00001, "100000000000.0"},
{"largest serializable float", nextafter(1e12 - 0.0005, 0),
"999999999999.999"},
{"largest serializable negative float", -nextafter(1e12 - 0.0005, 0),
"-999999999999.999"},
{"float rounds up to next int", 3.9999999, "4.0"},
{"don't double round", 3.99949, "3.999"},
{"don't double round", 123456789.99949, "123456789.999"},
};
for (const auto& test_case : float_test_cases) {
SCOPED_TRACE(test_case.name);
std::optional<std::string> serialization =
SerializeItem(Item(test_case.value));
EXPECT_TRUE(serialization.has_value());
EXPECT_EQ(*serialization, test_case.canonical);
}
}
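// Context for the table above: RFC 8941 serializes Decimals with at most
// twelve integer digits and one to three fractional digits, rounding the
// fraction to three places with round-half-to-even -- which is why 0.0025 and
// -0.0025 both land on +/-0.002 while their odd neighbor 0.0015 rounds up.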
TEST(StructuredHeaderTest, SerializeList) {
for (const auto& c : list_test_cases) {
SCOPED_TRACE(c.name);
if (c.expected) {
std::optional<std::string> result = SerializeList(*c.expected);
EXPECT_TRUE(result.has_value());
EXPECT_EQ(result.value(), std::string(c.canonical ? c.canonical : c.raw));
}
}
}
TEST(StructuredHeaderTest, UnserializableLists) {
static const struct UnserializableList {
const char* name;
const List value;
} bad_lists[] = {
{"Null item as member", {{Item(), {}}}},
{"Unserializable item as member", {{Token("\n"), {}}}},
{"Key is empty", {{Token("abc"), {Param("", 1)}}}},
{"Key containswhitespace", {{Token("abc"), {Param("a\n", 1)}}}},
{"Key contains UTF8", {{Token("abc"), {Param("a\xc3\xa9", 1)}}}},
{"Key contains unprintable characters",
{{Token("abc"), {Param("a\x7f", 1)}}}},
{"Key contains disallowed characters",
{{Token("abc"), {Param("a:", 1)}}}},
{"Param value is unserializable", {{Token("abc"), {{"a", Token("\n")}}}}},
{"Inner list contains unserializable item",
{{std::vector<ParameterizedItem>{{Token("\n"), {}}}, {}}}},
};
for (const auto& bad_list : bad_lists) {
SCOPED_TRACE(bad_list.name);
std::optional<std::string> serialization = SerializeList(bad_list.value);
EXPECT_FALSE(serialization.has_value()) << *serialization;
}
}
TEST(StructuredHeaderTest, SerializeDictionary) {
for (const auto& c : dictionary_test_cases) {
SCOPED_TRACE(c.name);
if (c.expected) {
std::optional<std::string> result = SerializeDictionary(*c.expected);
EXPECT_TRUE(result.has_value());
EXPECT_EQ(result.value(), std::string(c.canonical ? c.canonical : c.raw));
}
}
}
TEST(StructuredHeaderTest, DictionaryConstructors) {
const std::string key0 = "key0";
const std::string key1 = "key1";
const ParameterizedMember member0{Item("Applepie"), {}};
const ParameterizedMember member1{Item("hello", Item::kByteSequenceType), {}};
Dictionary dict;
EXPECT_TRUE(dict.empty());
EXPECT_EQ(0U, dict.size());
dict[key0] = member0;
EXPECT_FALSE(dict.empty());
EXPECT_EQ(1U, dict.size());
const Dictionary dict_copy = dict;
EXPECT_FALSE(dict_copy.empty());
EXPECT_EQ(1U, dict_copy.size());
EXPECT_EQ(dict, dict_copy);
const Dictionary dict_init{{{key0, member0}, {key1, member1}}};
EXPECT_FALSE(dict_init.empty());
EXPECT_EQ(2U, dict_init.size());
EXPECT_EQ(member0, dict_init.at(key0));
EXPECT_EQ(member1, dict_init.at(key1));
}
TEST(StructuredHeaderTest, DictionaryClear) {
const std::string key0 = "key0";
const ParameterizedMember member0{Item("Applepie"), {}};
Dictionary dict({{key0, member0}});
EXPECT_EQ(1U, dict.size());
EXPECT_FALSE(dict.empty());
EXPECT_TRUE(dict.contains(key0));
dict.clear();
EXPECT_EQ(0U, dict.size());
EXPECT_TRUE(dict.empty());
EXPECT_FALSE(dict.contains(key0));
}
TEST(StructuredHeaderTest, DictionaryAccessors) {
const std::string key0 = "key0";
const std::string key1 = "key1";
const ParameterizedMember nonempty_member0{Item("Applepie"), {}};
const ParameterizedMember nonempty_member1{
Item("hello", Item::kByteSequenceType), {}};
const ParameterizedMember empty_member;
Dictionary dict{{{key0, nonempty_member0}}};
EXPECT_TRUE(dict.contains(key0));
EXPECT_EQ(nonempty_member0, dict[key0]);
EXPECT_EQ(&dict[key0], &dict.at(key0));
EXPECT_EQ(&dict[key0], &dict[0]);
EXPECT_EQ(&dict[key0], &dict.at(0));
{
auto it = dict.find(key0);
ASSERT_TRUE(it != dict.end());
EXPECT_EQ(it->first, key0);
EXPECT_EQ(it->second, nonempty_member0);
}
ASSERT_FALSE(dict.contains(key1));
EXPECT_TRUE(dict.find(key1) == dict.end());
ParameterizedMember& member1 = dict[key1];
EXPECT_TRUE(dict.contains(key1));
EXPECT_EQ(empty_member, member1);
EXPECT_EQ(&member1, &dict[key1]);
EXPECT_EQ(&member1, &dict.at(key1));
EXPECT_EQ(&member1, &dict[1]);
EXPECT_EQ(&member1, &dict.at(1));
member1 = nonempty_member1;
EXPECT_EQ(nonempty_member1, dict[key1]);
EXPECT_EQ(&dict[key1], &dict.at(key1));
EXPECT_EQ(&dict[key1], &dict[1]);
EXPECT_EQ(&dict[key1], &dict.at(1));
const Dictionary& dict_ref = dict;
EXPECT_EQ(&member1, &dict_ref.at(key1));
EXPECT_EQ(&member1, &dict_ref[1]);
EXPECT_EQ(&member1, &dict_ref.at(1));
}
TEST(StructuredHeaderTest, UnserializableDictionary) {
static const struct UnserializableDictionary {
const char* name;
const Dictionary value;
} bad_dictionaries[] = {
{"Unserializable dict key", Dictionary{{{"ABC", {Token("abc"), {}}}}}},
{"Dictionary item is unserializable",
Dictionary{{{"abc", {Token("abc="), {}}}}}},
{"Param value is unserializable",
Dictionary{{{"abc", {Token("abc"), {{"a", Token("\n")}}}}}}},
{"Dictionary inner-list contains unserializable item",
Dictionary{
{{"abc",
{std::vector<ParameterizedItem>{{Token("abc="), {}}}, {}}}}}},
};
for (const auto& bad_dictionary : bad_dictionaries) {
SCOPED_TRACE(bad_dictionary.name);
std::optional<std::string> serialization =
SerializeDictionary(bad_dictionary.value);
EXPECT_FALSE(serialization.has_value()) << *serialization;
}
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/common/structured_headers.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/common/structured_headers_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
d7715548-177d-407d-b1e7-f44b71265ada | cpp | tensorflow/tensorflow | pending_counts | tensorflow/core/common_runtime/pending_counts.h | tensorflow/core/common_runtime/pending_counts_test.cc | #ifndef TENSORFLOW_CORE_COMMON_RUNTIME_PENDING_COUNTS_H_
#define TENSORFLOW_CORE_COMMON_RUNTIME_PENDING_COUNTS_H_
#include <atomic>
#include "tensorflow/core/lib/gtl/flatmap.h"
#include "tensorflow/core/lib/hash/hash.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/util/port.h"
namespace tensorflow {
class PendingCounts {
public:
enum NodeState {
PENDING_NOTREADY,
PENDING_READY,
STARTED,
COMPLETED
};
class Handle;
class Layout {
public:
Handle CreateHandle(size_t max_pending_count, size_t max_dead_count);
private:
friend class PendingCounts;
int next_offset_ = 0;
};
explicit PendingCounts(Layout layout)
: num_bytes_(layout.next_offset_), bytes_(new char[num_bytes_]()) {
if (num_bytes_ >= sizeof(LargeCounts)) {
CHECK_EQ(uintptr_t(bytes_) % alignof(LargeCounts), 0);
}
}
explicit PendingCounts(const PendingCounts& other)
: num_bytes_(other.num_bytes_), bytes_(new char[num_bytes_]) {
if (num_bytes_ >= sizeof(LargeCounts)) {
CHECK_EQ(uintptr_t(bytes_) % alignof(LargeCounts), 0);
}
memcpy(bytes_, other.bytes_, other.num_bytes_);
}
~PendingCounts() { delete[] bytes_; }
void set_initial_count(Handle h, size_t pending_count) {
if (h.is_large_) {
std::atomic<LargeCounts>* c_ptr = Large(h);
auto c = c_ptr->load(std::memory_order_relaxed);
c.pending = pending_count;
c.dead_count = 0;
c.has_started = 0;
c_ptr->store(c, std::memory_order_relaxed);
} else {
DCHECK_LE(pending_count, kMaxCountForPackedCounts);
std::atomic<PackedCounts>* c_ptr = Packed(h);
auto c = c_ptr->load(std::memory_order_relaxed);
c.pending = pending_count;
c.dead_count = 0;
c.has_started = 0;
c_ptr->store(c, std::memory_order_relaxed);
}
}
NodeState node_state(Handle h) {
if (h.is_large_) {
return NodeStateForStruct(Large(h)->load(std::memory_order_relaxed));
} else {
return NodeStateForStruct(Packed(h)->load(std::memory_order_relaxed));
}
}
void mark_started(Handle h) {
DCHECK_EQ(pending(h), 0);
if (h.is_large_) {
std::atomic<LargeCounts>* c_ptr = Large(h);
auto c = c_ptr->load(std::memory_order_relaxed);
DCHECK_EQ(c.has_started, 0);
c.has_started = 1;
c_ptr->store(c, std::memory_order_relaxed);
} else {
std::atomic<PackedCounts>* c_ptr = Packed(h);
auto c = c_ptr->load(std::memory_order_relaxed);
DCHECK_EQ(c.has_started, 0);
c.has_started = 1;
c_ptr->store(c, std::memory_order_relaxed);
}
}
void mark_completed(Handle h) {
if (h.is_large_) {
std::atomic<LargeCounts>* c_ptr = Large(h);
auto c = c_ptr->load(std::memory_order_relaxed);
DCHECK_EQ(c.has_started, 1);
c.pending = 1;
c_ptr->store(c, std::memory_order_relaxed);
} else {
std::atomic<PackedCounts>* c_ptr = Packed(h);
auto c = c_ptr->load(std::memory_order_relaxed);
DCHECK_EQ(c.has_started, 1);
c.pending = 1;
c_ptr->store(c, std::memory_order_relaxed);
}
}
int pending(Handle h) {
if (h.is_large_) {
LargeCounts c = Large(h)->load(std::memory_order_relaxed);
if (PENDING_NOTREADY == NodeStateForStruct(c)) {
return c.pending;
} else {
return 0;
}
} else {
PackedCounts c = Packed(h)->load(std::memory_order_relaxed);
if (PENDING_NOTREADY == NodeStateForStruct(c)) {
return c.pending;
} else {
return 0;
}
}
}
struct AdjustResult {
int dead_count;
int pending_count;
AdjustResult(int dead_count, int pending_count)
: dead_count(dead_count), pending_count(pending_count) {}
};
int decrement_pending(Handle h, int v) {
DCHECK_GE(pending(h), v);
if (h.is_large_) {
std::atomic<LargeCounts>* c_ptr = Large(h);
auto c = c_ptr->load(std::memory_order_relaxed);
c.pending -= v;
c_ptr->store(c, std::memory_order_relaxed);
return c.pending;
} else {
std::atomic<PackedCounts>* c_ptr = Packed(h);
auto c = c_ptr->load(std::memory_order_relaxed);
c.pending -= v;
c_ptr->store(c, std::memory_order_relaxed);
return c.pending;
}
}
void mark_live(Handle h) {
if (h.is_large_) {
std::atomic<LargeCounts>* c_ptr = Large(h);
auto c = c_ptr->load(std::memory_order_relaxed);
if (PENDING_NOTREADY == NodeStateForStruct(c)) {
c.pending &= ~static_cast<int>(0x1);
c_ptr->store(c, std::memory_order_relaxed);
}
} else {
std::atomic<PackedCounts>* c_ptr = Packed(h);
auto c = c_ptr->load(std::memory_order_relaxed);
if (PENDING_NOTREADY == NodeStateForStruct(c)) {
static_assert(7 == kMaxCountForPackedCounts,
"Live flag incorrect for max packed count");
c.pending &= 0x6;
c_ptr->store(c, std::memory_order_relaxed);
}
}
}
int dead_count(Handle h) {
int r = h.is_large_ ? Large(h)->load(std::memory_order_relaxed).dead_count
: Packed(h)->load(std::memory_order_relaxed).dead_count;
return r;
}
void increment_dead_count(Handle h) {
if (h.is_large_) {
std::atomic<LargeCounts>* c_ptr = Large(h);
auto c = c_ptr->load(std::memory_order_relaxed);
if (PENDING_NOTREADY == NodeStateForStruct(c)) {
c.dead_count++;
c_ptr->store(c, std::memory_order_relaxed);
}
} else {
std::atomic<PackedCounts>* c_ptr = Packed(h);
auto c = c_ptr->load(std::memory_order_relaxed);
if (PENDING_NOTREADY == NodeStateForStruct(c)) {
DCHECK_LT(c.dead_count, kMaxCountForPackedCounts);
c.dead_count++;
c_ptr->store(c, std::memory_order_relaxed);
}
}
}
AdjustResult adjust_for_mark_live(Handle h) {
if (h.is_large_) {
std::atomic<LargeCounts>* c_ptr = Large(h);
auto c = c_ptr->load(std::memory_order_relaxed);
auto ret_pending = 0;
if (PENDING_NOTREADY == NodeStateForStruct(c)) {
ret_pending = c.pending;
c.pending &= ~static_cast<int>(0x1);
c_ptr->store(c, std::memory_order_relaxed);
}
return AdjustResult(c.dead_count, ret_pending);
} else {
std::atomic<PackedCounts>* c_ptr = Packed(h);
auto c = c_ptr->load(std::memory_order_relaxed);
auto ret_pending = 0;
if (PENDING_NOTREADY == NodeStateForStruct(c)) {
static_assert(7 == kMaxCountForPackedCounts,
"Live flag incorrect for max packed count");
ret_pending = c.pending;
c.pending &= 0x6;
c_ptr->store(c, std::memory_order_relaxed);
}
return AdjustResult(c.dead_count, ret_pending);
}
}
AdjustResult adjust_for_mark_live_atomic(Handle h) {
if (h.is_large_) {
std::atomic<LargeCounts>* c_ptr = Large(h);
auto old_val = c_ptr->load(std::memory_order_relaxed);
while (true) {
auto new_val = old_val;
auto ret_pending = 0;
if (PENDING_NOTREADY == NodeStateForStruct(new_val)) {
ret_pending = old_val.pending;
new_val.pending &= ~static_cast<int>(0x1);
}
AdjustResult ret(old_val.dead_count, ret_pending);
if (TF_PREDICT_TRUE(c_ptr->compare_exchange_weak(old_val, new_val)))
return ret;
}
} else {
std::atomic<PackedCounts>* c_ptr = Packed(h);
auto old_val = c_ptr->load(std::memory_order_relaxed);
while (true) {
auto new_val = old_val;
auto ret_pending = 0;
if (PENDING_NOTREADY == NodeStateForStruct(new_val)) {
static_assert(7 == kMaxCountForPackedCounts,
"Live flag incorrect for max packed count");
ret_pending = old_val.pending;
new_val.pending &= 0x6;
}
AdjustResult ret(old_val.dead_count, ret_pending);
if (TF_PREDICT_TRUE(c_ptr->compare_exchange_weak(old_val, new_val)))
return ret;
}
}
}
AdjustResult adjust_for_increment_dead(Handle h) {
if (h.is_large_) {
return adjust_for_increment_dead_shared(Large(h));
} else {
return adjust_for_increment_dead_shared(Packed(h));
}
}
AdjustResult adjust_for_increment_dead_atomic(Handle h) {
if (h.is_large_) {
return adjust_for_increment_dead_shared_atomic(Large(h));
} else {
return adjust_for_increment_dead_shared_atomic(Packed(h));
}
}
AdjustResult adjust_for_decrement_pending(Handle h, int decrement_pending) {
DCHECK_GE(pending(h), decrement_pending);
if (h.is_large_) {
return adjust_for_decrement_pending_shared(Large(h), decrement_pending);
} else {
return adjust_for_decrement_pending_shared(Packed(h), decrement_pending);
}
}
AdjustResult adjust_for_decrement_pending_atomic(Handle h,
int decrement_pending) {
DCHECK_GE(pending(h), decrement_pending);
if (h.is_large_) {
return adjust_for_decrement_pending_shared_atomic(Large(h),
decrement_pending);
} else {
return adjust_for_decrement_pending_shared_atomic(Packed(h),
decrement_pending);
}
}
AdjustResult adjust_for_activation(Handle h, bool increment_dead) {
DCHECK_GE(pending(h), 1);
if (h.is_large_) {
return adjust_for_activation_shared(Large(h), increment_dead);
} else {
return adjust_for_activation_shared(Packed(h), increment_dead);
}
}
AdjustResult adjust_for_activation_atomic(Handle h, bool increment_dead) {
DCHECK_GE(pending(h), 1);
if (h.is_large_) {
return adjust_for_activation_shared_atomic(Large(h), increment_dead);
} else {
return adjust_for_activation_shared_atomic(Packed(h), increment_dead);
}
}
class Handle {
public:
Handle() : byte_offset_(0), is_large_(0) {}
private:
friend class PendingCounts;
int byte_offset_ : 31;
bool is_large_ : 1;
};
private:
template <typename T>
inline AdjustResult adjust_for_increment_dead_shared(std::atomic<T>* c) {
T val = c->load(std::memory_order_relaxed);
auto ret_pending = 0;
if (PENDING_NOTREADY == NodeStateForStruct(val)) {
val.dead_count++;
ret_pending = val.pending;
c->store(val, std::memory_order_relaxed);
}
return AdjustResult(val.dead_count, ret_pending);
}
template <typename T>
inline AdjustResult adjust_for_increment_dead_shared_atomic(
std::atomic<T>* c) {
T old_val = c->load(std::memory_order_relaxed);
while (true) {
auto new_val = old_val;
auto ret_pending = 0;
if (PENDING_NOTREADY == NodeStateForStruct(new_val)) {
ret_pending = new_val.pending;
new_val.dead_count++;
}
AdjustResult ret(new_val.dead_count, ret_pending);
if (TF_PREDICT_TRUE(c->compare_exchange_weak(old_val, new_val)))
return ret;
}
}
template <typename T>
inline AdjustResult adjust_for_decrement_pending_shared(
std::atomic<T>* c, int decrement_pending) {
T val = c->load(std::memory_order_relaxed);
DCHECK_GE(val.pending, decrement_pending);
val.pending -= decrement_pending;
c->store(val, std::memory_order_relaxed);
return AdjustResult(val.dead_count, val.pending);
}
template <typename T>
inline AdjustResult adjust_for_decrement_pending_shared_atomic(
std::atomic<T>* c, int decrement_pending) {
T old_val = c->load(std::memory_order_relaxed);
while (true) {
T new_val = old_val;
DCHECK_GE(new_val.pending, decrement_pending);
new_val.pending -= decrement_pending;
AdjustResult ret(new_val.dead_count, new_val.pending);
if (TF_PREDICT_TRUE(c->compare_exchange_weak(old_val, new_val)))
return ret;
}
}
template <typename T>
inline AdjustResult adjust_for_activation_shared(std::atomic<T>* c,
bool increment_dead) {
T val = c->load(std::memory_order_relaxed);
if (increment_dead && PENDING_NOTREADY == NodeStateForStruct(val)) {
val.dead_count++;
}
DCHECK_GE(val.pending, 1);
val.pending--;
c->store(val, std::memory_order_relaxed);
return AdjustResult(val.dead_count, val.pending);
}
template <typename T>
inline AdjustResult adjust_for_activation_shared_atomic(std::atomic<T>* c,
bool increment_dead) {
T old_val = c->load(std::memory_order_relaxed);
while (true) {
T new_val = old_val;
if (increment_dead && PENDING_NOTREADY == NodeStateForStruct(new_val)) {
new_val.dead_count++;
}
DCHECK_GE(new_val.pending, 1);
new_val.pending--;
AdjustResult ret(new_val.dead_count, new_val.pending);
if (TF_PREDICT_TRUE(c->compare_exchange_weak(old_val, new_val)))
return ret;
}
}
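  // Two storage layouts back each Handle: PackedCounts squeezes the pending
  // count, dead count, and has_started bit into a single byte (so both counts
  // must fit in three bits, i.e. at most kMaxCountForPackedCounts), while
  // LargeCounts spends eight aligned bytes on nodes with larger fan-in.
  // Layout::CreateHandle below chooses between them per node.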
static constexpr int kMaxCountForPackedCounts = 7;
struct PackedCounts {
uint8 pending : 3;
uint8 dead_count : 3;
uint8 has_started : 1;
};
struct alignas(8) LargeCounts {
uint32 pending;
uint32 dead_count : 31;
uint32 has_started : 1;
};
template <typename T>
NodeState NodeStateForStruct(const T& c) const {
if (c.has_started) {
return (c.pending == 0) ? STARTED : COMPLETED;
} else {
return (c.pending == 0) ? PENDING_READY : PENDING_NOTREADY;
}
}
inline std::atomic<LargeCounts>* Large(Handle h) {
DCHECK(h.is_large_);
DCHECK_LE(h.byte_offset_ + sizeof(std::atomic<LargeCounts>), num_bytes_);
DCHECK_EQ(h.byte_offset_ % alignof(std::atomic<LargeCounts>), 0);
return reinterpret_cast<std::atomic<LargeCounts>*>(bytes_ + h.byte_offset_);
}
inline std::atomic<PackedCounts>* Packed(Handle h) {
DCHECK(!h.is_large_);
DCHECK_LE(h.byte_offset_ + sizeof(PackedCounts), num_bytes_);
return reinterpret_cast<std::atomic<PackedCounts>*>(bytes_ +
h.byte_offset_);
}
const int num_bytes_;
char* bytes_;
void operator=(const PendingCounts&) = delete;
};
inline PendingCounts::Handle PendingCounts::Layout::CreateHandle(
size_t max_pending_count, size_t max_dead_count) {
Handle result;
if ((max_pending_count > kMaxCountForPackedCounts) ||
(max_dead_count > kMaxCountForPackedCounts)) {
constexpr int B = sizeof(std::atomic<LargeCounts>);
static_assert(
sizeof(std::atomic<LargeCounts>) >= alignof(std::atomic<LargeCounts>),
"std::atomic<LargeCounts> must be packed");
int64_t offset = ((static_cast<int64_t>(next_offset_) + B - 1) / B) * B;
result.byte_offset_ = offset;
result.is_large_ = true;
next_offset_ = result.byte_offset_ + B;
} else {
result.byte_offset_ = next_offset_;
result.is_large_ = false;
static_assert(sizeof(std::atomic<PackedCounts>) == 1,
"std::atomic<PackedCounts> should be a single byte");
next_offset_ += sizeof(std::atomic<PackedCounts>);
}
return result;
}
}
#endif | #include "tensorflow/core/common_runtime/pending_counts.h"
#include <memory>
#include <unordered_map>
#include <vector>
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/test.h"
using std::unique_ptr;
namespace tensorflow {
TEST(PendingCounts, Simple) {
const int C = 300;
PendingCounts::Layout layout;
std::vector<PendingCounts::Handle> h(C);
for (int id = 0; id < C; id++) {
h[id] = layout.CreateHandle(id, id);
}
PendingCounts c(layout);
for (int id = 0; id < C; id++) {
c.set_initial_count(h[id], id);
}
for (int id = 0; id < C; id++) {
EXPECT_EQ(c.pending(h[id]), id);
EXPECT_EQ(c.dead_count(h[id]), 0);
}
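  // increment_dead_count only ticks while a node is still PENDING_NOTREADY;
  // handle 0 starts with zero pending inputs (already PENDING_READY), so its
  // dead count stays at 0 while every other handle rises to 1.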
for (int id = 0; id < C; id++) {
c.increment_dead_count(h[id]);
EXPECT_EQ(c.dead_count(h[id]), (id == 0) ? 0 : 1);
}
EXPECT_EQ(c.decrement_pending(h[1], 1), 0);
EXPECT_EQ(c.decrement_pending(h[3], 1), 2);
EXPECT_EQ(c.decrement_pending(h[3], 1), 1);
c.decrement_pending(h[5], 1);
c.decrement_pending(h[5], 3);
c.decrement_pending(h[170], 1);
c.decrement_pending(h[170], 13);
EXPECT_EQ(c.pending(h[1]), 0);
EXPECT_EQ(c.pending(h[3]), 1);
EXPECT_EQ(c.pending(h[5]), 1);
EXPECT_EQ(c.pending(h[170]), 156);
}
TEST(PendingCounts, CopyConstructor) {
const int C = 300;
PendingCounts::Layout layout;
std::vector<PendingCounts::Handle> h(C);
for (int id = 0; id < C; id++) {
h[id] = layout.CreateHandle(id, id);
}
PendingCounts c(layout);
for (int id = 0; id < C; id++) {
c.set_initial_count(h[id], id);
}
PendingCounts c2(c);
for (int id = 0; id < C; id++) {
EXPECT_EQ(c.pending(h[id]), c2.pending(h[id]));
EXPECT_EQ(c.dead_count(h[id]), c2.dead_count(h[id]));
}
}
TEST(PendingCounts, MarkLiveShowsUpAsCount) {
PendingCounts::Layout layout;
PendingCounts::Handle handles[2];
handles[0] = layout.CreateHandle(5, 4);
handles[1] = layout.CreateHandle(15, 4);
for (int id = 0; id < 2; id++) {
PendingCounts::Handle h = handles[id];
int count = (id == 0) ? 5 : 15;
PendingCounts c(layout);
c.set_initial_count(h, count);
EXPECT_EQ(c.pending(h), count);
auto result = c.adjust_for_mark_live_atomic(h);
EXPECT_EQ(c.pending(h), count - 1);
result = c.adjust_for_mark_live_atomic(h);
EXPECT_EQ(c.pending(h), count - 1);
c.decrement_pending(h, count - 1);
EXPECT_EQ(c.pending(h), 0);
result = c.adjust_for_mark_live_atomic(h);
EXPECT_EQ(c.pending(h), 0);
c.mark_started(h);
result = c.adjust_for_mark_live_atomic(h);
EXPECT_EQ(c.pending(h), 0);
c.mark_completed(h);
result = c.adjust_for_mark_live_atomic(h);
EXPECT_EQ(c.pending(h), 0);
}
}
TEST(PendingCounts, StateIsCorrect) {
const int C = 20;
PendingCounts::Layout layout;
std::vector<PendingCounts::Handle> handles(C);
for (int id = 0; id < C; id++) {
handles[id] = layout.CreateHandle(id, id);
}
PendingCounts c(layout);
for (int id = 0; id < C; id++) {
c.set_initial_count(handles[id], id);
}
for (int id = 0; id < C; id++) {
PendingCounts::Handle h = handles[id];
while (c.pending(h) > 0) {
EXPECT_EQ(c.node_state(h), PendingCounts::PENDING_NOTREADY);
c.decrement_pending(h, 1);
}
EXPECT_EQ(c.node_state(h), PendingCounts::PENDING_READY);
c.mark_started(h);
EXPECT_EQ(c.node_state(h), PendingCounts::STARTED);
c.mark_completed(h);
EXPECT_EQ(c.node_state(h), PendingCounts::COMPLETED);
}
}
TEST(PendingCounts, AdjustForActivation) {
PendingCounts::Layout layout;
PendingCounts::Handle handles[2];
handles[0] = layout.CreateHandle(5, 4);
handles[1] = layout.CreateHandle(15, 4);
for (int id = 0; id < 2; id++) {
PendingCounts::Handle h = handles[id];
int count = (id == 0) ? 5 : 15;
PendingCounts c(layout);
c.set_initial_count(h, count);
EXPECT_EQ(c.pending(h), count);
PendingCounts::AdjustResult result = c.adjust_for_activation(h, false);
EXPECT_EQ(c.pending(h), count - 1);
EXPECT_GT(result.pending_count, 0);
EXPECT_EQ(c.dead_count(h), 0);
EXPECT_EQ(result.dead_count, 0);
result = c.adjust_for_activation(h, true);
EXPECT_EQ(c.pending(h), count - 2);
EXPECT_GT(result.pending_count, 0);
EXPECT_EQ(c.dead_count(h), 1);
EXPECT_GT(result.dead_count, 0);
}
}
TEST(PendingCounts, AdjustForActivationAtomic) {
PendingCounts::Layout layout;
PendingCounts::Handle handles[2];
const int kInitialCounts[2] = {6, 16};
handles[0] = layout.CreateHandle(kInitialCounts[0], 0);
handles[1] = layout.CreateHandle(kInitialCounts[1], 0);
PendingCounts c(layout);
c.set_initial_count(handles[0], kInitialCounts[0]);
c.set_initial_count(handles[1], kInitialCounts[1]);
Env* env = Env::Default();
std::atomic<bool> start{false};
std::vector<unique_ptr<Thread>> threads;
for (int t = 0; t < 2; t++) {
threads.emplace_back(env->StartThread({}, "tester", [&]() {
while (!start) {
}
for (int i = 0; i < kInitialCounts[0] / 2; i++) {
c.adjust_for_activation_atomic(handles[0], false);
}
for (int i = 0; i < kInitialCounts[1] / 2; i++) {
c.adjust_for_activation_atomic(handles[1], false);
}
}));
}
start = true;
threads.clear();
EXPECT_EQ(c.pending(handles[0]), 0);
EXPECT_EQ(c.pending(handles[1]), 0);
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/pending_counts.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/pending_counts_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
d0680bc2-f060-431e-b3a4-16242667a3f8 | cpp | tensorflow/tensorflow | simple_memory_arena | tensorflow/lite/simple_memory_arena.cc | tensorflow/lite/simple_memory_arena_test.cc | #include "tensorflow/lite/simple_memory_arena.h"
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <cstdlib>
#include <cstring>
#include <limits>
#include <string>
#include <vector>
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/core/macros.h"
#ifdef TF_LITE_TENSORFLOW_PROFILER
#include "tensorflow/lite/tensorflow_profiler_logger.h"
#endif
#if defined(__ANDROID__)
#define TF_LITE_HAS_ALIGNED_ALLOC (__ANDROID_API__ >= 28)
#elif defined(__APPLE__)
#define TF_LITE_HAS_ALIGNED_ALLOC 0
#elif defined(_WIN32)
#define TF_LITE_HAS_ALIGNED_ALLOC 0
#elif __cplusplus >= 201703L || __STDC_VERSION__ >= 201112L
#define TF_LITE_HAS_ALIGNED_ALLOC 1
#endif
namespace {
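// Rounds `offset` up to the next multiple of `alignment`; already-aligned
// values pass through unchanged, e.g. AlignTo(8, 13) == 16 and
// AlignTo(8, 16) == 16.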
template <typename T>
T AlignTo(size_t alignment, T offset) {
return offset % alignment == 0 ? offset
: offset + (alignment - offset % alignment);
}
tflite::PointerAlignedPointerPair AlignedAlloc(size_t size, size_t alignment);
void AlignedFree(const tflite::PointerAlignedPointerPair& buffer);
tflite::PointerAlignedPointerPair AlignedRealloc(
const tflite::PointerAlignedPointerPair& old_buffer, size_t old_size,
size_t new_size, size_t alignment);
#if defined(_WIN32)
tflite::PointerAlignedPointerPair AlignedAlloc(size_t size, size_t alignment) {
char* pointer = reinterpret_cast<char*>(_aligned_malloc(size, alignment));
char* aligned_ptr = pointer;
return {pointer, aligned_ptr};
}
void AlignedFree(const tflite::PointerAlignedPointerPair& buffer) {
_aligned_free(buffer.pointer);
}
tflite::PointerAlignedPointerPair AlignedRealloc(
const tflite::PointerAlignedPointerPair& old_buffer, size_t old_size,
size_t new_size, size_t alignment) {
char* pointer = reinterpret_cast<char*>(
_aligned_realloc(old_buffer.pointer, new_size, alignment));
char* aligned_ptr = pointer;
return {pointer, aligned_ptr};
}
#else
tflite::PointerAlignedPointerPair AlignedAlloc(size_t size, size_t alignment) {
#if TF_LITE_HAS_ALIGNED_ALLOC
const size_t allocation_size = AlignTo(alignment, size + alignment - 1);
char* pointer =
reinterpret_cast<char*>(::aligned_alloc(alignment, allocation_size));
char* aligned_ptr = pointer;
#else
const size_t allocation_size = size + alignment - 1;
char* pointer = reinterpret_cast<char*>(std::malloc(allocation_size));
char* aligned_ptr = reinterpret_cast<char*>(
AlignTo(alignment, reinterpret_cast<std::uintptr_t>(pointer)));
#endif
#if defined(__clang__)
#if __has_feature(memory_sanitizer)
std::memset(pointer, 0, allocation_size);
#endif
#endif
return {pointer, aligned_ptr};
}
void AlignedFree(const tflite::PointerAlignedPointerPair& buffer) {
std::free(buffer.pointer);
}
tflite::PointerAlignedPointerPair AlignedRealloc(
const tflite::PointerAlignedPointerPair& old_buffer, size_t old_size,
size_t new_size, size_t alignment) {
tflite::PointerAlignedPointerPair new_buffer =
AlignedAlloc(new_size, alignment);
if (new_size > 0 && old_size > 0) {
const size_t copy_amount = std::min(new_size, old_size);
std::memcpy(new_buffer.aligned_pointer, old_buffer.aligned_pointer,
copy_amount);
}
AlignedFree(old_buffer);
return new_buffer;
}
#endif
}
namespace tflite {
bool ResizableAlignedBuffer::Resize(size_t new_size) {
if (new_size <= data_size_) {
return false;
}
#ifdef TF_LITE_TENSORFLOW_PROFILER
PauseHeapMonitoring(true);
OnTfLiteArenaAlloc(subgraph_index_, reinterpret_cast<std::uintptr_t>(this),
new_size);
if (data_size_ > 0) {
OnTfLiteArenaDealloc(subgraph_index_,
reinterpret_cast<std::uintptr_t>(this), data_size_);
}
#endif
auto new_buffer = AlignedRealloc(buffer_, data_size_, new_size, alignment_);
bool reallocated = (new_buffer.aligned_pointer != buffer_.aligned_pointer);
buffer_ = new_buffer;
data_size_ = new_size;
#ifdef TF_LITE_TENSORFLOW_PROFILER
PauseHeapMonitoring(false);
#endif
return reallocated;
}
void ResizableAlignedBuffer::Release() {
if (buffer_.pointer == nullptr) {
return;
}
#ifdef TF_LITE_TENSORFLOW_PROFILER
OnTfLiteArenaDealloc(subgraph_index_, reinterpret_cast<std::uintptr_t>(this),
data_size_);
#endif
AlignedFree(buffer_);
buffer_.pointer = nullptr;
buffer_.aligned_pointer = nullptr;
data_size_ = 0;
}
void SimpleMemoryArena::PurgeAfter(int32_t node) {
for (int i = 0; i < active_allocs_.size(); ++i) {
if (active_allocs_[i].first_node > node) {
active_allocs_[i].tensor = -1;
}
}
active_allocs_.erase(
std::remove_if(active_allocs_.begin(), active_allocs_.end(),
[](ArenaAllocWithUsageInterval& alloc) {
return alloc.tensor == -1;
}),
active_allocs_.end());
}
void SimpleMemoryArena::PurgeActiveAllocs(int32_t node) {
for (int i = 0; i < active_allocs_.size(); ++i) {
if (active_allocs_[i].last_node < node) {
active_allocs_[i].tensor = -1;
}
}
active_allocs_.erase(
std::remove_if(active_allocs_.begin(), active_allocs_.end(),
[](ArenaAllocWithUsageInterval& alloc) {
return alloc.tensor == -1;
}),
active_allocs_.end());
}
void SimpleMemoryArena::CalculateActiveAllocs(
const std::vector<ArenaAllocWithUsageInterval>& allocs, int32_t node) {
active_allocs_.clear();
for (int i = 0; i < allocs.size(); ++i) {
if (allocs[i].first_node <= node && allocs[i].last_node >= node) {
active_allocs_.push_back(allocs[i]);
}
}
std::sort(active_allocs_.begin(), active_allocs_.end());
}
void SimpleMemoryArena::ResetAllocs() { active_allocs_.clear(); }
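// Allocation below is a best-fit search over active_allocs_, which is kept
// ordered by offset (insertions go through upper_bound): allocations whose
// node lifetimes don't overlap [first_node, last_node] are skipped since
// their space is reusable, and among the surviving gaps the tightest one that
// fits wins; if none fits, the block lands at the current end of used space.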
TfLiteStatus SimpleMemoryArena::Allocate(
TfLiteContext* context, size_t alignment, size_t size, int32_t tensor,
int32_t first_node, int32_t last_node,
ArenaAllocWithUsageInterval* new_alloc) {
TF_LITE_ENSURE(context, alignment <= underlying_buffer_.GetAlignment());
new_alloc->tensor = tensor;
new_alloc->first_node = first_node;
new_alloc->last_node = last_node;
new_alloc->size = size;
if (size == 0) {
new_alloc->offset = 0;
return kTfLiteOk;
}
const size_t kOffsetNotAssigned = std::numeric_limits<size_t>::max();
size_t best_offset = kOffsetNotAssigned;
size_t best_offset_fit = kOffsetNotAssigned;
size_t current_offset = 0;
for (const auto& alloc : active_allocs_) {
if (alloc.last_node < first_node || alloc.first_node > last_node) {
continue;
}
size_t aligned_current_offset = AlignTo(alignment, current_offset);
if (aligned_current_offset + size <= alloc.offset &&
alloc.offset - aligned_current_offset < best_offset_fit) {
best_offset = aligned_current_offset;
      best_offset_fit = alloc.offset - aligned_current_offset;
}
current_offset = std::max(current_offset, alloc.offset + alloc.size);
if (best_offset_fit == 0) {
break;
}
}
if (best_offset == kOffsetNotAssigned) {
best_offset = AlignTo(alignment, current_offset);
}
high_water_mark_ = std::max(high_water_mark_, best_offset + size);
new_alloc->offset = best_offset;
auto insertion_it = std::upper_bound(active_allocs_.begin(),
active_allocs_.end(), *new_alloc);
active_allocs_.insert(insertion_it, *new_alloc);
return kTfLiteOk;
}
TfLiteStatus SimpleMemoryArena::Commit(bool* arena_reallocated) {
*arena_reallocated = underlying_buffer_.Resize(high_water_mark_);
committed_ = true;
return kTfLiteOk;
}
TfLiteStatus SimpleMemoryArena::ResolveAlloc(
TfLiteContext* context, const ArenaAllocWithUsageInterval& alloc,
char** output_ptr) {
TF_LITE_ENSURE(context, committed_);
TF_LITE_ENSURE(context, output_ptr != nullptr);
TF_LITE_ENSURE(context,
underlying_buffer_.GetSize() >= (alloc.offset + alloc.size));
if (alloc.size == 0) {
*output_ptr = nullptr;
} else {
*output_ptr = underlying_buffer_.GetPtr() + alloc.offset;
}
return kTfLiteOk;
}
TfLiteStatus SimpleMemoryArena::ClearPlan() {
committed_ = false;
high_water_mark_ = 0;
active_allocs_.clear();
return kTfLiteOk;
}
TfLiteStatus SimpleMemoryArena::ReleaseBuffer() {
committed_ = false;
underlying_buffer_.Release();
return kTfLiteOk;
}
TFLITE_ATTRIBUTE_WEAK void DumpArenaInfo(
const std::string& name, const std::vector<int>& execution_plan,
size_t arena_size, const std::vector<ArenaAllocWithUsageInterval>& allocs) {
}
void SimpleMemoryArena::DumpDebugInfo(
const std::string& name, const std::vector<int>& execution_plan) const {
tflite::DumpArenaInfo(name, execution_plan, underlying_buffer_.GetSize(),
active_allocs_);
}
} | #include "tensorflow/lite/simple_memory_arena.h"
#include <gtest/gtest.h>
#include "tensorflow/lite/core/c/common.h"
namespace tflite {
namespace {
void ReportError(TfLiteContext* context, const char* format, ...) {}
TEST(SimpleMemoryArenaTest, BasicArenaOperations) {
TfLiteContext context;
SimpleMemoryArena arena(64);
ArenaAllocWithUsageInterval allocs[6];
arena.Allocate(&context, 32, 2047, 0, 1, 3, &allocs[0]);
arena.Allocate(&context, 32, 2047, 1, 2, 5, &allocs[1]);
arena.Allocate(&context, 32, 2047, 2, 3, 6, &allocs[2]);
arena.Allocate(&context, 32, 2047, 3, 5, 6, &allocs[3]);
arena.Allocate(&context, 32, 1023, 4, 4, 6, &allocs[4]);
arena.Allocate(&context, 32, 1023, 5, 6, 6, &allocs[5]);
EXPECT_EQ(allocs[0].offset, 0);
EXPECT_EQ(allocs[1].offset, 2048);
EXPECT_EQ(allocs[2].offset, 4096);
EXPECT_EQ(allocs[3].offset, 0);
EXPECT_EQ(allocs[4].offset, 6144);
EXPECT_EQ(allocs[5].offset, 2048);
}
TEST(SimpleMemoryArenaTest, BasicZeroAlloc) {
TfLiteContext context;
SimpleMemoryArena arena(64);
ArenaAllocWithUsageInterval alloc;
ASSERT_EQ(arena.Allocate(&context, 32, 0, 0, 1, 2, &alloc), kTfLiteOk);
EXPECT_EQ(alloc.offset, 0);
EXPECT_EQ(alloc.size, 0);
char* resolved_ptr = nullptr;
bool reallocated = false;
ASSERT_EQ(arena.Commit(&reallocated), kTfLiteOk);
EXPECT_FALSE(reallocated);
EXPECT_EQ(resolved_ptr, nullptr);
}
TEST(SimpleMemoryArenaTest, InterleavedZeroAlloc) {
TfLiteContext context;
SimpleMemoryArena arena(64);
ArenaAllocWithUsageInterval allocs[4];
ASSERT_EQ(arena.Allocate(&context, 32, 2047, 0, 0, 4, &allocs[0]), kTfLiteOk);
ASSERT_EQ(arena.Allocate(&context, 32, 0, 1, 1, 2, &allocs[1]), kTfLiteOk);
ASSERT_EQ(arena.Allocate(&context, 32, 1023, 2, 1, 2, &allocs[2]), kTfLiteOk);
ASSERT_EQ(arena.Allocate(&context, 32, 2047, 3, 3, 4, &allocs[3]), kTfLiteOk);
EXPECT_EQ(allocs[0].offset, 0);
EXPECT_EQ(allocs[1].offset, 0);
EXPECT_EQ(allocs[2].offset, 2048);
EXPECT_EQ(allocs[3].offset, 2048);
}
TEST(SimpleMemoryArenaTest, TestClearPlan) {
TfLiteContext context;
SimpleMemoryArena arena(64);
ArenaAllocWithUsageInterval allocs[9];
arena.Allocate(&context, 32, 2047, 0, 0, 2, &allocs[0]);
arena.Allocate(&context, 32, 2047, 1, 1, 2, &allocs[1]);
arena.Allocate(&context, 32, 2047, 2, 1, 2, &allocs[2]);
bool reallocated = false;
arena.Commit(&reallocated);
ASSERT_TRUE(reallocated);
EXPECT_EQ(allocs[0].offset, 0);
EXPECT_EQ(allocs[1].offset, 2048);
EXPECT_EQ(allocs[2].offset, 4096);
arena.ClearPlan();
arena.Allocate(&context, 32, 1023, 3, 0, 2, &allocs[3]);
arena.Allocate(&context, 32, 1023, 4, 1, 2, &allocs[4]);
arena.Allocate(&context, 32, 1023, 5, 1, 2, &allocs[5]);
arena.Commit(&reallocated);
ASSERT_FALSE(reallocated);
EXPECT_EQ(allocs[3].offset, 0);
EXPECT_EQ(allocs[4].offset, 1024);
EXPECT_EQ(allocs[5].offset, 2048);
arena.ClearPlan();
arena.Allocate(&context, 32, 4095, 6, 0, 2, &allocs[6]);
arena.Allocate(&context, 32, 4095, 7, 1, 2, &allocs[7]);
arena.Allocate(&context, 32, 4095, 8, 1, 2, &allocs[8]);
arena.Commit(&reallocated);
ASSERT_TRUE(reallocated);
EXPECT_EQ(allocs[6].offset, 0);
EXPECT_EQ(allocs[7].offset, 4096);
EXPECT_EQ(allocs[8].offset, 8192);
}
TEST(SimpleMemoryArenaTest, TestPurgeAllocs) {
TfLiteContext context;
context.ReportError = ReportError;
SimpleMemoryArena arena(64);
ArenaAllocWithUsageInterval allocs[5];
arena.Allocate(&context, 32, 2047, 0,
0, 2, &allocs[0]);
arena.Allocate(&context, 32, 2047, 1,
1, 2, &allocs[1]);
arena.Allocate(&context, 32, 2047, 2,
2, 3, &allocs[2]);
bool reallocated = false;
ASSERT_EQ(arena.Commit(&reallocated), kTfLiteOk);
ASSERT_TRUE(reallocated);
char* resolved_ptr0 = nullptr;
char* resolved_ptr1 = nullptr;
char* resolved_ptr2 = nullptr;
char* resolved_ptr3 = nullptr;
ASSERT_EQ(arena.ResolveAlloc(&context, allocs[0], &resolved_ptr0), kTfLiteOk);
EXPECT_NE(resolved_ptr0, nullptr);
ASSERT_EQ(arena.ResolveAlloc(&context, allocs[1], &resolved_ptr1), kTfLiteOk);
EXPECT_EQ(resolved_ptr1, resolved_ptr0 + 2048);
ASSERT_EQ(arena.ResolveAlloc(&context, allocs[2], &resolved_ptr2), kTfLiteOk);
EXPECT_EQ(resolved_ptr2, resolved_ptr1 + 2048);
arena.PurgeActiveAllocs(4);
arena.Allocate(&context, 32, 13, 3,
4, 5, &allocs[4]);
ASSERT_EQ(arena.Commit(&reallocated), kTfLiteOk);
ASSERT_EQ(arena.ResolveAlloc(&context, allocs[4], &resolved_ptr3), kTfLiteOk);
ASSERT_EQ(allocs[4].offset, 0);
arena.Allocate(&context, 32, 2047, 0,
0, 2, &allocs[0]);
ASSERT_EQ(arena.Commit(&reallocated), kTfLiteOk);
ASSERT_EQ(arena.ResolveAlloc(&context, allocs[3], &resolved_ptr3), kTfLiteOk);
ASSERT_EQ(allocs[0].offset, 0);
}
TEST(SimpleMemoryArenaTest, TestResetAllocs) {
TfLiteContext context;
context.ReportError = ReportError;
SimpleMemoryArena arena(64);
ArenaAllocWithUsageInterval allocs[9];
arena.Allocate(&context, 32, 2047, 0,
0, 2, &allocs[0]);
arena.Allocate(&context, 32, 2047, 1,
1, 2, &allocs[1]);
arena.Allocate(&context, 32, 2047, 2,
2, 3, &allocs[2]);
bool reallocated = false;
ASSERT_EQ(arena.Commit(&reallocated), kTfLiteOk);
ASSERT_TRUE(reallocated);
char* resolved_ptr0 = nullptr;
char* resolved_ptr1 = nullptr;
char* resolved_ptr2 = nullptr;
char* resolved_ptr3 = nullptr;
ASSERT_EQ(arena.ResolveAlloc(&context, allocs[0], &resolved_ptr0), kTfLiteOk);
EXPECT_NE(resolved_ptr0, nullptr);
ASSERT_EQ(arena.ResolveAlloc(&context, allocs[1], &resolved_ptr1), kTfLiteOk);
EXPECT_EQ(resolved_ptr1, resolved_ptr0 + 2048);
ASSERT_EQ(arena.ResolveAlloc(&context, allocs[2], &resolved_ptr2), kTfLiteOk);
EXPECT_EQ(resolved_ptr2, resolved_ptr1 + 2048);
arena.Allocate(&context, 32, 13, 0,
0, 3, &allocs[3]);
ASSERT_EQ(arena.Commit(&reallocated), kTfLiteOk);
ASSERT_TRUE(reallocated);
ASSERT_EQ(arena.ResolveAlloc(&context, allocs[0], &resolved_ptr0), kTfLiteOk);
EXPECT_NE(resolved_ptr0, nullptr);
ASSERT_EQ(arena.ResolveAlloc(&context, allocs[1], &resolved_ptr1), kTfLiteOk);
EXPECT_EQ(resolved_ptr1, resolved_ptr0 + 2048);
ASSERT_EQ(arena.ResolveAlloc(&context, allocs[2], &resolved_ptr2), kTfLiteOk);
EXPECT_EQ(resolved_ptr2, resolved_ptr1 + 2048);
ASSERT_EQ(arena.ResolveAlloc(&context, allocs[3], &resolved_ptr3), kTfLiteOk);
EXPECT_EQ(resolved_ptr3, resolved_ptr2 + 2048);
arena.ResetAllocs();
arena.Allocate(&context, 32, 13, 0,
0, 2, &allocs[3]);
ASSERT_EQ(arena.Commit(&reallocated), kTfLiteOk);
ASSERT_EQ(arena.ResolveAlloc(&context, allocs[3], &resolved_ptr3), kTfLiteOk);
ASSERT_EQ(allocs[3].offset, 0);
}
TEST(SimpleMemoryArenaTest, TestClearBuffer) {
TfLiteContext context;
context.ReportError = ReportError;
SimpleMemoryArena arena(64);
ArenaAllocWithUsageInterval allocs[9];
arena.Allocate(&context, 32, 2047, 0, 0, 2, &allocs[0]);
arena.Allocate(&context, 32, 2047, 1, 1, 2, &allocs[1]);
ASSERT_EQ(arena.ReleaseBuffer(), kTfLiteOk);
bool reallocated = false;
ASSERT_EQ(arena.Commit(&reallocated), kTfLiteOk);
ASSERT_TRUE(reallocated);
char* resolved_ptr = nullptr;
ASSERT_EQ(arena.ResolveAlloc(&context, allocs[0], &resolved_ptr), kTfLiteOk);
EXPECT_NE(resolved_ptr, nullptr);
resolved_ptr = nullptr;
ASSERT_EQ(arena.ResolveAlloc(&context, allocs[1], &resolved_ptr), kTfLiteOk);
EXPECT_NE(resolved_ptr, nullptr);
ASSERT_EQ(arena.ReleaseBuffer(), kTfLiteOk);
ASSERT_EQ(arena.BasePointer(), 0);
ASSERT_NE(arena.ResolveAlloc(&context, allocs[0], &resolved_ptr), kTfLiteOk);
ASSERT_EQ(arena.Commit(&reallocated), kTfLiteOk);
ASSERT_TRUE(reallocated);
ASSERT_NE(arena.BasePointer(), 0);
resolved_ptr = nullptr;
ASSERT_EQ(arena.ResolveAlloc(&context, allocs[0], &resolved_ptr), kTfLiteOk);
EXPECT_NE(resolved_ptr, nullptr);
resolved_ptr = nullptr;
ASSERT_EQ(arena.ResolveAlloc(&context, allocs[1], &resolved_ptr), kTfLiteOk);
EXPECT_NE(resolved_ptr, nullptr);
}
class BufferAndPlanClearingTest : public ::testing::Test,
public ::testing::WithParamInterface<bool> {};
TEST_P(BufferAndPlanClearingTest, TestClearBufferAndClearPlan) {
TfLiteContext context;
context.ReportError = ReportError;
SimpleMemoryArena arena(64);
ArenaAllocWithUsageInterval allocs[9];
arena.Allocate(&context, 32, 2047, 0, 0, 2, &allocs[0]);
arena.Allocate(&context, 32, 2047, 1, 1, 2, &allocs[1]);
bool reallocated = false;
ASSERT_EQ(arena.Commit(&reallocated), kTfLiteOk);
ASSERT_TRUE(reallocated);
if (GetParam()) {
ASSERT_EQ(arena.ReleaseBuffer(), kTfLiteOk);
ASSERT_EQ(arena.ClearPlan(), kTfLiteOk);
} else {
ASSERT_EQ(arena.ClearPlan(), kTfLiteOk);
ASSERT_EQ(arena.ReleaseBuffer(), kTfLiteOk);
}
ASSERT_EQ(arena.Commit(&reallocated), kTfLiteOk);
EXPECT_FALSE(reallocated);
char* resolved_ptr = nullptr;
ASSERT_NE(arena.ResolveAlloc(&context, allocs[0], &resolved_ptr), kTfLiteOk);
arena.Allocate(&context, 32, 2047, 0, 0, 2, &allocs[0]);
arena.Allocate(&context, 32, 2047, 1, 1, 2, &allocs[1]);
ASSERT_EQ(arena.Commit(&reallocated), kTfLiteOk);
ASSERT_TRUE(reallocated);
resolved_ptr = nullptr;
ASSERT_EQ(arena.ResolveAlloc(&context, allocs[1], &resolved_ptr), kTfLiteOk);
EXPECT_NE(resolved_ptr, nullptr);
}
INSTANTIATE_TEST_SUITE_P(BufferAndPlanClearingTest, BufferAndPlanClearingTest,
::testing::Values(true, false));
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/simple_memory_arena.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/simple_memory_arena_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
9ec2ecf4-73bd-4fdb-aa81-263d6baa14ff | cpp | tensorflow/tensorflow | outfeed_thunk | third_party/xla/xla/service/gpu/runtime/outfeed_thunk.cc | third_party/xla/xla/backends/cpu/runtime/outfeed_thunk_test.cc | #include "xla/service/gpu/runtime/outfeed_thunk.h"
#include <cstdint>
#include <memory>
#include <utility>
#include <vector>
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "xla/service/buffer_assignment.h"
#include "xla/service/gpu/buffer_allocations.h"
#include "xla/service/gpu/gpu_transfer_manager.h"
#include "xla/service/gpu/outfeed_manager.h"
#include "xla/service/gpu/runtime/thunk.h"
#include "xla/shape.h"
#include "xla/shape_tree.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/stream_executor/stream_executor.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
namespace xla {
namespace gpu {
OutfeedThunk::OutfeedThunk(ThunkInfo thunk_info,
std::vector<ShapedSlice> source_slices)
: Thunk(Kind::kOutfeed, thunk_info),
source_slices_(std::move(source_slices)) {}
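// ExecuteOnStream pairs each outfeed source slice with the next destination
// tree the host side has registered: every copy is enqueued on the device
// stream, a host callback marks the matching destination buffer Done once its
// copy lands, and the final BlockHostUntilDone keeps the source buffers alive
// until the whole transfer completes.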
absl::Status OutfeedThunk::ExecuteOnStream(const ExecuteParams& params) {
se::Stream& stream = *params.stream;
const BufferAllocations& buffer_allocations = *params.buffer_allocations;
VLOG(2) << "Outfeeding from GPU";
OutfeedManager* outfeed_manager =
GpuTransferManager::GetOrCreateOutfeedManager(stream.parent());
ShapeTree<std::unique_ptr<OutfeedBuffer>>* output_buffers =
outfeed_manager->BlockingGetNextDestination();
if (source_slices_.empty()) {
return absl::OkStatus();
}
const int64_t leaf_count = output_buffers->leaf_count();
TF_RET_CHECK(source_slices_.size() == leaf_count)
<< "Mismatch between number of outfeed inputs (" << source_slices_.size()
<< ") and outputs (" << leaf_count << ")";
auto output_leaf_it = output_buffers->leaf_begin();
for (int64_t index = 0; index < leaf_count; ++index) {
const ShapeIndex& shape_index = output_leaf_it->first;
std::unique_ptr<OutfeedBuffer>& buffer = output_leaf_it->second;
++output_leaf_it;
const Shape& output_shape =
ShapeUtil::GetSubshape(output_buffers->shape(), shape_index);
TF_RET_CHECK(
ShapeUtil::ReshapeIsBitcast(source_slices_[index].shape, output_shape))
<< "Mismatch between outfeed output buffer shape "
<< ShapeUtil::HumanStringWithLayout(output_shape)
<< " and outfeed source buffer shape "
<< ShapeUtil::HumanStringWithLayout(source_slices_[index].shape);
BufferAllocation::Slice source_slice = source_slices_[index].slice;
if (!source_slice.allocation())
return Internal("outfeed source missing buffer allocation");
se::DeviceMemoryBase data_address =
buffer_allocations.GetDeviceAddress(source_slice);
TF_RETURN_IF_ERROR(stream.Memcpy(buffer->destination()->untyped_data(),
data_address, buffer->length()));
TF_RETURN_IF_ERROR(stream.DoHostCallback([&buffer]() { buffer->Done(); }));
}
absl::Status block_status = stream.BlockHostUntilDone();
if (!block_status.ok()) {
return Internal("Failed to complete data transfer on stream %p: %s",
&stream, block_status.message());
}
VLOG(2) << "Outfeeding from GPU complete";
return absl::OkStatus();
}
}
} | #include "xla/backends/cpu/runtime/outfeed_thunk.h"
#include <memory>
#include "xla/backends/cpu/runtime/resource_use.h"
#include "xla/backends/cpu/runtime/thunk.h"
#include "xla/runtime/buffer_use.h"
#include "xla/service/buffer_assignment.h"
#include "xla/shape_util.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace xla::cpu {
namespace {
TEST(OutfeedThunkTest, BufferAndResourceUses) {
BufferAllocation alloc(0, 1024, 0);
BufferAllocation::Slice outfeed_slice(&alloc, 10, 40);
OutfeedThunk::OutfeedBuffer outfeed_buffer = {
outfeed_slice,
ShapeUtil::MakeShape(F32, {10}),
};
auto consume_token = Resource::Create(Resource::kToken);
auto produce_token = Resource::Create(Resource::kToken);
TF_ASSERT_OK_AND_ASSIGN(auto thunk,
OutfeedThunk::Create({"outfeed"}, {outfeed_buffer},
{consume_token, produce_token}));
EXPECT_EQ(thunk->buffer_uses().size(), 1);
EXPECT_EQ(thunk->buffer_uses()[0], BufferUse::Read(outfeed_slice));
EXPECT_EQ(thunk->resource_uses().size(), 2);
EXPECT_EQ(thunk->resource_uses()[0], ResourceUse::Read(consume_token));
EXPECT_EQ(thunk->resource_uses()[1], ResourceUse::Write(produce_token));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/runtime/outfeed_thunk.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/backends/cpu/runtime/outfeed_thunk_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
18bf2c41-fa73-4265-8160-56ff07857792 | cpp | tensorflow/tensorflow | regex_replace_op | tensorflow/core/kernels/regex_replace_op.cc | tensorflow/core/kernels/regex_replace_op_test.cc | #include <string>
#include "re2/re2.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/thread_annotations.h"
namespace tensorflow {
namespace {
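// Shared by both kernels below: forwards the input tensor into the output
// when its buffer can be reused (rewriting strings in place), copies
// otherwise, then applies the RE2 rewrite to every element.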
Status InternalCompute(const RE2& regex, const string& rewrite,
const bool replace_global, OpKernelContext* ctx) {
const Tensor* input_tensor;
TF_RETURN_IF_ERROR(ctx->input("input", &input_tensor));
Tensor* output_tensor;
std::unique_ptr<Tensor> maybe_forwarded =
      ctx->forward_input(/*input_index=*/0, /*output_index=*/0,
                         tensorflow::DT_STRING, input_tensor->shape(),
                         ctx->input_memory_type(0), ctx->input_alloc_attr(0));
if (maybe_forwarded) {
output_tensor = maybe_forwarded.get();
TF_RETURN_IF_ERROR(ctx->set_output("output", *output_tensor));
} else {
TF_RETURN_IF_ERROR(
ctx->allocate_output("output", input_tensor->shape(), &output_tensor));
output_tensor->flat<tstring>() = input_tensor->flat<tstring>();
}
auto output_flat = output_tensor->flat<tstring>();
for (size_t i = 0; i < output_flat.size(); ++i) {
string buf = output_flat(i);
if (replace_global) {
RE2::GlobalReplace(&buf, regex, rewrite);
} else {
RE2::Replace(&buf, regex, rewrite);
}
output_flat(i) = std::move(buf);
}
return absl::OkStatus();
}
}
class RegexReplaceOp : public OpKernel {
public:
explicit RegexReplaceOp(OpKernelConstruction* ctx) : OpKernel(ctx) {
OP_REQUIRES_OK(ctx, ctx->GetAttr("replace_global", &replace_global_));
}
~RegexReplaceOp() override {}
void Compute(OpKernelContext* ctx) override {
const Tensor* pattern_tensor;
OP_REQUIRES_OK(ctx, ctx->input("pattern", &pattern_tensor));
OP_REQUIRES(ctx, TensorShapeUtils::IsScalar(pattern_tensor->shape()),
errors::InvalidArgument("Pattern must be scalar, but received ",
pattern_tensor->shape().DebugString()));
const string& pattern = pattern_tensor->scalar<tstring>()();
std::shared_ptr<RE2> regex = CachedRE2(pattern);
OP_REQUIRES(ctx, regex->ok(),
errors::InvalidArgument("Invalid pattern: ", pattern,
", error: ", regex->error()));
const Tensor* rewrite_tensor;
OP_REQUIRES_OK(ctx, ctx->input("rewrite", &rewrite_tensor));
OP_REQUIRES(ctx, TensorShapeUtils::IsScalar(rewrite_tensor->shape()),
errors::InvalidArgument("Rewrite must be scalar, but received ",
rewrite_tensor->shape().DebugString()));
const string& rewrite = rewrite_tensor->scalar<tstring>()();
OP_REQUIRES_OK(ctx, InternalCompute(*regex, rewrite, replace_global_, ctx));
}
private:
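  // Single-entry pattern cache: the common case is a constant pattern tensor,
  // so one compiled RE2 guarded by mu_ suffices; handing out a shared_ptr
  // keeps a displaced regex alive for callers that grabbed it just before a
  // concurrent swap.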
std::shared_ptr<RE2> CachedRE2(const string& pattern) {
{
tf_shared_lock l(mu_);
if (regex_ != nullptr && regex_->pattern() == pattern) {
return regex_;
}
}
auto regex = std::make_shared<RE2>(pattern);
{
mutex_lock l(mu_);
regex_.swap(regex);
return regex_;
}
}
bool replace_global_;
mutex mu_;
std::shared_ptr<RE2> regex_ TF_GUARDED_BY(mu_);
RegexReplaceOp(const RegexReplaceOp&) = delete;
void operator=(const RegexReplaceOp&) = delete;
};
REGISTER_KERNEL_BUILDER(Name("RegexReplace").Device(DEVICE_CPU),
RegexReplaceOp);
class StaticRegexReplaceOp : public OpKernel {
public:
explicit StaticRegexReplaceOp(OpKernelConstruction* ctx) : OpKernel(ctx) {
string pattern;
OP_REQUIRES_OK(ctx, ctx->GetAttr("pattern", &pattern));
re_ = std::make_unique<RE2>(pattern);
OP_REQUIRES(ctx, re_->ok(),
errors::InvalidArgument("Invalid pattern: ", pattern,
", error: ", re_->error()));
OP_REQUIRES_OK(ctx, ctx->GetAttr("rewrite", &rewrite_str_));
OP_REQUIRES_OK(ctx, ctx->GetAttr("replace_global", &replace_global_));
}
void Compute(OpKernelContext* ctx) override {
OP_REQUIRES_OK(ctx,
InternalCompute(*re_, rewrite_str_, replace_global_, ctx));
}
private:
std::unique_ptr<RE2> re_;
string rewrite_str_;
bool replace_global_;
};
REGISTER_KERNEL_BUILDER(Name("StaticRegexReplace").Device(DEVICE_CPU),
StaticRegexReplaceOp);
} | #include "tensorflow/core/common_runtime/kernel_benchmark_testlib.h"
#include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/fake_input.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/graph/node_builder.h"
#include "tensorflow/core/kernels/ops_testutil.h"
#include "tensorflow/core/kernels/ops_util.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
namespace tensorflow {
const char* lines[] = {
"**TensorFlow** is an open source software library for numerical "
"computation using data flow graphs.",
"The graph nodes represent mathematical operations, while the graph edges "
"represent the multidimensional data arrays (tensors) that flow between "
"them.",
"This flexible architecture enables you to deploy computation to one or "
"more CPUs or GPUs in a desktop, server, or mobile device without "
"rewriting code.",
"TensorFlow also includes "
"[TensorBoard](https:
"summaries_and_tensorboard), a data visualization toolkit.",
"TensorFlow was originally developed by researchers and engineers working "
"on the Google Brain team within Google's Machine Intelligence Research "
"organization for the purposes of conducting machine learning and deep "
"neural networks research.",
"The system is general enough to be applicable in a wide variety of other "
"domains, as well.",
"TensorFlow provides stable Python API and C APIs as well as without API "
"backwards compatibility guarantee like C++, Go, Java, JavaScript and "
"Swift."};
const char kRegExPattern[] = "\\p{P}";
const char kRewrite[] = " ";
Tensor GetTestTensor(int batch) {
const int sz = TF_ARRAYSIZE(lines);
Tensor t(DT_STRING, {batch});
auto s = t.flat<tstring>();
for (int i = 0; i < batch; ++i) {
s(i) = lines[i % sz];
}
return t;
}
Graph* SetupRegexReplaceGraph(const Tensor& input, const string& input_pattern,
const string& input_rewrite) {
Graph* g = new Graph(OpRegistry::Global());
Tensor pattern(DT_STRING, TensorShape({}));
pattern.flat<tstring>().setConstant(input_pattern);
Tensor rewrite(DT_STRING, TensorShape({}));
rewrite.flat<tstring>().setConstant(input_rewrite);
TF_CHECK_OK(NodeBuilder("regex_replace_op", "RegexReplace")
.Input(test::graph::Constant(g, input))
.Input(test::graph::Constant(g, pattern))
.Input(test::graph::Constant(g, rewrite))
.Attr("replace_global", true)
                  .Finalize(g, nullptr));
return g;
}
static void BM_RegexReplace(::testing::benchmark::State& state) {
const int batch_size = state.range(0);
Tensor input = GetTestTensor(batch_size);
Graph* g = SetupRegexReplaceGraph(input, kRegExPattern, kRewrite);
test::Benchmark("cpu", g, false).Run(state);
state.SetItemsProcessed(static_cast<int64_t>(state.iterations()));
}
BENCHMARK(BM_RegexReplace)
->UseRealTime()
->Arg(1)
->Arg(8)
->Arg(16)
->Arg(32)
->Arg(64)
->Arg(128)
->Arg(256);
Graph* SetupStaticGraph(const Tensor& input, const string& input_pattern,
const string& rewrite) {
Graph* g = new Graph(OpRegistry::Global());
TF_CHECK_OK(NodeBuilder("static_regex_replace_op", "StaticRegexReplace")
.Attr("pattern", input_pattern)
.Attr("rewrite", rewrite)
.Input(test::graph::Constant(g, input))
.Attr("replace_global", true)
                  .Finalize(g, nullptr));
return g;
}
static void BM_StaticRegexReplace(::testing::benchmark::State& state) {
const int batch_size = state.range(0);
Tensor input = GetTestTensor(batch_size);
Graph* g = SetupStaticGraph(input, kRegExPattern, kRewrite);
test::Benchmark("cpu", g, false).Run(state);
state.SetItemsProcessed(static_cast<int64_t>(state.iterations()));
}
BENCHMARK(BM_StaticRegexReplace)
->UseRealTime()
->Arg(1)
->Arg(8)
->Arg(16)
->Arg(32)
->Arg(64)
->Arg(128)
->Arg(256);
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/regex_replace_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/regex_replace_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
a89dbaf3-8d69-492f-b61a-85d389447364 | cpp | google/quiche | moqt_parser | quiche/quic/moqt/moqt_parser.cc | quiche/quic/moqt/moqt_parser_test.cc | #include "quiche/quic/moqt/moqt_parser.h"
#include <array>
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <optional>
#include <string>
#include "absl/cleanup/cleanup.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "quiche/quic/core/quic_data_reader.h"
#include "quiche/quic/core/quic_time.h"
#include "quiche/quic/moqt/moqt_messages.h"
#include "quiche/quic/moqt/moqt_priority.h"
#include "quiche/common/platform/api/quiche_bug_tracker.h"
#include "quiche/common/platform/api/quiche_logging.h"
namespace moqt {
namespace {
bool ParseDeliveryOrder(uint8_t raw_value,
std::optional<MoqtDeliveryOrder>& output) {
switch (raw_value) {
case 0x00:
output = std::nullopt;
return true;
case 0x01:
output = MoqtDeliveryOrder::kAscending;
return true;
case 0x02:
output = MoqtDeliveryOrder::kDescending;
return true;
default:
return false;
}
}
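// The low bit of the wire value carries the sign (a zigzag-style encoding):
// 4 decodes to 2, 5 decodes to -2, and 0 decodes to 0.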
uint64_t SignedVarintUnserializedForm(uint64_t value) {
if (value & 0x01) {
return -(value >> 1);
}
return value >> 1;
}
bool IsAllowedStreamType(uint64_t value) {
constexpr std::array kAllowedStreamTypes = {
MoqtDataStreamType::kStreamHeaderSubgroup,
MoqtDataStreamType::kStreamHeaderTrack, MoqtDataStreamType::kPadding};
for (MoqtDataStreamType type : kAllowedStreamTypes) {
if (static_cast<uint64_t>(type) == value) {
return true;
}
}
return false;
}
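// Stream-type-specific header layout: track headers omit the group id here
// (it arrives per object in the subheader), subgroup headers add a subgroup
// id, and datagrams additionally carry the object id and payload length
// inline, with an explicit status varint only when the payload is empty.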
size_t ParseObjectHeader(quic::QuicDataReader& reader, MoqtObject& object,
MoqtDataStreamType type) {
if (!reader.ReadVarInt62(&object.subscribe_id) ||
!reader.ReadVarInt62(&object.track_alias)) {
return 0;
}
if (type != MoqtDataStreamType::kStreamHeaderTrack &&
!reader.ReadVarInt62(&object.group_id)) {
return 0;
}
if (type == MoqtDataStreamType::kStreamHeaderSubgroup) {
uint64_t subgroup_id;
if (!reader.ReadVarInt62(&subgroup_id)) {
return 0;
}
object.subgroup_id = subgroup_id;
}
if (type == MoqtDataStreamType::kObjectDatagram &&
!reader.ReadVarInt62(&object.object_id)) {
return 0;
}
if (!reader.ReadUInt8(&object.publisher_priority)) {
return 0;
}
uint64_t status = static_cast<uint64_t>(MoqtObjectStatus::kNormal);
if (type == MoqtDataStreamType::kObjectDatagram &&
(!reader.ReadVarInt62(&object.payload_length) ||
(object.payload_length == 0 && !reader.ReadVarInt62(&status)))) {
return 0;
}
object.object_status = IntegerToObjectStatus(status);
object.forwarding_preference = GetForwardingPreference(type);
return reader.PreviouslyReadPayload().size();
}
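// Parses the per-object fields that precede each payload within a stream.
// Returns bytes consumed, or 0 if the input is incomplete.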
size_t ParseObjectSubheader(quic::QuicDataReader& reader, MoqtObject& object,
MoqtDataStreamType type) {
switch (type) {
case MoqtDataStreamType::kStreamHeaderTrack:
if (!reader.ReadVarInt62(&object.group_id)) {
return 0;
}
[[fallthrough]];
case MoqtDataStreamType::kStreamHeaderSubgroup: {
if (!reader.ReadVarInt62(&object.object_id) ||
!reader.ReadVarInt62(&object.payload_length)) {
return 0;
}
uint64_t status = static_cast<uint64_t>(MoqtObjectStatus::kNormal);
if (object.payload_length == 0 && !reader.ReadVarInt62(&status)) {
return 0;
}
object.object_status = IntegerToObjectStatus(status);
return reader.PreviouslyReadPayload().size();
}
default:
QUICHE_NOTREACHED();
return 0;
}
}
}
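// Accumulates incoming control-stream bytes, dispatching each complete
// message in order. A FIN that arrives mid-message is a protocol violation.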
void MoqtControlParser::ProcessData(absl::string_view data, bool fin) {
if (no_more_data_) {
ParseError("Data after end of stream");
}
if (processing_) {
return;
}
processing_ = true;
auto on_return = absl::MakeCleanup([&] { processing_ = false; });
if (fin) {
no_more_data_ = true;
if (!buffered_message_.empty() && data.empty()) {
ParseError("End of stream before complete message");
return;
}
}
std::optional<quic::QuicDataReader> reader = std::nullopt;
size_t original_buffer_size = buffered_message_.size();
if (!buffered_message_.empty()) {
absl::StrAppend(&buffered_message_, data);
reader.emplace(buffered_message_);
} else {
reader.emplace(data);
}
size_t total_processed = 0;
while (!reader->IsDoneReading()) {
size_t message_len = ProcessMessage(reader->PeekRemainingPayload());
if (message_len == 0) {
if (reader->BytesRemaining() > kMaxMessageHeaderSize) {
ParseError(MoqtError::kInternalError,
"Cannot parse non-OBJECT messages > 2KB");
return;
}
if (fin) {
ParseError("FIN after incomplete message");
return;
}
if (buffered_message_.empty()) {
absl::StrAppend(&buffered_message_, reader->PeekRemainingPayload());
}
break;
}
total_processed += message_len;
reader->Seek(message_len);
}
if (original_buffer_size > 0) {
buffered_message_.erase(0, total_processed);
}
}
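// Parses at most one message from |data|. Returns the number of bytes the
// message occupied, or 0 if it is not yet complete.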
size_t MoqtControlParser::ProcessMessage(absl::string_view data) {
uint64_t value;
quic::QuicDataReader reader(data);
if (!reader.ReadVarInt62(&value)) {
return 0;
}
auto type = static_cast<MoqtMessageType>(value);
switch (type) {
case MoqtMessageType::kClientSetup:
return ProcessClientSetup(reader);
case MoqtMessageType::kServerSetup:
return ProcessServerSetup(reader);
case MoqtMessageType::kSubscribe:
return ProcessSubscribe(reader);
case MoqtMessageType::kSubscribeOk:
return ProcessSubscribeOk(reader);
case MoqtMessageType::kSubscribeError:
return ProcessSubscribeError(reader);
case MoqtMessageType::kUnsubscribe:
return ProcessUnsubscribe(reader);
case MoqtMessageType::kSubscribeDone:
return ProcessSubscribeDone(reader);
case MoqtMessageType::kSubscribeUpdate:
return ProcessSubscribeUpdate(reader);
case MoqtMessageType::kAnnounce:
return ProcessAnnounce(reader);
case MoqtMessageType::kAnnounceOk:
return ProcessAnnounceOk(reader);
case MoqtMessageType::kAnnounceError:
return ProcessAnnounceError(reader);
case MoqtMessageType::kAnnounceCancel:
return ProcessAnnounceCancel(reader);
case MoqtMessageType::kTrackStatusRequest:
return ProcessTrackStatusRequest(reader);
case MoqtMessageType::kUnannounce:
return ProcessUnannounce(reader);
case MoqtMessageType::kTrackStatus:
return ProcessTrackStatus(reader);
case MoqtMessageType::kGoAway:
return ProcessGoAway(reader);
case MoqtMessageType::kSubscribeNamespace:
return ProcessSubscribeNamespace(reader);
case MoqtMessageType::kSubscribeNamespaceOk:
return ProcessSubscribeNamespaceOk(reader);
case MoqtMessageType::kSubscribeNamespaceError:
return ProcessSubscribeNamespaceError(reader);
case MoqtMessageType::kUnsubscribeNamespace:
return ProcessUnsubscribeNamespace(reader);
case MoqtMessageType::kMaxSubscribeId:
return ProcessMaxSubscribeId(reader);
case moqt::MoqtMessageType::kObjectAck:
return ProcessObjectAck(reader);
default:
ParseError("Unknown message type");
}
return 0;
}
size_t MoqtControlParser::ProcessClientSetup(quic::QuicDataReader& reader) {
MoqtClientSetup setup;
uint64_t number_of_supported_versions;
if (!reader.ReadVarInt62(&number_of_supported_versions)) {
return 0;
}
uint64_t version;
for (uint64_t i = 0; i < number_of_supported_versions; ++i) {
if (!reader.ReadVarInt62(&version)) {
return 0;
}
setup.supported_versions.push_back(static_cast<MoqtVersion>(version));
}
uint64_t num_params;
if (!reader.ReadVarInt62(&num_params)) {
return 0;
}
for (uint64_t i = 0; i < num_params; ++i) {
uint64_t type;
absl::string_view value;
if (!ReadParameter(reader, type, value)) {
return 0;
}
auto key = static_cast<MoqtSetupParameter>(type);
switch (key) {
case MoqtSetupParameter::kRole:
if (setup.role.has_value()) {
ParseError("ROLE parameter appears twice in SETUP");
return 0;
}
uint64_t index;
if (!StringViewToVarInt(value, index)) {
return 0;
}
if (index > static_cast<uint64_t>(MoqtRole::kRoleMax)) {
ParseError("Invalid ROLE parameter");
return 0;
}
setup.role = static_cast<MoqtRole>(index);
break;
case MoqtSetupParameter::kPath:
if (uses_web_transport_) {
ParseError(
"WebTransport connection is using PATH parameter in SETUP");
return 0;
}
if (setup.path.has_value()) {
ParseError("PATH parameter appears twice in CLIENT_SETUP");
return 0;
}
setup.path = value;
break;
case MoqtSetupParameter::kMaxSubscribeId:
if (setup.max_subscribe_id.has_value()) {
ParseError("MAX_SUBSCRIBE_ID parameter appears twice in SETUP");
return 0;
}
uint64_t max_id;
if (!StringViewToVarInt(value, max_id)) {
ParseError("MAX_SUBSCRIBE_ID parameter is not a valid varint");
return 0;
}
setup.max_subscribe_id = max_id;
break;
case MoqtSetupParameter::kSupportObjectAcks:
uint64_t flag;
if (!StringViewToVarInt(value, flag) || flag > 1) {
ParseError("Invalid kSupportObjectAcks value");
return 0;
}
setup.supports_object_ack = static_cast<bool>(flag);
break;
default:
break;
}
}
if (!setup.role.has_value()) {
ParseError("ROLE parameter missing from CLIENT_SETUP message");
return 0;
}
if (!uses_web_transport_ && !setup.path.has_value()) {
ParseError("PATH SETUP parameter missing from Client message over QUIC");
return 0;
}
visitor_.OnClientSetupMessage(setup);
return reader.PreviouslyReadPayload().length();
}
size_t MoqtControlParser::ProcessServerSetup(quic::QuicDataReader& reader) {
MoqtServerSetup setup;
uint64_t version;
if (!reader.ReadVarInt62(&version)) {
return 0;
}
setup.selected_version = static_cast<MoqtVersion>(version);
uint64_t num_params;
if (!reader.ReadVarInt62(&num_params)) {
return 0;
}
for (uint64_t i = 0; i < num_params; ++i) {
uint64_t type;
absl::string_view value;
if (!ReadParameter(reader, type, value)) {
return 0;
}
auto key = static_cast<MoqtSetupParameter>(type);
switch (key) {
case MoqtSetupParameter::kRole:
if (setup.role.has_value()) {
ParseError("ROLE parameter appears twice in SETUP");
return 0;
}
uint64_t index;
if (!StringViewToVarInt(value, index)) {
return 0;
}
if (index > static_cast<uint64_t>(MoqtRole::kRoleMax)) {
ParseError("Invalid ROLE parameter");
return 0;
}
setup.role = static_cast<MoqtRole>(index);
break;
case MoqtSetupParameter::kPath:
ParseError("PATH parameter in SERVER_SETUP");
return 0;
case MoqtSetupParameter::kMaxSubscribeId:
if (setup.max_subscribe_id.has_value()) {
ParseError("MAX_SUBSCRIBE_ID parameter appears twice in SETUP");
return 0;
}
uint64_t max_id;
if (!StringViewToVarInt(value, max_id)) {
ParseError("MAX_SUBSCRIBE_ID parameter is not a valid varint");
return 0;
}
setup.max_subscribe_id = max_id;
break;
case MoqtSetupParameter::kSupportObjectAcks:
uint64_t flag;
if (!StringViewToVarInt(value, flag) || flag > 1) {
ParseError("Invalid kSupportObjectAcks value");
return 0;
}
setup.supports_object_ack = static_cast<bool>(flag);
break;
default:
break;
}
}
if (!setup.role.has_value()) {
ParseError("ROLE parameter missing from SERVER_SETUP message");
return 0;
}
visitor_.OnServerSetupMessage(setup);
return reader.PreviouslyReadPayload().length();
}
size_t MoqtControlParser::ProcessSubscribe(quic::QuicDataReader& reader) {
MoqtSubscribe subscribe_request;
uint64_t filter, group, object;
uint8_t group_order;
absl::string_view track_name;
if (!reader.ReadVarInt62(&subscribe_request.subscribe_id) ||
!reader.ReadVarInt62(&subscribe_request.track_alias) ||
!ReadTrackNamespace(reader, subscribe_request.full_track_name) ||
!reader.ReadStringPieceVarInt62(&track_name) ||
!reader.ReadUInt8(&subscribe_request.subscriber_priority) ||
!reader.ReadUInt8(&group_order) || !reader.ReadVarInt62(&filter)) {
return 0;
}
subscribe_request.full_track_name.AddElement(track_name);
if (!ParseDeliveryOrder(group_order, subscribe_request.group_order)) {
ParseError("Invalid group order value in SUBSCRIBE message");
return 0;
}
MoqtFilterType filter_type = static_cast<MoqtFilterType>(filter);
switch (filter_type) {
case MoqtFilterType::kLatestGroup:
subscribe_request.start_object = 0;
break;
case MoqtFilterType::kLatestObject:
break;
case MoqtFilterType::kAbsoluteStart:
case MoqtFilterType::kAbsoluteRange:
if (!reader.ReadVarInt62(&group) || !reader.ReadVarInt62(&object)) {
return 0;
}
subscribe_request.start_group = group;
subscribe_request.start_object = object;
if (filter_type == MoqtFilterType::kAbsoluteStart) {
break;
}
if (!reader.ReadVarInt62(&group) || !reader.ReadVarInt62(&object)) {
return 0;
}
subscribe_request.end_group = group;
if (subscribe_request.end_group < subscribe_request.start_group) {
ParseError("End group is less than start group");
return 0;
}
if (object == 0) {
subscribe_request.end_object = std::nullopt;
} else {
subscribe_request.end_object = object - 1;
if (subscribe_request.start_group == subscribe_request.end_group &&
subscribe_request.end_object < subscribe_request.start_object) {
ParseError("End object comes before start object");
return 0;
}
}
break;
default:
ParseError("Invalid filter type");
return 0;
}
if (!ReadSubscribeParameters(reader, subscribe_request.parameters)) {
return 0;
}
visitor_.OnSubscribeMessage(subscribe_request);
return reader.PreviouslyReadPayload().length();
}
size_t MoqtControlParser::ProcessSubscribeOk(quic::QuicDataReader& reader) {
MoqtSubscribeOk subscribe_ok;
uint64_t milliseconds;
uint8_t group_order;
uint8_t content_exists;
if (!reader.ReadVarInt62(&subscribe_ok.subscribe_id) ||
!reader.ReadVarInt62(&milliseconds) || !reader.ReadUInt8(&group_order) ||
!reader.ReadUInt8(&content_exists)) {
return 0;
}
if (content_exists > 1) {
ParseError("SUBSCRIBE_OK ContentExists has invalid value");
return 0;
}
if (group_order != 0x01 && group_order != 0x02) {
ParseError("Invalid group order value in SUBSCRIBE_OK");
return 0;
}
subscribe_ok.expires = quic::QuicTimeDelta::FromMilliseconds(milliseconds);
subscribe_ok.group_order = static_cast<MoqtDeliveryOrder>(group_order);
if (content_exists) {
subscribe_ok.largest_id = FullSequence();
if (!reader.ReadVarInt62(&subscribe_ok.largest_id->group) ||
!reader.ReadVarInt62(&subscribe_ok.largest_id->object)) {
return 0;
}
}
if (!ReadSubscribeParameters(reader, subscribe_ok.parameters)) {
return 0;
}
if (subscribe_ok.parameters.authorization_info.has_value()) {
ParseError("SUBSCRIBE_OK has authorization info");
return 0;
}
visitor_.OnSubscribeOkMessage(subscribe_ok);
return reader.PreviouslyReadPayload().length();
}
size_t MoqtControlParser::ProcessSubscribeError(quic::QuicDataReader& reader) {
MoqtSubscribeError subscribe_error;
uint64_t error_code;
if (!reader.ReadVarInt62(&subscribe_error.subscribe_id) ||
!reader.ReadVarInt62(&error_code) ||
!reader.ReadStringVarInt62(subscribe_error.reason_phrase) ||
!reader.ReadVarInt62(&subscribe_error.track_alias)) {
return 0;
}
subscribe_error.error_code = static_cast<SubscribeErrorCode>(error_code);
visitor_.OnSubscribeErrorMessage(subscribe_error);
return reader.PreviouslyReadPayload().length();
}
size_t MoqtControlParser::ProcessUnsubscribe(quic::QuicDataReader& reader) {
MoqtUnsubscribe unsubscribe;
if (!reader.ReadVarInt62(&unsubscribe.subscribe_id)) {
return 0;
}
visitor_.OnUnsubscribeMessage(unsubscribe);
return reader.PreviouslyReadPayload().length();
}
size_t MoqtControlParser::ProcessSubscribeDone(quic::QuicDataReader& reader) {
MoqtSubscribeDone subscribe_done;
uint8_t content_exists;
uint64_t value;
if (!reader.ReadVarInt62(&subscribe_done.subscribe_id) ||
!reader.ReadVarInt62(&value) ||
!reader.ReadStringVarInt62(subscribe_done.reason_phrase) ||
!reader.ReadUInt8(&content_exists)) {
return 0;
}
subscribe_done.status_code = static_cast<SubscribeDoneCode>(value);
if (content_exists > 1) {
ParseError("SUBSCRIBE_DONE ContentExists has invalid value");
return 0;
}
if (content_exists == 1) {
subscribe_done.final_id = FullSequence();
if (!reader.ReadVarInt62(&subscribe_done.final_id->group) ||
!reader.ReadVarInt62(&subscribe_done.final_id->object)) {
return 0;
}
}
visitor_.OnSubscribeDoneMessage(subscribe_done);
return reader.PreviouslyReadPayload().length();
}
size_t MoqtControlParser::ProcessSubscribeUpdate(quic::QuicDataReader& reader) {
MoqtSubscribeUpdate subscribe_update;
uint64_t end_group, end_object;
if (!reader.ReadVarInt62(&subscribe_update.subscribe_id) ||
!reader.ReadVarInt62(&subscribe_update.start_group) ||
!reader.ReadVarInt62(&subscribe_update.start_object) ||
!reader.ReadVarInt62(&end_group) || !reader.ReadVarInt62(&end_object) ||
!reader.ReadUInt8(&subscribe_update.subscriber_priority)) {
return 0;
}
if (!ReadSubscribeParameters(reader, subscribe_update.parameters)) {
return 0;
}
if (end_group == 0) {
if (end_object > 0) {
ParseError("SUBSCRIBE_UPDATE has end_object but no end_group");
return 0;
}
} else {
subscribe_update.end_group = end_group - 1;
if (subscribe_update.end_group < subscribe_update.start_group) {
ParseError("End group is less than start group");
return 0;
}
}
if (end_object > 0) {
subscribe_update.end_object = end_object - 1;
if (subscribe_update.end_object.has_value() &&
subscribe_update.start_group == *subscribe_update.end_group &&
*subscribe_update.end_object < subscribe_update.start_object) {
ParseError("End object comes before start object");
return 0;
}
} else {
subscribe_update.end_object = std::nullopt;
}
if (subscribe_update.parameters.authorization_info.has_value()) {
ParseError("SUBSCRIBE_UPDATE has authorization info");
return 0;
}
visitor_.OnSubscribeUpdateMessage(subscribe_update);
return reader.PreviouslyReadPayload().length();
}
size_t MoqtControlParser::ProcessAnnounce(quic::QuicDataReader& reader) {
MoqtAnnounce announce;
if (!ReadTrackNamespace(reader, announce.track_namespace)) {
return 0;
}
if (!ReadSubscribeParameters(reader, announce.parameters)) {
return 0;
}
if (announce.parameters.delivery_timeout.has_value()) {
ParseError("ANNOUNCE has delivery timeout");
return 0;
}
visitor_.OnAnnounceMessage(announce);
return reader.PreviouslyReadPayload().length();
}
size_t MoqtControlParser::ProcessAnnounceOk(quic::QuicDataReader& reader) {
MoqtAnnounceOk announce_ok;
if (!ReadTrackNamespace(reader, announce_ok.track_namespace)) {
return 0;
}
visitor_.OnAnnounceOkMessage(announce_ok);
return reader.PreviouslyReadPayload().length();
}
size_t MoqtControlParser::ProcessAnnounceError(quic::QuicDataReader& reader) {
MoqtAnnounceError announce_error;
if (!ReadTrackNamespace(reader, announce_error.track_namespace)) {
return 0;
}
uint64_t error_code;
if (!reader.ReadVarInt62(&error_code)) {
return 0;
}
announce_error.error_code = static_cast<MoqtAnnounceErrorCode>(error_code);
if (!reader.ReadStringVarInt62(announce_error.reason_phrase)) {
return 0;
}
visitor_.OnAnnounceErrorMessage(announce_error);
return reader.PreviouslyReadPayload().length();
}
size_t MoqtControlParser::ProcessAnnounceCancel(quic::QuicDataReader& reader) {
MoqtAnnounceCancel announce_cancel;
if (!ReadTrackNamespace(reader, announce_cancel.track_namespace)) {
return 0;
}
if (!reader.ReadVarInt62(&announce_cancel.error_code) ||
!reader.ReadStringVarInt62(announce_cancel.reason_phrase)) {
return 0;
}
visitor_.OnAnnounceCancelMessage(announce_cancel);
return reader.PreviouslyReadPayload().length();
}
size_t MoqtControlParser::ProcessTrackStatusRequest(
quic::QuicDataReader& reader) {
MoqtTrackStatusRequest track_status_request;
if (!ReadTrackNamespace(reader, track_status_request.full_track_name)) {
return 0;
}
absl::string_view name;
if (!reader.ReadStringPieceVarInt62(&name)) {
return 0;
}
track_status_request.full_track_name.AddElement(name);
visitor_.OnTrackStatusRequestMessage(track_status_request);
return reader.PreviouslyReadPayload().length();
}
size_t MoqtControlParser::ProcessUnannounce(quic::QuicDataReader& reader) {
MoqtUnannounce unannounce;
if (!ReadTrackNamespace(reader, unannounce.track_namespace)) {
return 0;
}
visitor_.OnUnannounceMessage(unannounce);
return reader.PreviouslyReadPayload().length();
}
size_t MoqtControlParser::ProcessTrackStatus(quic::QuicDataReader& reader) {
MoqtTrackStatus track_status;
if (!ReadTrackNamespace(reader, track_status.full_track_name)) {
return 0;
}
absl::string_view name;
if (!reader.ReadStringPieceVarInt62(&name)) {
return 0;
}
track_status.full_track_name.AddElement(name);
uint64_t value;
if (!reader.ReadVarInt62(&value) ||
!reader.ReadVarInt62(&track_status.last_group) ||
!reader.ReadVarInt62(&track_status.last_object)) {
return 0;
}
track_status.status_code = static_cast<MoqtTrackStatusCode>(value);
visitor_.OnTrackStatusMessage(track_status);
return reader.PreviouslyReadPayload().length();
}
size_t MoqtControlParser::ProcessGoAway(quic::QuicDataReader& reader) {
MoqtGoAway goaway;
if (!reader.ReadStringVarInt62(goaway.new_session_uri)) {
return 0;
}
visitor_.OnGoAwayMessage(goaway);
return reader.PreviouslyReadPayload().length();
}
size_t MoqtControlParser::ProcessSubscribeNamespace(
quic::QuicDataReader& reader) {
MoqtSubscribeNamespace subscribe_namespace;
if (!ReadTrackNamespace(reader, subscribe_namespace.track_namespace)) {
return 0;
}
if (!ReadSubscribeParameters(reader, subscribe_namespace.parameters)) {
return 0;
}
visitor_.OnSubscribeNamespaceMessage(subscribe_namespace);
return reader.PreviouslyReadPayload().length();
}
size_t MoqtControlParser::ProcessSubscribeNamespaceOk(
quic::QuicDataReader& reader) {
MoqtSubscribeNamespaceOk subscribe_namespace_ok;
if (!ReadTrackNamespace(reader, subscribe_namespace_ok.track_namespace)) {
return 0;
}
visitor_.OnSubscribeNamespaceOkMessage(subscribe_namespace_ok);
return reader.PreviouslyReadPayload().length();
}
size_t MoqtControlParser::ProcessSubscribeNamespaceError(
quic::QuicDataReader& reader) {
MoqtSubscribeNamespaceError subscribe_namespace_error;
uint64_t error_code;
if (!ReadTrackNamespace(reader, subscribe_namespace_error.track_namespace) ||
!reader.ReadVarInt62(&error_code) ||
!reader.ReadStringVarInt62(subscribe_namespace_error.reason_phrase)) {
return 0;
}
subscribe_namespace_error.error_code =
static_cast<MoqtAnnounceErrorCode>(error_code);
visitor_.OnSubscribeNamespaceErrorMessage(subscribe_namespace_error);
return reader.PreviouslyReadPayload().length();
}
size_t MoqtControlParser::ProcessUnsubscribeNamespace(
quic::QuicDataReader& reader) {
MoqtUnsubscribeNamespace unsubscribe_namespace;
if (!ReadTrackNamespace(reader, unsubscribe_namespace.track_namespace)) {
return 0;
}
visitor_.OnUnsubscribeNamespaceMessage(unsubscribe_namespace);
return reader.PreviouslyReadPayload().length();
}
size_t MoqtControlParser::ProcessMaxSubscribeId(quic::QuicDataReader& reader) {
MoqtMaxSubscribeId max_subscribe_id;
if (!reader.ReadVarInt62(&max_subscribe_id.max_subscribe_id)) {
return 0;
}
visitor_.OnMaxSubscribeIdMessage(max_subscribe_id);
return reader.PreviouslyReadPayload().length();
}
size_t MoqtControlParser::ProcessObjectAck(quic::QuicDataReader& reader) {
MoqtObjectAck object_ack;
uint64_t raw_delta;
if (!reader.ReadVarInt62(&object_ack.subscribe_id) ||
!reader.ReadVarInt62(&object_ack.group_id) ||
!reader.ReadVarInt62(&object_ack.object_id) ||
!reader.ReadVarInt62(&raw_delta)) {
return 0;
}
object_ack.delta_from_deadline = quic::QuicTimeDelta::FromMicroseconds(
SignedVarintUnserializedForm(raw_delta));
visitor_.OnObjectAckMessage(object_ack);
return reader.PreviouslyReadPayload().length();
}
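// Records a terminal parse error; only the first error on a stream is
// surfaced to the visitor.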
void MoqtControlParser::ParseError(absl::string_view reason) {
ParseError(MoqtError::kProtocolViolation, reason);
}
void MoqtControlParser::ParseError(MoqtError error_code,
absl::string_view reason) {
if (parsing_error_) {
return;
}
no_more_data_ = true;
parsing_error_ = true;
visitor_.OnParsingError(error_code, reason);
}
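// Reads a length-prefixed varint, verifying that the declared length matches
// the varint's own encoded length.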
bool MoqtControlParser::ReadVarIntPieceVarInt62(quic::QuicDataReader& reader,
uint64_t& result) {
uint64_t length;
if (!reader.ReadVarInt62(&length)) {
return false;
}
uint64_t actual_length = static_cast<uint64_t>(reader.PeekVarInt62Length());
if (length != actual_length) {
ParseError("Parameter VarInt has length field mismatch");
return false;
}
if (!reader.ReadVarInt62(&result)) {
return false;
}
return true;
}
bool MoqtControlParser::ReadParameter(quic::QuicDataReader& reader,
uint64_t& type,
absl::string_view& value) {
if (!reader.ReadVarInt62(&type)) {
return false;
}
return reader.ReadStringPieceVarInt62(&value);
}
bool MoqtControlParser::ReadSubscribeParameters(
quic::QuicDataReader& reader, MoqtSubscribeParameters& params) {
uint64_t num_params;
if (!reader.ReadVarInt62(&num_params)) {
return false;
}
for (uint64_t i = 0; i < num_params; ++i) {
uint64_t type;
absl::string_view value;
if (!ReadParameter(reader, type, value)) {
return false;
}
uint64_t raw_value;
auto key = static_cast<MoqtTrackRequestParameter>(type);
switch (key) {
case MoqtTrackRequestParameter::kAuthorizationInfo:
if (params.authorization_info.has_value()) {
ParseError("AUTHORIZATION_INFO parameter appears twice");
return false;
}
params.authorization_info = value;
break;
case moqt::MoqtTrackRequestParameter::kDeliveryTimeout:
if (params.delivery_timeout.has_value()) {
ParseError("DELIVERY_TIMEOUT parameter appears twice");
return false;
}
if (!StringViewToVarInt(value, raw_value)) {
return false;
}
params.delivery_timeout =
quic::QuicTimeDelta::FromMilliseconds(raw_value);
break;
case moqt::MoqtTrackRequestParameter::kMaxCacheDuration:
if (params.max_cache_duration.has_value()) {
ParseError("MAX_CACHE_DURATION parameter appears twice");
return false;
}
if (!StringViewToVarInt(value, raw_value)) {
return false;
}
params.max_cache_duration =
quic::QuicTimeDelta::FromMilliseconds(raw_value);
break;
case MoqtTrackRequestParameter::kOackWindowSize: {
if (params.object_ack_window.has_value()) {
ParseError("OACK_WINDOW_SIZE parameter appears twice in SUBSCRIBE");
return false;
}
if (!StringViewToVarInt(value, raw_value)) {
ParseError("OACK_WINDOW_SIZE parameter is not a valid varint");
return false;
}
params.object_ack_window =
quic::QuicTimeDelta::FromMicroseconds(raw_value);
break;
}
default:
break;
}
}
return true;
}
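// Interprets a parameter value as exactly one varint; a length mismatch is
// reported as kParameterLengthMismatch.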
bool MoqtControlParser::StringViewToVarInt(absl::string_view& sv,
uint64_t& vi) {
quic::QuicDataReader reader(sv);
if (static_cast<size_t>(reader.PeekVarInt62Length()) != sv.length()) {
ParseError(MoqtError::kParameterLengthMismatch,
"Parameter length does not match varint encoding");
return false;
}
reader.ReadVarInt62(&vi);
return true;
}
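// Reads a tuple-encoded track namespace: an element count followed by that
// many length-prefixed elements, appended to |full_track_name|.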
bool MoqtControlParser::ReadTrackNamespace(quic::QuicDataReader& reader,
FullTrackName& full_track_name) {
QUICHE_DCHECK(full_track_name.empty());
uint64_t num_elements;
if (!reader.ReadVarInt62(&num_elements)) {
return false;
}
for (uint64_t i = 0; i < num_elements; ++i) {
absl::string_view element;
if (!reader.ReadStringPieceVarInt62(&element)) {
return false;
}
full_track_name.AddElement(element);
}
return true;
}
void MoqtDataParser::ParseError(absl::string_view reason) {
if (parsing_error_) {
return;
}
no_more_data_ = true;
parsing_error_ = true;
visitor_.OnParsingError(MoqtError::kProtocolViolation, reason);
}
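// Parses an OBJECT_DATAGRAM, filling |object_metadata| and returning the
// payload, or an empty string_view on any parse failure.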
absl::string_view ParseDatagram(absl::string_view data,
MoqtObject& object_metadata) {
uint64_t value;
quic::QuicDataReader reader(data);
if (!reader.ReadVarInt62(&value)) {
return absl::string_view();
}
if (static_cast<MoqtDataStreamType>(value) !=
MoqtDataStreamType::kObjectDatagram) {
return absl::string_view();
}
size_t processed_data = ParseObjectHeader(
reader, object_metadata, MoqtDataStreamType::kObjectDatagram);
if (processed_data == 0) {
return absl::string_view();
}
return reader.PeekRemainingPayload();
}
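// Consumes stream data, replaying any previously buffered bytes in small
// chunks until the buffer drains, then parsing the rest of |data| in place.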
void MoqtDataParser::ProcessData(absl::string_view data, bool fin) {
if (processing_) {
QUICHE_BUG(MoqtDataParser_reentry)
<< "Calling ProcessData() when ProcessData() is already in progress.";
return;
}
processing_ = true;
auto on_return = absl::MakeCleanup([&] { processing_ = false; });
if (no_more_data_) {
ParseError("Data after end of stream");
return;
}
while (!buffered_message_.empty() && !data.empty()) {
absl::string_view chunk = data.substr(0, chunk_size_);
absl::StrAppend(&buffered_message_, chunk);
absl::string_view unprocessed = ProcessDataInner(buffered_message_);
if (unprocessed.size() >= chunk.size()) {
data.remove_prefix(chunk.size());
} else {
buffered_message_.clear();
data.remove_prefix(chunk.size() - unprocessed.size());
}
}
if (buffered_message_.empty() && !data.empty()) {
buffered_message_.assign(ProcessDataInner(data));
}
if (fin) {
if (!buffered_message_.empty() || !metadata_.has_value() ||
payload_length_remaining_ > 0) {
ParseError("FIN received at an unexpected point in the stream");
return;
}
no_more_data_ = true;
}
}
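// Runs the parsing state machine over |data| and returns the unconsumed
// suffix, which the caller buffers until more data arrives.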
absl::string_view MoqtDataParser::ProcessDataInner(absl::string_view data) {
quic::QuicDataReader reader(data);
while (!reader.IsDoneReading()) {
absl::string_view remainder = reader.PeekRemainingPayload();
switch (GetNextInput()) {
case kStreamType: {
uint64_t value;
if (!reader.ReadVarInt62(&value)) {
return remainder;
}
if (!IsAllowedStreamType(value)) {
ParseError(absl::StrCat("Unknown stream type: ", value));
return "";
}
type_ = static_cast<MoqtDataStreamType>(value);
continue;
}
case kHeader: {
MoqtObject header;
size_t bytes_read = ParseObjectHeader(reader, header, *type_);
if (bytes_read == 0) {
return remainder;
}
metadata_ = header;
continue;
}
case kSubheader: {
size_t bytes_read = ParseObjectSubheader(reader, *metadata_, *type_);
if (bytes_read == 0) {
return remainder;
}
if (metadata_->object_status ==
MoqtObjectStatus::kInvalidObjectStatus) {
ParseError("Invalid object status provided");
return "";
}
payload_length_remaining_ = metadata_->payload_length;
if (payload_length_remaining_ == 0) {
visitor_.OnObjectMessage(*metadata_, "", true);
}
continue;
}
case kData: {
absl::string_view payload =
reader.ReadAtMost(payload_length_remaining_);
visitor_.OnObjectMessage(*metadata_, payload,
payload.size() == payload_length_remaining_);
payload_length_remaining_ -= payload.size();
continue;
}
case kPadding:
return "";
}
}
return "";
}
} | #include "quiche/quic/moqt/moqt_parser.h"
#include <array>
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <memory>
#include <optional>
#include <string>
#include <vector>
#include "absl/strings/str_join.h"
#include "absl/strings/string_view.h"
#include "absl/types/variant.h"
#include "quiche/quic/core/quic_data_writer.h"
#include "quiche/quic/core/quic_time.h"
#include "quiche/quic/moqt/moqt_messages.h"
#include "quiche/quic/moqt/test_tools/moqt_test_message.h"
#include "quiche/quic/platform/api/quic_test.h"
namespace moqt::test {
namespace {
using ::testing::AnyOf;
using ::testing::HasSubstr;
using ::testing::Optional;
constexpr std::array kMessageTypes{
MoqtMessageType::kSubscribe,
MoqtMessageType::kSubscribeOk,
MoqtMessageType::kSubscribeError,
MoqtMessageType::kSubscribeUpdate,
MoqtMessageType::kUnsubscribe,
MoqtMessageType::kSubscribeDone,
MoqtMessageType::kAnnounceCancel,
MoqtMessageType::kTrackStatusRequest,
MoqtMessageType::kTrackStatus,
MoqtMessageType::kAnnounce,
MoqtMessageType::kAnnounceOk,
MoqtMessageType::kAnnounceError,
MoqtMessageType::kUnannounce,
MoqtMessageType::kClientSetup,
MoqtMessageType::kServerSetup,
MoqtMessageType::kGoAway,
MoqtMessageType::kSubscribeNamespace,
MoqtMessageType::kSubscribeNamespaceOk,
MoqtMessageType::kSubscribeNamespaceError,
MoqtMessageType::kUnsubscribeNamespace,
MoqtMessageType::kMaxSubscribeId,
MoqtMessageType::kObjectAck,
};
constexpr std::array kDataStreamTypes{
MoqtDataStreamType::kStreamHeaderTrack,
MoqtDataStreamType::kStreamHeaderSubgroup};
using GeneralizedMessageType =
absl::variant<MoqtMessageType, MoqtDataStreamType>;
}
struct MoqtParserTestParams {
MoqtParserTestParams(MoqtMessageType message_type, bool uses_web_transport)
: message_type(message_type), uses_web_transport(uses_web_transport) {}
explicit MoqtParserTestParams(MoqtDataStreamType message_type)
: message_type(message_type), uses_web_transport(true) {}
GeneralizedMessageType message_type;
bool uses_web_transport;
};
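// Builds one test instance per control message type (CLIENT_SETUP is run
// over both raw QUIC and WebTransport) plus one per data stream type.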
std::vector<MoqtParserTestParams> GetMoqtParserTestParams() {
std::vector<MoqtParserTestParams> params;
for (MoqtMessageType message_type : kMessageTypes) {
if (message_type == MoqtMessageType::kClientSetup) {
for (const bool uses_web_transport : {false, true}) {
params.push_back(
MoqtParserTestParams(message_type, uses_web_transport));
}
} else {
params.push_back(MoqtParserTestParams(message_type, true));
}
}
for (MoqtDataStreamType type : kDataStreamTypes) {
params.push_back(MoqtParserTestParams(type));
}
return params;
}
std::string TypeFormatter(MoqtMessageType type) {
return MoqtMessageTypeToString(type);
}
std::string TypeFormatter(MoqtDataStreamType type) {
return MoqtDataStreamTypeToString(type);
}
std::string ParamNameFormatter(
const testing::TestParamInfo<MoqtParserTestParams>& info) {
return absl::visit([](auto x) { return TypeFormatter(x); },
info.param.message_type) +
"_" + (info.param.uses_web_transport ? "WebTransport" : "QUIC");
}
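// Test visitor that records the last message received, accumulates object
// payload fragments, and captures any parsing error and its code.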
class MoqtParserTestVisitor : public MoqtControlParserVisitor,
public MoqtDataParserVisitor {
public:
~MoqtParserTestVisitor() = default;
void OnObjectMessage(const MoqtObject& message, absl::string_view payload,
bool end_of_message) override {
MoqtObject object = message;
object_payloads_.push_back(std::string(payload));
end_of_message_ = end_of_message;
if (end_of_message) {
++messages_received_;
}
last_message_ = TestMessageBase::MessageStructuredData(object);
}
template <typename Message>
void OnControlMessage(const Message& message) {
end_of_message_ = true;
++messages_received_;
last_message_ = TestMessageBase::MessageStructuredData(message);
}
void OnClientSetupMessage(const MoqtClientSetup& message) override {
OnControlMessage(message);
}
void OnServerSetupMessage(const MoqtServerSetup& message) override {
OnControlMessage(message);
}
void OnSubscribeMessage(const MoqtSubscribe& message) override {
OnControlMessage(message);
}
void OnSubscribeOkMessage(const MoqtSubscribeOk& message) override {
OnControlMessage(message);
}
void OnSubscribeErrorMessage(const MoqtSubscribeError& message) override {
OnControlMessage(message);
}
void OnSubscribeUpdateMessage(const MoqtSubscribeUpdate& message) override {
OnControlMessage(message);
}
void OnUnsubscribeMessage(const MoqtUnsubscribe& message) override {
OnControlMessage(message);
}
void OnSubscribeDoneMessage(const MoqtSubscribeDone& message) override {
OnControlMessage(message);
}
void OnAnnounceMessage(const MoqtAnnounce& message) override {
OnControlMessage(message);
}
void OnAnnounceOkMessage(const MoqtAnnounceOk& message) override {
OnControlMessage(message);
}
void OnAnnounceErrorMessage(const MoqtAnnounceError& message) override {
OnControlMessage(message);
}
void OnAnnounceCancelMessage(const MoqtAnnounceCancel& message) override {
OnControlMessage(message);
}
void OnTrackStatusRequestMessage(
const MoqtTrackStatusRequest& message) override {
OnControlMessage(message);
}
void OnUnannounceMessage(const MoqtUnannounce& message) override {
OnControlMessage(message);
}
void OnTrackStatusMessage(const MoqtTrackStatus& message) override {
OnControlMessage(message);
}
void OnGoAwayMessage(const MoqtGoAway& message) override {
OnControlMessage(message);
}
void OnSubscribeNamespaceMessage(
const MoqtSubscribeNamespace& message) override {
OnControlMessage(message);
}
void OnSubscribeNamespaceOkMessage(
const MoqtSubscribeNamespaceOk& message) override {
OnControlMessage(message);
}
void OnSubscribeNamespaceErrorMessage(
const MoqtSubscribeNamespaceError& message) override {
OnControlMessage(message);
}
void OnUnsubscribeNamespaceMessage(
const MoqtUnsubscribeNamespace& message) override {
OnControlMessage(message);
}
void OnMaxSubscribeIdMessage(const MoqtMaxSubscribeId& message) override {
OnControlMessage(message);
}
void OnObjectAckMessage(const MoqtObjectAck& message) override {
OnControlMessage(message);
}
void OnParsingError(MoqtError code, absl::string_view reason) override {
QUIC_LOG(INFO) << "Parsing error: " << reason;
parsing_error_ = reason;
parsing_error_code_ = code;
}
std::string object_payload() { return absl::StrJoin(object_payloads_, ""); }
std::vector<std::string> object_payloads_;
bool end_of_message_ = false;
std::optional<std::string> parsing_error_;
MoqtError parsing_error_code_;
uint64_t messages_received_ = 0;
std::optional<TestMessageBase::MessageStructuredData> last_message_;
};
class MoqtParserTest
: public quic::test::QuicTestWithParam<MoqtParserTestParams> {
public:
MoqtParserTest()
: message_type_(GetParam().message_type),
webtrans_(GetParam().uses_web_transport),
control_parser_(GetParam().uses_web_transport, visitor_),
data_parser_(&visitor_) {}
bool IsDataStream() {
return absl::holds_alternative<MoqtDataStreamType>(message_type_);
}
std::unique_ptr<TestMessageBase> MakeMessage() {
if (IsDataStream()) {
return CreateTestDataStream(absl::get<MoqtDataStreamType>(message_type_));
} else {
return CreateTestMessage(absl::get<MoqtMessageType>(message_type_),
webtrans_);
}
}
void ProcessData(absl::string_view data, bool fin) {
if (IsDataStream()) {
data_parser_.ProcessData(data, fin);
} else {
control_parser_.ProcessData(data, fin);
}
}
protected:
MoqtParserTestVisitor visitor_;
GeneralizedMessageType message_type_;
bool webtrans_;
MoqtControlParser control_parser_;
MoqtDataParser data_parser_;
};
INSTANTIATE_TEST_SUITE_P(MoqtParserTests, MoqtParserTest,
testing::ValuesIn(GetMoqtParserTestParams()),
ParamNameFormatter);
TEST_P(MoqtParserTest, OneMessage) {
std::unique_ptr<TestMessageBase> message = MakeMessage();
ProcessData(message->PacketSample(), true);
EXPECT_EQ(visitor_.messages_received_, 1);
EXPECT_TRUE(message->EqualFieldValues(*visitor_.last_message_));
EXPECT_TRUE(visitor_.end_of_message_);
if (IsDataStream()) {
EXPECT_EQ(visitor_.object_payload(), "foo");
}
}
TEST_P(MoqtParserTest, OneMessageWithLongVarints) {
std::unique_ptr<TestMessageBase> message = MakeMessage();
message->ExpandVarints();
ProcessData(message->PacketSample(), true);
EXPECT_EQ(visitor_.messages_received_, 1);
EXPECT_TRUE(message->EqualFieldValues(*visitor_.last_message_));
EXPECT_TRUE(visitor_.end_of_message_);
EXPECT_FALSE(visitor_.parsing_error_.has_value());
if (IsDataStream()) {
EXPECT_EQ(visitor_.object_payload(), "foo");
}
}
TEST_P(MoqtParserTest, TwoPartMessage) {
std::unique_ptr<TestMessageBase> message = MakeMessage();
size_t first_data_size = message->total_message_size() / 2;
ProcessData(message->PacketSample().substr(0, first_data_size), false);
EXPECT_EQ(visitor_.messages_received_, 0);
ProcessData(
message->PacketSample().substr(
first_data_size, message->total_message_size() - first_data_size),
true);
EXPECT_EQ(visitor_.messages_received_, 1);
EXPECT_TRUE(message->EqualFieldValues(*visitor_.last_message_));
EXPECT_TRUE(visitor_.end_of_message_);
EXPECT_FALSE(visitor_.parsing_error_.has_value());
if (IsDataStream()) {
EXPECT_EQ(visitor_.object_payload(), "foo");
}
}
TEST_P(MoqtParserTest, OneByteAtATime) {
std::unique_ptr<TestMessageBase> message = MakeMessage();
for (size_t i = 0; i < message->total_message_size(); ++i) {
EXPECT_EQ(visitor_.messages_received_, 0);
EXPECT_FALSE(visitor_.end_of_message_);
bool last = i == (message->total_message_size() - 1);
ProcessData(message->PacketSample().substr(i, 1), last);
}
EXPECT_EQ(visitor_.messages_received_, 1);
EXPECT_TRUE(message->EqualFieldValues(*visitor_.last_message_));
EXPECT_TRUE(visitor_.end_of_message_);
EXPECT_FALSE(visitor_.parsing_error_.has_value());
if (IsDataStream()) {
EXPECT_EQ(visitor_.object_payload(), "foo");
}
}
TEST_P(MoqtParserTest, OneByteAtATimeLongerVarints) {
std::unique_ptr<TestMessageBase> message = MakeMessage();
message->ExpandVarints();
for (size_t i = 0; i < message->total_message_size(); ++i) {
EXPECT_EQ(visitor_.messages_received_, 0);
EXPECT_FALSE(visitor_.end_of_message_);
bool last = i == (message->total_message_size() - 1);
ProcessData(message->PacketSample().substr(i, 1), last);
}
EXPECT_EQ(visitor_.messages_received_, 1);
EXPECT_TRUE(message->EqualFieldValues(*visitor_.last_message_));
EXPECT_TRUE(visitor_.end_of_message_);
EXPECT_FALSE(visitor_.parsing_error_.has_value());
if (IsDataStream()) {
EXPECT_EQ(visitor_.object_payload(), "foo");
}
}
TEST_P(MoqtParserTest, TwoBytesAtATime) {
std::unique_ptr<TestMessageBase> message = MakeMessage();
data_parser_.set_chunk_size(1);
for (size_t i = 0; i < message->total_message_size(); i += 3) {
EXPECT_EQ(visitor_.messages_received_, 0);
EXPECT_FALSE(visitor_.end_of_message_);
bool last = (i + 2) >= message->total_message_size();
ProcessData(message->PacketSample().substr(i, 3), last);
}
EXPECT_EQ(visitor_.messages_received_, 1);
EXPECT_TRUE(message->EqualFieldValues(*visitor_.last_message_));
EXPECT_TRUE(visitor_.end_of_message_);
EXPECT_FALSE(visitor_.parsing_error_.has_value());
if (IsDataStream()) {
EXPECT_EQ(visitor_.object_payload(), "foo");
}
}
TEST_P(MoqtParserTest, EarlyFin) {
std::unique_ptr<TestMessageBase> message = MakeMessage();
size_t first_data_size = message->total_message_size() - 1;
ProcessData(message->PacketSample().substr(0, first_data_size), true);
EXPECT_EQ(visitor_.messages_received_, 0);
EXPECT_THAT(visitor_.parsing_error_,
AnyOf("FIN after incomplete message",
"FIN received at an unexpected point in the stream"));
}
TEST_P(MoqtParserTest, SeparateEarlyFin) {
std::unique_ptr<TestMessageBase> message = MakeMessage();
size_t first_data_size = message->total_message_size() - 1;
ProcessData(message->PacketSample().substr(0, first_data_size), false);
ProcessData(absl::string_view(), true);
EXPECT_EQ(visitor_.messages_received_, 0);
EXPECT_THAT(visitor_.parsing_error_,
AnyOf("End of stream before complete message",
"FIN received at an unexpected point in the stream"));
EXPECT_EQ(visitor_.parsing_error_code_, MoqtError::kProtocolViolation);
}
class MoqtMessageSpecificTest : public quic::test::QuicTest {
public:
MoqtMessageSpecificTest() = default;
MoqtParserTestVisitor visitor_;
static constexpr bool kWebTrans = true;
static constexpr bool kRawQuic = false;
};
TEST_F(MoqtMessageSpecificTest, ThreePartObject) {
MoqtDataParser parser(&visitor_);
auto message = std::make_unique<StreamHeaderSubgroupMessage>();
EXPECT_TRUE(message->SetPayloadLength(14));
parser.ProcessData(message->PacketSample(), false);
EXPECT_EQ(visitor_.messages_received_, 0);
EXPECT_TRUE(message->EqualFieldValues(*visitor_.last_message_));
EXPECT_FALSE(visitor_.end_of_message_);
EXPECT_EQ(visitor_.object_payload(), "foo");
parser.ProcessData("bar", false);
EXPECT_EQ(visitor_.messages_received_, 0);
EXPECT_TRUE(message->EqualFieldValues(*visitor_.last_message_));
EXPECT_FALSE(visitor_.end_of_message_);
EXPECT_EQ(visitor_.object_payload(), "foobar");
parser.ProcessData("deadbeef", true);
EXPECT_EQ(visitor_.messages_received_, 1);
EXPECT_TRUE(message->EqualFieldValues(*visitor_.last_message_));
EXPECT_TRUE(visitor_.end_of_message_);
EXPECT_EQ(visitor_.object_payload(), "foobardeadbeef");
EXPECT_FALSE(visitor_.parsing_error_.has_value());
}
TEST_F(MoqtMessageSpecificTest, ThreePartObjectFirstIncomplete) {
MoqtDataParser parser(&visitor_);
auto message = std::make_unique<StreamHeaderSubgroupMessage>();
EXPECT_TRUE(message->SetPayloadLength(50));
parser.ProcessData(message->PacketSample().substr(0, 4), false);
EXPECT_EQ(visitor_.messages_received_, 0);
message->set_wire_image_size(55);
parser.ProcessData(
message->PacketSample().substr(4, message->total_message_size() - 4),
false);
EXPECT_EQ(visitor_.messages_received_, 0);
EXPECT_TRUE(message->EqualFieldValues(*visitor_.last_message_));
EXPECT_FALSE(visitor_.end_of_message_);
EXPECT_EQ(visitor_.object_payload().length(), 47);
parser.ProcessData("bar", true);
EXPECT_EQ(visitor_.messages_received_, 1);
EXPECT_TRUE(message->EqualFieldValues(*visitor_.last_message_));
EXPECT_TRUE(visitor_.end_of_message_);
EXPECT_EQ(*visitor_.object_payloads_.crbegin(), "bar");
EXPECT_FALSE(visitor_.parsing_error_.has_value());
}
TEST_F(MoqtMessageSpecificTest, StreamHeaderSubgroupFollowOn) {
MoqtDataParser parser(&visitor_);
auto message1 = std::make_unique<StreamHeaderSubgroupMessage>();
parser.ProcessData(message1->PacketSample(), false);
EXPECT_EQ(visitor_.messages_received_, 1);
EXPECT_TRUE(message1->EqualFieldValues(*visitor_.last_message_));
EXPECT_TRUE(visitor_.end_of_message_);
EXPECT_EQ(visitor_.object_payload(), "foo");
EXPECT_FALSE(visitor_.parsing_error_.has_value());
visitor_.object_payloads_.clear();
auto message2 = std::make_unique<StreamMiddlerSubgroupMessage>();
parser.ProcessData(message2->PacketSample(), false);
EXPECT_EQ(visitor_.messages_received_, 2);
EXPECT_TRUE(message2->EqualFieldValues(*visitor_.last_message_));
EXPECT_TRUE(visitor_.end_of_message_);
EXPECT_EQ(visitor_.object_payload(), "bar");
EXPECT_FALSE(visitor_.parsing_error_.has_value());
}
TEST_F(MoqtMessageSpecificTest, StreamHeaderTrackFollowOn) {
MoqtDataParser parser(&visitor_);
auto message1 = std::make_unique<StreamHeaderTrackMessage>();
parser.ProcessData(message1->PacketSample(), false);
EXPECT_EQ(visitor_.messages_received_, 1);
EXPECT_TRUE(message1->EqualFieldValues(*visitor_.last_message_));
EXPECT_TRUE(visitor_.end_of_message_);
EXPECT_EQ(visitor_.object_payload(), "foo");
EXPECT_FALSE(visitor_.parsing_error_.has_value());
visitor_.object_payloads_.clear();
auto message2 = std::make_unique<StreamMiddlerTrackMessage>();
parser.ProcessData(message2->PacketSample(), false);
EXPECT_EQ(visitor_.messages_received_, 2);
EXPECT_TRUE(message2->EqualFieldValues(*visitor_.last_message_));
EXPECT_TRUE(visitor_.end_of_message_);
EXPECT_EQ(visitor_.object_payload(), "bar");
EXPECT_FALSE(visitor_.parsing_error_.has_value());
}
TEST_F(MoqtMessageSpecificTest, ClientSetupRoleIsInvalid) {
MoqtControlParser parser(kRawQuic, visitor_);
char setup[] = {
      0x40, 0x40, 0x02, 0x01, 0x02,  // type = CLIENT_SETUP, versions = {1, 2}
      0x03,                          // parameter count
      0x00, 0x01, 0x04,              // role = 0x04 (out of range)
      0x01, 0x03, 0x66, 0x6f, 0x6f,  // path = "foo"
  };
parser.ProcessData(absl::string_view(setup, sizeof(setup)), false);
EXPECT_EQ(visitor_.messages_received_, 0);
EXPECT_TRUE(visitor_.parsing_error_.has_value());
EXPECT_EQ(*visitor_.parsing_error_, "Invalid ROLE parameter");
EXPECT_EQ(visitor_.parsing_error_code_, MoqtError::kProtocolViolation);
}
TEST_F(MoqtMessageSpecificTest, ServerSetupRoleIsInvalid) {
MoqtControlParser parser(kRawQuic, visitor_);
char setup[] = {
0x40, 0x41, 0x01,
0x01,
0x00, 0x01, 0x04,
0x01, 0x03, 0x66, 0x6f, 0x6f
};
parser.ProcessData(absl::string_view(setup, sizeof(setup)), false);
EXPECT_EQ(visitor_.messages_received_, 0);
EXPECT_TRUE(visitor_.parsing_error_.has_value());
EXPECT_EQ(*visitor_.parsing_error_, "Invalid ROLE parameter");
EXPECT_EQ(visitor_.parsing_error_code_, MoqtError::kProtocolViolation);
}
TEST_F(MoqtMessageSpecificTest, SetupRoleAppearsTwice) {
MoqtControlParser parser(kRawQuic, visitor_);
char setup[] = {
0x40, 0x40, 0x02, 0x01, 0x02,
0x03,
0x00, 0x01, 0x03,
0x00, 0x01, 0x03,
0x01, 0x03, 0x66, 0x6f, 0x6f
};
parser.ProcessData(absl::string_view(setup, sizeof(setup)), false);
EXPECT_EQ(visitor_.messages_received_, 0);
EXPECT_TRUE(visitor_.parsing_error_.has_value());
EXPECT_EQ(*visitor_.parsing_error_, "ROLE parameter appears twice in SETUP");
EXPECT_EQ(visitor_.parsing_error_code_, MoqtError::kProtocolViolation);
}
TEST_F(MoqtMessageSpecificTest, ClientSetupRoleIsMissing) {
MoqtControlParser parser(kRawQuic, visitor_);
char setup[] = {
0x40, 0x40, 0x02, 0x01, 0x02,
0x01,
0x01, 0x03, 0x66, 0x6f, 0x6f,
};
parser.ProcessData(absl::string_view(setup, sizeof(setup)), false);
EXPECT_EQ(visitor_.messages_received_, 0);
EXPECT_TRUE(visitor_.parsing_error_.has_value());
EXPECT_EQ(*visitor_.parsing_error_,
"ROLE parameter missing from CLIENT_SETUP message");
EXPECT_EQ(visitor_.parsing_error_code_, MoqtError::kProtocolViolation);
}
TEST_F(MoqtMessageSpecificTest, ClientSetupMaxSubscribeIdAppearsTwice) {
MoqtControlParser parser(kRawQuic, visitor_);
char setup[] = {
0x40, 0x40, 0x02, 0x01, 0x02,
0x04,
0x00, 0x01, 0x03,
0x01, 0x03, 0x66, 0x6f, 0x6f,
0x02, 0x01, 0x32,
0x02, 0x01, 0x32,
};
parser.ProcessData(absl::string_view(setup, sizeof(setup)), false);
EXPECT_EQ(visitor_.messages_received_, 0);
EXPECT_TRUE(visitor_.parsing_error_.has_value());
EXPECT_EQ(*visitor_.parsing_error_,
"MAX_SUBSCRIBE_ID parameter appears twice in SETUP");
EXPECT_EQ(visitor_.parsing_error_code_, MoqtError::kProtocolViolation);
}
TEST_F(MoqtMessageSpecificTest, ServerSetupRoleIsMissing) {
MoqtControlParser parser(kRawQuic, visitor_);
char setup[] = {
0x40, 0x41, 0x01, 0x00,
};
parser.ProcessData(absl::string_view(setup, sizeof(setup)), false);
EXPECT_EQ(visitor_.messages_received_, 0);
EXPECT_TRUE(visitor_.parsing_error_.has_value());
EXPECT_EQ(*visitor_.parsing_error_,
"ROLE parameter missing from SERVER_SETUP message");
EXPECT_EQ(visitor_.parsing_error_code_, MoqtError::kProtocolViolation);
}
TEST_F(MoqtMessageSpecificTest, SetupRoleVarintLengthIsWrong) {
MoqtControlParser parser(kRawQuic, visitor_);
char setup[] = {
0x40, 0x40,
0x02, 0x01, 0x02,
0x02,
0x00, 0x02, 0x03,
0x01, 0x03, 0x66, 0x6f, 0x6f
};
parser.ProcessData(absl::string_view(setup, sizeof(setup)), false);
EXPECT_EQ(visitor_.messages_received_, 0);
EXPECT_TRUE(visitor_.parsing_error_.has_value());
EXPECT_EQ(*visitor_.parsing_error_,
"Parameter length does not match varint encoding");
EXPECT_EQ(visitor_.parsing_error_code_, MoqtError::kParameterLengthMismatch);
}
TEST_F(MoqtMessageSpecificTest, SetupPathFromServer) {
MoqtControlParser parser(kRawQuic, visitor_);
char setup[] = {
0x40, 0x41,
0x01,
0x01,
0x01, 0x03, 0x66, 0x6f, 0x6f,
};
parser.ProcessData(absl::string_view(setup, sizeof(setup)), false);
EXPECT_EQ(visitor_.messages_received_, 0);
EXPECT_TRUE(visitor_.parsing_error_.has_value());
EXPECT_EQ(*visitor_.parsing_error_, "PATH parameter in SERVER_SETUP");
EXPECT_EQ(visitor_.parsing_error_code_, MoqtError::kProtocolViolation);
}
TEST_F(MoqtMessageSpecificTest, SetupPathAppearsTwice) {
MoqtControlParser parser(kRawQuic, visitor_);
char setup[] = {
0x40, 0x40, 0x02, 0x01, 0x02,
0x03,
0x00, 0x01, 0x03,
0x01, 0x03, 0x66, 0x6f, 0x6f,
0x01, 0x03, 0x66, 0x6f, 0x6f,
};
parser.ProcessData(absl::string_view(setup, sizeof(setup)), false);
EXPECT_EQ(visitor_.messages_received_, 0);
EXPECT_TRUE(visitor_.parsing_error_.has_value());
EXPECT_EQ(*visitor_.parsing_error_,
"PATH parameter appears twice in CLIENT_SETUP");
EXPECT_EQ(visitor_.parsing_error_code_, MoqtError::kProtocolViolation);
}
TEST_F(MoqtMessageSpecificTest, SetupPathOverWebtrans) {
MoqtControlParser parser(kWebTrans, visitor_);
char setup[] = {
0x40, 0x40, 0x02, 0x01, 0x02,
0x02,
0x00, 0x01, 0x03,
0x01, 0x03, 0x66, 0x6f, 0x6f,
};
parser.ProcessData(absl::string_view(setup, sizeof(setup)), false);
EXPECT_EQ(visitor_.messages_received_, 0);
EXPECT_TRUE(visitor_.parsing_error_.has_value());
EXPECT_EQ(*visitor_.parsing_error_,
"WebTransport connection is using PATH parameter in SETUP");
EXPECT_EQ(visitor_.parsing_error_code_, MoqtError::kProtocolViolation);
}
TEST_F(MoqtMessageSpecificTest, SetupPathMissing) {
MoqtControlParser parser(kRawQuic, visitor_);
char setup[] = {
0x40, 0x40, 0x02, 0x01, 0x02,
0x01,
0x00, 0x01, 0x03,
};
parser.ProcessData(absl::string_view(setup, sizeof(setup)), false);
EXPECT_EQ(visitor_.messages_received_, 0);
EXPECT_TRUE(visitor_.parsing_error_.has_value());
EXPECT_EQ(*visitor_.parsing_error_,
"PATH SETUP parameter missing from Client message over QUIC");
EXPECT_EQ(visitor_.parsing_error_code_, MoqtError::kProtocolViolation);
}
TEST_F(MoqtMessageSpecificTest, ServerSetupMaxSubscribeIdAppearsTwice) {
MoqtControlParser parser(kRawQuic, visitor_);
char setup[] = {
0x40, 0x40, 0x02, 0x01, 0x02,
0x04,
0x00, 0x01, 0x03,
0x01, 0x03, 0x66, 0x6f, 0x6f,
0x02, 0x01, 0x32,
0x02, 0x01, 0x32,
};
parser.ProcessData(absl::string_view(setup, sizeof(setup)), false);
EXPECT_EQ(visitor_.messages_received_, 0);
EXPECT_TRUE(visitor_.parsing_error_.has_value());
EXPECT_EQ(*visitor_.parsing_error_,
"MAX_SUBSCRIBE_ID parameter appears twice in SETUP");
EXPECT_EQ(visitor_.parsing_error_code_, MoqtError::kProtocolViolation);
}
TEST_F(MoqtMessageSpecificTest, SubscribeAuthorizationInfoTwice) {
MoqtControlParser parser(kWebTrans, visitor_);
char subscribe[] = {
      0x03, 0x01, 0x02,              // type, subscribe_id, track_alias
      0x01, 0x03, 0x66, 0x6f, 0x6f,  // track_namespace = {"foo"}
      0x04, 0x61, 0x62, 0x63, 0x64,  // track_name = "abcd"
      0x20, 0x02,                    // subscriber_priority, group_order
      0x02,                          // filter_type = kLatestObject
      0x02,                          // two parameters
      0x02, 0x03, 0x62, 0x61, 0x72,  // authorization_info = "bar"
      0x02, 0x03, 0x62, 0x61, 0x72,  // authorization_info = "bar" (again)
  };
parser.ProcessData(absl::string_view(subscribe, sizeof(subscribe)), false);
EXPECT_EQ(visitor_.messages_received_, 0);
EXPECT_EQ(visitor_.parsing_error_,
"AUTHORIZATION_INFO parameter appears twice");
EXPECT_EQ(visitor_.parsing_error_code_, MoqtError::kProtocolViolation);
}
TEST_F(MoqtMessageSpecificTest, SubscribeDeliveryTimeoutTwice) {
MoqtControlParser parser(kRawQuic, visitor_);
char subscribe[] = {
0x03, 0x01, 0x02, 0x01, 0x03,
0x66, 0x6f, 0x6f,
0x04, 0x61, 0x62, 0x63, 0x64,
0x20, 0x02,
0x02,
0x02,
0x03, 0x02, 0x67, 0x10,
0x03, 0x02, 0x67, 0x10,
};
parser.ProcessData(absl::string_view(subscribe, sizeof(subscribe)), false);
EXPECT_EQ(visitor_.messages_received_, 0);
EXPECT_EQ(visitor_.parsing_error_,
"DELIVERY_TIMEOUT parameter appears twice");
EXPECT_EQ(visitor_.parsing_error_code_, MoqtError::kProtocolViolation);
}
TEST_F(MoqtMessageSpecificTest, SubscribeDeliveryTimeoutMalformed) {
MoqtControlParser parser(kRawQuic, visitor_);
char subscribe[] = {
0x03, 0x01, 0x02, 0x01, 0x03,
0x66, 0x6f, 0x6f,
0x04, 0x61, 0x62, 0x63, 0x64,
0x20, 0x02,
0x02,
0x01,
0x03, 0x01, 0x67, 0x10,
};
parser.ProcessData(absl::string_view(subscribe, sizeof(subscribe)), false);
EXPECT_EQ(visitor_.messages_received_, 0);
EXPECT_EQ(visitor_.parsing_error_,
"Parameter length does not match varint encoding");
EXPECT_EQ(visitor_.parsing_error_code_, MoqtError::kParameterLengthMismatch);
}
TEST_F(MoqtMessageSpecificTest, SubscribeMaxCacheDurationTwice) {
MoqtControlParser parser(kRawQuic, visitor_);
char subscribe[] = {
0x03, 0x01, 0x02, 0x01, 0x03,
0x66, 0x6f, 0x6f,
0x04, 0x61, 0x62, 0x63, 0x64,
0x20, 0x02,
0x02,
0x02,
0x04, 0x02, 0x67, 0x10,
0x04, 0x02, 0x67, 0x10,
};
parser.ProcessData(absl::string_view(subscribe, sizeof(subscribe)), false);
EXPECT_EQ(visitor_.messages_received_, 0);
EXPECT_EQ(visitor_.parsing_error_,
"MAX_CACHE_DURATION parameter appears twice");
EXPECT_EQ(visitor_.parsing_error_code_, MoqtError::kProtocolViolation);
}
TEST_F(MoqtMessageSpecificTest, SubscribeMaxCacheDurationMalformed) {
MoqtControlParser parser(kRawQuic, visitor_);
char subscribe[] = {
0x03, 0x01, 0x02, 0x01, 0x03,
0x66, 0x6f, 0x6f,
0x04, 0x61, 0x62, 0x63, 0x64,
0x20, 0x02,
0x02,
0x01,
0x04, 0x01, 0x67, 0x10,
};
parser.ProcessData(absl::string_view(subscribe, sizeof(subscribe)), false);
EXPECT_EQ(visitor_.messages_received_, 0);
EXPECT_EQ(visitor_.parsing_error_,
"Parameter length does not match varint encoding");
EXPECT_EQ(visitor_.parsing_error_code_, MoqtError::kParameterLengthMismatch);
}
TEST_F(MoqtMessageSpecificTest, SubscribeOkHasAuthorizationInfo) {
MoqtControlParser parser(kWebTrans, visitor_);
char subscribe_ok[] = {
0x04, 0x01, 0x03,
0x02, 0x01,
0x0c, 0x14,
0x02,
0x03, 0x02, 0x67, 0x10,
0x02, 0x03, 0x62, 0x61, 0x72,
};
parser.ProcessData(absl::string_view(subscribe_ok, sizeof(subscribe_ok)),
false);
EXPECT_EQ(visitor_.messages_received_, 0);
EXPECT_EQ(visitor_.parsing_error_, "SUBSCRIBE_OK has authorization info");
EXPECT_EQ(visitor_.parsing_error_code_, MoqtError::kProtocolViolation);
}
TEST_F(MoqtMessageSpecificTest, SubscribeUpdateHasAuthorizationInfo) {
MoqtControlParser parser(kWebTrans, visitor_);
char subscribe_update[] = {
0x02, 0x02, 0x03, 0x01, 0x05, 0x06,
0xaa,
0x01,
0x02, 0x03, 0x62, 0x61, 0x72,
};
parser.ProcessData(
absl::string_view(subscribe_update, sizeof(subscribe_update)), false);
EXPECT_EQ(visitor_.messages_received_, 0);
EXPECT_EQ(visitor_.parsing_error_, "SUBSCRIBE_UPDATE has authorization info");
EXPECT_EQ(visitor_.parsing_error_code_, MoqtError::kProtocolViolation);
}
TEST_F(MoqtMessageSpecificTest, AnnounceAuthorizationInfoTwice) {
MoqtControlParser parser(kWebTrans, visitor_);
char announce[] = {
0x06, 0x01, 0x03, 0x66, 0x6f, 0x6f,
0x02,
0x02, 0x03, 0x62, 0x61, 0x72,
0x02, 0x03, 0x62, 0x61, 0x72,
};
parser.ProcessData(absl::string_view(announce, sizeof(announce)), false);
EXPECT_EQ(visitor_.messages_received_, 0);
EXPECT_TRUE(visitor_.parsing_error_.has_value());
EXPECT_EQ(*visitor_.parsing_error_,
"AUTHORIZATION_INFO parameter appears twice");
EXPECT_EQ(visitor_.parsing_error_code_, MoqtError::kProtocolViolation);
}
TEST_F(MoqtMessageSpecificTest, AnnounceHasDeliveryTimeout) {
MoqtControlParser parser(kWebTrans, visitor_);
char announce[] = {
0x06, 0x01, 0x03, 0x66, 0x6f, 0x6f,
0x02,
0x02, 0x03, 0x62, 0x61, 0x72,
0x03, 0x02, 0x67, 0x10,
};
parser.ProcessData(absl::string_view(announce, sizeof(announce)), false);
EXPECT_EQ(visitor_.messages_received_, 0);
EXPECT_TRUE(visitor_.parsing_error_.has_value());
EXPECT_EQ(*visitor_.parsing_error_, "ANNOUNCE has delivery timeout");
EXPECT_EQ(visitor_.parsing_error_code_, MoqtError::kProtocolViolation);
}
TEST_F(MoqtMessageSpecificTest, FinMidPayload) {
MoqtDataParser parser(&visitor_);
auto message = std::make_unique<StreamHeaderSubgroupMessage>();
parser.ProcessData(
message->PacketSample().substr(0, message->total_message_size() - 1),
true);
EXPECT_EQ(visitor_.messages_received_, 0);
EXPECT_EQ(visitor_.parsing_error_,
"FIN received at an unexpected point in the stream");
EXPECT_EQ(visitor_.parsing_error_code_, MoqtError::kProtocolViolation);
}
TEST_F(MoqtMessageSpecificTest, PartialPayloadThenFin) {
MoqtDataParser parser(&visitor_);
auto message = std::make_unique<StreamHeaderTrackMessage>();
parser.ProcessData(
message->PacketSample().substr(0, message->total_message_size() - 1),
false);
parser.ProcessData(absl::string_view(), true);
EXPECT_EQ(visitor_.messages_received_, 0);
EXPECT_EQ(visitor_.parsing_error_,
"FIN received at an unexpected point in the stream");
EXPECT_EQ(visitor_.parsing_error_code_, MoqtError::kProtocolViolation);
}
TEST_F(MoqtMessageSpecificTest, DataAfterFin) {
MoqtControlParser parser(kRawQuic, visitor_);
parser.ProcessData(absl::string_view(), true);
parser.ProcessData("foo", false);
EXPECT_EQ(visitor_.parsing_error_, "Data after end of stream");
EXPECT_EQ(visitor_.parsing_error_code_, MoqtError::kProtocolViolation);
}
TEST_F(MoqtMessageSpecificTest, InvalidObjectStatus) {
MoqtDataParser parser(&visitor_);
char stream_header_subgroup[] = {
      0x04,                    // stream type = kStreamHeaderSubgroup
      0x03, 0x04, 0x05, 0x08,  // subscribe_id, track_alias, group_id, subgroup_id
      0x07,                    // publisher_priority
      0x06, 0x00, 0x0f,        // object_id, payload_length = 0, status = 0x0f (invalid)
  };
parser.ProcessData(
absl::string_view(stream_header_subgroup, sizeof(stream_header_subgroup)),
false);
EXPECT_EQ(visitor_.parsing_error_, "Invalid object status provided");
EXPECT_EQ(visitor_.parsing_error_code_, MoqtError::kProtocolViolation);
}
TEST_F(MoqtMessageSpecificTest, Setup2KB) {
MoqtControlParser parser(kRawQuic, visitor_);
char big_message[2 * kMaxMessageHeaderSize];
quic::QuicDataWriter writer(sizeof(big_message), big_message);
writer.WriteVarInt62(static_cast<uint64_t>(MoqtMessageType::kServerSetup));
writer.WriteVarInt62(0x1);
writer.WriteVarInt62(0x1);
writer.WriteVarInt62(0xbeef);
writer.WriteVarInt62(kMaxMessageHeaderSize);
writer.WriteRepeatedByte(0x04, kMaxMessageHeaderSize);
parser.ProcessData(absl::string_view(big_message, writer.length() - 1),
false);
EXPECT_EQ(visitor_.messages_received_, 0);
EXPECT_TRUE(visitor_.parsing_error_.has_value());
EXPECT_EQ(*visitor_.parsing_error_, "Cannot parse non-OBJECT messages > 2KB");
EXPECT_EQ(visitor_.parsing_error_code_, MoqtError::kInternalError);
}
TEST_F(MoqtMessageSpecificTest, UnknownMessageType) {
MoqtControlParser parser(kRawQuic, visitor_);
char message[4];
quic::QuicDataWriter writer(sizeof(message), message);
writer.WriteVarInt62(0xbeef);
parser.ProcessData(absl::string_view(message, writer.length()), false);
EXPECT_EQ(visitor_.messages_received_, 0);
EXPECT_TRUE(visitor_.parsing_error_.has_value());
EXPECT_EQ(*visitor_.parsing_error_, "Unknown message type");
}
TEST_F(MoqtMessageSpecificTest, LatestGroup) {
MoqtControlParser parser(kRawQuic, visitor_);
char subscribe[] = {
0x03, 0x01, 0x02,
0x01, 0x03, 0x66, 0x6f, 0x6f,
0x04, 0x61, 0x62, 0x63, 0x64,
0x20, 0x02,
0x01,
0x01,
0x02, 0x03, 0x62, 0x61, 0x72,
};
parser.ProcessData(absl::string_view(subscribe, sizeof(subscribe)), false);
EXPECT_EQ(visitor_.messages_received_, 1);
ASSERT_TRUE(visitor_.last_message_.has_value());
MoqtSubscribe message =
std::get<MoqtSubscribe>(visitor_.last_message_.value());
EXPECT_FALSE(message.start_group.has_value());
EXPECT_EQ(message.start_object, 0);
EXPECT_FALSE(message.end_group.has_value());
EXPECT_FALSE(message.end_object.has_value());
}
TEST_F(MoqtMessageSpecificTest, LatestObject) {
MoqtControlParser parser(kRawQuic, visitor_);
char subscribe[] = {
0x03, 0x01, 0x02,
0x01, 0x03, 0x66, 0x6f, 0x6f,
0x04, 0x61, 0x62, 0x63, 0x64,
0x20, 0x02,
0x02,
0x01,
0x02, 0x03, 0x62, 0x61, 0x72,
};
parser.ProcessData(absl::string_view(subscribe, sizeof(subscribe)), false);
EXPECT_EQ(visitor_.messages_received_, 1);
EXPECT_FALSE(visitor_.parsing_error_.has_value());
MoqtSubscribe message =
std::get<MoqtSubscribe>(visitor_.last_message_.value());
EXPECT_FALSE(message.start_group.has_value());
EXPECT_FALSE(message.start_object.has_value());
EXPECT_FALSE(message.end_group.has_value());
EXPECT_FALSE(message.end_object.has_value());
}
TEST_F(MoqtMessageSpecificTest, InvalidDeliveryOrder) {
MoqtControlParser parser(kRawQuic, visitor_);
char subscribe[] = {
0x03, 0x01, 0x02,
0x01, 0x03, 0x66, 0x6f, 0x6f,
0x04, 0x61, 0x62, 0x63, 0x64,
0x20, 0x08,
0x01,
0x01,
0x02, 0x03, 0x62, 0x61, 0x72,
};
parser.ProcessData(absl::string_view(subscribe, sizeof(subscribe)), false);
EXPECT_EQ(visitor_.messages_received_, 0);
EXPECT_THAT(visitor_.parsing_error_, Optional(HasSubstr("group order")));
}
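// The SUBSCRIBE tests below vary the filter type byte: judging from the
// LatestGroup/LatestObject tests above, 0x03 appears to select an absolute
// start (explicit start group/object) and 0x04 an absolute range, where an
// end object of 0 means "the whole end group".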
TEST_F(MoqtMessageSpecificTest, AbsoluteStart) {
MoqtControlParser parser(kRawQuic, visitor_);
char subscribe[] = {
0x03, 0x01, 0x02,
0x01, 0x03, 0x66, 0x6f, 0x6f,
0x04, 0x61, 0x62, 0x63, 0x64,
0x20, 0x02,
0x03,
0x04,
0x01,
0x01,
0x02, 0x03, 0x62, 0x61, 0x72,
};
parser.ProcessData(absl::string_view(subscribe, sizeof(subscribe)), false);
EXPECT_EQ(visitor_.messages_received_, 1);
EXPECT_FALSE(visitor_.parsing_error_.has_value());
MoqtSubscribe message =
std::get<MoqtSubscribe>(visitor_.last_message_.value());
EXPECT_EQ(message.start_group.value(), 4);
EXPECT_EQ(message.start_object.value(), 1);
EXPECT_FALSE(message.end_group.has_value());
EXPECT_FALSE(message.end_object.has_value());
}
TEST_F(MoqtMessageSpecificTest, AbsoluteRangeExplicitEndObject) {
MoqtControlParser parser(kRawQuic, visitor_);
char subscribe[] = {
0x03, 0x01, 0x02,
0x01, 0x03, 0x66, 0x6f, 0x6f,
0x04, 0x61, 0x62, 0x63, 0x64,
0x20, 0x02,
0x04,
0x04,
0x01,
0x07,
0x03,
0x01,
0x02, 0x03, 0x62, 0x61, 0x72,
};
parser.ProcessData(absl::string_view(subscribe, sizeof(subscribe)), false);
EXPECT_EQ(visitor_.messages_received_, 1);
EXPECT_FALSE(visitor_.parsing_error_.has_value());
MoqtSubscribe message =
std::get<MoqtSubscribe>(visitor_.last_message_.value());
EXPECT_EQ(message.start_group.value(), 4);
EXPECT_EQ(message.start_object.value(), 1);
EXPECT_EQ(message.end_group.value(), 7);
EXPECT_EQ(message.end_object.value(), 2);
}
TEST_F(MoqtMessageSpecificTest, AbsoluteRangeWholeEndGroup) {
MoqtControlParser parser(kRawQuic, visitor_);
char subscribe[] = {
0x03, 0x01, 0x02,
0x01, 0x03, 0x66, 0x6f, 0x6f,
0x04, 0x61, 0x62, 0x63, 0x64,
0x20, 0x02,
0x04,
0x04,
0x01,
0x07,
0x00,
0x01,
0x02, 0x03, 0x62, 0x61, 0x72,
};
parser.ProcessData(absl::string_view(subscribe, sizeof(subscribe)), false);
EXPECT_EQ(visitor_.messages_received_, 1);
EXPECT_FALSE(visitor_.parsing_error_.has_value());
MoqtSubscribe message =
std::get<MoqtSubscribe>(visitor_.last_message_.value());
EXPECT_EQ(message.start_group.value(), 4);
EXPECT_EQ(message.start_object.value(), 1);
EXPECT_EQ(message.end_group.value(), 7);
EXPECT_FALSE(message.end_object.has_value());
}
TEST_F(MoqtMessageSpecificTest, AbsoluteRangeEndGroupTooLow) {
MoqtControlParser parser(kRawQuic, visitor_);
char subscribe[] = {
0x03, 0x01, 0x02,
0x01, 0x03, 0x66, 0x6f, 0x6f,
0x04, 0x61, 0x62, 0x63, 0x64,
0x20, 0x02,
0x04,
0x04,
0x01,
0x03,
0x00,
0x01,
0x02, 0x03, 0x62, 0x61, 0x72,
};
parser.ProcessData(absl::string_view(subscribe, sizeof(subscribe)), false);
EXPECT_EQ(visitor_.messages_received_, 0);
EXPECT_TRUE(visitor_.parsing_error_.has_value());
EXPECT_EQ(*visitor_.parsing_error_, "End group is less than start group");
}
TEST_F(MoqtMessageSpecificTest, AbsoluteRangeExactlyOneObject) {
MoqtControlParser parser(kRawQuic, visitor_);
char subscribe[] = {
0x03, 0x01, 0x02,
0x01, 0x03, 0x66, 0x6f, 0x6f,
0x04, 0x61, 0x62, 0x63, 0x64,
0x20, 0x02,
0x04,
0x04,
0x01,
0x04,
0x02,
0x00,
};
parser.ProcessData(absl::string_view(subscribe, sizeof(subscribe)), false);
EXPECT_EQ(visitor_.messages_received_, 1);
}
TEST_F(MoqtMessageSpecificTest, SubscribeUpdateExactlyOneObject) {
MoqtControlParser parser(kRawQuic, visitor_);
char subscribe_update[] = {
0x02, 0x02, 0x03, 0x01, 0x04, 0x07,
0x20,
0x00,
};
parser.ProcessData(
absl::string_view(subscribe_update, sizeof(subscribe_update)), false);
EXPECT_EQ(visitor_.messages_received_, 1);
}
TEST_F(MoqtMessageSpecificTest, SubscribeUpdateEndGroupTooLow) {
MoqtControlParser parser(kRawQuic, visitor_);
char subscribe_update[] = {
0x02, 0x02, 0x03, 0x01, 0x03, 0x06,
0x20,
0x01,
0x02, 0x03, 0x62, 0x61, 0x72,
};
parser.ProcessData(
absl::string_view(subscribe_update, sizeof(subscribe_update)), false);
EXPECT_EQ(visitor_.messages_received_, 0);
EXPECT_TRUE(visitor_.parsing_error_.has_value());
EXPECT_EQ(*visitor_.parsing_error_, "End group is less than start group");
}
TEST_F(MoqtMessageSpecificTest, AbsoluteRangeEndObjectTooLow) {
MoqtControlParser parser(kRawQuic, visitor_);
char subscribe[] = {
0x03, 0x01, 0x02,
0x01, 0x03, 0x66, 0x6f, 0x6f,
0x04, 0x61, 0x62, 0x63, 0x64,
0x20, 0x02,
0x04,
0x04,
0x01,
0x04,
0x01,
0x01,
0x02, 0x03, 0x62, 0x61, 0x72,
};
parser.ProcessData(absl::string_view(subscribe, sizeof(subscribe)), false);
EXPECT_EQ(visitor_.messages_received_, 0);
EXPECT_TRUE(visitor_.parsing_error_.has_value());
EXPECT_EQ(*visitor_.parsing_error_, "End object comes before start object");
}
TEST_F(MoqtMessageSpecificTest, SubscribeUpdateEndObjectTooLow) {
MoqtControlParser parser(kRawQuic, visitor_);
char subscribe_update[] = {
0x02, 0x02, 0x03, 0x02, 0x04, 0x01,
0xf0, 0x00,
};
parser.ProcessData(
absl::string_view(subscribe_update, sizeof(subscribe_update)), false);
EXPECT_EQ(visitor_.messages_received_, 0);
EXPECT_TRUE(visitor_.parsing_error_.has_value());
EXPECT_EQ(*visitor_.parsing_error_, "End object comes before start object");
}
TEST_F(MoqtMessageSpecificTest, SubscribeUpdateNoEndGroup) {
MoqtControlParser parser(kRawQuic, visitor_);
char subscribe_update[] = {
0x02, 0x02, 0x03, 0x02, 0x00, 0x01,
0x20,
0x00,
};
parser.ProcessData(
absl::string_view(subscribe_update, sizeof(subscribe_update)), false);
EXPECT_EQ(visitor_.messages_received_, 0);
EXPECT_TRUE(visitor_.parsing_error_.has_value());
EXPECT_EQ(*visitor_.parsing_error_,
"SUBSCRIBE_UPDATE has end_object but no end_group");
}
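// The delta-from-deadline in OBJECT_ACK appears to be a varint carrying the
// sign in its low bit: 0x40 0x81 is the two-byte varint 129 = (64 << 1) | 1,
// i.e. -64 microseconds.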
TEST_F(MoqtMessageSpecificTest, ObjectAckNegativeDelta) {
MoqtControlParser parser(kRawQuic, visitor_);
char object_ack[] = {
0x71, 0x84,
0x01, 0x10, 0x20,
0x40, 0x81,
};
parser.ProcessData(absl::string_view(object_ack, sizeof(object_ack)), false);
EXPECT_EQ(visitor_.parsing_error_, std::nullopt);
ASSERT_EQ(visitor_.messages_received_, 1);
MoqtObjectAck message =
std::get<MoqtObjectAck>(visitor_.last_message_.value());
EXPECT_EQ(message.subscribe_id, 0x01);
EXPECT_EQ(message.group_id, 0x10);
EXPECT_EQ(message.object_id, 0x20);
EXPECT_EQ(message.delta_from_deadline,
quic::QuicTimeDelta::FromMicroseconds(-0x40));
}
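// Streams every known control message through one parser, always cutting
// the input off halfway through the current message, so each ProcessData
// call both completes the previous message and leaves a partial one
// buffered; the final FIN flushes the last message.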
TEST_F(MoqtMessageSpecificTest, AllMessagesTogether) {
char buffer[5000];
MoqtControlParser parser(kRawQuic, visitor_);
size_t write = 0;
size_t read = 0;
int fully_received = 0;
std::unique_ptr<TestMessageBase> prev_message = nullptr;
for (MoqtMessageType type : kMessageTypes) {
std::unique_ptr<TestMessageBase> message =
CreateTestMessage(type, kRawQuic);
memcpy(buffer + write, message->PacketSample().data(),
message->total_message_size());
size_t new_read = write + message->total_message_size() / 2;
parser.ProcessData(absl::string_view(buffer + read, new_read - read),
false);
EXPECT_EQ(visitor_.messages_received_, fully_received);
if (prev_message != nullptr) {
EXPECT_TRUE(prev_message->EqualFieldValues(*visitor_.last_message_));
}
fully_received++;
read = new_read;
write += message->total_message_size();
prev_message = std::move(message);
}
parser.ProcessData(absl::string_view(buffer + read, write - read), true);
EXPECT_EQ(visitor_.messages_received_, fully_received);
EXPECT_TRUE(prev_message->EqualFieldValues(*visitor_.last_message_));
EXPECT_FALSE(visitor_.parsing_error_.has_value());
}
TEST_F(MoqtMessageSpecificTest, DatagramSuccessful) {
ObjectDatagramMessage message;
MoqtObject object;
absl::string_view payload = ParseDatagram(message.PacketSample(), object);
TestMessageBase::MessageStructuredData object_metadata =
TestMessageBase::MessageStructuredData(object);
EXPECT_TRUE(message.EqualFieldValues(object_metadata));
EXPECT_EQ(payload, "foo");
}
TEST_F(MoqtMessageSpecificTest, WrongMessageInDatagram) {
StreamHeaderSubgroupMessage message;
MoqtObject object;
absl::string_view payload = ParseDatagram(message.PacketSample(), object);
EXPECT_TRUE(payload.empty());
}
TEST_F(MoqtMessageSpecificTest, TruncatedDatagram) {
ObjectDatagramMessage message;
message.set_wire_image_size(4);
MoqtObject object;
absl::string_view payload = ParseDatagram(message.PacketSample(), object);
EXPECT_TRUE(payload.empty());
}
TEST_F(MoqtMessageSpecificTest, VeryTruncatedDatagram) {
char message = 0x40;
MoqtObject object;
absl::string_view payload =
ParseDatagram(absl::string_view(&message, sizeof(message)), object);
EXPECT_TRUE(payload.empty());
}
TEST_F(MoqtMessageSpecificTest, SubscribeOkInvalidContentExists) {
MoqtControlParser parser(kRawQuic, visitor_);
SubscribeOkMessage subscribe_ok;
subscribe_ok.SetInvalidContentExists();
parser.ProcessData(subscribe_ok.PacketSample(), false);
EXPECT_EQ(visitor_.messages_received_, 0);
EXPECT_TRUE(visitor_.parsing_error_.has_value());
EXPECT_EQ(*visitor_.parsing_error_,
"SUBSCRIBE_OK ContentExists has invalid value");
}
TEST_F(MoqtMessageSpecificTest, SubscribeOkInvalidDeliveryOrder) {
MoqtControlParser parser(kRawQuic, visitor_);
SubscribeOkMessage subscribe_ok;
subscribe_ok.SetInvalidDeliveryOrder();
parser.ProcessData(subscribe_ok.PacketSample(), false);
EXPECT_EQ(visitor_.messages_received_, 0);
EXPECT_TRUE(visitor_.parsing_error_.has_value());
EXPECT_EQ(*visitor_.parsing_error_,
"Invalid group order value in SUBSCRIBE_OK");
}
TEST_F(MoqtMessageSpecificTest, SubscribeDoneInvalidContentExists) {
MoqtControlParser parser(kRawQuic, visitor_);
SubscribeDoneMessage subscribe_done;
subscribe_done.SetInvalidContentExists();
parser.ProcessData(subscribe_done.PacketSample(), false);
EXPECT_EQ(visitor_.messages_received_, 0);
EXPECT_TRUE(visitor_.parsing_error_.has_value());
EXPECT_EQ(*visitor_.parsing_error_,
"SUBSCRIBE_DONE ContentExists has invalid value");
}
TEST_F(MoqtMessageSpecificTest, PaddingStream) {
MoqtDataParser parser(&visitor_);
std::string buffer(32, '\0');
quic::QuicDataWriter writer(buffer.size(), buffer.data());
ASSERT_TRUE(writer.WriteVarInt62(
static_cast<uint64_t>(MoqtDataStreamType::kPadding)));
for (int i = 0; i < 100; ++i) {
parser.ProcessData(buffer, false);
ASSERT_EQ(visitor_.messages_received_, 0);
ASSERT_EQ(visitor_.parsing_error_, std::nullopt);
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/moqt/moqt_parser.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/moqt/moqt_parser_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
6511299d-fed9-47da-995d-cdf6bba91657 | cpp | google/quiche | qpack_decoder_stream_sender | quiche/quic/core/qpack/qpack_decoder_stream_sender.cc | quiche/quic/core/qpack/qpack_decoder_stream_sender_test.cc | #include "quiche/quic/core/qpack/qpack_decoder_stream_sender.h"
#include <cstddef>
#include <limits>
#include <string>
#include <utility>
#include "absl/strings/string_view.h"
#include "quiche/quic/core/qpack/qpack_instructions.h"
#include "quiche/quic/platform/api/quic_flag_utils.h"
#include "quiche/quic/platform/api/quic_logging.h"
namespace quic {
QpackDecoderStreamSender::QpackDecoderStreamSender()
: delegate_(nullptr),
instruction_encoder_(HuffmanEncoding::kEnabled) {}
void QpackDecoderStreamSender::SendInsertCountIncrement(uint64_t increment) {
instruction_encoder_.Encode(
QpackInstructionWithValues::InsertCountIncrement(increment), &buffer_);
}
void QpackDecoderStreamSender::SendHeaderAcknowledgement(
QuicStreamId stream_id) {
instruction_encoder_.Encode(
QpackInstructionWithValues::HeaderAcknowledgement(stream_id), &buffer_);
}
void QpackDecoderStreamSender::SendStreamCancellation(QuicStreamId stream_id) {
instruction_encoder_.Encode(
QpackInstructionWithValues::StreamCancellation(stream_id), &buffer_);
}
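// Flush writes everything buffered since the last flush in a single
// WriteStreamData call; swapping into a local string first keeps buffer_
// consistent even if the delegate re-enters the sender.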
void QpackDecoderStreamSender::Flush() {
if (buffer_.empty() || delegate_ == nullptr) {
return;
}
std::string copy;
std::swap(copy, buffer_);
delegate_->WriteStreamData(copy);
}
} | #include "quiche/quic/core/qpack/qpack_decoder_stream_sender.h"
#include <string>
#include "absl/strings/escaping.h"
#include "quiche/quic/platform/api/quic_test.h"
#include "quiche/quic/test_tools/qpack/qpack_test_utils.h"
using ::testing::Eq;
using ::testing::StrictMock;
namespace quic {
namespace test {
namespace {
class QpackDecoderStreamSenderTest : public QuicTest {
protected:
QpackDecoderStreamSenderTest() {
stream_.set_qpack_stream_sender_delegate(&delegate_);
}
~QpackDecoderStreamSenderTest() override = default;
StrictMock<MockQpackStreamSenderDelegate> delegate_;
QpackDecoderStreamSender stream_;
};
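// The expected hex strings follow the QPACK prefixed-integer encoding
// (RFC 9204): Insert Count Increment uses the '00' pattern with a 6-bit
// prefix, Header Acknowledgement '1' with a 7-bit prefix, and Stream
// Cancellation '01' with a 6-bit prefix. For example, an increment of 200
// saturates the 6-bit prefix (0x3f) and encodes the remainder
// 200 - 63 = 137 as 0x89 0x01, giving "3f8901".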
TEST_F(QpackDecoderStreamSenderTest, InsertCountIncrement) {
std::string stream_data;
ASSERT_TRUE(absl::HexStringToBytes("00", &stream_data));
EXPECT_CALL(delegate_, WriteStreamData(Eq(stream_data)));
stream_.SendInsertCountIncrement(0);
stream_.Flush();
ASSERT_TRUE(absl::HexStringToBytes("0a", &stream_data));
EXPECT_CALL(delegate_, WriteStreamData(Eq(stream_data)));
stream_.SendInsertCountIncrement(10);
stream_.Flush();
ASSERT_TRUE(absl::HexStringToBytes("3f00", &stream_data));
EXPECT_CALL(delegate_, WriteStreamData(Eq(stream_data)));
stream_.SendInsertCountIncrement(63);
stream_.Flush();
ASSERT_TRUE(absl::HexStringToBytes("3f8901", &stream_data));
EXPECT_CALL(delegate_, WriteStreamData(Eq(stream_data)));
stream_.SendInsertCountIncrement(200);
stream_.Flush();
}
TEST_F(QpackDecoderStreamSenderTest, HeaderAcknowledgement) {
std::string stream_data;
ASSERT_TRUE(absl::HexStringToBytes("80", &stream_data));
EXPECT_CALL(delegate_, WriteStreamData(Eq(stream_data)));
stream_.SendHeaderAcknowledgement(0);
stream_.Flush();
ASSERT_TRUE(absl::HexStringToBytes("a5", &stream_data));
EXPECT_CALL(delegate_, WriteStreamData(Eq(stream_data)));
stream_.SendHeaderAcknowledgement(37);
stream_.Flush();
ASSERT_TRUE(absl::HexStringToBytes("ff00", &stream_data));
EXPECT_CALL(delegate_, WriteStreamData(Eq(stream_data)));
stream_.SendHeaderAcknowledgement(127);
stream_.Flush();
ASSERT_TRUE(absl::HexStringToBytes("fff802", &stream_data));
EXPECT_CALL(delegate_, WriteStreamData(Eq(stream_data)));
stream_.SendHeaderAcknowledgement(503);
stream_.Flush();
}
TEST_F(QpackDecoderStreamSenderTest, StreamCancellation) {
std::string stream_data;
ASSERT_TRUE(absl::HexStringToBytes("40", &stream_data));
EXPECT_CALL(delegate_, WriteStreamData(Eq(stream_data)));
stream_.SendStreamCancellation(0);
stream_.Flush();
ASSERT_TRUE(absl::HexStringToBytes("53", &stream_data));
EXPECT_CALL(delegate_, WriteStreamData(Eq(stream_data)));
stream_.SendStreamCancellation(19);
stream_.Flush();
ASSERT_TRUE(absl::HexStringToBytes("7f00", &stream_data));
EXPECT_CALL(delegate_, WriteStreamData(Eq(stream_data)));
stream_.SendStreamCancellation(63);
stream_.Flush();
ASSERT_TRUE(absl::HexStringToBytes("7f2f", &stream_data));
EXPECT_CALL(delegate_, WriteStreamData(Eq(stream_data)));
stream_.SendStreamCancellation(110);
stream_.Flush();
}
TEST_F(QpackDecoderStreamSenderTest, Coalesce) {
std::string stream_data;
stream_.SendInsertCountIncrement(10);
stream_.SendHeaderAcknowledgement(37);
stream_.SendStreamCancellation(0);
ASSERT_TRUE(absl::HexStringToBytes("0aa540", &stream_data));
EXPECT_CALL(delegate_, WriteStreamData(Eq(stream_data)));
stream_.Flush();
stream_.SendInsertCountIncrement(63);
stream_.SendStreamCancellation(110);
ASSERT_TRUE(absl::HexStringToBytes("3f007f2f", &stream_data));
EXPECT_CALL(delegate_, WriteStreamData(Eq(stream_data)));
stream_.Flush();
}
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/qpack/qpack_decoder_stream_sender.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/qpack/qpack_decoder_stream_sender_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
a08fa36a-6348-4f9f-89fa-8d02736d352a | cpp | tensorflow/tensorflow | export | tensorflow/compiler/mlir/tfrt/utils/export.cc | tensorflow/lite/toco/tflite/export_test.cc | #include "tensorflow/compiler/mlir/tfrt/utils/export.h"
#include <memory>
#include <utility>
#include "absl/functional/any_invocable.h"
#include "absl/status/status.h"
#include "absl/strings/string_view.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/Pass/PassManager.h"
#include "mlir/Support/LogicalResult.h"
#include "tensorflow/compiler/mlir/tensorflow/transforms/passes.h"
#include "tensorflow/compiler/mlir/tensorflow/translate/mlir_roundtrip_flags.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/error_util.h"
#include "tensorflow/compiler/mlir/tf2xla/api/v1/tf_dialect_to_executor.h"
#include "tensorflow/compiler/mlir/tf2xla/api/v2/tf_executor_to_graph.h"
#include "tensorflow/core/framework/function.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/profiler/lib/traceme.h"
namespace tensorflow {
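// Lowers the module from the TF dialect to executor form, breaks up
// islands, then converts each func.FuncOp into a FunctionDef that is handed
// to `callback`.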
absl::Status ExportFunctionDefs(
mlir::ModuleOp module,
absl::AnyInvocable<absl::Status(tensorflow::FunctionDef)> callback,
bool export_tf_original_func_name) {
tsl::profiler::TraceMe traceme([&]() {
return tsl::profiler::TraceMeEncode(
"ExportFunctionDefs",
{{"module_name", absl::string_view(module.getName().value_or("?"))}});
});
TF_RETURN_IF_ERROR(
tensorflow::tf2xla::v1::ExportFromTensorflowDialectToExecutor(module));
{
mlir::StatusScopedDiagnosticHandler diag_handler(module.getContext());
mlir::PassManager pm(module.getContext());
pm.addPass(mlir::CreateBreakUpIslandsPass());
if (mlir::failed(pm.run(module))) {
return diag_handler.ConsumeStatus();
}
}
tensorflow::GraphExportConfig configs;
configs.export_original_tf_func_name = export_tf_original_func_name;
for (auto func : module.getOps<mlir::func::FuncOp>()) {
tensorflow::FunctionDef function_def;
TF_RETURN_IF_ERROR(
tensorflow::tf2xla::v2::ConvertMlirFunctionToFunctionLibraryDef(
func, configs, &function_def));
TF_RETURN_IF_ERROR(callback(std::move(function_def)));
}
return absl::OkStatus();
}
} | #include "tensorflow/lite/toco/tflite/export.h"
#include <algorithm>
#include <initializer_list>
#include <memory>
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/log/log.h"
#include "flatbuffers/buffer.h"
#include "flatbuffers/flatbuffer_builder.h"
#include "tensorflow/compiler/mlir/lite/schema/schema_utils.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/lite/schema/schema_generated.h"
#include "tensorflow/lite/toco/model.h"
#include "tensorflow/lite/toco/tflite/builtin_operator.h"
#include "tensorflow/lite/toco/tflite/operator.h"
#include "tensorflow/lite/toco/tflite/types.h"
#include "tsl/protobuf/error_codes.pb.h"
namespace toco {
namespace tflite {
namespace {
using ::testing::ElementsAre;
using ::testing::HasSubstr;
class ExportTest : public ::testing::Test {
protected:
void ResetOperators() { input_model_.operators.clear(); }
void AddTensorsByName(std::initializer_list<std::string> names) {
for (const std::string& name : names) {
input_model_.GetOrCreateArray(name);
}
}
void AddOperatorsByName(std::initializer_list<std::string> names) {
for (const std::string& name : names) {
if (name == "Conv") {
auto* op = new ConvOperator;
op->padding.type = PaddingType::kSame;
op->inputs = {"input", "filter"};
op->outputs = {"output"};
Array& input_array = input_model_.GetOrCreateArray(op->inputs[0]);
Array& filter_array = input_model_.GetOrCreateArray(op->inputs[1]);
Array& output_array = input_model_.GetOrCreateArray(op->outputs[0]);
input_array.data_type = ArrayDataType::kFloat;
filter_array.data_type = ArrayDataType::kFloat;
output_array.data_type = ArrayDataType::kFloat;
input_model_.operators.emplace_back(op);
} else if (name == "Add") {
auto* op = new AddOperator;
op->inputs = {"input1", "input2"};
op->outputs = {"output"};
Array& input1_array = input_model_.GetOrCreateArray(op->inputs[0]);
Array& input2_array = input_model_.GetOrCreateArray(op->inputs[1]);
Array& output_array = input_model_.GetOrCreateArray(op->outputs[0]);
input1_array.data_type = ArrayDataType::kFloat;
input2_array.data_type = ArrayDataType::kFloat;
output_array.data_type = ArrayDataType::kFloat;
input_model_.operators.emplace_back(op);
} else if (name == "Sub") {
auto* op = new SubOperator;
op->inputs = {"input1", "input2"};
op->outputs = {"output"};
Array& input1_array = input_model_.GetOrCreateArray(op->inputs[0]);
Array& input2_array = input_model_.GetOrCreateArray(op->inputs[1]);
Array& output_array = input_model_.GetOrCreateArray(op->outputs[0]);
input1_array.data_type = ArrayDataType::kFloat;
input2_array.data_type = ArrayDataType::kFloat;
output_array.data_type = ArrayDataType::kFloat;
input1_array.copy_shape({1, 2, 2, 2});
input2_array.copy_shape({1, 2, 2, 2});
output_array.copy_shape({1, 2, 2, 2});
input_model_.operators.emplace_back(op);
} else if (name == "Assert") {
auto* op = new TensorFlowAssertOperator;
::tensorflow::NodeDef node_def;
node_def.set_name("Assert");
node_def.set_op("Assert");
node_def.SerializeToString(&op->tensorflow_node_def);
input_model_.operators.emplace_back(op);
} else {
auto* op = new TensorFlowUnsupportedOperator;
op->tensorflow_op = name;
input_model_.operators.emplace_back(op);
}
}
}
void BuildQuantizableTestModel() {
input_model_.GetOrCreateArray("inputs");
Array& weight_array = input_model_.GetOrCreateArray("weights");
int buf_size = 1296;
auto weight_buf = std::make_unique<float[]>(buf_size);
for (int i = 0; i < buf_size; i++) {
weight_buf[i] = static_cast<float>(i % 128);
}
weight_array.data_type = ArrayDataType::kFloat;
Shape* weight_array_shape = weight_array.mutable_shape();
std::vector<int>* weight_array_shape_dim =
weight_array_shape->mutable_dims();
weight_array_shape_dim->resize(4, 6);
auto& weight_array_buffer =
weight_array.GetMutableBuffer<ArrayDataType::kFloat>();
weight_array_buffer.data.resize(buf_size);
float* buf_ptr =
weight_array.GetMutableBuffer<ArrayDataType::kFloat>().data.data();
std::copy(weight_buf.get(), weight_buf.get() + buf_size, buf_ptr);
{
auto* op = new ConvOperator;
op->padding.type = PaddingType::kSame;
op->inputs = {"inputs", "weights"};
op->outputs = {"output"};
Array& input_array = input_model_.GetArray(op->inputs[0]);
Array& filter_array = input_model_.GetArray(op->inputs[1]);
Array& output_array = input_model_.GetOrCreateArray(op->outputs[0]);
input_array.data_type = ArrayDataType::kFloat;
filter_array.data_type = ArrayDataType::kFloat;
output_array.data_type = ArrayDataType::kFloat;
input_model_.operators.emplace_back(op);
}
{
auto* op = new AddOperator;
op->inputs = {"input1", "input2"};
op->outputs = {"output"};
Array& input1_array = input_model_.GetOrCreateArray(op->inputs[0]);
Array& input2_array = input_model_.GetOrCreateArray(op->inputs[1]);
Array& output_array = input_model_.GetOrCreateArray(op->outputs[0]);
input1_array.data_type = ArrayDataType::kFloat;
input2_array.data_type = ArrayDataType::kFloat;
output_array.data_type = ArrayDataType::kFloat;
input_model_.operators.emplace_back(op);
}
}
tensorflow::Status ExportAndReturnStatus(const ExportParams& params) {
std::string result;
return Export(input_model_, &result, params);
}
std::vector<std::string> ExportAndSummarizeOperators(
const ExportParams& params) {
std::vector<std::string> names;
std::string result;
auto status = Export(input_model_, &result, params);
if (!status.ok()) {
LOG(INFO) << status.message();
return names;
}
auto* model = ::tflite::GetModel(result.data());
for (const ::tflite::OperatorCode* opcode : *model->operator_codes()) {
auto builtin_code = GetBuiltinCode(opcode);
if (builtin_code != ::tflite::BuiltinOperator_CUSTOM) {
names.push_back(std::string("builtin:") +
::tflite::EnumNameBuiltinOperator(builtin_code));
} else {
names.push_back(std::string("custom:") +
opcode->custom_code()->c_str());
}
}
return names;
}
std::vector<uint32_t> ExportAndGetOperatorIndices(
const ExportParams& params) {
std::vector<uint32_t> indices;
std::string result;
if (!Export(input_model_, &result, params).ok()) return indices;
auto* model = ::tflite::GetModel(result.data());
auto operators = (*model->subgraphs())[0]->operators();
for (const auto* op : *operators) {
indices.push_back(op->opcode_index());
}
return indices;
}
Model input_model_;
};
TEST_F(ExportTest, LoadTensorsMap) {
AddTensorsByName({"tensor_one", "tensor_two"});
details::TensorsMap tensors;
details::LoadTensorsMap(input_model_, &tensors);
EXPECT_EQ(0, tensors["tensor_one"]);
EXPECT_EQ(1, tensors["tensor_two"]);
}
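// LoadOperatorsMap appears to assign indices in sorted OperatorKey order
// (builtin enum value first, then custom code), not in model order -- hence
// ADD before CONV_2D below.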
TEST_F(ExportTest, LoadOperatorsMap) {
AddOperatorsByName({"Conv", "Add", "MyCrazyOp", "Sub"});
details::OperatorsMap operators;
const auto ops_by_type = BuildOperatorByTypeMap();
details::LoadOperatorsMap(input_model_, &operators, ops_by_type, false);
EXPECT_EQ(
0, operators[details::OperatorKey(::tflite::BuiltinOperator_ADD, "", 1)]);
EXPECT_EQ(1, operators[details::OperatorKey(::tflite::BuiltinOperator_CONV_2D,
"", 1)]);
EXPECT_EQ(2, operators[details::OperatorKey(::tflite::BuiltinOperator_CUSTOM,
"MyCrazyOp", 1)]);
EXPECT_EQ(
3, operators[details::OperatorKey(::tflite::BuiltinOperator_SUB, "", 1)]);
}
TEST_F(ExportTest, UnsupportedFunctionality) {
AddOperatorsByName({"Conv"});
ExportParams params;
params.allow_dynamic_tensors = false;
auto status = ExportAndReturnStatus(params);
EXPECT_EQ(status.code(), ::tensorflow::error::UNIMPLEMENTED);
EXPECT_THAT(status.message(),
HasSubstr("Unsupported flag: allow_dynamic_tensors."));
}
TEST_F(ExportTest, Export) {
AddOperatorsByName({"Conv", "Add", "MyCrazyOp", "Sub"});
ExportParams params;
params.allow_custom_ops = true;
params.enable_select_tf_ops = false;
params.quantize_weights = QuantizedBufferType::NONE;
EXPECT_THAT(ExportAndSummarizeOperators(params),
ElementsAre("builtin:ADD", "builtin:CONV_2D", "custom:MyCrazyOp",
"builtin:SUB"));
EXPECT_THAT(ExportAndGetOperatorIndices(params), ElementsAre(1, 0, 2, 3));
}
TEST_F(ExportTest, ExportMinRuntime) {
AddOperatorsByName({"Conv", "Add", "Sub"});
ExportParams params;
params.allow_custom_ops = true;
params.enable_select_tf_ops = false;
params.quantize_weights = QuantizedBufferType::NONE;
std::string output;
auto status = Export(input_model_, &output, params);
auto* model = ::tflite::GetModel(output.data());
EXPECT_EQ(model->metadata()->size(), 1);
EXPECT_EQ(model->metadata()->Get(0)->name()->str(), "min_runtime_version");
auto buf = model->metadata()->Get(0)->buffer();
auto* buffer = (*model->buffers())[buf];
auto* array = buffer->data();
EXPECT_EQ(reinterpret_cast<const char*>(array->data()), std::string("1.6.0"));
}
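// With only unknown/custom ops in the model there is no builtin version
// requirement, so the min_runtime_version metadata entry is present but
// empty.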
TEST_F(ExportTest, ExportEmptyMinRuntime) {
AddOperatorsByName({"Switch", "MyCustomOp", "Assert"});
ExportParams params;
params.allow_custom_ops = true;
std::string output;
auto status = Export(input_model_, &output, params);
auto* model = ::tflite::GetModel(output.data());
EXPECT_EQ(model->metadata()->size(), 1);
EXPECT_EQ(model->metadata()->Get(0)->name()->str(), "min_runtime_version");
auto buf = model->metadata()->Get(0)->buffer();
auto* buffer = (*model->buffers())[buf];
auto* array = buffer->data();
EXPECT_EQ(reinterpret_cast<const char*>(array->data()), std::string(""));
}
TEST_F(ExportTest, UnsupportedControlFlowErrors) {
AddOperatorsByName({"Conv", "Add", "Switch", "Merge"});
ExportParams params;
params.allow_custom_ops = false;
std::string output;
const auto ops_by_type = BuildOperatorByTypeMap();
auto status = Export(input_model_, &output, params, ops_by_type);
EXPECT_EQ(status.message(),
"We are continually in the process of adding support to TensorFlow "
"Lite for more ops. It would be helpful if you could inform us of "
"how this conversion went by opening a github issue at "
"https:
"new?template=40-tflite-op-request.md\n and pasting the "
"following:\n\nTensorFlow Lite currently doesn't support control "
"flow ops: Merge, Switch. We are working on supporting control "
"flow ops, please see github issue at "
"https:
}
TEST_F(ExportTest, UnsupportedOpsAndNeedEnableFlex) {
AddOperatorsByName({"Conv", "Add", "BatchNormWithGlobalNormalization"});
ExportParams params;
params.allow_custom_ops = false;
params.enable_select_tf_ops = false;
std::string output;
const auto ops_by_type = BuildOperatorByTypeMap();
auto status = Export(input_model_, &output, params, ops_by_type);
EXPECT_EQ(
status.message(),
"We are continually in the process of adding support to TensorFlow Lite "
"for more ops. It would be helpful if you could inform us of how this "
"conversion went by opening a github issue at "
"https:
"new?template=40-tflite-op-request.md\n and pasting the "
"following:\n\nSome of the operators in the model are not supported by "
"the standard TensorFlow Lite runtime. If those are native TensorFlow "
"operators, you might be able to use the extended runtime by passing "
"--enable_select_tf_ops, or by setting "
"target_ops=TFLITE_BUILTINS,SELECT_TF_OPS when calling "
"tf.lite.TFLiteConverter(). Otherwise, if you have a custom "
"implementation for them you can disable this error with "
"--allow_custom_ops, or by setting allow_custom_ops=True when calling "
"tf.lite.TFLiteConverter(). Here is a list of builtin operators you are "
"using: ADD, CONV_2D. Here is a list of operators for which you will "
"need custom implementations: BatchNormWithGlobalNormalization.");
}
TEST_F(ExportTest, UnsupportedOpsNeedCustomImplementation) {
AddOperatorsByName({"Conv", "Add", "MyCustomOp1", "MyCustomOp2"});
ExportParams params;
params.allow_custom_ops = false;
params.enable_select_tf_ops = true;
std::string output;
const auto ops_by_type = BuildOperatorByTypeMap();
auto status = Export(input_model_, &output, params, ops_by_type);
EXPECT_EQ(
status.message(),
"We are continually in the process of adding support to TensorFlow Lite "
"for more ops. It would be helpful if you could inform us of how this "
"conversion went by opening a github issue at "
"https:
"new?template=40-tflite-op-request.md\n and pasting the "
"following:\n\nSome of the operators in the model are not supported by "
"the standard TensorFlow Lite runtime and are not recognized by "
"TensorFlow. If you have a custom implementation for them you can "
"disable this error with --allow_custom_ops, or by setting "
"allow_custom_ops=True when calling tf.lite.TFLiteConverter(). Here is a "
"list of builtin operators you are using: ADD, CONV_2D. Here is a list "
"of operators for which you will need custom implementations: "
"MyCustomOp1, MyCustomOp2.");
}
TEST_F(ExportTest, UnsupportedControlFlowAndCustomOpsErrors) {
AddOperatorsByName(
{"Conv", "Add", "Switch", "Merge", "MyCustomOp1", "MyCustomOp2"});
ExportParams params;
params.allow_custom_ops = false;
std::string output;
const auto ops_by_type = BuildOperatorByTypeMap();
auto status = Export(input_model_, &output, params, ops_by_type);
EXPECT_EQ(
status.message(),
"We are continually in the process of adding support to TensorFlow Lite "
"for more ops. It would be helpful if you could inform us of how this "
"conversion went by opening a github issue at "
"https:
"new?template=40-tflite-op-request.md\n and pasting the "
"following:\n\nTensorFlow Lite currently doesn't support control flow "
"ops: Merge, Switch. We are working on supporting control flow ops, "
"please see github issue at "
"https:
"operators in the model are not supported by the standard TensorFlow "
"Lite runtime. If those are native TensorFlow operators, you might be "
"able to use the extended runtime by passing --enable_select_tf_ops, or "
"by setting target_ops=TFLITE_BUILTINS,SELECT_TF_OPS when calling "
"tf.lite.TFLiteConverter(). Otherwise, if you have a custom "
"implementation for them you can disable this error with "
"--allow_custom_ops, or by setting allow_custom_ops=True when calling "
"tf.lite.TFLiteConverter(). Here is a list of builtin operators you are "
"using: ADD, CONV_2D. Here is a list of operators for which you will "
"need custom implementations: MyCustomOp1, MyCustomOp2.");
}
TEST_F(ExportTest, QuantizeWeights) {
BuildQuantizableTestModel();
std::string unquantized_result;
Export(input_model_, true, false, &unquantized_result);
BuildQuantizableTestModel();
std::string quantized_result;
Export(input_model_, true, true, &quantized_result);
EXPECT_LT(quantized_result.size(), unquantized_result.size());
}
class OpSetsTest : public ExportTest {
public:
enum OpSet { kTfLiteBuiltins, kSelectTfOps, kCustomOps };
void SetAllowedOpSets(std::initializer_list<OpSet> sets) {
import_all_ops_as_unsupported_ = true;
params_.allow_custom_ops = false;
params_.enable_select_tf_ops = false;
params_.quantize_weights = QuantizedBufferType::NONE;
for (const OpSet& i : sets) {
switch (i) {
case kTfLiteBuiltins:
import_all_ops_as_unsupported_ = false;
break;
case kSelectTfOps:
params_.enable_select_tf_ops = true;
break;
case kCustomOps:
params_.allow_custom_ops = true;
break;
}
}
}
std::vector<std::string> ImportExport(
std::initializer_list<std::string> op_names) {
ResetOperators();
if (!import_all_ops_as_unsupported_) {
AddOperatorsByName(op_names);
} else {
for (const std::string& name : op_names) {
auto* op = new TensorFlowUnsupportedOperator;
op->tensorflow_op = name;
input_model_.operators.emplace_back(op);
}
}
return ExportAndSummarizeOperators(params_);
}
private:
bool import_all_ops_as_unsupported_;
ExportParams params_;
};
TEST_F(OpSetsTest, BuiltinsOnly) {
SetAllowedOpSets({kTfLiteBuiltins});
EXPECT_THAT(ImportExport({"Add", "AdjustHue", "UnrollAndFold", "Assert"}),
ElementsAre());
EXPECT_THAT(ImportExport({"Add"}), ElementsAre("builtin:ADD"));
SetAllowedOpSets({kTfLiteBuiltins, kCustomOps});
EXPECT_THAT(ImportExport({"Add", "AdjustHue", "UnrollAndFold", "Assert"}),
ElementsAre("builtin:ADD", "custom:AdjustHue", "custom:Assert",
"custom:UnrollAndFold"));
}
TEST_F(OpSetsTest, TfSelectOnly) {
SetAllowedOpSets({kSelectTfOps});
EXPECT_THAT(ImportExport({"Add", "AdjustHue", "RandomUniform",
"UnrollAndFold", "Assert"}),
ElementsAre());
EXPECT_THAT(ImportExport({"Add"}), ElementsAre("custom:FlexAdd"));
SetAllowedOpSets({kSelectTfOps, kCustomOps});
EXPECT_THAT(
ImportExport(
{"Add", "AdjustHue", "RandomUniform", "UnrollAndFold", "Assert"}),
ElementsAre("custom:FlexAdd", "custom:FlexAdjustHue", "custom:FlexAssert",
"custom:FlexRandomUniform", "custom:UnrollAndFold"));
}
TEST_F(OpSetsTest, BuiltinsAndTfSelect) {
SetAllowedOpSets({kTfLiteBuiltins, kSelectTfOps});
EXPECT_THAT(ImportExport({"Add", "AdjustHue", "UnrollAndFold", "Assert"}),
ElementsAre());
EXPECT_THAT(ImportExport({"Add", "RandomUniform"}),
ElementsAre("builtin:ADD", "custom:FlexRandomUniform"));
SetAllowedOpSets({kTfLiteBuiltins, kSelectTfOps, kCustomOps});
EXPECT_THAT(
ImportExport(
{"Add", "AdjustHue", "RandomUniform", "UnrollAndFold", "Assert"}),
ElementsAre("builtin:ADD", "custom:FlexAdjustHue", "custom:FlexAssert",
"custom:FlexRandomUniform", "custom:UnrollAndFold"));
}
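// A stand-in CONV_2D operator whose GetVersion reports 2 whenever dilation
// is used, letting the versioned-export tests below verify that distinct op
// versions get distinct operator_codes entries.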
class FakeConvolutionOperator
: public BuiltinOperator<ConvOperator, ::tflite::Conv2DOptions,
::tflite::BuiltinOptions_Conv2DOptions> {
public:
FakeConvolutionOperator()
: BuiltinOperator(::tflite::BuiltinOperator_CONV_2D,
OperatorType::kConv) {}
int GetVersion(const OperatorSignature& op_signature) const override {
const TocoOperator& conv_op =
static_cast<const TocoOperator&>(*op_signature.op);
if (conv_op.dilation_width_factor != 1 ||
conv_op.dilation_height_factor != 1) {
return 2;
}
return 1;
}
flatbuffers::Offset<TfLiteOptions> WriteOptions(
const TocoOperator& op,
flatbuffers::FlatBufferBuilder* builder) const override {
auto padding = Padding::Serialize(op.padding.type);
auto activation_function =
ActivationFunction::Serialize(op.fused_activation_function);
return ::tflite::CreateConv2DOptions(*builder, padding, op.stride_width,
op.stride_height, activation_function,
op.dilation_width_factor,
op.dilation_height_factor);
}
void ReadOptions(const TfLiteOptions& options,
TocoOperator* op) const override {
op->padding.type = Padding::Deserialize(options.padding());
op->stride_width = options.stride_w();
op->stride_height = options.stride_h();
op->dilation_width_factor = options.dilation_w_factor();
op->dilation_height_factor = options.dilation_h_factor();
op->fused_activation_function =
ActivationFunction::Deserialize(options.fused_activation_function());
}
};
class VersionedOpExportTest : public ::testing::Test {
protected:
void SetUp() override {
input_model_.GetOrCreateArray("input");
input_model_.GetOrCreateArray("filter");
input_model_.GetOrCreateArray("output");
}
void AddConvOp(bool use_dilation) {
{
auto* op = new ConvOperator;
op->inputs.push_back("input");
op->inputs.push_back("filter");
op->outputs.push_back("output");
op->padding.type = PaddingType::kSame;
op->stride_width = 1;
op->stride_height = 1;
if (use_dilation) {
op->dilation_width_factor = 2;
op->dilation_height_factor = 2;
} else {
op->dilation_width_factor = 1;
op->dilation_height_factor = 1;
}
input_model_.operators.emplace_back(op);
}
}
std::map<OperatorType, std::unique_ptr<BaseOperator>>
BuildFakeOperatorByTypeMap() {
std::map<OperatorType, std::unique_ptr<BaseOperator>> result;
result[OperatorType::kConv] =
std::unique_ptr<BaseOperator>(new FakeConvolutionOperator);
return result;
}
Model input_model_;
};
TEST_F(VersionedOpExportTest, LoadOperatorsMapWithOpV1) {
AddConvOp(false);
details::OperatorsMap operators;
const auto ops_by_type = BuildFakeOperatorByTypeMap();
details::LoadOperatorsMap(input_model_, &operators, ops_by_type, false);
EXPECT_EQ(1, operators.size());
EXPECT_EQ(0, operators.at(details::OperatorKey(
::tflite::BuiltinOperator_CONV_2D, "", 1)));
}
TEST_F(VersionedOpExportTest, LoadOperatorsMapWithOpV2) {
AddConvOp(true);
details::OperatorsMap operators;
const auto ops_by_type = BuildFakeOperatorByTypeMap();
details::LoadOperatorsMap(input_model_, &operators, ops_by_type, false);
EXPECT_EQ(1, operators.size());
EXPECT_EQ(0, operators.at(details::OperatorKey(
::tflite::BuiltinOperator_CONV_2D, "", 2)));
}
TEST_F(VersionedOpExportTest, LoadOperatorsMapWithBothVersions) {
AddConvOp(false);
AddConvOp(true);
details::OperatorsMap operators;
const auto ops_by_type = BuildFakeOperatorByTypeMap();
details::LoadOperatorsMap(input_model_, &operators, ops_by_type, false);
EXPECT_EQ(2, operators.size());
EXPECT_EQ(0, operators.at(details::OperatorKey(
::tflite::BuiltinOperator_CONV_2D, "", 1)));
EXPECT_EQ(1, operators.at(details::OperatorKey(
::tflite::BuiltinOperator_CONV_2D, "", 2)));
}
TEST_F(VersionedOpExportTest, Export) {
AddConvOp(false);
AddConvOp(true);
std::string result;
const auto ops_by_type = BuildFakeOperatorByTypeMap();
Export(input_model_, true, false, &result, ops_by_type);
auto* model = ::tflite::GetModel(result.data());
auto operator_codes = model->operator_codes();
EXPECT_EQ(2, operator_codes->size());
EXPECT_EQ(::tflite::BuiltinOperator_CONV_2D,
GetBuiltinCode((*operator_codes)[0]));
EXPECT_EQ(1, (*operator_codes)[0]->version());
EXPECT_EQ(::tflite::BuiltinOperator_CONV_2D,
GetBuiltinCode((*operator_codes)[1]));
EXPECT_EQ(2, (*operator_codes)[1]->version());
auto operators = (*model->subgraphs())[0]->operators();
EXPECT_EQ(2, operators->size());
EXPECT_EQ(0, (*operators)[0]->opcode_index());
EXPECT_EQ(1, (*operators)[1]->opcode_index());
}
TEST(OperatorKeyTest, TestBuiltinOp) {
Model model;
auto op = std::make_unique<ConvOperator>();
op->inputs = {"input", "filter"};
op->outputs = {"output"};
Array& input_array = model.GetOrCreateArray(op->inputs[0]);
Array& filter_array = model.GetOrCreateArray(op->inputs[1]);
Array& output_array = model.GetOrCreateArray(op->outputs[0]);
input_array.data_type = ArrayDataType::kFloat;
filter_array.data_type = ArrayDataType::kFloat;
output_array.data_type = ArrayDataType::kFloat;
const auto ops_by_type = BuildOperatorByTypeMap();
const toco::OperatorSignature op_signature = {op.get(), &model};
const auto key = details::OperatorKey(op_signature, ops_by_type, false);
EXPECT_EQ(key.type(), ::tflite::BuiltinOperator_CONV_2D);
EXPECT_EQ(key.custom_code(), "");
EXPECT_EQ(key.version(), 1);
}
TEST(OperatorKeyTest, TestBuiltinOpWithVersionedInputTypes) {
Model model;
auto op = std::make_unique<DequantizeOperator>();
op->inputs = {"input"};
op->outputs = {"output"};
Array& input_array = model.GetOrCreateArray(op->inputs[0]);
Array& output_array = model.GetOrCreateArray(op->outputs[0]);
input_array.data_type = ArrayDataType::kInt8;
output_array.data_type = ArrayDataType::kFloat;
const auto ops_by_type = BuildOperatorByTypeMap();
const toco::OperatorSignature op_signature = {op.get(), &model};
const auto key = details::OperatorKey(op_signature, ops_by_type, false);
EXPECT_EQ(key.type(), ::tflite::BuiltinOperator_DEQUANTIZE);
EXPECT_EQ(key.custom_code(), "");
EXPECT_EQ(key.version(), 2);
}
TEST(OperatorKeyTest, TestCustomOp) {
Model model;
auto op = std::make_unique<TensorFlowUnsupportedOperator>();
op->tensorflow_op = "MyCrazyCustomOp";
const auto ops_by_type = BuildOperatorByTypeMap();
const toco::OperatorSignature op_signature = {op.get(), &model};
const auto key = details::OperatorKey(op_signature, ops_by_type, false);
EXPECT_EQ(key.type(), ::tflite::BuiltinOperator_CUSTOM);
EXPECT_EQ(key.custom_code(), "MyCrazyCustomOp");
EXPECT_EQ(key.version(), 1);
}
TEST(OperatorKeyTest, TestFlexOp) {
Model model;
auto op = std::make_unique<TensorFlowUnsupportedOperator>();
op->tensorflow_op = "BatchMatMul";
const auto ops_by_type = BuildOperatorByTypeMap();
{
const toco::OperatorSignature op_signature = {op.get(), &model};
const auto key = details::OperatorKey(op_signature, ops_by_type, false);
EXPECT_EQ(key.type(), ::tflite::BuiltinOperator_CUSTOM);
EXPECT_EQ(key.custom_code(), "BatchMatMul");
EXPECT_EQ(key.version(), 1);
EXPECT_TRUE(key.is_custom_op());
EXPECT_FALSE(key.is_flex_op());
}
{
const toco::OperatorSignature op_signature = {op.get(), &model};
const auto key = details::OperatorKey(op_signature, ops_by_type, true);
EXPECT_EQ(key.type(), ::tflite::BuiltinOperator_CUSTOM);
EXPECT_EQ(key.custom_code(), "FlexBatchMatMul");
EXPECT_EQ(key.version(), 1);
EXPECT_FALSE(key.is_custom_op());
EXPECT_TRUE(key.is_flex_op());
}
}
TEST(OperatorKeyTest, TestFlexWithControlFlowOp) {
Model model;
auto op = std::make_unique<TensorFlowUnsupportedOperator>();
op->tensorflow_op = "Merge";
const auto ops_by_type = BuildOperatorByTypeMap();
const toco::OperatorSignature op_signature = {op.get(), &model};
const auto key = details::OperatorKey(op_signature, ops_by_type, true);
EXPECT_EQ(key.type(), ::tflite::BuiltinOperator_CUSTOM);
EXPECT_EQ(key.custom_code(), "FlexMerge");
EXPECT_EQ(key.version(), 1);
EXPECT_FALSE(key.is_custom_op());
EXPECT_TRUE(key.is_flex_op());
EXPECT_TRUE(key.is_unsupported_flex_op());
}
TEST(OperatorKeyTest, TestFlexWithUnsupportedOp) {
Model model;
auto op = std::make_unique<TensorFlowUnsupportedOperator>();
op->tensorflow_op = "UnsupportedOp";
const auto ops_by_type = BuildOperatorByTypeMap();
const toco::OperatorSignature op_signature = {op.get(), &model};
const auto key = details::OperatorKey(op_signature, ops_by_type, true);
EXPECT_EQ(key.type(), ::tflite::BuiltinOperator_CUSTOM);
EXPECT_EQ(key.custom_code(), "UnsupportedOp");
EXPECT_EQ(key.version(), 1);
EXPECT_FALSE(key.is_flex_op());
EXPECT_FALSE(key.is_unsupported_flex_op());
}
TEST(OperatorKeyTest, TestFlexWithPartiallySupportedOps) {
Model model;
auto op = std::make_unique<TensorFlowAssertOperator>();
const auto ops_by_type = BuildOperatorByTypeMap();
{
const toco::OperatorSignature op_signature = {op.get(), &model};
const auto key = details::OperatorKey(op_signature, ops_by_type, true);
EXPECT_EQ(key.type(), ::tflite::BuiltinOperator_CUSTOM);
EXPECT_EQ(key.custom_code(), "Assert");
EXPECT_EQ(key.version(), 1);
EXPECT_TRUE(key.is_custom_op());
EXPECT_FALSE(key.is_flex_op());
}
::tensorflow::NodeDef node_def;
node_def.set_name("TensorFlowAssert");
node_def.set_op("TensorFlowAssert");
node_def.SerializeToString(&op->tensorflow_node_def);
{
const toco::OperatorSignature op_signature = {op.get(), &model};
const auto key = details::OperatorKey(op_signature, ops_by_type, true);
EXPECT_EQ(key.type(), ::tflite::BuiltinOperator_CUSTOM);
EXPECT_EQ(key.custom_code(), "FlexAssert");
EXPECT_EQ(key.version(), 1);
EXPECT_FALSE(key.is_custom_op());
EXPECT_TRUE(key.is_flex_op());
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tfrt/utils/export.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/toco/tflite/export_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
27547f78-99c7-49cb-aa5f-2a5f143d52a0 | cpp | tensorflow/tensorflow | rocm_executor | third_party/xla/xla/stream_executor/rocm/rocm_executor.cc | third_party/xla/xla/stream_executor/rocm/rocm_executor_test.cc | #include "xla/stream_executor/rocm/rocm_executor.h"
#include <unistd.h>
#include <cstddef>
#include <cstdint>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <variant>
#include "absl/base/casts.h"
#include "absl/functional/any_invocable.h"
#include "absl/numeric/int128.h"
#include "absl/status/status.h"
#include "absl/strings/ascii.h"
#include "absl/strings/numbers.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_join.h"
#include "absl/strings/string_view.h"
#include "absl/synchronization/mutex.h"
#include "absl/types/span.h"
#include "rocm/include/hip/hip_runtime.h"
#include "rocm/include/hip/hip_version.h"
#include "rocm/rocm_config.h"
#include "xla/stream_executor/blas.h"
#include "xla/stream_executor/command_buffer.h"
#include "xla/stream_executor/device_description.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/stream_executor/dnn.h"
#include "xla/stream_executor/event.h"
#include "xla/stream_executor/event_based_timer.h"
#include "xla/stream_executor/fft.h"
#include "xla/stream_executor/gpu/context.h"
#include "xla/stream_executor/gpu/gpu_command_buffer.h"
#include "xla/stream_executor/gpu/gpu_diagnostics.h"
#include "xla/stream_executor/gpu/gpu_driver.h"
#include "xla/stream_executor/gpu/gpu_event.h"
#include "xla/stream_executor/gpu/gpu_executor.h"
#include "xla/stream_executor/gpu/gpu_kernel.h"
#include "xla/stream_executor/gpu/gpu_stream.h"
#include "xla/stream_executor/gpu/gpu_timer.h"
#include "xla/stream_executor/gpu/gpu_types.h"
#include "xla/stream_executor/gpu/read_numa_node.h"
#include "xla/stream_executor/integrations/device_mem_allocator.h"
#include "xla/stream_executor/kernel.h"
#include "xla/stream_executor/kernel_spec.h"
#include "xla/stream_executor/launch_dim.h"
#include "xla/stream_executor/module_spec.h"
#include "xla/stream_executor/platform.h"
#include "xla/stream_executor/platform/initialize.h"
#include "xla/stream_executor/plugin_registry.h"
#include "xla/stream_executor/rocm/rocm_diagnostics.h"
#include "xla/stream_executor/rocm/rocm_driver.h"
#include "xla/stream_executor/rocm/rocm_driver_wrapper.h"
#include "xla/stream_executor/rocm/rocm_event.h"
#include "xla/stream_executor/rocm/rocm_kernel.h"
#include "xla/stream_executor/rocm/rocm_platform_id.h"
#include "xla/stream_executor/rocm/rocm_runtime.h"
#include "xla/stream_executor/rocm/rocm_version_parser.h"
#include "xla/stream_executor/semantic_version.h"
#include "xla/stream_executor/stream.h"
#include "xla/stream_executor/stream_executor.h"
#include "tsl/platform/env.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/fingerprint.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/statusor.h"
#define RETURN_IF_ROCM_ERROR(expr, ...) \
do { \
hipError_t _res = (expr); \
if (TF_PREDICT_FALSE(_res != hipSuccess)) { \
if (_res == hipErrorOutOfMemory) \
return absl::ResourceExhaustedError(absl::StrCat( \
__VA_ARGS__, ":", ::stream_executor::gpu::ToString(_res))); \
else \
return absl::InternalError(absl::StrCat( \
__VA_ARGS__, ": ", ::stream_executor::gpu::ToString(_res))); \
} \
} while (0)
namespace stream_executor {
namespace gpu {
static hipDeviceptr_t AsROCmDevicePtr(const DeviceMemoryBase& gpu_mem) {
return const_cast<hipDeviceptr_t>(gpu_mem.opaque());
}
static hipDeviceptr_t AsROCmDevicePtr(DeviceMemoryBase* gpu_mem) {
return AsROCmDevicePtr(*gpu_mem);
}
RocmExecutor::~RocmExecutor() {
for (auto& it : disk_modules_) {
GpuDriver::UnloadModule(gpu_context(), it.second);
}
for (auto& it : in_memory_modules_) {
GpuDriver::UnloadModule(gpu_context(), it.second);
}
if (gpu_context() != nullptr) {
GpuDriver::DestroyContext(gpu_context());
}
CHECK(kernel_to_gpu_binary_.empty()) << "GpuExecutor has live kernels.";
CHECK(gpu_binary_to_module_.empty()) << "GpuExecutor has loaded modules.";
}
bool RocmExecutor::UnloadModule(ModuleHandle module_handle) {
const char* gpu_binary = reinterpret_cast<const char*>(module_handle.id());
absl::MutexLock lock{&in_memory_modules_mu_};
return UnloadGpuBinary(gpu_binary);
}
namespace {
absl::uint128 Fingerprint128(const absl::string_view s) {
auto fp = tsl::Fingerprint128(s);
return absl::MakeUint128(fp.high64, fp.low64);
}
int fpus_per_core(std::string gcn_arch_name) {
int n = 128;
if (gcn_arch_name.substr(0, 6) == "gfx906") {
n = 64;
}
return n;
}
absl::Status FuncGetAttribute(hipFunction_attribute attribute,
hipFunction_t func, int* attribute_value) {
RETURN_IF_ROCM_ERROR(
wrap::hipFuncGetAttribute(attribute_value, attribute, func),
"Failed to query kernel attribute: ", attribute);
return absl::OkStatus();
}
}
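// Constants are deduplicated by a 128-bit fingerprint of their contents; a
// weak_ptr cache lets identical constants share one device allocation while
// still freeing it once every user drops its shared_ptr.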
absl::StatusOr<std::shared_ptr<DeviceMemoryBase>>
RocmExecutor::CreateOrShareConstant(Stream* stream,
absl::Span<const uint8_t> content) {
absl::MutexLock lock{&shared_constants_mu_};
absl::uint128 fingerprint = Fingerprint128(absl::string_view(
reinterpret_cast<const char*>(content.data()), content.size()));
auto insert_result = shared_constants_.insert(
{fingerprint, std::weak_ptr<DeviceMemoryBase>()});
auto it = insert_result.first;
bool was_already_in_cache = !insert_result.second;
std::shared_ptr<DeviceMemoryBase> shared_constant;
if (was_already_in_cache) {
shared_constant = it->second.lock();
}
if (shared_constant == nullptr) {
DeviceMemoryBase* new_constant =
new DeviceMemoryBase(Allocate(content.size(), 0));
if (new_constant->opaque() == nullptr) {
return absl::InternalError(absl::StrFormat(
"Failed to allocate %d bytes for new constant", content.size()));
}
TF_RETURN_IF_ERROR(
stream->Memcpy(new_constant, content.data(), content.size()));
absl::Status status = stream->BlockHostUntilDone();
if (!status.ok()) {
Deallocate(new_constant);
status.Update(absl::InternalError(absl::StrFormat(
"Memcpy to device address %p failed", new_constant->opaque())));
return status;
}
shared_constant = std::shared_ptr<DeviceMemoryBase>(
new_constant, [this](DeviceMemoryBase* p) {
Deallocate(p);
delete p;
});
it->second = std::weak_ptr<DeviceMemoryBase>(shared_constant);
}
return shared_constant;
}
absl::StatusOr<std::unique_ptr<EventBasedTimer>>
RocmExecutor::CreateEventBasedTimer(GpuStream* stream, bool use_delay_kernel) {
TF_ASSIGN_OR_RETURN(auto start_event, CreateGpuEvent(true));
TF_ASSIGN_OR_RETURN(auto stop_event, CreateGpuEvent(true));
TF_RETURN_IF_ERROR(start_event->Record(stream->gpu_stream()));
return std::make_unique<GpuTimer>(gpu_context(), std::move(start_event),
std::move(stop_event), stream);
}
bool RocmExecutor::UnloadGpuBinary(const void* gpu_binary) {
auto module_it = gpu_binary_to_module_.find(gpu_binary);
if (gpu_binary_to_module_.end() == module_it) {
VLOG(3) << "No loaded HSACO module for " << gpu_binary;
return false;
}
auto& module = module_it->second.first;
auto& refcount = module_it->second.second;
VLOG(3) << "Found HSACO module " << module << " with refcount " << refcount;
if (--refcount == 0) {
VLOG(3) << "Unloading HSACO module " << module;
GpuDriver::UnloadModule(gpu_context(), module);
gpu_binary_to_module_.erase(module_it);
const char* mem_it = nullptr;
for (auto x : in_memory_modules_) {
if (x.second == module) mem_it = x.first;
}
if (mem_it != nullptr) in_memory_modules_.erase(mem_it);
}
return true;
}
void RocmExecutor::UnloadKernel(const Kernel* kernel) {
VLOG(3) << "Unloading kernel " << kernel << " : " << kernel->name();
absl::MutexLock lock{&in_memory_modules_mu_};
auto gpu_binary_it = kernel_to_gpu_binary_.find(kernel);
if (kernel_to_gpu_binary_.end() == gpu_binary_it) {
VLOG(3) << "Kernel " << kernel << " : " << kernel->name()
<< " has never been loaded.";
return;
}
VLOG(3) << "Kernel " << kernel << " : " << kernel->name()
<< " has loaded GPU code " << gpu_binary_it->second;
UnloadGpuBinary(gpu_binary_it->second);
kernel_to_gpu_binary_.erase(gpu_binary_it);
}
absl::Status RocmExecutor::Init() {
TF_RETURN_IF_ERROR(GpuDriver::Init());
TF_RETURN_IF_ERROR(GpuDriver::GetDevice(device_ordinal(), &device_));
Context* context;
TF_RETURN_IF_ERROR(
GpuDriver::CreateContext(device_ordinal(), device_, &context));
set_context(context);
return GpuDriver::GetGpuISAVersion(&version_, device_);
}
absl::StatusOr<std::unique_ptr<Kernel>> RocmExecutor::LoadKernel(
const MultiKernelLoaderSpec& spec) {
auto rocm_kernel = std::make_unique<RocmKernel>(this);
hipModule_t module = nullptr;
const std::string* kernel_name;
if (spec.has_cuda_cubin_in_memory()) {
kernel_name = &spec.cuda_cubin_in_memory().kernel_name();
const char* hsaco = reinterpret_cast<const char*>(
spec.cuda_cubin_in_memory().cubin_bytes().data());
absl::MutexLock lock{&in_memory_modules_mu_};
module = in_memory_modules_[hsaco];
if (module == nullptr) {
TF_RETURN_IF_ERROR(GpuDriver::LoadHsaco(gpu_context(), hsaco, &module));
}
kernel_to_gpu_binary_[rocm_kernel.get()] = hsaco;
} else if (spec.has_in_process_symbol()) {
kernel_name = &spec.in_process_symbol().kernel_name();
void* symbol = spec.in_process_symbol().symbol();
VLOG(1) << "Resolve ROCM kernel " << *kernel_name
<< " from symbol pointer: " << symbol;
#if TF_ROCM_VERSION >= 60200
TF_ASSIGN_OR_RETURN(
GpuFunctionHandle function,
RocmRuntime::GetFuncBySymbol(spec.in_process_symbol().symbol()));
rocm_kernel->set_gpu_function(function);
#else
rocm_kernel->set_gpu_function(
static_cast<hipFunction_t>(spec.in_process_symbol().symbol()));
#endif
} else {
return absl::InternalError("No method of loading ROCM kernel provided");
}
if (!spec.has_in_process_symbol()) {
VLOG(2) << "getting function " << *kernel_name << " from module " << module;
GpuFunctionHandle function;
TF_RETURN_IF_ERROR(GpuDriver::GetModuleFunction(
gpu_context(), module, kernel_name->c_str(), &function));
rocm_kernel->set_gpu_function(function);
}
rocm_kernel->set_arity(spec.arity());
if (!spec.has_in_process_symbol()) {
KernelMetadata kernel_metadata;
TF_RETURN_IF_ERROR(GetKernelMetadata(rocm_kernel.get(), &kernel_metadata));
rocm_kernel->set_metadata(kernel_metadata);
}
rocm_kernel->set_name(*kernel_name);
rocm_kernel->set_args_packing(spec.kernel_args_packing());
return std::move(rocm_kernel);
}
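// Queries per-kernel register count and static shared-memory usage via
// hipFuncGetAttribute.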
absl::Status RocmExecutor::GetKernelMetadata(GpuKernel* rocm_kernel,
KernelMetadata* kernel_metadata) {
int value = 0;
TF_RETURN_IF_ERROR(FuncGetAttribute(HIP_FUNC_ATTRIBUTE_NUM_REGS,
rocm_kernel->gpu_function(), &value));
kernel_metadata->set_registers_per_thread(value);
TF_RETURN_IF_ERROR(FuncGetAttribute(HIP_FUNC_ATTRIBUTE_SHARED_SIZE_BYTES,
rocm_kernel->gpu_function(), &value));
kernel_metadata->set_shared_memory_bytes(value);
return absl::OkStatus();
}
absl::Status RocmExecutor::LoadModule(const MultiModuleLoaderSpec& spec,
ModuleHandle* module_handle) {
hipModule_t hip_module = nullptr;
if (spec.has_cuda_cubin_in_memory()) {
absl::MutexLock lock{&in_memory_modules_mu_};
TF_RETURN_IF_ERROR(LoadModuleFromHsaco(
reinterpret_cast<const char*>(spec.cuda_cubin_in_memory().data()),
&hip_module));
*module_handle = ModuleHandle(const_cast<void*>(
static_cast<const void*>(spec.cuda_cubin_in_memory().data())));
return absl::OkStatus();
} else {
return absl::InternalError("No HASCO binary found");
}
}
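// Loads an HSACO binary into a hipModule_t, reference-counting repeated loads
// of the same binary so it is only loaded once and can be released when the
// count drops to zero.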
absl::Status RocmExecutor::LoadModuleFromHsaco(const char* hsaco,
hipModule_t* module) {
uint64_t module_refcount;
std::tie(*module, module_refcount) = gpu_binary_to_module_[hsaco];
if (*module == nullptr) {
TF_RETURN_IF_ERROR(GpuDriver::LoadHsaco(gpu_context(), hsaco, module));
module_refcount = 1;
in_memory_modules_[hsaco] = *module;
VLOG(3) << "Loaded HSACO " << static_cast<const void*>(hsaco)
<< " as module " << *module;
} else {
++module_refcount;
VLOG(3) << "HSACO " << static_cast<const void*>(hsaco)
<< " is already loaded as module " << *module;
}
gpu_binary_to_module_[hsaco] = {*module, module_refcount};
return absl::OkStatus();
}
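// Allocates device memory, or pinned host memory when the requested memory
// space is MemoryType::kHost.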
DeviceMemoryBase RocmExecutor::Allocate(uint64_t size, int64_t memory_space) {
if (memory_space ==
static_cast<int64_t>(stream_executor::MemoryType::kHost)) {
return DeviceMemoryBase(GpuDriver::HostAllocate(gpu_context(), size), size);
}
CHECK_EQ(memory_space, 0);
return DeviceMemoryBase(GpuDriver::DeviceAllocate(gpu_context(), size), size);
}
void RocmExecutor::Deallocate(DeviceMemoryBase* mem) {
GpuDriver::DeviceDeallocate(gpu_context(), mem->opaque());
}
bool RocmExecutor::SynchronizeAllActivity() {
return GpuDriver::SynchronizeContext(gpu_context()).ok();
}
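// Zeroes device memory, using the wider 32-bit memset when the destination is
// 4-byte aligned and the size is a multiple of 4 bytes.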
absl::Status RocmExecutor::SynchronousMemZero(DeviceMemoryBase* location,
uint64_t size) {
if (reinterpret_cast<uintptr_t>(location->opaque()) % 4 == 0 &&
size % 4 == 0) {
return GpuDriver::SynchronousMemsetUint32(
gpu_context(), AsROCmDevicePtr(location), 0x0, size / 4);
}
return GpuDriver::SynchronousMemsetUint8(
gpu_context(), AsROCmDevicePtr(location), 0x0, size);
}
absl::Status RocmExecutor::SynchronousMemcpy(DeviceMemoryBase* gpu_dst,
const void* host_src,
uint64_t size) {
return GpuDriver::SynchronousMemcpyH2D(
gpu_context(), AsROCmDevicePtr(gpu_dst), host_src, size);
}
absl::Status RocmExecutor::SynchronousMemcpy(void* host_dst,
const DeviceMemoryBase& gpu_src,
uint64_t size) {
return GpuDriver::SynchronousMemcpyD2H(gpu_context(), host_dst,
AsROCmDevicePtr(gpu_src), size);
}
void RocmExecutor::DeallocateStream(Stream* stream) {
{
absl::MutexLock lock(&mu_);
if (dnn_ != nullptr) {
dnn_->NotifyStreamDestroyed(stream);
}
}
GpuStream* rocm_stream = AsGpuStream(stream);
absl::MutexLock l(&alive_gpu_streams_mu_);
alive_gpu_streams_.erase(rocm_stream->gpu_stream());
}
absl::Status RocmExecutor::BlockHostUntilDone(Stream* stream) {
return GpuDriver::SynchronizeStream(gpu_context(), AsGpuStreamValue(stream));
}
blas::BlasSupport* RocmExecutor::AsBlas() {
absl::MutexLock lock(&mu_);
if (blas_ != nullptr) {
return blas_.get();
}
PluginRegistry* registry = PluginRegistry::Instance();
absl::StatusOr<PluginRegistry::BlasFactory> status =
registry->GetFactory<PluginRegistry::BlasFactory>(rocm::kROCmPlatformId);
if (!status.ok()) {
LOG(ERROR) << "Unable to retrieve BLAS factory: "
<< status.status().message();
return nullptr;
}
auto blas = status.value()(this);
blas_.reset(blas);
return blas_.get();
}
dnn::DnnSupport* RocmExecutor::AsDnn() {
absl::MutexLock lock(&mu_);
if (dnn_ != nullptr) {
return dnn_.get();
}
PluginRegistry* registry = PluginRegistry::Instance();
absl::StatusOr<PluginRegistry::DnnFactory> status =
registry->GetFactory<PluginRegistry::DnnFactory>(rocm::kROCmPlatformId);
if (!status.ok()) {
LOG(ERROR) << "Unable to retrieve DNN factory: "
<< status.status().message();
return nullptr;
}
auto dnn = status.value()(this);
dnn_.reset(dnn);
return dnn_.get();
}
fft::FftSupport* RocmExecutor::AsFft() {
absl::MutexLock lock(&mu_);
if (fft_ != nullptr) {
return fft_.get();
}
PluginRegistry* registry = PluginRegistry::Instance();
absl::StatusOr<PluginRegistry::FftFactory> status =
registry->GetFactory<PluginRegistry::FftFactory>(rocm::kROCmPlatformId);
if (!status.ok()) {
LOG(ERROR) << "Unable to retrieve FFT factory: "
<< status.status().message();
return nullptr;
}
auto fft = status.value()(this);
fft_.reset(fft);
return fft_.get();
}
bool RocmExecutor::CanEnablePeerAccessTo(StreamExecutor* other) {
GpuExecutor* rocm_other = static_cast<GpuExecutor*>(other);
return GpuDriver::CanEnablePeerAccess(gpu_context(),
rocm_other->gpu_context());
}
absl::Status RocmExecutor::EnablePeerAccessTo(StreamExecutor* other) {
GpuExecutor* rocm_other = static_cast<GpuExecutor*>(other);
return GpuDriver::EnablePeerAccess(gpu_context(), rocm_other->gpu_context());
}
bool RocmExecutor::DeviceMemoryUsage(int64_t* free, int64_t* total) const {
return GpuDriver::GetDeviceMemoryInfo(gpu_context(), free, total);
}
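// Resolves a device symbol to its address and size, searching the module
// identified by `module_handle` when one is given and otherwise the loaded
// modules.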
absl::StatusOr<DeviceMemoryBase> RocmExecutor::GetSymbol(
const std::string& symbol_name, ModuleHandle module_handle) {
void* mem = nullptr;
size_t bytes = 0;
absl::MutexLock lock{&in_memory_modules_mu_};
if (static_cast<bool>(module_handle)) {
auto it = gpu_binary_to_module_.find(module_handle.id());
CHECK(it != gpu_binary_to_module_.end());
TF_RETURN_IF_ERROR(GpuDriver::GetModuleSymbol(
gpu_context(), it->second.first, symbol_name.c_str(),
reinterpret_cast<hipDeviceptr_t*>(&mem), &bytes));
return DeviceMemoryBase(mem, bytes);
}
for (auto& it : gpu_binary_to_module_) {
TF_RETURN_IF_ERROR(GpuDriver::GetModuleSymbol(
gpu_context(), it.second.first, symbol_name.c_str(),
reinterpret_cast<hipDeviceptr_t*>(&mem), &bytes));
return DeviceMemoryBase(mem, bytes);
}
LOG(INFO) << "Falied to find symbol in any modules: " << symbol_name;
return absl::NotFoundError(
absl::StrCat("Check if module containing symbol ", symbol_name,
" is loaded (module_handle = ",
reinterpret_cast<uintptr_t>(module_handle.id()), ")"));
}
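// Fills `block_dim_limit` with the maximum grid dimensions supported by the
// device.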
absl::Status FillBlockDimLimit(GpuDeviceHandle device,
BlockDim* block_dim_limit) {
int x, y, z;
TF_RETURN_IF_ERROR(GpuDriver::GetGridLimits(&x, &y, &z, device));
block_dim_limit->x = x;
block_dim_limit->y = y;
block_dim_limit->z = z;
return absl::OkStatus();
}
absl::StatusOr<std::unique_ptr<GpuEvent>> RocmExecutor::CreateGpuEvent(
bool allow_timing) {
auto gpu_event = std::make_unique<RocmEvent>(gpu_context());
TF_RETURN_IF_ERROR(gpu_event->Init(allow_timing));
return std::move(gpu_event);
}
absl::StatusOr<std::unique_ptr<Event>> RocmExecutor::CreateEvent() {
return CreateGpuEvent(false);
}
absl::StatusOr<std::unique_ptr<Stream>> RocmExecutor::CreateStream(
std::optional<std::variant<StreamPriority, int>> priority) {
TF_ASSIGN_OR_RETURN(auto event, CreateGpuEvent(false));
auto stream = std::make_unique<GpuStream>(this, std::move(event), priority);
absl::MutexLock l(&alive_gpu_streams_mu_);
TF_RETURN_IF_ERROR(stream->Init());
auto gpu_stream = stream->gpu_stream();
alive_gpu_streams_[gpu_stream] = stream.get();
return std::move(stream);
}
absl::StatusOr<std::unique_ptr<CommandBuffer>>
RocmExecutor::CreateCommandBuffer(CommandBuffer::Mode mode) {
VLOG(2) << "Create ROCm command buffer (ROCm graph)";
GpuGraphHandle graph = nullptr;
TF_RETURN_IF_ERROR(GpuDriver::CreateGraph(&graph));
return std::make_unique<GpuCommandBuffer>(mode, this, graph);
}
absl::Status RocmExecutor::TrimGraphMemory() {
return GpuDriver::DeviceGraphMemTrim(device_);
}
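// Builds a DeviceDescription for the given ordinal by querying the ROCm driver
// and runtime: PCI bus id, clock rate, memory bandwidth, core counts, shared
// memory limits, and driver/runtime/toolkit versions.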
absl::StatusOr<std::unique_ptr<DeviceDescription>>
RocmExecutor::CreateDeviceDescription(int device_ordinal) {
GpuDeviceHandle device;
auto status = GpuDriver::GetDevice(device_ordinal, &device);
if (!status.ok()) {
return status;
}
int version;
status = GpuDriver::GetGpuISAVersion(&version, device);
if (!status.ok()) {
return status;
}
std::string gcn_arch_name;
status = GpuDriver::GetGpuGCNArchName(device, &gcn_arch_name);
if (!status.ok()) {
return status;
}
DeviceDescription desc;
{
std::string pci_bus_id = GpuDriver::GetPCIBusID(device);
pci_bus_id = absl::AsciiStrToLower(pci_bus_id);
desc.set_pci_bus_id(pci_bus_id);
int numa_node = ReadNumaNode(pci_bus_id, device_ordinal);
desc.set_numa_node(numa_node);
}
hipDeviceProp_t prop;
if (GpuDriver::GetDeviceProperties(&prop, device_ordinal)) {
desc.set_threads_per_block_limit(prop.maxThreadsPerBlock);
ThreadDim thread_dim_limit;
thread_dim_limit.x = prop.maxThreadsDim[0];
thread_dim_limit.y = prop.maxThreadsDim[1];
thread_dim_limit.z = prop.maxThreadsDim[2];
desc.set_thread_dim_limit(thread_dim_limit);
float clock_rate_ghz = static_cast<float>(prop.clockRate) / 1e6;
desc.set_clock_rate_ghz(clock_rate_ghz);
int64_t memory_bandwidth =
2 * (static_cast<int64_t>(prop.memoryBusWidth) / 8) *
(static_cast<int64_t>(prop.memoryClockRate) * 1000);
desc.set_memory_bandwidth(memory_bandwidth);
desc.set_l2_cache_size(prop.l2CacheSize);
}
{
bool ecc_enabled = false;
(void)GpuDriver::IsEccEnabled(device, &ecc_enabled);
desc.set_ecc_enabled(ecc_enabled);
}
uint64_t device_memory_size = -1;
(void)GpuDriver::GetDeviceTotalMemory(device, &device_memory_size);
desc.set_device_memory_size(device_memory_size);
{
BlockDim block_dim_limit;
TF_RETURN_IF_ERROR(FillBlockDimLimit(device, &block_dim_limit));
desc.set_block_dim_limit(block_dim_limit);
}
{
std::string device_name;
TF_RETURN_IF_ERROR(GpuDriver::GetDeviceName(device, &device_name));
desc.set_name(device_name);
}
desc.set_platform_version(
absl::StrCat("AMDGPU ISA version: ", gcn_arch_name));
desc.set_device_address_bits(64);
desc.set_device_vendor("Advanced Micro Devices, Inc");
desc.set_rocm_compute_capability(gcn_arch_name);
desc.set_shared_memory_per_core(
GpuDriver::GetMaxSharedMemoryPerCore(device).value());
desc.set_shared_memory_per_block(
GpuDriver::GetMaxSharedMemoryPerBlock(device).value());
int core_count = GpuDriver::GetMultiprocessorCount(device).value();
desc.set_core_count(core_count);
desc.set_fpus_per_core(fpus_per_core(gcn_arch_name));
desc.set_threads_per_core_limit(
GpuDriver::GetMaxThreadsPerMultiprocessor(device).value());
desc.set_registers_per_block_limit(
GpuDriver::GetMaxRegistersPerBlock(device).value());
desc.set_threads_per_warp(GpuDriver::GetThreadsPerWarp(device).value());
desc.set_registers_per_core_limit(64 * 1024);
desc.set_compile_time_toolkit_version(
SemanticVersion{HIP_VERSION_MAJOR, HIP_VERSION_MINOR, HIP_VERSION_PATCH});
desc.set_runtime_version(
ParseRocmVersion(RocmRuntime::GetRuntimeVersion().value_or(0))
.value_or(SemanticVersion{0, 0, 0}));
desc.set_driver_version(
ParseRocmVersion(GpuDriver::GetDriverVersion().value_or(0))
.value_or(SemanticVersion{0, 0, 0}));
int cc_major = 0;
int cc_minor = 0;
GpuDriver::GetComputeCapability(&cc_major, &cc_minor, device).IgnoreError();
desc.set_model_str(absl::StrFormat("cc_%d.%d with %dB RAM, %d cores",
cc_major, cc_minor, device_memory_size,
core_count));
return std::make_unique<DeviceDescription>(std::move(desc));
}
}
}
STREAM_EXECUTOR_REGISTER_MODULE_INITIALIZER(rocm_executor, {}); | #include "xla/stream_executor/rocm/rocm_executor.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/test.h"
namespace stream_executor::gpu {
namespace {
using testing::Field;
using testing::Ge;
using testing::IsEmpty;
using testing::Not;
using testing::VariantWith;
TEST(RocmExecutorTest, CreateDeviceDescription) {
TF_ASSERT_OK(GpuDriver::Init());
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<DeviceDescription> result,
                          RocmExecutor::CreateDeviceDescription(0));
constexpr SemanticVersion kNullVersion{0, 0, 0};
EXPECT_NE(result->runtime_version(), kNullVersion);
EXPECT_NE(result->driver_version(), kNullVersion);
EXPECT_NE(result->compile_time_toolkit_version(), kNullVersion);
EXPECT_THAT(result->platform_version(), Not(IsEmpty()));
EXPECT_THAT(result->name(), Not(IsEmpty()));
EXPECT_THAT(result->model_str(), Not(IsEmpty()));
EXPECT_THAT(result->device_vendor(), "Advanced Micro Devices, Inc");
EXPECT_THAT(result->gpu_compute_capability(),
VariantWith<RocmComputeCapability>(
Field("gcn_arch_name", &RocmComputeCapability::gcn_arch_name,
Not(IsEmpty()))));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/stream_executor/rocm/rocm_executor.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/stream_executor/rocm/rocm_executor_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
509e8590-a1ba-4f16-9c3f-6140d6c8b663 | cpp | tensorflow/tensorflow | strided_slice_op | tensorflow/compiler/tf2xla/kernels/strided_slice_op.cc | tensorflow/core/util/strided_slice_op_test.cc | #include "tensorflow/core/util/strided_slice_op.h"
#include <algorithm>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/inlined_vector.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/types/span.h"
#include "tensorflow/compiler/tf2xla/literal_util.h"
#include "tensorflow/compiler/tf2xla/xla_helpers.h"
#include "tensorflow/compiler/tf2xla/xla_op_kernel.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
#include "xla/hlo/builder/lib/constants.h"
#include "xla/hlo/builder/lib/dynamic_shaped_ops.h"
#include "xla/hlo/builder/value_inference.h"
#include "xla/hlo/builder/xla_builder.h"
#include "xla/literal.h"
#include "xla/shape.h"
#include "xla/xla_data.pb.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/op_requires.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
namespace {
using errors::InvalidArgument;
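// XLA kernel for StridedSlice. When begin/end are compile-time constants the
// op lowers to a static xla::Slice (reversing dimensions for negative
// strides); otherwise it falls back to EmitDynamicSlice.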
class StridedSliceOp : public XlaOpKernel {
public:
explicit StridedSliceOp(OpKernelConstruction* ctx) : XlaOpKernel(ctx) {
OP_REQUIRES_OK(ctx, ctx->GetAttr("begin_mask", &begin_mask_));
OP_REQUIRES_OK(ctx, ctx->GetAttr("end_mask", &end_mask_));
OP_REQUIRES_OK(ctx, ctx->GetAttr("ellipsis_mask", &ellipsis_mask_));
OP_REQUIRES_OK(ctx, ctx->GetAttr("new_axis_mask", &new_axis_mask_));
OP_REQUIRES_OK(ctx, ctx->GetAttr("shrink_axis_mask", &shrink_axis_mask_));
OP_REQUIRES_OK(ctx, ctx->GetAttr("Index", &index_type_));
}
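  // Lowers a slice whose begin/end indices are only known at run time. All
  // strides must be 1; the input is padded so out-of-range start indices stay
  // legal, the slice is taken with xla::DynamicSlice, and the dynamic output
  // extents are marked via SetDimensionSizeWithRebound.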
void EmitDynamicSlice(XlaOpKernelContext* ctx,
const absl::InlinedVector<int64_t, 4>& strides,
PartialTensorShape partial_processing_shape,
PartialTensorShape partial_final_shape,
const StridedSliceShapeSpec& shape_spec,
const std::vector<bool>& begins_are_dynamic,
const std::vector<bool>& ends_are_dynamic) {
const TensorShape input_shape = ctx->InputShape(0);
xla::XlaOp slice = ctx->Input(0);
for (int64_t i = 0; i < ctx->InputShape("begin").dims(); ++i) {
OP_REQUIRES(ctx, strides[i] == 1,
errors::InvalidArgument(
"Strides have to be one when inputs are not constant."));
}
for (int64_t i = 0; i < partial_final_shape.dims(); ++i) {
if (partial_final_shape.dim_size(i) == -1) {
partial_final_shape.set_dim(
i,
input_shape.dim_size(shape_spec.output_to_processing_mapping[i]));
}
}
TensorShape final_shape;
OP_REQUIRES(
ctx, partial_final_shape.AsTensorShape(&final_shape),
InvalidArgument("XLA can't deduce compile time constant output "
"shape for strided slice: ",
partial_final_shape.DebugString(),
", output shape must be a compile-time constant"));
for (int64_t i = 0; i < partial_processing_shape.dims(); ++i) {
if (partial_processing_shape.dim_size(i) == -1) {
partial_processing_shape.set_dim(i, input_shape.dim_size(i));
}
}
TensorShape processing_shape;
OP_REQUIRES(
ctx, partial_processing_shape.AsTensorShape(&processing_shape),
InvalidArgument("XLA can't deduce compile time constant processing "
"shape for strided slice: ",
partial_processing_shape.DebugString(),
", output shape must be a compile-time constant"));
xla::PaddingConfig padding_config;
bool need_padding = false;
std::vector<bool> result_dims_are_dynamic;
const auto& dims = input_shape.dims();
result_dims_are_dynamic.reserve(dims);
for (int64_t i = 0; i < dims; ++i) {
int64_t sparse_index = shape_spec.processing_to_sparse_mapping[i];
bool shrink_axis_set = (1 << i) & shape_spec.shrink_axis_dense_mask;
auto* dims = padding_config.add_dimensions();
dims->set_edge_padding_low(0);
dims->set_interior_padding(0);
if ((begins_are_dynamic[sparse_index] ||
ends_are_dynamic[sparse_index]) &&
!shrink_axis_set) {
dims->set_edge_padding_high(input_shape.dim_size(i));
need_padding = true;
result_dims_are_dynamic.push_back(true);
} else {
dims->set_edge_padding_high(0);
result_dims_are_dynamic.push_back(false);
}
}
if (need_padding) {
slice = xla::Pad(slice, xla::Zero(ctx->builder(), ctx->input_xla_type(0)),
padding_config);
      for (int64_t i = 0; i < result_dims_are_dynamic.size(); ++i) {
if (result_dims_are_dynamic[i]) {
slice = xla::RemoveDynamicDimension(slice, i);
}
}
}
std::vector<xla::XlaOp> start_indices;
std::vector<xla::XlaOp> slice_sizes_dynamic;
xla::Shape input_xla_shape = ctx->InputXlaShape(0).value();
for (int64_t i = 0; i < input_shape.dims(); ++i) {
bool begin_mask = (1 << i) & shape_spec.begin_dense_mask;
bool end_mask = (1 << i) & shape_spec.end_dense_mask;
auto zero = xla::Zero(ctx->builder(), ctx->InputXlaType("begin"));
xla::XlaOp begin_index, end_index;
int64_t sparse_index = shape_spec.processing_to_sparse_mapping[i];
bool xla_input_is_dynamic = input_xla_shape.is_dynamic_dimension(i);
xla::XlaOp dim_size;
if (xla_input_is_dynamic) {
dim_size = xla::GetDimensionSize(ctx->Input(0), i);
OP_REQUIRES(ctx, ctx->InputXlaType("begin") == xla::S32,
errors::InvalidArgument("'begin shape has to be int32 when "
"indices to slice op are dynamic"));
} else {
dim_size =
xla::ConstantR0WithType(ctx->builder(), ctx->InputXlaType("begin"),
input_xla_shape.dimensions(i));
}
auto scalar_must_be_non_negative = [ctx](xla::XlaOp value) -> bool {
auto lower_bound = ctx->value_inference().AnalyzeConstant(
value, xla::ValueInferenceMode::kLowerBound);
if (!lower_bound.ok() || !lower_bound->AllValid()) {
return false;
}
return lower_bound->Get<int32>({}) >= 0;
};
if (begin_mask) {
begin_index = zero;
} else {
begin_index = xla::Slice(ctx->Input("begin"), {sparse_index},
{sparse_index + 1}, {1});
begin_index = xla::Reshape(begin_index, {});
if (!scalar_must_be_non_negative(begin_index)) {
auto index_negative = xla::Lt(begin_index, zero);
auto wrapped_index = xla::Add(dim_size, begin_index);
begin_index = xla::Select(index_negative, wrapped_index, begin_index);
}
}
start_indices.push_back(begin_index);
if (end_mask) {
end_index = dim_size;
} else {
end_index = xla::Slice(ctx->Input("end"), {sparse_index},
{sparse_index + 1}, {1});
end_index = xla::Reshape(end_index, {});
if (!scalar_must_be_non_negative(end_index)) {
auto index_negative = xla::Lt(end_index, zero);
auto wrapped_index = xla::Add(dim_size, end_index);
end_index = xla::Select(index_negative, wrapped_index, end_index);
}
}
xla::XlaOp size = xla::Max(xla::Sub(end_index, begin_index), zero);
slice_sizes_dynamic.push_back(xla::ConvertElementType(size, xla::S32));
}
slice =
xla::DynamicSlice(slice, start_indices, processing_shape.dim_sizes());
slice = xla::Reshape(slice, final_shape.dim_sizes());
for (int64_t i = 0; i < final_shape.dims(); ++i) {
      int64_t processing_shape_dim = shape_spec.output_to_processing_mapping[i];
if (processing_shape_dim != -1) {
auto status = xla::SetDimensionSizeWithRebound(
&ctx->value_inference(), slice,
slice_sizes_dynamic[processing_shape_dim], i);
OP_REQUIRES_OK(ctx, status.status());
slice = status.value();
}
}
ctx->SetOutput(0, slice);
}
void Compile(XlaOpKernelContext* ctx) override {
const TensorShape input_shape = ctx->InputShape(0);
const TensorShape begin_shape = ctx->InputShape("begin");
OP_REQUIRES(
ctx, begin_shape.dims() == 1,
errors::InvalidArgument("'begin' input has to be a rank 1 vector"));
absl::InlinedVector<int64_t, 4> begin;
absl::InlinedVector<int64_t, 4> end;
absl::InlinedVector<int64_t, 4> strides;
xla::Literal begin_literal, end_literal, strides_literal;
bool begin_is_constant = ctx->ConstantInput(1, &begin_literal).ok();
bool end_is_constant = ctx->ConstantInput(2, &end_literal).ok();
OP_REQUIRES_OK(ctx, ctx->ConstantInput(3, &strides_literal));
Tensor begin_tensor, end_tensor, strides_tensor;
if (begin_is_constant) {
OP_REQUIRES_OK(
ctx, LiteralToHostTensor(begin_literal, index_type_, &begin_tensor));
}
if (end_is_constant) {
OP_REQUIRES_OK(
ctx, LiteralToHostTensor(end_literal, index_type_, &end_tensor));
}
OP_REQUIRES_OK(ctx, LiteralToHostTensor(strides_literal, index_type_,
&strides_tensor));
PartialTensorShape partial_processing_shape, partial_final_shape;
bool dummy = false;
StridedSliceShapeSpec shape_spec;
OP_REQUIRES_OK(
ctx,
ValidateStridedSliceOp(
begin_is_constant ? &begin_tensor : nullptr,
end_is_constant ? &end_tensor : nullptr, strides_tensor,
input_shape, begin_mask_, end_mask_, ellipsis_mask_, new_axis_mask_,
shrink_axis_mask_, &partial_processing_shape, &partial_final_shape,
&dummy, &dummy, &dummy, &begin, &end, &strides, &shape_spec));
xla::XlaOp slice = ctx->Input(0);
std::vector<bool> begins_are_dynamic;
OP_REQUIRES_OK(
ctx, ctx->ResolveInputDynamismIntoPredVector(1, &begins_are_dynamic));
std::vector<bool> ends_are_dynamic;
OP_REQUIRES_OK(
ctx, ctx->ResolveInputDynamismIntoPredVector(2, &ends_are_dynamic));
if (begin_is_constant && end_is_constant) {
TensorShape final_shape;
OP_REQUIRES(
ctx, partial_final_shape.AsTensorShape(&final_shape),
InvalidArgument("XLA can't deduce compile time constant output "
"shape for strided slice: ",
partial_final_shape.DebugString(),
", output shape must be a compile-time constant"));
absl::InlinedVector<int64_t, 4> dimensions_to_reverse;
absl::InlinedVector<int64_t, 4> slice_begin, slice_end, slice_strides;
for (int i = 0; i < begin.size(); ++i) {
if (strides[i] > 0) {
slice_begin.push_back(begin[i]);
slice_end.push_back(std::max(end[i], begin[i]));
slice_strides.push_back(strides[i]);
} else {
slice_begin.push_back(input_shape.dim_size(i) - begin[i] - 1);
slice_end.push_back(std::max(input_shape.dim_size(i) - end[i] - 1,
input_shape.dim_size(i) - begin[i] - 1));
slice_strides.push_back(-strides[i]);
dimensions_to_reverse.push_back(i);
}
}
if (!dimensions_to_reverse.empty()) {
slice = xla::Rev(slice, dimensions_to_reverse);
}
slice = xla::Slice(slice, slice_begin, slice_end, slice_strides);
auto operand_shape_or = ctx->builder()->GetShape(ctx->Input(0));
OP_REQUIRES_OK(ctx, operand_shape_or.status());
xla::Shape xla_shape = operand_shape_or.value();
bool begins_are_static = absl::c_all_of(
begins_are_dynamic, [](bool dynamic) { return !dynamic; });
OP_REQUIRES(ctx, begins_are_static,
errors::InvalidArgument(
"XLA can't use dynamic begin values for slice."));
bool ends_are_static = absl::c_all_of(
ends_are_dynamic, [](bool dynamic) { return !dynamic; });
slice = xla::Reshape(slice, final_shape.dim_sizes());
if (xla_shape.is_static() && ends_are_static) {
ctx->SetOutput(0, slice);
return;
}
for (int64_t i = 0; i < final_shape.dims(); ++i) {
int64_t input_index = shape_spec.output_to_processing_mapping[i];
if (input_index == -1) {
continue;
}
bool input_is_dynamic = xla_shape.is_dynamic_dimension(input_index);
int64_t sparse_index = shape_spec.output_to_sparse_mapping[i];
bool end_is_dynamic =
sparse_index == -1 ? false : ends_are_dynamic[sparse_index];
bool backward_slice = sparse_index == -1
? false
: end_literal.Get<int32>({sparse_index}) < 0;
if (input_is_dynamic || end_is_dynamic) {
OP_REQUIRES(
ctx, strides[input_index] == 1,
errors::InvalidArgument("XLA has not implemented dynamic "
"sized slice with non-trival stride yet. "
"Please file a bug against XLA"));
auto operand_size = xla::GetDimensionSize(ctx->Input(0), input_index);
if (backward_slice) {
OP_REQUIRES(ctx, !end_is_dynamic,
errors::InvalidArgument(
"XLA has not implemented dynamic "
"sized slice with dynamic negative index %lld. "));
operand_size = xla::Add(
operand_size,
xla::ConstantR0<int32>(ctx->builder(),
end_literal.Get<int32>({sparse_index})));
} else {
xla::XlaOp end_size;
if (end_is_dynamic) {
end_size = xla::Reshape(xla::Slice(ctx->Input(2), {sparse_index},
{sparse_index + 1}, {1}),
{});
} else {
end_size =
xla::ConstantR0<int32>(ctx->builder(), end[input_index]);
}
operand_size = xla::Min(operand_size, end_size);
}
slice = xla::SetDimensionSize(
slice,
xla::Sub(operand_size, xla::ConstantR0<int32>(
ctx->builder(), begin[input_index])),
i);
}
}
ctx->SetOutput(0, slice);
return;
} else {
EmitDynamicSlice(ctx, strides, partial_processing_shape,
partial_final_shape, shape_spec, begins_are_dynamic,
ends_are_dynamic);
}
}
private:
int32 begin_mask_, end_mask_;
int32 ellipsis_mask_, new_axis_mask_, shrink_axis_mask_;
DataType index_type_;
};
REGISTER_XLA_OP(Name("StridedSlice")
.CompileTimeConstantInput("begin")
.CompileTimeConstantInput("end")
.CompileTimeConstantInput("strides"),
StridedSliceOp);
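// XLA kernel for StridedSliceGrad: scatters the incoming gradient back into a
// zero tensor shaped like the original input, inverting the forward slice.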
class StridedSliceGradOp : public XlaOpKernel {
public:
explicit StridedSliceGradOp(OpKernelConstruction* ctx) : XlaOpKernel(ctx) {
OP_REQUIRES_OK(ctx, ctx->GetAttr("begin_mask", &begin_mask_));
OP_REQUIRES_OK(ctx, ctx->GetAttr("end_mask", &end_mask_));
OP_REQUIRES_OK(ctx, ctx->GetAttr("ellipsis_mask", &ellipsis_mask_));
OP_REQUIRES_OK(ctx, ctx->GetAttr("new_axis_mask", &new_axis_mask_));
OP_REQUIRES_OK(ctx, ctx->GetAttr("shrink_axis_mask", &shrink_axis_mask_));
OP_REQUIRES_OK(ctx, ctx->GetAttr("Index", &index_type_));
}
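  // Gradient path for begin/end indices that are not compile-time constants:
  // requires unit strides and writes the gradient into a zero-initialized
  // buffer with xla::DynamicUpdateSlice.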
void CompileAsDynamicUpdateSlice(XlaOpKernelContext* ctx,
const TensorShape& input_shape,
const xla::Literal& strides_literal) {
bool dummy = false;
Tensor strides_tensor;
PartialTensorShape processing_shape, final_shape;
absl::InlinedVector<int64_t, 4> begin;
absl::InlinedVector<int64_t, 4> end;
absl::InlinedVector<int64_t, 4> strides;
StridedSliceShapeSpec shape_spec;
OP_REQUIRES_OK(ctx, LiteralToHostTensor(strides_literal, index_type_,
&strides_tensor));
OP_REQUIRES_OK(
ctx, ValidateStridedSliceOp(
nullptr, nullptr, strides_tensor, input_shape, begin_mask_,
end_mask_, ellipsis_mask_, new_axis_mask_, shrink_axis_mask_,
&processing_shape, &final_shape, &dummy, &dummy, &dummy,
&begin, &end, &strides, &shape_spec));
for (int64_t i = 0; i < processing_shape.dims(); ++i) {
OP_REQUIRES(
ctx, strides[i] == 1,
errors::InvalidArgument("Strides in strided slice grad have to be "
"one when inputs are not constant."));
}
xla::XlaOp grad = ctx->Input(4);
xla::Shape grad_shape = ctx->InputXlaShape(4).value();
VLOG(1) << "xla grad shape" << grad_shape;
VLOG(1) << "xla final_shape" << final_shape;
VLOG(1) << "input_shape" << input_shape.DebugString();
auto input_sizes = input_shape.dim_sizes();
auto input_sizes_padded = input_shape.dim_sizes();
bool need_padding = false;
for (int64_t i = 0; i < processing_shape.dims(); ++i) {
if (processing_shape.dim_size(i) == -1) {
input_sizes_padded[i] *= 2;
need_padding = true;
}
}
for (int64_t i = 0; i < grad_shape.rank(); ++i) {
if (shape_spec.output_to_processing_mapping[i] != -1) {
processing_shape.set_dim(shape_spec.output_to_processing_mapping[i],
grad_shape.dimensions(i));
}
}
std::vector<xla::XlaOp> begins;
begins.reserve(processing_shape.dims());
for (int64_t i = 0; i < input_shape.dims(); ++i) {
bool begin_mask = (1 << i) & shape_spec.begin_dense_mask;
int64_t begin_dim = shape_spec.processing_to_sparse_mapping[i];
xla::XlaOp begin_index;
auto zero = xla::Zero(ctx->builder(), ctx->InputXlaType("begin"));
if (begin_mask) {
begin_index = zero;
} else {
xla::XlaOp dim_size = xla::Slice(ctx->Input(0), {i}, {i + 1}, {1});
dim_size = xla::Reshape(dim_size, {});
begin_index =
xla::Slice(ctx->Input(1), {begin_dim}, {begin_dim + 1}, {1});
begin_index = xla::Reshape(begin_index, {});
auto index_negative = xla::Lt(begin_index, zero);
auto wrapped_index = xla::Add(dim_size, begin_index);
begin_index = xla::Select(index_negative, wrapped_index, begin_index);
}
begins.push_back(begin_index);
}
auto zero = XlaHelpers::Zero(ctx->builder(), ctx->expected_output_dtype(0));
zero = xla::Broadcast(zero, input_sizes_padded);
grad = xla::Reshape(grad, processing_shape.dim_sizes());
grad = xla::DynamicUpdateSlice(zero, grad, begins);
if (need_padding) {
std::vector<int64_t> strides(input_shape.dims(), 1);
std::vector<int64_t> start_indices(input_shape.dims(), 0);
grad = xla::Slice(grad, start_indices, input_sizes, strides);
}
ctx->SetOutput(0, grad);
}
void Compile(XlaOpKernelContext* ctx) override {
TensorShape processing_shape, final_shape;
absl::InlinedVector<int64_t, 4> begin;
absl::InlinedVector<int64_t, 4> end;
absl::InlinedVector<int64_t, 4> strides;
TensorShape input_shape;
OP_REQUIRES_OK(
ctx, ctx->ConstantInputAsShape(0, &input_shape,
xla::ValueInferenceMode::kUpperBound));
xla::Literal begin_literal, end_literal, strides_literal;
bool begin_is_constant = ctx->ConstantInput(1, &begin_literal).ok();
bool end_is_constant = ctx->ConstantInput(2, &end_literal).ok();
OP_REQUIRES_OK(ctx, ctx->ConstantInput(3, &strides_literal));
if (!(begin_is_constant && end_is_constant)) {
CompileAsDynamicUpdateSlice(ctx, input_shape, strides_literal);
return;
}
Tensor begin_tensor, end_tensor, strides_tensor;
OP_REQUIRES_OK(
ctx, LiteralToHostTensor(begin_literal, index_type_, &begin_tensor));
OP_REQUIRES_OK(ctx,
LiteralToHostTensor(end_literal, index_type_, &end_tensor));
OP_REQUIRES_OK(ctx, LiteralToHostTensor(strides_literal, index_type_,
&strides_tensor));
bool dummy = false;
OP_REQUIRES_OK(
ctx, ValidateStridedSliceOp(
&begin_tensor, &end_tensor, strides_tensor, input_shape,
begin_mask_, end_mask_, ellipsis_mask_, new_axis_mask_,
shrink_axis_mask_, &processing_shape, &final_shape, &dummy,
&dummy, &dummy, &begin, &end, &strides));
const TensorShape dy_shape = ctx->InputShape(4);
OP_REQUIRES(
ctx, final_shape == dy_shape,
errors::InvalidArgument("shape of dy was ", dy_shape.DebugString(),
" instead of ", final_shape.DebugString()));
OP_REQUIRES(
ctx, input_shape.dims() == processing_shape.dims(),
errors::Internal(
"input shape and processing shape must have same number of dims"));
auto zero = XlaHelpers::Zero(ctx->builder(), ctx->expected_output_dtype(0));
xla::XlaOp grad = ctx->Input(4);
grad = xla::Reshape(grad, processing_shape.dim_sizes());
absl::InlinedVector<int64_t, 4> dimensions_to_reverse;
xla::PaddingConfig padding_config;
for (int i = 0; i < processing_shape.dims(); ++i) {
auto* dims = padding_config.add_dimensions();
if (strides[i] > 0) {
dims->set_edge_padding_low(begin[i]);
dims->set_interior_padding(strides[i] - 1);
int64_t size =
dims->edge_padding_low() + processing_shape.dim_size(i) +
(processing_shape.dim_size(i) - 1) * dims->interior_padding();
dims->set_edge_padding_high(input_shape.dim_size(i) - size);
} else {
dimensions_to_reverse.push_back(i);
dims->set_edge_padding_high(input_shape.dim_size(i) - begin[i] - 1);
dims->set_interior_padding(-strides[i] - 1);
int64_t size =
dims->edge_padding_high() + processing_shape.dim_size(i) +
(processing_shape.dim_size(i) - 1) * dims->interior_padding();
dims->set_edge_padding_low(input_shape.dim_size(i) - size);
}
}
if (!dimensions_to_reverse.empty()) {
grad = xla::Rev(grad, dimensions_to_reverse);
}
grad = xla::Pad(grad, zero, padding_config);
xla::XlaOp dynamic_shape = ctx->Input(0);
xla::Shape grad_shape = ctx->builder()->GetShape(grad).value();
std::vector<bool> dynamic_input;
OP_REQUIRES_OK(ctx,
ctx->ResolveInputDynamismIntoPredVector(0, &dynamic_input));
DCHECK_EQ(grad_shape.rank(), input_shape.dims());
for (int64_t dim = 0; dim < input_shape.dims(); ++dim) {
DCHECK_EQ(grad_shape.dimensions(dim), input_shape.dim_size(dim));
if (dynamic_input[dim]) {
auto dim_size = xla::Slice(dynamic_shape, {dim}, {dim + 1}, {1});
dim_size = xla::ConvertElementType(dim_size, xla::S32);
auto dim_size_scalar = xla::Reshape(dim_size, {});
grad = xla::SetDimensionSize(grad, dim_size_scalar, dim);
} else if (grad_shape.is_dynamic_dimension(dim)) {
grad = xla::RemoveDynamicDimension(grad, dim);
}
}
ctx->SetOutput(0, grad);
}
private:
int32 begin_mask_, end_mask_;
int32 ellipsis_mask_, new_axis_mask_, shrink_axis_mask_;
DataType index_type_;
};
REGISTER_XLA_OP(Name("StridedSliceGrad")
.CompileTimeConstantInput("shape")
.CompileTimeConstantInput("begin")
.CompileTimeConstantInput("end")
.CompileTimeConstantInput("strides"),
StridedSliceGradOp);
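// XLA kernel for strided-slice assignment (both resource variables and
// tensors): writes the r-value into the sliced region of the l-value with
// xla::DynamicUpdateSlice. Only strides of +/-1 are supported and the shapes
// must match exactly (no broadcasting).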
class StridedSliceAssignOp : public XlaOpKernel {
public:
explicit StridedSliceAssignOp(OpKernelConstruction* ctx) : XlaOpKernel(ctx) {
OP_REQUIRES_OK(ctx, ctx->GetAttr("begin_mask", &begin_mask_));
OP_REQUIRES_OK(ctx, ctx->GetAttr("end_mask", &end_mask_));
OP_REQUIRES_OK(ctx, ctx->GetAttr("ellipsis_mask", &ellipsis_mask_));
OP_REQUIRES_OK(ctx, ctx->GetAttr("new_axis_mask", &new_axis_mask_));
OP_REQUIRES_OK(ctx, ctx->GetAttr("shrink_axis_mask", &shrink_axis_mask_));
OP_REQUIRES_OK(ctx, ctx->GetAttr("Index", &index_type_));
OP_REQUIRES_OK(ctx, ctx->GetAttr("T", &dtype_));
}
void Compile(XlaOpKernelContext* ctx) override {
TensorShape final_shape;
absl::InlinedVector<int64_t, 4> begin;
absl::InlinedVector<int64_t, 4> end;
absl::InlinedVector<int64_t, 4> strides;
xla::Literal begin_literal, end_literal, strides_literal;
OP_REQUIRES_OK(ctx, ctx->ConstantInput(1, &begin_literal));
OP_REQUIRES_OK(ctx, ctx->ConstantInput(2, &end_literal));
OP_REQUIRES_OK(ctx, ctx->ConstantInput(3, &strides_literal));
Tensor begin_tensor, end_tensor, strides_tensor;
OP_REQUIRES_OK(
ctx, LiteralToHostTensor(begin_literal, index_type_, &begin_tensor));
OP_REQUIRES_OK(ctx,
LiteralToHostTensor(end_literal, index_type_, &end_tensor));
OP_REQUIRES_OK(ctx, LiteralToHostTensor(strides_literal, index_type_,
&strides_tensor));
TensorShape lhs_shape;
xla::XlaOp lhs;
if (ctx->input_type(0) == DT_RESOURCE) {
OP_REQUIRES_OK(ctx, ctx->ReadVariableInput(0, dtype_, &lhs_shape, &lhs));
} else {
lhs_shape = ctx->InputShape(0);
lhs = ctx->Input(0);
}
const TensorShape rhs_shape = ctx->InputShape(4);
TensorShape dummy_processing_shape;
bool dummy = false;
OP_REQUIRES_OK(ctx,
ValidateStridedSliceOp(
&begin_tensor, &end_tensor, strides_tensor, lhs_shape,
begin_mask_, end_mask_, ellipsis_mask_, new_axis_mask_,
shrink_axis_mask_, &dummy_processing_shape, &final_shape,
&dummy, &dummy, &dummy, &begin, &end, &strides));
if (final_shape.num_elements() == 0 && rhs_shape.num_elements() == 0) {
return;
}
OP_REQUIRES(ctx, final_shape == rhs_shape,
errors::Unimplemented(
"sliced l-value shape ", final_shape.DebugString(),
" does not match r-value shape ", rhs_shape.DebugString(),
". Automatic broadcasting not yet implemented."));
xla::XlaOp rhs = ctx->Input(4);
absl::InlinedVector<int64_t, 4> dimensions_to_reverse;
absl::InlinedVector<xla::XlaOp, 4> slice_begin;
absl::InlinedVector<int64_t, 4> slice_dims;
for (int i = 0; i < begin.size(); ++i) {
OP_REQUIRES(
ctx, strides[i] == 1 || strides[i] == -1,
errors::Unimplemented("Strides != 1 or -1 are not yet implemented"));
if (strides[i] > 0) {
slice_begin.push_back(
xla::ConstantR0<int64_t>(ctx->builder(), begin[i]));
slice_dims.push_back(end[i] - begin[i]);
} else {
slice_begin.push_back(
xla::ConstantR0<int64_t>(ctx->builder(), end[i] + 1));
slice_dims.push_back(begin[i] - end[i]);
dimensions_to_reverse.push_back(i);
}
}
if (!dimensions_to_reverse.empty()) {
rhs = xla::Rev(rhs, dimensions_to_reverse);
}
rhs = xla::Reshape(rhs, slice_dims);
lhs = xla::DynamicUpdateSlice(lhs, rhs, slice_begin);
if (ctx->input_type(0) == DT_RESOURCE) {
OP_REQUIRES_OK(ctx, ctx->AssignVariable(0, dtype_, lhs));
} else {
ctx->SetOutput(0, lhs);
}
}
private:
int32 begin_mask_, end_mask_;
int32 ellipsis_mask_, new_axis_mask_, shrink_axis_mask_;
DataType index_type_;
DataType dtype_;
};
REGISTER_XLA_OP(Name("ResourceStridedSliceAssign")
.CompileTimeConstantInput("begin")
.CompileTimeConstantInput("end")
.CompileTimeConstantInput("strides"),
StridedSliceAssignOp);
REGISTER_XLA_OP(Name("TensorStridedSliceUpdate")
.CompileTimeConstantInput("begin")
.CompileTimeConstantInput("end")
.CompileTimeConstantInput("strides"),
StridedSliceAssignOp);
}
} | #include "tensorflow/core/util/strided_slice_op.h"
#include <algorithm>
#include <ostream>
#include <tuple>
#include <utility>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "xla/tsl/lib/core/status_test_util.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/lib/gtl/inlined_vector.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/test.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/status_matchers.h"
namespace tensorflow {
namespace {
using ::testing::PrintToString;
using Vec = typename StridedSliceAssignBCast::Vec;
struct BroadcastPair {
Vec from;
Vec to;
friend std::ostream& operator<<(std::ostream& os, const BroadcastPair& pair) {
return os << strings::StrCat("BroadcastPair{", PrintToString(pair.from),
"->", PrintToString(pair.to), "}");
}
};
struct BroadcastRemap {
int64_t dims;
Vec map;
friend std::ostream& operator<<(std::ostream& os,
const BroadcastRemap& remap) {
return os << strings::StrCat("BroadcastRemap{", remap.dims, ", ",
PrintToString(remap.map), "}");
}
};
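// Returns the total number of elements implied by a shape vector.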
int64_t NumberOfElements(const Vec& shape) {
int64_t number_of_elements = 1;
for (int64_t elem : shape) {
number_of_elements *= elem;
}
return number_of_elements;
}
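// Matches a broadcast vector that legally maps `input_shape` onto
// `output_shape`: each entry must either be 1 where the dimensions already
// agree or equal the output extent where the input dimension is 1.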
MATCHER_P2(Broadcasts, input_shape, output_shape,
strings::StrCat("broadcasts ", PrintToString(input_shape), " to ",
PrintToString(output_shape))) {
const size_t size = input_shape.size();
for (size_t i = 0; i < size; ++i) {
if (!((arg[i] == 1 && input_shape[i] == output_shape[i]) ||
(arg[i] == output_shape[i] && input_shape[i] == 1))) {
return false;
}
}
return true;
}
MATCHER_P(HasSuffix, suffix, "") {
const size_t offset = arg.size() - suffix.size();
for (size_t i = 0; i < suffix.size(); ++i) {
if (suffix[i] != arg[i + offset]) {
return false;
}
}
return true;
}
MATCHER_P(HasSameNumberOfElementsAs, other, "") {
return NumberOfElements(arg) == NumberOfElements(other);
}
TEST(StridedSliceAssignBCastTest, BroadcastingToSameRankWorks) {
const BroadcastPair test_pairs[] = {
{Vec{1}, Vec{5}},
{Vec{1, 1}, Vec{4, 5}},
{Vec{1, 5}, Vec{4, 5}},
{Vec{4, 1}, Vec{4, 5}},
{Vec{1, 1, 1}, Vec{2, 4, 5}},
{Vec{1, 1, 5}, Vec{2, 4, 5}},
{Vec{1, 4, 5}, Vec{2, 4, 5}},
{Vec{2, 1, 5}, Vec{2, 4, 5}},
{Vec{2, 4, 1}, Vec{2, 4, 5}},
};
for (const BroadcastPair& test_pair : test_pairs) {
StridedSliceAssignBCast bcast(test_pair.from, test_pair.to);
EXPECT_TRUE(bcast.IsValid()) << test_pair;
EXPECT_TRUE(bcast.IsBroadcastingRequired());
EXPECT_EQ(bcast.result_shape(), test_pair.to);
EXPECT_EQ(bcast.reshape(), test_pair.from);
EXPECT_THAT(bcast.bcast(), Broadcasts(test_pair.from, test_pair.to));
}
}
TEST(StridedSliceAssignBCastTest, BroadcastingToLargerRankWorks) {
const BroadcastPair test_pairs[] = {
{Vec{}, Vec{2, 4, 5}},
{Vec{1}, Vec{2, 4, 5}},
{Vec{5}, Vec{2, 4, 5}},
{Vec{1, 1}, Vec{2, 4, 5}},
{Vec{1, 5}, Vec{2, 4, 5}},
{Vec{4, 1}, Vec{2, 4, 5}},
{Vec{4, 5}, Vec{2, 4, 5}},
};
for (const BroadcastPair& test_pair : test_pairs) {
StridedSliceAssignBCast bcast(test_pair.from, test_pair.to);
EXPECT_TRUE(bcast.IsValid()) << test_pair;
EXPECT_TRUE(bcast.IsBroadcastingRequired());
EXPECT_EQ(bcast.result_shape(), test_pair.to);
EXPECT_THAT(bcast.reshape(), HasSuffix(test_pair.from));
EXPECT_THAT(bcast.reshape(), HasSameNumberOfElementsAs(test_pair.from));
EXPECT_THAT(bcast.bcast(), Broadcasts(bcast.reshape(), test_pair.to));
}
}
TEST(StridedSliceAssignBCastTest, BroadcastingToSmallerRankWorks) {
const BroadcastPair test_pairs[] = {
{Vec{1, 1}, Vec{5}},
{Vec{1, 1, 5}, Vec{4, 5}},
{Vec{1, 4, 1}, Vec{4, 5}},
{Vec{1, 1, 1, 5}, Vec{4, 5}},
{Vec{1, 1, 4, 1}, Vec{4, 5}},
};
for (const BroadcastPair& test_pair : test_pairs) {
StridedSliceAssignBCast bcast(test_pair.from, test_pair.to);
EXPECT_TRUE(bcast.IsValid()) << test_pair;
EXPECT_TRUE(bcast.IsBroadcastingRequired());
EXPECT_EQ(bcast.result_shape(), test_pair.to);
EXPECT_THAT(test_pair.from, HasSuffix(bcast.reshape()));
EXPECT_THAT(bcast.reshape(), HasSameNumberOfElementsAs(test_pair.from));
EXPECT_THAT(bcast.bcast(), Broadcasts(bcast.reshape(), test_pair.to));
}
}
TEST(StridedSliceAssignBCastTest, ReshapeOnlyWorks) {
const BroadcastPair test_pairs[] = {
{Vec{}, Vec{1, 1}},
{Vec{5}, Vec{5}},
{Vec{5}, Vec{1, 5}},
{Vec{1, 1}, Vec{}},
{Vec{1, 5}, Vec{5}},
{Vec{2, 4, 5}, Vec{2, 4, 5}},
{Vec{2, 4, 5}, Vec{1, 1, 1, 2, 4, 5}},
{Vec{1, 1, 1, 2, 4, 5}, Vec{2, 4, 5}},
};
for (const BroadcastPair& test_pair : test_pairs) {
StridedSliceAssignBCast bcast(test_pair.from, test_pair.to);
EXPECT_TRUE(bcast.IsValid()) << test_pair;
EXPECT_FALSE(bcast.IsBroadcastingRequired());
EXPECT_EQ(bcast.result_shape(), test_pair.to);
EXPECT_THAT(bcast.reshape(), HasSameNumberOfElementsAs(test_pair.from));
EXPECT_THAT(bcast.bcast(), Broadcasts(bcast.reshape(), test_pair.to));
}
}
TEST(StridedSliceAssignBCastTest, InvalidBroadcastFails) {
const BroadcastPair test_pairs[] = {
{Vec{5}, Vec{1}},
{Vec{3}, Vec{4, 5}},
{Vec{4}, Vec{4, 5}},
{Vec{5}, Vec{}},
{Vec{3, 5}, Vec{4, 5}},
{Vec{4, 3}, Vec{4, 5}},
{Vec{5, 5}, Vec{1, 5}},
{Vec{2, 4}, Vec{2, 4, 5}},
{Vec{4, 3}, Vec{2, 4, 5}},
{Vec{3, 5}, Vec{2, 4, 5}},
{Vec{3, 5}, Vec{5}},
{Vec{3, 5}, Vec{}},
{Vec{3, 4, 5}, Vec{2, 4, 5}},
{Vec{2, 4, 5}, Vec{1, 4, 5}},
{Vec{2, 3, 5}, Vec{2, 4, 5}},
{Vec{2, 4, 5}, Vec{2, 4, 5, 2}},
{Vec{2, 4, 5}, Vec{2, 4, 5, 1}},
{Vec{2, 4, 5}, Vec{2, 4, 1, 5}},
{Vec{2, 4, 5}, Vec{4, 5}},
{Vec{2, 4, 5}, Vec{2, 4}},
{Vec{1, 4, 5}, Vec{4, 1}},
{Vec{1, 4, 5}, Vec{5}},
{Vec{1, 4, 5}, Vec{}},
};
for (const BroadcastPair& test_pair : test_pairs) {
StridedSliceAssignBCast bcast(test_pair.from, test_pair.to);
EXPECT_FALSE(bcast.IsValid()) << test_pair;
}
}
TEST(StridedSliceAssignBCastTest, RemapDimensionsToItselfWorks) {
const std::pair<BroadcastPair, BroadcastRemap> test_inputs[] = {
{BroadcastPair{Vec{}, Vec{}},
BroadcastRemap{0, Vec{}}},
{BroadcastPair{Vec{4, 5}, Vec{4, 5}},
BroadcastRemap{2, Vec{0, 1}}},
{BroadcastPair{Vec{2, 4, 5}, Vec{2, 4, 5}},
BroadcastRemap{3, Vec{0, 1, 2}}},
};
for (const auto& test_input : test_inputs) {
const BroadcastPair& test_pair = test_input.first;
const BroadcastRemap& test_remap = test_input.second;
StridedSliceAssignBCast bcast(test_pair.from, test_pair.to);
ASSERT_TRUE(bcast.IsValid());
EXPECT_TRUE(bcast.RemapDimensions(test_remap.dims, test_remap.map))
<< PrintToString(test_input);
EXPECT_EQ(bcast.result_shape(), test_pair.to);
EXPECT_THAT(bcast.bcast(),
Broadcasts(bcast.reshape(), bcast.result_shape()));
}
}
TEST(StridedSliceAssignBCastTest, RemapDimensionsRemovingAxesWorks) {
const std::tuple<BroadcastPair, BroadcastRemap, Vec> test_inputs[] = {
{BroadcastPair{Vec{2, 1, 4, 1, 5}, Vec{2, 1, 4, 1, 5}},
BroadcastRemap{3, Vec{0, -1, 1, -1, 2}}, Vec{2, 4, 5}},
{BroadcastPair{Vec{1, 4, 1}, Vec{1, 4, 1}},
BroadcastRemap{1, Vec{-1, 0, -1}}, Vec{4}},
{BroadcastPair{Vec{1, 1, 1}, Vec{1, 1, 1}},
BroadcastRemap{0, Vec{-1, -1, -1}}, Vec{}},
};
for (const auto& test_input : test_inputs) {
const BroadcastPair& test_pair = std::get<0>(test_input);
const BroadcastRemap& test_remap = std::get<1>(test_input);
const Vec& expected_result_shape = std::get<2>(test_input);
StridedSliceAssignBCast bcast(test_pair.from, test_pair.to);
ASSERT_TRUE(bcast.IsValid());
EXPECT_TRUE(bcast.RemapDimensions(test_remap.dims, test_remap.map))
<< PrintToString(test_input);
EXPECT_EQ(bcast.result_shape(), expected_result_shape);
EXPECT_THAT(bcast.bcast(),
Broadcasts(bcast.reshape(), bcast.result_shape()));
}
}
TEST(StridedSliceAssignBCastTest, RemapDimensionsAddingAxesWorks) {
const std::tuple<BroadcastPair, BroadcastRemap, Vec> test_inputs[] = {
{BroadcastPair{Vec{2, 4, 5}, Vec{2, 4, 5}},
BroadcastRemap{5, Vec{0, 2, 4}}, Vec{2, 1, 4, 1, 5}},
{BroadcastPair{Vec{4, 5}, Vec{4, 5}},
BroadcastRemap{4, Vec{1, 2}}, Vec{1, 4, 5, 1}},
{BroadcastPair{Vec{}, Vec{}},
BroadcastRemap{3, Vec{}}, Vec{1, 1, 1}},
};
for (const auto& test_input : test_inputs) {
const BroadcastPair& test_pair = std::get<0>(test_input);
const BroadcastRemap& test_remap = std::get<1>(test_input);
const Vec& expected_result_shape = std::get<2>(test_input);
StridedSliceAssignBCast bcast(test_pair.from, test_pair.to);
ASSERT_TRUE(bcast.IsValid());
EXPECT_TRUE(bcast.RemapDimensions(test_remap.dims, test_remap.map))
<< PrintToString(test_input);
EXPECT_EQ(bcast.result_shape(), expected_result_shape);
EXPECT_THAT(bcast.bcast(),
Broadcasts(bcast.reshape(), bcast.result_shape()));
}
}
TEST(StridedSliceAssignBCastTest, RemapDimensionsAddingAndRemovingAxesWorks) {
const std::tuple<BroadcastPair, BroadcastRemap, Vec> test_inputs[] = {
{BroadcastPair{Vec{1}, Vec{1}},
BroadcastRemap{1, Vec{-1}}, Vec{1}},
{BroadcastPair{Vec{1}, Vec{1}},
BroadcastRemap{3, Vec{-1}}, Vec{1, 1, 1}},
{BroadcastPair{Vec{1, 5}, Vec{1, 5}},
BroadcastRemap{3, Vec{-1, 1}}, Vec{1, 5, 1}},
{BroadcastPair{Vec{1, 5}, Vec{2, 1, 4, 1, 5}},
BroadcastRemap{4, Vec{0, -1, 1, -1, 3}},
Vec{2, 4, 1, 5}},
};
for (const auto& test_input : test_inputs) {
const BroadcastPair& test_pair = std::get<0>(test_input);
const BroadcastRemap& test_remap = std::get<1>(test_input);
const Vec& expected_result_shape = std::get<2>(test_input);
StridedSliceAssignBCast bcast(test_pair.from, test_pair.to);
ASSERT_TRUE(bcast.IsValid());
EXPECT_TRUE(bcast.RemapDimensions(test_remap.dims, test_remap.map))
<< PrintToString(test_input);
EXPECT_EQ(bcast.result_shape(), expected_result_shape);
EXPECT_THAT(bcast.bcast(),
Broadcasts(bcast.reshape(), bcast.result_shape()));
}
}
TEST(StridedSliceAssignBCastTest, RemapDimensionsInvalidSizeFails) {
const std::pair<BroadcastPair, BroadcastRemap> test_inputs[] = {
{BroadcastPair{Vec{}, Vec{}},
BroadcastRemap{0, Vec{-1}}},
{BroadcastPair{Vec{2, 4, 5}, Vec{2, 4, 5}},
BroadcastRemap{3, Vec{0, 1, -1, 2}}},
{BroadcastPair{Vec{2, 4, 5}, Vec{2, 4, 5}},
BroadcastRemap{3, Vec{0, 2}}},
};
for (const auto& test_input : test_inputs) {
const BroadcastPair& test_pair = test_input.first;
const BroadcastRemap& test_remap = test_input.second;
StridedSliceAssignBCast bcast(test_pair.from, test_pair.to);
ASSERT_TRUE(bcast.IsValid());
EXPECT_FALSE(bcast.RemapDimensions(test_remap.dims, test_remap.map))
<< PrintToString(test_input);
}
}
TEST(StridedSliceAssignBCastTest, RemapDimensionsOutOfBoundsFails) {
const std::pair<BroadcastPair, BroadcastRemap> test_inputs[] = {
{BroadcastPair{Vec{2, 4, 5}, Vec{2, 4, 5}},
BroadcastRemap{3, Vec{0, 1, 3}}},
{BroadcastPair{Vec{2, 4, 5}, Vec{2, 4, 5}},
BroadcastRemap{2, Vec{0, 1, 2}}},
};
for (const auto& test_input : test_inputs) {
const BroadcastPair& test_pair = test_input.first;
const BroadcastRemap& test_remap = test_input.second;
StridedSliceAssignBCast bcast(test_pair.from, test_pair.to);
ASSERT_TRUE(bcast.IsValid());
EXPECT_FALSE(bcast.RemapDimensions(test_remap.dims, test_remap.map))
<< PrintToString(test_input);
}
}
using IntVector = absl::InlinedVector<int64_t, 4UL>;
TensorShape AsTensorShape(absl::Span<const int64_t> dim_sizes) {
TensorShape out;
TF_CHECK_OK(TensorShape::BuildTensorShape(dim_sizes, &out));
return out;
}
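// The ValidateStridedSliceOp tests below cover shape inference for the
// strided-slice family: strides, masks, negative indices, and error cases.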
TEST(ValidateStridedSliceOpTest, BasicStride) {
Tensor begin_tensor = test::AsTensor<int32_t>({1, 1});
Tensor end_tensor = test::AsTensor<int32_t>({7, 7});
Tensor strides_tensor = test::AsTensor<int32_t>({2, 2});
TensorShape input_shape = AsTensorShape({10, 10});
int32_t begin_mask_spec = 0x2;
int32_t end_mask_spec = 0x1;
int32_t ellipsis_mask = 0x0;
int32_t new_axis_mask = 0x0;
int32_t shrink_axis_mask = 0x0;
TensorShape processing_shape, final_shape;
bool is_identity, is_simple_slice, slice_dim0;
IntVector begin, end, strides;
StridedSliceShapeSpec shape_spec;
TF_EXPECT_OK(ValidateStridedSliceOp(
&begin_tensor, &end_tensor, strides_tensor, input_shape, begin_mask_spec,
end_mask_spec, ellipsis_mask, new_axis_mask, shrink_axis_mask,
&processing_shape, &final_shape, &is_identity, &is_simple_slice,
&slice_dim0, &begin, &end, &strides, &shape_spec));
EXPECT_EQ(processing_shape, AsTensorShape({5, 4}));
EXPECT_EQ(final_shape, AsTensorShape({5, 4}));
EXPECT_FALSE(is_identity);
EXPECT_FALSE(is_simple_slice);
EXPECT_FALSE(slice_dim0);
EXPECT_EQ(begin, (IntVector{1, 0}));
EXPECT_EQ(end, (IntVector{10, 7}));
EXPECT_EQ(strides, (IntVector{2, 2}));
}
TEST(ValidateStridedSliceOpTest, NegativeBeginEnd) {
Tensor begin_tensor = test::AsTensor<int32_t>({-9, -20});
Tensor end_tensor = test::AsTensor<int32_t>({-3, -3});
Tensor strides_tensor = test::AsTensor<int32_t>({2, 2});
TensorShape input_shape = AsTensorShape({10, 10});
int32_t begin_mask_spec = 0x0;
int32_t end_mask_spec = 0x0;
int32_t ellipsis_mask = 0x0;
int32_t new_axis_mask = 0x0;
int32_t shrink_axis_mask = 0x0;
TensorShape processing_shape, final_shape;
bool is_identity, is_simple_slice, slice_dim0;
IntVector begin, end, strides;
StridedSliceShapeSpec shape_spec;
TF_EXPECT_OK(ValidateStridedSliceOp(
&begin_tensor, &end_tensor, strides_tensor, input_shape, begin_mask_spec,
end_mask_spec, ellipsis_mask, new_axis_mask, shrink_axis_mask,
&processing_shape, &final_shape, &is_identity, &is_simple_slice,
&slice_dim0, &begin, &end, &strides, &shape_spec));
EXPECT_EQ(processing_shape, AsTensorShape({3, 4}));
EXPECT_EQ(final_shape, AsTensorShape({3, 4}));
EXPECT_EQ(begin, (IntVector{1, 0}));
EXPECT_EQ(end, (IntVector{7, 7}));
}
TEST(ValidateStridedSliceOpTest, EmptyOutputDim) {
Tensor begin_tensor = test::AsTensor<int32_t>({1, 1});
Tensor end_tensor = test::AsTensor<int32_t>({7, 1});
Tensor strides_tensor = test::AsTensor<int32_t>({2, 1});
TensorShape input_shape = AsTensorShape({10, 10});
int32_t begin_mask_spec = 0x0;
int32_t end_mask_spec = 0x0;
int32_t ellipsis_mask = 0x0;
int32_t new_axis_mask = 0x0;
int32_t shrink_axis_mask = 0x0;
TensorShape processing_shape, final_shape;
bool is_identity, is_simple_slice, slice_dim0;
IntVector begin, end, strides;
StridedSliceShapeSpec shape_spec;
TF_EXPECT_OK(ValidateStridedSliceOp(
&begin_tensor, &end_tensor, strides_tensor, input_shape, begin_mask_spec,
end_mask_spec, ellipsis_mask, new_axis_mask, shrink_axis_mask,
&processing_shape, &final_shape, &is_identity, &is_simple_slice,
&slice_dim0, &begin, &end, &strides, &shape_spec));
EXPECT_EQ(processing_shape, AsTensorShape({3, 0}));
EXPECT_EQ(final_shape, AsTensorShape({3, 0}));
}
TEST(ValidateStridedSliceOpTest, ZeroStrideFails) {
Tensor begin_tensor = test::AsTensor<int32_t>({1, 1});
Tensor end_tensor = test::AsTensor<int32_t>({7, 7});
Tensor strides_tensor = test::AsTensor<int32_t>({0, 2});
TensorShape input_shape = AsTensorShape({10, 10});
int32_t begin_mask_spec = 0x2;
int32_t end_mask_spec = 0x1;
int32_t ellipsis_mask = 0x0;
int32_t new_axis_mask = 0x0;
int32_t shrink_axis_mask = 0x0;
TensorShape processing_shape, final_shape;
bool is_identity, is_simple_slice, slice_dim0;
IntVector begin, end, strides;
StridedSliceShapeSpec shape_spec;
EXPECT_THAT(
ValidateStridedSliceOp(
&begin_tensor, &end_tensor, strides_tensor, input_shape,
begin_mask_spec, end_mask_spec, ellipsis_mask, new_axis_mask,
shrink_axis_mask, &processing_shape, &final_shape, &is_identity,
&is_simple_slice, &slice_dim0, &begin, &end, &strides, &shape_spec),
tsl::testing::StatusIs(
tsl::error::Code::INVALID_ARGUMENT,
::testing::ContainsRegex("strides.* must be non-zero")));
}
TEST(ValidateStridedSliceOpTest, ShrinkAxis) {
Tensor begin_tensor = test::AsTensor<int16_t>({0, 1, 0});
Tensor end_tensor = test::AsTensor<int16_t>({3, 1, 5});
Tensor strides_tensor = test::AsTensor<int16_t>({1, 1, 1});
TensorShape input_shape = AsTensorShape({3, 4, 5});
int32_t begin_mask_spec = 0x2;
int32_t end_mask_spec = 0x2;
int32_t ellipsis_mask = 0x0;
int32_t new_axis_mask = 0x0;
int32_t shrink_axis_mask = 0x2;
TensorShape processing_shape, final_shape;
bool is_identity, is_simple_slice, slice_dim0;
IntVector begin, end, strides;
StridedSliceShapeSpec shape_spec;
TF_EXPECT_OK(ValidateStridedSliceOp(
&begin_tensor, &end_tensor, strides_tensor, input_shape, begin_mask_spec,
end_mask_spec, ellipsis_mask, new_axis_mask, shrink_axis_mask,
&processing_shape, &final_shape, &is_identity, &is_simple_slice,
&slice_dim0, &begin, &end, &strides, &shape_spec));
EXPECT_EQ(final_shape, AsTensorShape({3, 5}));
}
TEST(ValidateStridedSliceOpTest, ShrinkSliceOutOfBoundsFails) {
Tensor begin_tensor = test::AsTensor<int16_t>({0, 7, 0});
Tensor end_tensor = test::AsTensor<int16_t>({3, 7, 5});
Tensor strides_tensor = test::AsTensor<int16_t>({1, 1, 1});
TensorShape input_shape = AsTensorShape({3, 4, 5});
int32_t begin_mask_spec = 0x2;
int32_t end_mask_spec = 0x2;
int32_t ellipsis_mask = 0x0;
int32_t new_axis_mask = 0x0;
int32_t shrink_axis_mask = 0x2;
TensorShape processing_shape, final_shape;
bool is_identity, is_simple_slice, slice_dim0;
IntVector begin, end, strides;
StridedSliceShapeSpec shape_spec;
EXPECT_THAT(
ValidateStridedSliceOp(
&begin_tensor, &end_tensor, strides_tensor, input_shape,
begin_mask_spec, end_mask_spec, ellipsis_mask, new_axis_mask,
shrink_axis_mask, &processing_shape, &final_shape, &is_identity,
&is_simple_slice, &slice_dim0, &begin, &end, &strides, &shape_spec),
tsl::testing::StatusIs(
tsl::error::Code::INVALID_ARGUMENT,
::testing::ContainsRegex("slice index .* out of bounds")));
}
TEST(ValidateStridedSliceOpTest, ShrinkAxisNegativeStrideFails) {
Tensor begin_tensor = test::AsTensor<int16_t>({0, 1, 0});
Tensor end_tensor = test::AsTensor<int16_t>({3, 2, 5});
Tensor strides_tensor = test::AsTensor<int16_t>({1, -1, 1});
TensorShape input_shape = AsTensorShape({3, 4, 5});
int32_t begin_mask_spec = 0x2;
int32_t end_mask_spec = 0x2;
int32_t ellipsis_mask = 0x0;
int32_t new_axis_mask = 0x0;
int32_t shrink_axis_mask = 0x2;
TensorShape processing_shape, final_shape;
bool is_identity, is_simple_slice, slice_dim0;
IntVector begin, end, strides;
StridedSliceShapeSpec shape_spec;
EXPECT_THAT(
ValidateStridedSliceOp(
&begin_tensor, &end_tensor, strides_tensor, input_shape,
begin_mask_spec, end_mask_spec, ellipsis_mask, new_axis_mask,
shrink_axis_mask, &processing_shape, &final_shape, &is_identity,
&is_simple_slice, &slice_dim0, &begin, &end, &strides, &shape_spec),
tsl::testing::StatusIs(
tsl::error::Code::INVALID_ARGUMENT,
::testing::ContainsRegex("only stride 1 allowed")));
}
TEST(ValidateStridedSliceOpTest, NewAxis) {
Tensor begin_tensor = test::AsTensor<int64_t>({0, 0});
Tensor end_tensor = test::AsTensor<int64_t>({10, 10});
Tensor strides_tensor = test::AsTensor<int64_t>({1, 1});
TensorShape input_shape = AsTensorShape({10, 10});
int32_t begin_mask_spec = 0x0;
int32_t end_mask_spec = 0x0;
int32_t ellipsis_mask = 0x0;
int32_t new_axis_mask = 0x2;
int32_t shrink_axis_mask = 0x0;
TensorShape processing_shape, final_shape;
bool is_identity, is_simple_slice, slice_dim0;
IntVector begin, end, strides;
StridedSliceShapeSpec shape_spec;
TF_EXPECT_OK(ValidateStridedSliceOp(
&begin_tensor, &end_tensor, strides_tensor, input_shape, begin_mask_spec,
end_mask_spec, ellipsis_mask, new_axis_mask, shrink_axis_mask,
&processing_shape, &final_shape, &is_identity, &is_simple_slice,
&slice_dim0, &begin, &end, &strides, &shape_spec));
EXPECT_EQ(processing_shape, AsTensorShape({10, 10}));
EXPECT_EQ(final_shape, AsTensorShape({10, 1, 10}));
}
TEST(ValidateStridedSliceOpTest, Ellipsis) {
Tensor begin_tensor = test::AsTensor<int32_t>({0, 0});
Tensor end_tensor = test::AsTensor<int32_t>({10, 10});
Tensor strides_tensor = test::AsTensor<int32_t>({1, 1});
TensorShape input_shape = AsTensorShape({10, 10});
int32_t begin_mask_spec = 0x0;
int32_t end_mask_spec = 0x0;
int32_t ellipsis_mask = 0x1;
int32_t new_axis_mask = 0x2;
int32_t shrink_axis_mask = 0x0;
TensorShape processing_shape, final_shape;
bool is_identity, is_simple_slice, slice_dim0;
IntVector begin, end, strides;
StridedSliceShapeSpec shape_spec;
TF_EXPECT_OK(ValidateStridedSliceOp(
&begin_tensor, &end_tensor, strides_tensor, input_shape, begin_mask_spec,
end_mask_spec, ellipsis_mask, new_axis_mask, shrink_axis_mask,
&processing_shape, &final_shape, &is_identity, &is_simple_slice,
&slice_dim0, &begin, &end, &strides, &shape_spec));
EXPECT_EQ(processing_shape, AsTensorShape({10, 10}));
EXPECT_EQ(final_shape, AsTensorShape({10, 10, 1}));
}
TEST(ValidateStridedSliceOpTest, MultipleEllipsisFails) {
Tensor begin_tensor = test::AsTensor<int32_t>({0, 0});
Tensor end_tensor = test::AsTensor<int32_t>({10, 10});
Tensor strides_tensor = test::AsTensor<int32_t>({1, 1});
TensorShape input_shape = AsTensorShape({10, 10});
int32_t begin_mask_spec = 0x0;
int32_t end_mask_spec = 0x0;
int32_t ellipsis_mask = 0x3;
int32_t new_axis_mask = 0x0;
int32_t shrink_axis_mask = 0x0;
TensorShape processing_shape, final_shape;
bool is_identity, is_simple_slice, slice_dim0;
IntVector begin, end, strides;
StridedSliceShapeSpec shape_spec;
EXPECT_THAT(
ValidateStridedSliceOp(
&begin_tensor, &end_tensor, strides_tensor, input_shape,
begin_mask_spec, end_mask_spec, ellipsis_mask, new_axis_mask,
shrink_axis_mask, &processing_shape, &final_shape, &is_identity,
&is_simple_slice, &slice_dim0, &begin, &end, &strides, &shape_spec),
tsl::testing::StatusIs(tsl::error::Code::INVALID_ARGUMENT,
"Multiple ellipses in slice spec not allowed"));
}
TEST(ValidateStridedSliceOpTest, WrongBeginTensorFails) {
Tensor begin_tensor = test::AsTensor<int32_t>({0});
Tensor end_tensor = test::AsTensor<int32_t>({10, 10});
Tensor strides_tensor = test::AsTensor<int32_t>({1, 1});
TensorShape input_shape = AsTensorShape({10, 10});
int32_t begin_mask_spec = 0x0;
int32_t end_mask_spec = 0x0;
int32_t ellipsis_mask = 0x1;
int32_t new_axis_mask = 0x2;
int32_t shrink_axis_mask = 0x0;
TensorShape processing_shape, final_shape;
bool is_identity, is_simple_slice, slice_dim0;
IntVector begin, end, strides;
StridedSliceShapeSpec shape_spec;
EXPECT_THAT(
ValidateStridedSliceOp(
&begin_tensor, &end_tensor, strides_tensor, input_shape,
begin_mask_spec, end_mask_spec, ellipsis_mask, new_axis_mask,
shrink_axis_mask, &processing_shape, &final_shape, &is_identity,
&is_simple_slice, &slice_dim0, &begin, &end, &strides, &shape_spec),
tsl::testing::StatusIs(
tsl::error::Code::INVALID_ARGUMENT,
::testing::ContainsRegex("Expected .* equal size tensors")));
}
TEST(ValidateStridedSliceOpTest, WrongStridesTensorWithNullBeginFails) {
Tensor end_tensor = test::AsTensor<int32_t>({10, 10});
Tensor strides_tensor = test::AsTensor<int32_t>({1});
TensorShape input_shape = AsTensorShape({10, 10});
int32_t begin_mask_spec = 0x0;
int32_t end_mask_spec = 0x0;
int32_t ellipsis_mask = 0x1;
int32_t new_axis_mask = 0x2;
int32_t shrink_axis_mask = 0x0;
TensorShape processing_shape, final_shape;
bool is_identity, is_simple_slice, slice_dim0;
IntVector begin, end, strides;
StridedSliceShapeSpec shape_spec;
EXPECT_THAT(
ValidateStridedSliceOp(
nullptr, &end_tensor, strides_tensor, input_shape,
begin_mask_spec, end_mask_spec, ellipsis_mask, new_axis_mask,
shrink_axis_mask, &processing_shape, &final_shape, &is_identity,
&is_simple_slice, &slice_dim0, &begin, &end, &strides, &shape_spec),
tsl::testing::StatusIs(
tsl::error::Code::INVALID_ARGUMENT,
::testing::ContainsRegex("Expected .* equal size tensors")));
}
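// The begin/end tensors may be null when begin_mask/end_mask cover those
// dimensions; shrink_axis_mask then removes the trailing size-1 dimension.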
TEST(ValidateStridedSliceOpTest, NullBeginEndWithShrinkAxis) {
Tensor strides_tensor = test::AsTensor<int32_t>({2, -2, 1});
TensorShape input_shape = AsTensorShape({10, 10, 1});
int32_t begin_mask_spec = 0x3;
int32_t end_mask_spec = 0x3;
int32_t ellipsis_mask = 0x0;
int32_t new_axis_mask = 0x0;
int32_t shrink_axis_mask = 0x4;
TensorShape processing_shape, final_shape;
bool is_identity, is_simple_slice, slice_dim0;
IntVector begin, end, strides;
StridedSliceShapeSpec shape_spec;
TF_EXPECT_OK(ValidateStridedSliceOp(
nullptr, nullptr, strides_tensor,
input_shape, begin_mask_spec, end_mask_spec, ellipsis_mask, new_axis_mask,
shrink_axis_mask, &processing_shape, &final_shape, &is_identity,
&is_simple_slice, &slice_dim0, &begin, &end, &strides, &shape_spec));
EXPECT_EQ(processing_shape, AsTensorShape({5, 5, 1}));
EXPECT_EQ(final_shape, AsTensorShape({5, 5}));
EXPECT_EQ(strides, (IntVector{2, -2, 1}));
}
TEST(ValidateStridedSliceOpTest, UnknownInputRankFails) {
Tensor end_tensor = test::AsTensor<int32_t>({10, 10});
Tensor strides_tensor = test::AsTensor<int32_t>({1, 1});
PartialTensorShape input_shape;
int32_t begin_mask_spec = 0x0;
int32_t end_mask_spec = 0x0;
int32_t ellipsis_mask = 0x1;
int32_t new_axis_mask = 0x2;
int32_t shrink_axis_mask = 0x0;
TensorShape processing_shape, final_shape;
bool is_identity, is_simple_slice, slice_dim0;
IntVector begin, end, strides;
StridedSliceShapeSpec shape_spec;
EXPECT_THAT(
ValidateStridedSliceOp(
nullptr, &end_tensor, strides_tensor, input_shape,
begin_mask_spec, end_mask_spec, ellipsis_mask, new_axis_mask,
shrink_axis_mask, &processing_shape, &final_shape, &is_identity,
&is_simple_slice, &slice_dim0, &begin, &end, &strides, &shape_spec),
tsl::testing::StatusIs(tsl::error::Code::INVALID_ARGUMENT,
::testing::ContainsRegex("unknown rank")));
}
TEST(ValidateStridedSliceOpTest, PartialInputShape) {
Tensor end_tensor = test::AsTensor<int32_t>({10, 10});
Tensor strides_tensor = test::AsTensor<int32_t>({1, 1});
PartialTensorShape input_shape;
TF_CHECK_OK(
PartialTensorShape::BuildPartialTensorShape({10, -1}, &input_shape));
int32_t begin_mask_spec = 0x0;
int32_t end_mask_spec = 0x0;
int32_t ellipsis_mask = 0x0;
int32_t new_axis_mask = 0x0;
int32_t shrink_axis_mask = 0x0;
PartialTensorShape processing_shape, final_shape;
bool is_identity, is_simple_slice, slice_dim0;
IntVector begin, end, strides;
StridedSliceShapeSpec shape_spec;
TF_EXPECT_OK(ValidateStridedSliceOp(
nullptr, &end_tensor, strides_tensor, input_shape,
begin_mask_spec, end_mask_spec, ellipsis_mask, new_axis_mask,
shrink_axis_mask, &processing_shape, &final_shape, &is_identity,
&is_simple_slice, &slice_dim0, &begin, &end, &strides, &shape_spec));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2xla/kernels/strided_slice_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/util/strided_slice_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
ff9480bd-20ca-4f9a-97b5-2962374a7103 | cpp | tensorflow/tensorflow | rendezvous_util | tensorflow/core/common_runtime/rendezvous_util.cc | tensorflow/core/common_runtime/rendezvous_util_test.cc | #include "tensorflow/core/common_runtime/rendezvous_util.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/util/reffed_status_callback.h"
namespace tensorflow {
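// Parses each key and sends the matching tensor to `rendezvous`, failing fast
// on any size mismatch between keys, tensors, and alloc_attrs.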
Status SendTensorsToRendezvous(
RendezvousInterface* rendezvous, DeviceContext* device_context,
const std::vector<AllocatorAttributes>& alloc_attrs,
const std::vector<string>& keys, absl::Span<const Tensor> tensors_to_send) {
if (keys.size() != tensors_to_send.size()) {
return errors::InvalidArgument(
"keys and tensors_to_send are not the same size. keys.size() = ",
keys.size(), "; tensors_to_send.size() = ", tensors_to_send.size());
}
if (!alloc_attrs.empty() && (keys.size() != alloc_attrs.size())) {
return errors::InvalidArgument(
"keys and alloc_attrs are not the same size. ",
"keys.size() = ", keys.size(),
"; alloc_attrs.size() = ", alloc_attrs.size());
}
if (!rendezvous) {
return errors::InvalidArgument("Rendezvous is null.");
}
Rendezvous::ParsedKey parsed;
for (int i = 0; i < keys.size(); ++i) {
Rendezvous::Args rendez_args;
rendez_args.device_context = device_context;
if (!alloc_attrs.empty()) {
rendez_args.alloc_attrs = alloc_attrs[i];
}
TF_RETURN_IF_ERROR(Rendezvous::ParseKey(keys[i], &parsed));
TF_RETURN_IF_ERROR(
rendezvous->Send(parsed, rendez_args, tensors_to_send[i], false));
}
return absl::OkStatus();
}
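// Receives one tensor per key into `received_tensors`, invoking `done` with
// the aggregated status once every asynchronous receive has completed.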
void RecvOutputsFromRendezvousAsync(
RendezvousInterface* rendezvous, DeviceContext* device_context,
const std::vector<AllocatorAttributes>& alloc_attrs,
const std::vector<string>& keys, std::vector<Tensor>* received_tensors,
StatusCallback done) {
if (keys.empty()) {
done(absl::OkStatus());
return;
}
  if (!alloc_attrs.empty() && (keys.size() != alloc_attrs.size())) {
    done(errors::InvalidArgument(
        "keys and alloc_attrs are not the same size. ", "keys.size() = ",
        keys.size(), "; alloc_attrs.size() = ", alloc_attrs.size()));
    return;
  }
received_tensors->reserve(keys.size());
std::vector<
std::tuple<string, Tensor*, Rendezvous::ParsedKey, AllocatorAttributes>>
arguments;
for (int i = 0; i < keys.size(); ++i) {
Rendezvous::ParsedKey parsed;
Status s = Rendezvous::ParseKey(keys[i], &parsed);
received_tensors->push_back(Tensor());
if (!s.ok()) {
done(s);
return;
}
AllocatorAttributes alloc_attr;
if (!alloc_attrs.empty()) {
alloc_attr = alloc_attrs[i];
}
arguments.emplace_back(keys[i], &((*received_tensors)[i]), parsed,
alloc_attr);
}
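  // Ref-counted aggregator: each pending RecvAsync holds one reference, and
  // `done` runs with the combined status once the count drops to zero.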
auto status_cb = new ReffedStatusCallback(std::move(done));
for (auto& p : arguments) {
const string& key = std::get<0>(p);
Tensor* val = std::get<1>(p);
Rendezvous::ParsedKey parsed = std::get<2>(p);
Rendezvous::Args rendez_args;
rendez_args.device_context = device_context;
rendez_args.alloc_attrs = std::get<3>(p);
status_cb->Ref();
rendezvous->RecvAsync(
parsed, rendez_args,
[val, key, status_cb](const Status& s,
const Rendezvous::Args& send_args,
const Rendezvous::Args& recv_args,
const Tensor& v, const bool is_dead) {
Status status = s;
if (status.ok()) {
*val = v;
if (is_dead) {
status = errors::InvalidArgument("The tensor returned for ", key,
" was not valid.");
}
}
status_cb->UpdateStatus(status);
status_cb->Unref();
});
}
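  // Drop the initial reference; `done` fires after the last outstanding
  // receive unrefs.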
status_cb->Unref();
}
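// Synchronous variant: blocks on each key in `out` and fails if any received
// tensor is dead.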
Status RecvOutputsFromRendezvous(RendezvousInterface* rendezvous,
NamedTensors* out,
const Rendezvous::Args& args) {
Rendezvous::ParsedKey parsed;
for (auto& p : *out) {
const string& key = p.first;
Tensor* val = &p.second;
bool is_dead = false;
TF_RETURN_IF_ERROR(Rendezvous::ParseKey(key, &parsed));
TF_RETURN_IF_ERROR(rendezvous->Recv(parsed, args, val, &is_dead));
if (is_dead) {
return errors::InvalidArgument("The tensor returned for ", key,
" was not valid.");
}
}
return absl::OkStatus();
}
} | #include "tensorflow/core/common_runtime/rendezvous_util.h"
#include "tensorflow/core/lib/core/notification.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
class RendezvousUtilTest : public ::testing::Test {
public:
RendezvousUtilTest() { rendez_ = NewLocalRendezvous(); }
~RendezvousUtilTest() override { rendez_->Unref(); }
Rendezvous* rendez_;
};
Tensor V(const string& content) {
Tensor tensor(DT_STRING, TensorShape({}));
tensor.scalar<tstring>()() = content;
return tensor;
}
string V(const Tensor& tensor) {
CHECK_EQ(tensor.dtype(), DT_STRING);
CHECK(TensorShapeUtils::IsScalar(tensor.shape()));
return tensor.scalar<tstring>()();
}
string MakeStringKey(const string& name) {
return Rendezvous::CreateKey(
"/job:localhost/replica:0/task:0/device:CPU:0", 0,
"/job:localhost/replica:0/task:0/device:GPU:0", name, FrameAndIter(0, 0));
}
TEST_F(RendezvousUtilTest, SendBeforeRecv) {
TF_ASSERT_OK(SendTensorsToRendezvous(
rendez_, nullptr, {}, {MakeStringKey("hello1"), MakeStringKey("hello2")},
{V("hello1"), V("hello2")}));
Notification n;
std::vector<Tensor> received_keys;
RecvOutputsFromRendezvousAsync(
rendez_, nullptr, {}, {MakeStringKey("hello1"), MakeStringKey("hello2")},
&received_keys, [&n](const Status& status) { n.Notify(); });
n.WaitForNotification();
EXPECT_EQ(2, received_keys.size());
EXPECT_EQ("hello1", V(received_keys[0]));
EXPECT_EQ("hello2", V(received_keys[1]));
}
TEST_F(RendezvousUtilTest, RecvBeforeSend) {
Notification n;
std::vector<Tensor> received_keys;
RecvOutputsFromRendezvousAsync(
rendez_, nullptr, {}, {MakeStringKey("hello1"), MakeStringKey("hello2")},
&received_keys, [&n](const Status& status) { n.Notify(); });
TF_ASSERT_OK(SendTensorsToRendezvous(
rendez_, nullptr, {}, {MakeStringKey("hello1"), MakeStringKey("hello2")},
{V("hello1"), V("hello2")}));
n.WaitForNotification();
EXPECT_EQ(2, received_keys.size());
EXPECT_EQ("hello1", V(received_keys[0]));
EXPECT_EQ("hello2", V(received_keys[1]));
}
TEST(RendezvousUtilCallerThreadTest, RecvBeforeSend) {
Rendezvous* rendez_ = NewLocalRendezvous();
Notification n;
std::vector<Tensor> received_keys;
RecvOutputsFromRendezvousAsync(
rendez_, nullptr, {}, {MakeStringKey("hello1"), MakeStringKey("hello2")},
&received_keys, [&n, rendez_](const Status& status) {
rendez_->Unref();
n.Notify();
});
TF_ASSERT_OK(SendTensorsToRendezvous(
rendez_, nullptr, {}, {MakeStringKey("hello1"), MakeStringKey("hello2")},
{V("hello1"), V("hello2")}));
n.WaitForNotification();
ASSERT_EQ(2, received_keys.size());
EXPECT_EQ("hello1", V(received_keys[0]));
EXPECT_EQ("hello2", V(received_keys[1]));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/rendezvous_util.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/rendezvous_util_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
27d392a8-cabd-4861-a727-cdf786189ae2 | cpp | tensorflow/tensorflow | graph_compiler | tensorflow/compiler/tf2xla/graph_compiler.cc | tensorflow/compiler/tf2xla/graph_compiler_test.cc | #include "tensorflow/compiler/tf2xla/graph_compiler.h"
#include <deque>
#include <numeric>
#include <utility>
#include <vector>
#include "tensorflow/compiler/tf2xla/const_analysis.h"
#include "tensorflow/compiler/tf2xla/literal_util.h"
#include "tensorflow/compiler/tf2xla/shape_util.h"
#include "tensorflow/compiler/tf2xla/side_effect_util.h"
#include "tensorflow/compiler/tf2xla/type_util.h"
#include "tensorflow/compiler/tf2xla/xla_compiler.h"
#include "tensorflow/compiler/tf2xla/xla_context.h"
#include "tensorflow/compiler/tf2xla/xla_expression.h"
#include "tensorflow/compiler/tf2xla/xla_op_kernel.h"
#include "xla/client/client_library.h"
#include "xla/hlo/builder/xla_builder.h"
#include "tensorflow/core/common_runtime/device.h"
#include "tensorflow/core/common_runtime/executor.h"
#include "tensorflow/core/common_runtime/function.h"
#include "tensorflow/core/common_runtime/graph_constructor.h"
#include "tensorflow/core/common_runtime/graph_optimizer.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/attr_value_util.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/graph/algorithm.h"
#include "tensorflow/core/graph/node_builder.h"
#include "tensorflow/core/graph/validate.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/gtl/cleanup.h"
#include "tensorflow/core/lib/hash/hash.h"
#include "tensorflow/core/lib/monitoring/counter.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/public/version.h"
#include "tensorflow/core/util/dump_graph.h"
namespace tensorflow {
auto* graph_compiler_failed_compilation_op_count =
tensorflow::monitoring::Counter<1>::New(
"/tensorflow/core/tf2xla/graph_compilation_failed_op_count",
"Records an op that failed to compile",
"op_name");
namespace {
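// Builds the XlaCompiler::Argument list for a function call: constants are
// captured by value, XLA ops that must be compile-time constants are resolved
// through the client, and resources/tensor lists carry their handle metadata.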
Status PrepareArguments(XlaOpKernelContext* ctx, Graph* graph,
const std::vector<const XlaExpression*>& expressions,
const NameAttrList& func,
std::vector<XlaCompiler::Argument>* args) {
auto client = ctx->compiler()->client();
std::vector<bool> arg_must_be_compile_time_constant(expressions.size());
TF_RETURN_IF_ERROR(BackwardsConstAnalysis(
*graph, &arg_must_be_compile_time_constant,
nullptr, ctx->function_library()));
args->resize(expressions.size());
for (int i = 0, end = args->size(); i < end; ++i) {
XlaCompiler::Argument& arg = (*args)[i];
arg.type = ctx->input_type(i);
arg.shape = ctx->InputShape(i);
switch (expressions[i]->kind()) {
case XlaExpression::Kind::kConstant:
arg.kind = XlaCompiler::Argument::kConstant;
arg.constant_value = *expressions[i]->constant_value();
break;
case XlaExpression::Kind::kXlaOp:
if (arg_must_be_compile_time_constant[i]) {
TF_ASSIGN_OR_RETURN(std::optional<Tensor> value,
expressions[i]->ResolveConstant(client));
if (value.has_value()) {
arg.kind = XlaCompiler::Argument::kConstant;
arg.constant_value = *value;
} else {
arg.kind = XlaCompiler::Argument::kParameter;
}
} else {
arg.kind = XlaCompiler::Argument::kParameter;
}
break;
case XlaExpression::Kind::kResource: {
XlaResource* resource = expressions[i]->resource();
XlaCompiler::PopulateArgumentFromResource(*resource, &arg);
break;
}
case XlaExpression::Kind::kTensorList: {
arg.kind = XlaCompiler::Argument::kTensorList;
const xla::XlaOp& tensor_list = expressions[i]->handle();
arg.shape = tensor_list.builder()->GetShape(tensor_list).value();
break;
}
case XlaExpression::Kind::kInvalid:
return errors::InvalidArgument("Invalid function argument");
}
}
return absl::OkStatus();
}
}
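// Symbolically executes every node in the graph, emitting XLA ops; function
// call nodes are compiled recursively via CompileFunctionalNode.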
Status GraphCompiler::Compile() {
TF_RETURN_IF_ERROR(graph::ValidateGraphHasNoCycle(*graph_));
using NodeOutputs = std::vector<TensorValue>;
std::vector<NodeOutputs> output_registry(graph_->num_node_ids());
auto output_registry_cleanup = gtl::MakeCleanup([&output_registry] {
for (const NodeOutputs& outputs : output_registry) {
for (const TensorValue& value : outputs) {
CHECK(!value.is_ref());
delete value.tensor;
}
}
});
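  // Visit nodes in reverse post order so each node's inputs have been
  // translated before the node itself.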
std::vector<Node*> topo_sorted_nodes;
GetReversePostOrder(*graph_, &topo_sorted_nodes,
NodeComparatorName());
OpKernelContext::Params params;
PartiallySetupParams(¶ms);
for (Node* n : topo_sorted_nodes) {
OpKernel* op_kernel_raw = nullptr;
Status s = flib_->CreateKernel(n->properties(), &op_kernel_raw);
std::unique_ptr<OpKernel> op_kernel(op_kernel_raw);
if (!s.ok()) {
s = AttachDef(s, *n);
LOG(ERROR) << "Executor failed to create kernel. " << s;
return s;
}
TF_RET_CHECK(!n->IsRecv() && !n->IsSend() && !n->IsSwitch())
<< "Not supported node: " << n->DebugString();
params.op_kernel = op_kernel.get();
absl::InlinedVector<AllocatorAttributes, 4> output_attr(n->num_outputs());
params.output_attr_array = output_attr.data();
tensor_inputs_.clear();
tensor_inputs_.resize(n->num_inputs());
for (auto* e : n->in_edges()) {
if (e->IsControlEdge()) continue;
const Node* src = e->src();
const int output_registry_size = output_registry.size();
TF_RET_CHECK(src->id() < output_registry_size);
const NodeOutputs& src_outputs = output_registry[src->id()];
tensor_inputs_.at(e->dst_input()) = src_outputs.at(e->src_output());
}
params.inputs = tensor_inputs_;
OpKernelContext op_context(¶ms, n->num_outputs());
VLOG(3) << "Translating " << params.op_kernel->name();
if (IsFunctionCall(*flib_->GetFunctionLibraryDefinition(), *n)) {
TF_RETURN_IF_ERROR(CompileFunctionalNode(n, &op_context));
} else {
device_->Compute(CHECK_NOTNULL(params.op_kernel), &op_context);
Status s = op_context.status();
if (!s.ok()) {
graph_compiler_failed_compilation_op_count
->GetCell(params.op_kernel->def().op())
->IncrementBy(1);
return AttachDef(s, n->def());
}
}
NodeOutputs& outputs = output_registry[n->id()];
outputs.resize(n->num_outputs());
for (int o = 0; o < n->num_outputs(); ++o) {
outputs[o] = op_context.release_output(o);
if (outputs[o].tensor == nullptr) {
return errors::Internal("Missing xla_context ", o, "-th output from ",
FormatNodeForError(*n));
}
}
}
return absl::OkStatus();
}
namespace {
Status GetFunctionNameAndAttr(const FunctionLibraryRuntime& flib,
const Node& node, NameAttrList* func) {
if (node.IsPartitionedCall()) {
const AttrValue* attr_value;
TF_RETURN_IF_ERROR(
node.attrs().Find(FunctionLibraryDefinition::kFuncAttr, &attr_value));
if (!attr_value->has_func()) {
return errors::InvalidArgument(
"The attribute value for attribute 'f' in node ", node.DebugString(),
" does not have 'func' field set");
}
*func = attr_value->func();
return absl::OkStatus();
}
if (flib.GetFunctionLibraryDefinition()->Find(node.def().op())) {
func->set_name(node.type_string());
} else {
func->set_name(FunctionLibraryDefinition::kGradientOp);
}
*func->mutable_attr() = node.def().attr();
return absl::OkStatus();
}
}
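// Compiles a function-call node into a standalone XLA computation and splices
// it into the enclosing builder with xla::Call.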
Status GraphCompiler::CompileFunctionalNode(Node* n,
OpKernelContext* op_context) {
TF_RET_CHECK(IsFunctionCall(*flib_->GetFunctionLibraryDefinition(), *n));
XlaOpKernelContext xla_op_context(op_context);
XlaContext& context = XlaContext::Get(op_context);
auto* b = context.builder();
XlaCompiler* compiler = xla_op_context.compiler();
NameAttrList func;
TF_RETURN_IF_ERROR(GetFunctionNameAndAttr(*flib_, *n, &func));
std::vector<const XlaExpression*> expressions;
for (auto tensor : tensor_inputs_) {
auto expression =
reinterpret_cast<const XlaExpression*>(tensor->tensor_data().data());
expressions.push_back(expression);
}
std::vector<XlaCompiler::Argument> arguments;
const FunctionBody* fbody;
TF_RETURN_IF_ERROR(compiler->FindFunctionBody(func, &fbody));
auto graph = compiler->GetGraph(fbody);
TF_RETURN_IF_ERROR(PrepareArguments(&xla_op_context, graph.get(), expressions,
func, &arguments));
bool add_token_input_output =
func.attr().find(kXlaTokenInputNodesAttrName) != func.attr().end();
XlaCompiler::CompileOptions compile_options;
compile_options.is_entry_computation = false;
compile_options.add_token_input_output = add_token_input_output;
XlaCompiler::CompilationResult result;
TF_RETURN_IF_ERROR(
compiler->CompileFunction(compile_options, func, arguments, &result));
TF_RET_CHECK(arguments.size() == expressions.size());
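  // Collect the runtime operands: constants were folded into the compiled
  // body, resources pass their current value, everything else its handle.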
std::vector<xla::XlaOp> handles;
for (int64_t i = 0, end = expressions.size(); i < end; ++i) {
if (arguments[i].kind == XlaCompiler::Argument::kConstant) {
continue;
}
if (arguments[i].kind == XlaCompiler::Argument::kResource) {
handles.push_back(expressions[i]->resource()->value());
} else {
handles.push_back(expressions[i]->handle());
}
}
if (add_token_input_output) {
std::vector<string> token_input_nodes;
TF_RETURN_IF_ERROR(GetNodeAttr(AttrSlice(&func.attr()),
kXlaTokenInputNodesAttrName,
&token_input_nodes));
std::vector<xla::XlaOp> token_inputs;
for (const string& node_name : token_input_nodes) {
auto token_or = compiler->GetNodeToken(node_name);
TF_RETURN_IF_ERROR(token_or.status());
token_inputs.push_back(std::move(token_or).value());
}
xla::XlaOp token_input = xla::AfterAll(b, token_inputs);
handles.push_back(token_input);
}
auto output_handle = xla::Call(b, *result.computation, handles);
int computation_output = 0;
for (int64_t i = 0; i < n->num_outputs(); ++i) {
if (result.outputs[i].is_constant) {
xla_op_context.SetConstantOutput(i, result.outputs[i].constant_value);
} else {
if (result.outputs[i].is_tensor_list) {
xla_op_context.SetTensorListOutput(
i, xla::GetTupleElement(output_handle, computation_output));
} else {
xla_op_context.SetOutput(
i, xla::GetTupleElement(output_handle, computation_output));
}
++computation_output;
}
}
for (int64_t i = 0, end = result.resource_updates.size(); i < end; i++) {
if (result.resource_updates[i].modified) {
XlaResource* resource =
expressions[result.resource_updates[i].input_index]->resource();
xla::XlaOp updated_value =
xla::GetTupleElement(output_handle, i + n->num_outputs());
TF_RETURN_IF_ERROR(resource->SetValue(updated_value));
}
}
if (add_token_input_output) {
std::string node_name;
if (!GetNodeAttr(n->attrs(), kXlaOriginalOutsideCompilationNodeName,
&node_name)
.ok())
node_name = n->name();
TF_RETURN_IF_ERROR(compiler->SetNodeToken(
node_name, xla::GetTupleElement(output_handle, computation_output)));
}
return b->first_error();
}
void GraphCompiler::PartiallySetupParams(OpKernelContext::Params* params) {
params->device = device_;
params->step_container = step_container_;
params->resource_manager = device_->resource_manager();
params->function_library = flib_;
}
} | #include "tensorflow/compiler/tf2xla/graph_compiler.h"
#include <memory>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/compiler/tf2xla/graph_compiler_util.h"
#include "tensorflow/compiler/tf2xla/tf2xla.pb.h"
#include "tensorflow/compiler/tf2xla/xla_compilation_device.h"
#include "tensorflow/compiler/tf2xla/xla_compiler.h"
#include "tensorflow/compiler/tf2xla/xla_op_kernel.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
#include "tensorflow/core/common_runtime/device_mgr.h"
#include "tensorflow/core/common_runtime/process_function_library_runtime.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/attr_value_util.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/graph/node_builder.h"
#include "tensorflow/core/lib/monitoring/cell_reader.h"
#include "tensorflow/core/platform/refcount.h"
#include "tensorflow/core/public/session_options.h"
#include "tensorflow/core/public/version.h"
namespace tensorflow {
namespace {
using ::tensorflow::monitoring::testing::CellReader;
constexpr char kOpCompilationFailureStreamz[] =
"/tensorflow/core/tf2xla/graph_compilation_failed_op_count";
class DummyOp : public XlaOpKernel {
public:
explicit DummyOp(OpKernelConstruction* ctx) : XlaOpKernel(ctx) {}
void Compile(XlaOpKernelContext* ctx) override {}
};
REGISTER_KERNEL_BUILDER(Name("NoOp").Device(DEVICE_DEFAULT), DummyOp);
REGISTER_KERNEL_BUILDER(Name("NoOp").Device("XLA_TPU_JIT"), DummyOp);
REGISTER_KERNEL_BUILDER(Name("NoOp").Device("XLA_CPU_JIT"), DummyOp);
class MockAlwaysFailsOp : public XlaOpKernel {
public:
explicit MockAlwaysFailsOp(OpKernelConstruction* ctx) : XlaOpKernel(ctx) {}
void Compile(XlaOpKernelContext* ctx) override {
ctx->CtxFailure(__FILE__, __LINE__, errors::InvalidArgument("MockBroken"));
}
};
REGISTER_OP("MockAlwaysFails")
.SetShapeFn(shape_inference::UnknownShape)
.Doc(R"doc(
A test-only Op that always fails to compile.
)doc");
REGISTER_KERNEL_BUILDER(Name("MockAlwaysFails").Device(DEVICE_DEFAULT),
MockAlwaysFailsOp);
REGISTER_KERNEL_BUILDER(Name("MockAlwaysFails").Device("XLA_CPU_JIT"),
MockAlwaysFailsOp);
REGISTER_KERNEL_BUILDER(Name("MockAlwaysFails").Device("XLA_TPU_JIT"),
MockAlwaysFailsOp);
REGISTER_XLA_OP(Name("MockAlwaysFails").CompilationOnly(), MockAlwaysFailsOp);
class GraphCompilerTest : public ::testing::Test {
public:
void SetUp() override {
device_ = new tensorflow::XlaCompilationDevice(
tensorflow::SessionOptions(), tensorflow::DeviceType("XLA_TPU_JIT"));
device_mgr_ = std::make_unique<StaticDeviceMgr>(absl::WrapUnique(device_));
}
Status RunGraphCompiler(Graph& graph) {
ProcessFunctionLibraryRuntime runtime(
device_mgr_.get(), Env::Default(), nullptr, TF_GRAPH_DEF_VERSION,
&graph.flib_def(), OptimizerOptions());
xla::XlaBuilder builder("test_builder");
XlaCompiler::Options options;
options.device_type = "XLA_TPU_JIT";
XlaCompiler xla_compiler(options);
XlaContext* xla_context = new XlaContext(&xla_compiler, &builder, &graph);
core::ScopedUnref context_unref(xla_context);
xla_context->Ref();
auto step_container =
std::make_unique<ScopedStepContainer>(0, [this](const string& name) {
Status status = this->device_->resource_manager()->Cleanup(name);
});
auto container_status = step_container->Create(
device_->resource_manager(), XlaContext::kXlaContextResourceName,
xla_context);
GraphCompiler graph_compiler(
device_, &graph, runtime.GetFLR(device_->name()), step_container.get());
return graph_compiler.Compile();
}
protected:
XlaCompilationDevice* device_;
std::unique_ptr<StaticDeviceMgr> device_mgr_;
};
TEST_F(GraphCompilerTest, CompilesGraph) {
Graph graph(OpRegistry::Global());
EXPECT_TRUE(RunGraphCompiler(graph).ok());
}
TEST_F(GraphCompilerTest, RecordsStreamzFailedCompilationNode) {
Graph graph(OpRegistry::Global());
Node* mock_fail;
ASSERT_TRUE(NodeBuilder("mock_fail", "MockAlwaysFails")
.Finalize(&graph, &mock_fail)
.ok());
graph.AddControlEdge(graph.source_node(), mock_fail);
graph.AddControlEdge(mock_fail, graph.sink_node());
CellReader<int64_t> op_reader(kOpCompilationFailureStreamz);
EXPECT_FALSE(RunGraphCompiler(graph).ok());
EXPECT_EQ(op_reader.Delta("MockAlwaysFails"), 1);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2xla/graph_compiler.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2xla/graph_compiler_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
9ebde96a-9c38-4b91-acb1-612813f99103 | cpp | tensorflow/tensorflow | ragged_gather_op | tensorflow/core/kernels/ragged_gather_op.cc | tensorflow/core/kernels/ragged_gather_op_test.cc | #include <limits>
#include <memory>
#include <string>
#include <vector>
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/register_types.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/util/util.h"
namespace tensorflow {
namespace {
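// Copies the value rows selected by `value_slices` out of the params values
// tensor into `values_out`, treating each row as `value_size` inner elements.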
template <typename VALUE_TYPE, typename SPLITS_TYPE>
void WriteValueSlices(
const Tensor& params_dense_values_in,
const std::vector<std::pair<SPLITS_TYPE, SPLITS_TYPE>>& value_slices,
SPLITS_TYPE value_size, Tensor* values_out) {
const auto& params_dense_values =
params_dense_values_in.flat_outer_dims<VALUE_TYPE, 2>();
auto values = values_out->flat_outer_dims<VALUE_TYPE, 2>();
int out_pos = 0;
for (const auto& slice : value_slices) {
for (int i = slice.first; i < slice.second; ++i) {
for (int j = 0; j < value_size; ++j) {
values(out_pos, j) = params_dense_values(i, j);
}
++out_pos;
}
}
}
}
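// Base kernel for RaggedGather: validates the indices and nested splits,
// computes the output nested splits, and delegates value copying to a typed
// subclass. E.g. with splits [0, 3, 3, 7, 9], values [v0..v8], and indices
// [2, 1], the output is splits [0, 4, 4] and values [v3, v4, v5, v6].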
template <typename INDEX_TYPE, typename SPLITS_TYPE>
class RaggedGatherOpBase : public OpKernel {
public:
using OpKernel::OpKernel;
void Compute(OpKernelContext* context) override {
OpInputList params_nested_splits_in;
OP_REQUIRES_OK(context, context->input_list("params_nested_splits",
¶ms_nested_splits_in));
OP_REQUIRES(
context, params_nested_splits_in.size() > 0,
errors::InvalidArgument("params_nested_splits must be non empty"));
const Tensor& params_dense_values_in =
context->input(params_nested_splits_in.size());
const Tensor& indices_in =
context->input(params_nested_splits_in.size() + 1);
OP_REQUIRES(context, params_nested_splits_in[0].dims() > 0,
errors::InvalidArgument("Split tensors must not be scalars"));
SPLITS_TYPE num_params = params_nested_splits_in[0].dim_size(0) - 1;
OP_REQUIRES_OK(context, ValidateIndices(indices_in, num_params));
OP_REQUIRES(context, params_dense_values_in.dims() > 0,
errors::InvalidArgument("params.rank must be nonzero"));
SPLITS_TYPE num_params_dense_values = params_dense_values_in.dim_size(0);
std::vector<std::pair<SPLITS_TYPE, SPLITS_TYPE>> value_slices;
SPLITS_TYPE num_values = 0;
std::vector<std::vector<SPLITS_TYPE>> out_splits;
OP_REQUIRES_OK(context, MakeSplits(indices_in, params_nested_splits_in,
num_params_dense_values, &out_splits,
&value_slices, &num_values));
OP_REQUIRES_OK(context, WriteSplits(out_splits, context));
OP_REQUIRES_OK(context,
WriteValues(params_dense_values_in, value_slices,
out_splits.size(), num_values, context));
}
private:
using ConstFlatType = typename TTypes<SPLITS_TYPE>::ConstFlat;
::tensorflow::Status ValidateIndices(const Tensor& indices_in,
SPLITS_TYPE num_params) {
const auto& indices = indices_in.flat<INDEX_TYPE>();
for (SPLITS_TYPE i = 0; i < indices.size(); ++i) {
SPLITS_TYPE index = indices(i);
if (index < 0 || index >= num_params) {
return errors::InvalidArgument(
"indices", SliceDebugString(indices_in.shape(), i), " = ", index,
" is not in [0, ", num_params, ")");
}
}
return absl::OkStatus();
}
::tensorflow::Status MakeSplits(
const Tensor& indices_in, const OpInputList& params_nested_splits_in,
SPLITS_TYPE num_params_dense_values,
std::vector<std::vector<SPLITS_TYPE>>* out_splits,
std::vector<std::pair<SPLITS_TYPE, SPLITS_TYPE>>* value_slices,
SPLITS_TYPE* num_values) {
*num_values = 0;
value_slices->clear();
int num_splits = indices_in.dims() - 1 + params_nested_splits_in.size();
out_splits->assign(num_splits, {0});
const auto& indices = indices_in.flat<INDEX_TYPE>();
std::vector<ConstFlatType> params_nested_splits;
params_nested_splits.reserve(params_nested_splits_in.size());
for (const auto& splits_in : params_nested_splits_in) {
params_nested_splits.push_back(splits_in.flat<SPLITS_TYPE>());
}
TF_RETURN_IF_ERROR(
ValidateSplits(params_nested_splits, num_params_dense_values));
int nrows = 1;
for (int dim = 0; dim < indices_in.dims() - 1; ++dim) {
nrows *= indices_in.dim_size(dim);
int row_length = indices_in.dim_size(dim + 1);
for (int i = 1; i < nrows + 1; ++i) {
out_splits->at(dim).push_back(i * row_length);
}
}
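    // For each gathered index, walk the nested splits, appending
    // offset-adjusted splits for every output dimension and recording the
    // [start, limit) slice of values to copy.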
for (int i = 0; i < indices.size(); ++i) {
int start = indices(i);
int limit = indices(i) + 1;
for (int dim = 0; dim < params_nested_splits.size(); ++dim) {
const auto& splits = params_nested_splits[dim];
int out_dim = dim + indices_in.dims() - 1;
if (out_dim >= 0) {
SPLITS_TYPE delta = out_splits->at(out_dim).back() - splits(start);
for (int j = start; j < limit; ++j) {
out_splits->at(out_dim).push_back(splits(j + 1) + delta);
}
}
start = splits(start);
limit = splits(limit);
}
if (limit != start) {
value_slices->emplace_back(start, limit);
*num_values += limit - start;
}
}
return absl::OkStatus();
}
::tensorflow::Status ValidateSplits(
const std::vector<ConstFlatType>& params_nested_splits,
SPLITS_TYPE num_params_dense_values) {
for (int dim = 0; dim < params_nested_splits.size(); ++dim) {
const auto& splits = params_nested_splits[dim];
SPLITS_TYPE last_split = (dim == params_nested_splits.size() - 1)
? num_params_dense_values
: params_nested_splits[dim + 1].size();
if (splits.size() == 0) {
return errors::InvalidArgument("Ragged splits may not be empty");
}
if (splits(0) < 0) {
return errors::InvalidArgument("Ragged splits must be non-negative");
}
if (splits(splits.size() - 1) > last_split) {
return errors::InvalidArgument(
"Ragged splits must not point past values");
}
for (int i = 1; i < splits.size(); ++i) {
if (splits(i - 1) > splits(i)) {
return errors::InvalidArgument("Ragged splits must be sorted");
}
}
}
return absl::OkStatus();
}
::tensorflow::Status WriteSplits(
const std::vector<std::vector<SPLITS_TYPE>>& out_splits,
OpKernelContext* context) {
OpOutputList splits_out;
TF_RETURN_IF_ERROR(
context->output_list("output_nested_splits", &splits_out));
for (int i = 0; i < out_splits.size(); ++i) {
Tensor* splits;
SPLITS_TYPE num_splits = out_splits[i].size();
TF_RETURN_IF_ERROR(
splits_out.allocate(i, TensorShape({num_splits}), &splits));
auto splits_flat = splits->flat<SPLITS_TYPE>();
std::copy_n(out_splits[i].data(), out_splits[i].size(),
splits_flat.data());
}
return absl::OkStatus();
}
::tensorflow::Status WriteValues(
const Tensor& params_dense_values_in,
const std::vector<std::pair<SPLITS_TYPE, SPLITS_TYPE>>& value_slices,
int values_index, SPLITS_TYPE num_values,
OpKernelContext* context) const {
Tensor* values_out = nullptr;
TensorShape values_shape = params_dense_values_in.shape();
values_shape.set_dim(0, num_values);
TF_RETURN_IF_ERROR(
context->allocate_output(values_index, values_shape, &values_out));
const SPLITS_TYPE num_elements = params_dense_values_in.NumElements();
const SPLITS_TYPE value_size =
num_elements == 0 ? 0
: (num_elements / params_dense_values_in.dim_size(0));
CallWriteValueSlices(params_dense_values_in, value_slices, value_size,
values_out);
return absl::OkStatus();
}
protected:
virtual void CallWriteValueSlices(
const Tensor& params_dense_values_in,
const std::vector<std::pair<SPLITS_TYPE, SPLITS_TYPE>>& value_slices,
SPLITS_TYPE value_size, Tensor* values_out) const = 0;
};
template <typename INDEX_TYPE, typename VALUE_TYPE, typename SPLITS_TYPE>
class RaggedGatherOp : public RaggedGatherOpBase<INDEX_TYPE, SPLITS_TYPE> {
public:
using RaggedGatherOpBase<INDEX_TYPE, SPLITS_TYPE>::RaggedGatherOpBase;
private:
void CallWriteValueSlices(
const Tensor& params_dense_values_in,
const std::vector<std::pair<SPLITS_TYPE, SPLITS_TYPE>>& value_slices,
SPLITS_TYPE value_size, Tensor* values_out) const override {
WriteValueSlices<VALUE_TYPE>(params_dense_values_in, value_slices,
value_size, values_out);
}
};
#define REGISTER_CPU_KERNEL_WITH_INDEX_TYPE(index_type, value_type, \
splits_type) \
REGISTER_KERNEL_BUILDER( \
Name("RaggedGather") \
.Device(DEVICE_CPU) \
.TypeConstraint<index_type>("Tindices") \
.TypeConstraint<value_type>("Tvalues") \
.TypeConstraint<splits_type>("Tsplits"), \
RaggedGatherOp<index_type, value_type, splits_type>);
#define REGISTER_CPU_KERNEL(value_type) \
REGISTER_CPU_KERNEL_WITH_INDEX_TYPE(int32, value_type, int32) \
REGISTER_CPU_KERNEL_WITH_INDEX_TYPE(int64_t, value_type, int32) \
REGISTER_CPU_KERNEL_WITH_INDEX_TYPE(int32, value_type, int64_t) \
REGISTER_CPU_KERNEL_WITH_INDEX_TYPE(int64_t, value_type, int64_t)
TF_CALL_POD_TYPES(REGISTER_CPU_KERNEL);
TF_CALL_tstring(REGISTER_CPU_KERNEL);
TF_CALL_QUANTIZED_TYPES(REGISTER_CPU_KERNEL);
TF_CALL_quint16(REGISTER_CPU_KERNEL);
TF_CALL_qint16(REGISTER_CPU_KERNEL);
#undef REGISTER_CPU_KERNEL
#undef REGISTER_CPU_KERNEL_WITH_INDEX_TYPE
} | #include "tensorflow/core/framework/fake_input.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/shape_inference.h"
#include "tensorflow/core/framework/shape_inference_testutil.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/kernels/ops_testutil.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
class RaggedGatherOpTest : public ::tensorflow::OpsTestBase {
protected:
template <typename VALUE_TYPE, typename INDEX_TYPE>
void BuildRaggedGatherGraph(
const TensorShape& indices_shape, const std::vector<INDEX_TYPE>& indices,
const std::vector<std::vector<int64_t>>& params_nested_splits,
const TensorShape& params_dense_values_shape,
const gtl::ArraySlice<VALUE_TYPE> params_dense_values) {
const auto& value_dtype = DataTypeToEnum<VALUE_TYPE>::v();
const auto& index_dtype = DataTypeToEnum<INDEX_TYPE>::v();
int64_t PARAMS_RAGGED_RANK = params_nested_splits.size();
int64_t num_splits = PARAMS_RAGGED_RANK + indices_shape.dims() - 1;
TF_ASSERT_OK(
NodeDefBuilder("tested_op", "RaggedGather")
.Input(FakeInput(PARAMS_RAGGED_RANK))
.Input(FakeInput(value_dtype))
.Input(FakeInput(index_dtype))
.Attr("PARAMS_RAGGED_RANK", PARAMS_RAGGED_RANK)
.Attr("OUTPUT_RAGGED_RANK", num_splits)
.Attr("Tvalues", value_dtype)
.Attr("Tindices", index_dtype)
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
for (const auto& splits : params_nested_splits) {
int64_t splits_size = splits.size();
AddInputFromArray<int64_t>(TensorShape({splits_size}), splits);
}
AddInputFromArray<VALUE_TYPE>(params_dense_values_shape,
params_dense_values);
AddInputFromArray<INDEX_TYPE>(indices_shape, indices);
}
};
TEST_F(RaggedGatherOpTest, RaggedGather) {
BuildRaggedGatherGraph<float, int32>(
TensorShape({4}),
{2, 1, 0, 3},
{{0, 3, 3, 7, 9}},
TensorShape({9}),
{.1, .2, .3, .4, .5, .6, .7, .8, .9}
);
TF_ASSERT_OK(RunOpKernel());
test::ExpectTensorEqual<int64_t>(*GetOutput(0),
test::AsTensor<int64_t>({0, 4, 4, 7, 9}));
test::ExpectTensorNear<float>(
*GetOutput(1),
test::AsTensor<float>({.4, .5, .6, .7, .1, .2, .3, .8, .9}), 0.1);
}
TEST_F(RaggedGatherOpTest, RaggedGather_3DParams) {
BuildRaggedGatherGraph<float, int32>(
TensorShape({5}),
{2, 1, 0, 2, 3},
{{0, 1, 3, 3, 5, 6}, {0, 0, 2, 3, 5, 8, 9}},
TensorShape({9}),
{.1, .2, .3, .4, .5, .6, .7, .8, .9}
);
TF_ASSERT_OK(RunOpKernel());
test::ExpectTensorEqual<int64_t>(*GetOutput(0),
test::AsTensor<int64_t>({0, 0, 2, 3, 3, 5}));
test::ExpectTensorEqual<int64_t>(*GetOutput(1),
test::AsTensor<int64_t>({0, 2, 3, 3, 5, 8}));
test::ExpectTensorNear<float>(
*GetOutput(2), test::AsTensor<float>({.1, .2, .3, .4, .5, .6, .7, .8}),
0.1);
}
TEST_F(RaggedGatherOpTest, RaggedGather_4DParams) {
BuildRaggedGatherGraph<int32, int32>(
TensorShape({4}),
{2, 1, 0, 2},
{{0, 1, 3, 3}, {0, 0, 3, 4}},
TensorShape({4, 2}),
{1, 2, 3, 4, 5, 6, 7, 8}
);
TF_ASSERT_OK(RunOpKernel());
test::ExpectTensorEqual<int64_t>(*GetOutput(0),
test::AsTensor<int64_t>({0, 0, 2, 3, 3}));
test::ExpectTensorEqual<int64_t>(*GetOutput(1),
test::AsTensor<int64_t>({0, 3, 4, 4}));
test::ExpectTensorEqual<int32>(
*GetOutput(2),
test::AsTensor<int32>({1, 2, 3, 4, 5, 6, 7, 8}, TensorShape({4, 2})));
}
TEST_F(RaggedGatherOpTest, RaggedGather_2DIndices) {
BuildRaggedGatherGraph<float, int32>(
TensorShape({2, 2}),
{2, 1, 0, 3},
{{0, 3, 3, 7, 9}},
TensorShape({9}),
{.1, .2, .3, .4, .5, .6, .7, .8, .9}
);
TF_ASSERT_OK(RunOpKernel());
test::ExpectTensorEqual<int64_t>(*GetOutput(0),
test::AsTensor<int64_t>({0, 2, 4}));
test::ExpectTensorEqual<int64_t>(*GetOutput(1),
test::AsTensor<int64_t>({0, 4, 4, 7, 9}));
test::ExpectTensorNear<float>(
*GetOutput(2),
test::AsTensor<float>({.4, .5, .6, .7, .1, .2, .3, .8, .9}), 0.1);
}
TEST_F(RaggedGatherOpTest, RaggedGather_ScalarIndices) {
BuildRaggedGatherGraph<float, int32>(
TensorShape({}),
{2},
{{0, 3, 3, 7, 9}},
TensorShape({9}),
{.1, .2, .3, .4, .5, .6, .7, .8, .9}
);
TF_ASSERT_OK(RunOpKernel());
test::ExpectTensorNear<float>(*GetOutput(0),
test::AsTensor<float>({.4, .5, .6, .7}), 0.1);
}
TEST_F(RaggedGatherOpTest, RaggedGather_OutOfBounds) {
BuildRaggedGatherGraph<float, int32>(
TensorShape({2}),
{2, 10},
{{0, 3, 3, 7, 9}},
TensorShape({9}),
{.1, .2, .3, .4, .5, .6, .7, .8, .9}
);
EXPECT_EQ("indices[1] = 10 is not in [0, 4)", RunOpKernel().message());
}
TEST_F(RaggedGatherOpTest, InvalidSplitsNotSorted) {
BuildRaggedGatherGraph<float, int32>(
TensorShape({2}),
{0, 2},
{{0, 3, 5, 2, 9}},
TensorShape({9}),
{.1, .2, .3, .4, .5, .6, .7, .8, .9}
);
EXPECT_EQ("Ragged splits must be sorted", RunOpKernel().message());
}
TEST_F(RaggedGatherOpTest, InvalidSplitsNegative) {
BuildRaggedGatherGraph<float, int32>(
TensorShape({2}),
{0, 2},
{{-1, 3, 2, 7, 9}},
TensorShape({9}),
{.1, .2, .3, .4, .5, .6, .7, .8, .9}
);
EXPECT_EQ("Ragged splits must be non-negative", RunOpKernel().message());
}
TEST_F(RaggedGatherOpTest, InvalidSplitsEmpty) {
BuildRaggedGatherGraph<float, int32>(
TensorShape({0}),
{},
{{}},
TensorShape({0}),
{}
);
EXPECT_EQ("Ragged splits may not be empty", RunOpKernel().message());
}
TEST_F(RaggedGatherOpTest, InvalidSplitsTooBig) {
BuildRaggedGatherGraph<float, int32>(
TensorShape({2}),
{0, 2},
{{0, 20, 40, 80, 100}},
TensorShape({9}),
{.1, .2, .3, .4, .5, .6, .7, .8, .9}
);
EXPECT_EQ("Ragged splits must not point past values",
RunOpKernel().message());
}
TEST_F(RaggedGatherOpTest, BadValuesShape) {
BuildRaggedGatherGraph<float, int32>(
TensorShape({0}),
{},
{{0}},
TensorShape({}),
{.1}
);
EXPECT_EQ("params.rank must be nonzero", RunOpKernel().message());
}
TEST_F(RaggedGatherOpTest, ShapeFn) {
ShapeInferenceTestOp op("RaggedGather");
(*op.node_def.mutable_attr())["PARAMS_RAGGED_RANK"].set_i(1);
(*op.node_def.mutable_attr())["OUTPUT_RAGGED_RANK"].set_i(1);
INFER_OK(op, "?;?;?", "[?];?");
INFER_OK(op, "[?];[?];[?]", "[?];[?]");
INFER_OK(op, "[?];[?,?,?];[?]", "[?];[?,d1_1,d1_2]");
INFER_OK(op, "[5];[10];[15]", "[?];[?]");
INFER_OK(op, "[5];[10,2];[15]", "[?];[?,d1_1]");
INFER_ERROR("Shape must be rank 1 but is rank 0", op, "[5];[];[]");
INFER_ERROR("Shape must be rank 1 but is rank 2", op, "[1,2];[];[5]");
(*op.node_def.mutable_attr())["PARAMS_RAGGED_RANK"].set_i(2);
(*op.node_def.mutable_attr())["OUTPUT_RAGGED_RANK"].set_i(2);
INFER_OK(op, "?;?;?;?", "[?];[?];?");
INFER_OK(op, "[?];[?];[?];[?]", "[?];[?];[?]");
INFER_OK(op, "[?];[?];[?,?,?];[?]", "[?];[?];[?,d2_1,d2_2]");
INFER_OK(op, "[5];[10];[15];[20]", "[?];[?];[?]");
(*op.node_def.mutable_attr())["PARAMS_RAGGED_RANK"].set_i(1);
(*op.node_def.mutable_attr())["OUTPUT_RAGGED_RANK"].set_i(2);
INFER_OK(op, "?;?;?", "[?];[?];?");
INFER_OK(op, "[?];[?];[?,?]", "[?];[?];[?]");
INFER_OK(op, "[?];[?,?,?];[?,?]", "[?];[?];[?,d1_1,d1_2]");
INFER_OK(op, "[15];[20];[5,10]", "[?];[?];[?]");
INFER_OK(op, "[15];[20,2];[5,10]", "[?];[?];[?,d1_1]");
(*op.node_def.mutable_attr())["PARAMS_RAGGED_RANK"].set_i(1);
(*op.node_def.mutable_attr())["OUTPUT_RAGGED_RANK"].set_i(0);
INFER_OK(op, "[?];[?];[]", "[?]");
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/ragged_gather_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/ragged_gather_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
97f5f733-75dc-4de4-9034-7cc1e1a54823 | cpp | tensorflow/tensorflow | matmul_op | tensorflow/compiler/tf2xla/kernels/matmul_op.cc | tensorflow/core/kernels/matmul_op_test.cc | #include <array>
#include <optional>
#include "tensorflow/compiler/tf2xla/xla_op_kernel.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
#include "xla/hlo/builder/lib/matrix.h"
#include "xla/hlo/builder/xla_builder.h"
#include "xla/xla_data.pb.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/op_requires.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/platform/errors.h"
#include "tsl/platform/tensor_float_32_utils.h"
namespace tensorflow {
namespace {
constexpr std::array<DataType, 10> kMatmulTypes = {
{DT_HALF, DT_BFLOAT16, DT_FLOAT, DT_DOUBLE, DT_COMPLEX64, DT_COMPLEX128,
DT_INT32, DT_INT64, DT_INT16, DT_INT8}};
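// Lowers MatMul (and SparseMatMul, handled densely) to xla::BatchDot,
// honoring the transpose attributes.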
class MatMulOp : public XlaOpKernel {
public:
explicit MatMulOp(OpKernelConstruction* ctx, bool is_sparse = false)
: XlaOpKernel(ctx),
is_sparse_(is_sparse),
grad_a_(false),
grad_b_(false) {
OP_REQUIRES_OK(ctx, ctx->GetAttr("transpose_a", &transpose_a_));
OP_REQUIRES_OK(ctx, ctx->GetAttr("transpose_b", &transpose_b_));
if (!is_sparse) {
OP_REQUIRES_OK(ctx, ctx->GetAttr("grad_a", &grad_a_));
OP_REQUIRES_OK(ctx, ctx->GetAttr("grad_b", &grad_b_));
}
if (is_sparse) {
OP_REQUIRES_OK(ctx, ctx->GetAttr("Ta", &a_type_));
OP_REQUIRES_OK(ctx, ctx->GetAttr("Tb", &b_type_));
bool dummy_is_sparse;
OP_REQUIRES_OK(ctx, ctx->GetAttr("a_is_sparse", &dummy_is_sparse));
OP_REQUIRES_OK(ctx, ctx->GetAttr("b_is_sparse", &dummy_is_sparse));
}
}
~MatMulOp() override = default;
void Compile(XlaOpKernelContext* ctx) override {
const TensorShape a_shape = ctx->InputShape(0);
const TensorShape b_shape = ctx->InputShape(1);
OP_REQUIRES(ctx, a_shape.dims() == b_shape.dims(),
errors::InvalidArgument("In[0] and In[1] has different ndims: ",
a_shape.DebugString(), " vs. ",
b_shape.DebugString()));
OP_REQUIRES(
ctx, TensorShapeUtils::IsMatrix(a_shape),
errors::InvalidArgument("In[0] is not a matrix. Instead it has shape ",
a_shape.DebugString()));
OP_REQUIRES(
ctx, TensorShapeUtils::IsMatrix(b_shape),
errors::InvalidArgument("In[1] is not a matrix. Instead it has shape ",
b_shape.DebugString()));
int first_index = transpose_a_ ? 0 : 1;
int second_index = transpose_b_ ? 1 : 0;
OP_REQUIRES(ctx,
a_shape.dim_size(first_index) == b_shape.dim_size(second_index),
errors::InvalidArgument(
"Matrix size-incompatible: In[0]: ", a_shape.DebugString(),
", In[1]: ", b_shape.DebugString()));
xla::XlaOp a = ctx->Input(0);
xla::XlaOp b = ctx->Input(1);
if (is_sparse_) {
if (a_type_ == DT_BFLOAT16) {
a = xla::ConvertElementType(a, xla::F32);
}
if (b_type_ == DT_BFLOAT16) {
b = xla::ConvertElementType(b, xla::F32);
}
}
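    // Match TF32 semantics: DEFAULT lets the backend use reduced-precision
    // math when tensor_float_32 is enabled; otherwise request HIGHEST to
    // force full-precision accumulation.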
xla::PrecisionConfig::Precision precision =
tsl::tensor_float_32_execution_enabled()
? xla::PrecisionConfig::DEFAULT
: xla::PrecisionConfig::HIGHEST;
ctx->SetOutput(0, xla::BatchDot(a, transpose_a_, b, transpose_b_, precision,
std::nullopt, grad_a_, grad_b_));
}
private:
bool is_sparse_;
bool transpose_a_;
bool transpose_b_;
bool grad_a_;
bool grad_b_;
DataType a_type_;
DataType b_type_;
};
REGISTER_XLA_OP(Name("MatMul").TypeConstraint("T", kMatmulTypes), MatMulOp);
class SparseMatMulOp : public MatMulOp {
public:
explicit SparseMatMulOp(OpKernelConstruction* ctx) : MatMulOp(ctx, true) {}
~SparseMatMulOp() override = default;
};
REGISTER_XLA_OP(Name("SparseMatMul"), SparseMatMulOp);
}
} | #include <functional>
#include <string>
#include "absl/algorithm/container.h"
#include "absl/strings/match.h"
#include "tensorflow/cc/ops/nn_ops_internal.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/common_runtime/kernel_benchmark_testlib.h"
#include "tensorflow/core/framework/ops_util.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/kernels/ops_testutil.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
#include "tensorflow/core/protobuf/rewriter_config.pb.h"
#include "tensorflow/core/public/session.h"
#include "tsl/platform/status.h"
#if TENSORFLOW_USE_ROCM
#include "rocm/rocm_config.h"
#endif
namespace tensorflow {
namespace {
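// Fixture that runs a reference MatMul + BiasAdd (+ activation) graph and the
// fused _FusedMatMul kernel on the same random inputs and compares results.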
template <typename T>
class FusedMatMulOpTest : public OpsTestBase {
protected:
static constexpr auto kTValueType = DataTypeToEnum<T>::value;
using BiasAddGraphRunner =
std::function<bool(const Tensor& lhs_data, const Tensor& rhs_data,
const Tensor& bias_data, Tensor* out)>;
void RunAndFetch(const tensorflow::Scope& root, const string& fetch,
Tensor* output, bool allow_gpu_device,
const NodeDef* fetch_node = nullptr,
absl::Status* last_status = nullptr) {
tensorflow::GraphDef graph;
TF_ASSERT_OK(root.ToGraphDef(&graph));
if (fetch_node) {
*graph.add_node() = *fetch_node;
}
tensorflow::SessionOptions session_options;
session_options.config.mutable_graph_options()
->mutable_optimizer_options()
->set_opt_level(OptimizerOptions::L0);
tensorflow::RewriterConfig* cfg =
session_options.config.mutable_graph_options()
->mutable_rewrite_options();
cfg->set_constant_folding(tensorflow::RewriterConfig::OFF);
cfg->set_layout_optimizer(tensorflow::RewriterConfig::OFF);
cfg->set_remapping(tensorflow::RewriterConfig::OFF);
std::unique_ptr<tensorflow::Session> session(
tensorflow::NewSession(session_options));
std::vector<DeviceAttributes> available_devices;
TF_ASSERT_OK(session->ListDevices(&available_devices))
<< "Failed to get available session devices";
const bool has_gpu_device =
absl::c_any_of(available_devices, [](const DeviceAttributes& device) {
return device.device_type() == DEVICE_GPU;
});
const bool place_all_on_gpu = allow_gpu_device && has_gpu_device;
const string device = place_all_on_gpu ? "/device:GPU:0" : "/device:CPU:0";
for (NodeDef& mutable_node : *graph.mutable_node()) {
mutable_node.set_device(device);
}
TF_ASSERT_OK(session->Create(graph));
std::vector<Tensor> unfused_tensors;
auto res = session->Run({}, {fetch}, {}, &unfused_tensors);
if (last_status != nullptr) {
*last_status = res;
} else {
TF_ASSERT_OK(res);
}
if (!unfused_tensors.empty()) {
*output = unfused_tensors[0];
}
}
void RunMatMulWithBias(const Tensor& lhs_data, const Tensor& rhs_data,
const Tensor& bias_data, bool transpose_a,
bool transpose_b, Tensor* output,
bool allow_gpu_device = false) {
Scope root = tensorflow::Scope::NewRootScope();
ops::MatMul matmul = ops::MatMul(
root.WithOpName("matmul"),
ops::Const(root.WithOpName("lhs"), Input::Initializer(lhs_data)),
ops::Const(root.WithOpName("rhs"), Input::Initializer(rhs_data)),
ops::MatMul::Attrs().TransposeA(transpose_a).TransposeB(transpose_b));
ops::BiasAdd with_bias = ops::BiasAdd(
root.WithOpName("with_bias"), matmul,
ops::Const(root.WithOpName("bias"), Input::Initializer(bias_data)));
RunAndFetch(root, "with_bias", output, allow_gpu_device);
}
void RunMatMulWithBiasAndActivation(
const Tensor& lhs_data, const Tensor& rhs_data, const Tensor& bias_data,
bool transpose_a, bool transpose_b, const string& activation_type,
Tensor* output, bool allow_gpu_device = false) {
Scope root = tensorflow::Scope::NewRootScope();
ops::MatMul matmul = ops::MatMul(
root.WithOpName("matmul"),
ops::Const(root.WithOpName("lhs"), Input::Initializer(lhs_data)),
ops::Const(root.WithOpName("rhs"), Input::Initializer(rhs_data)),
ops::MatMul::Attrs().TransposeA(transpose_a).TransposeB(transpose_b));
ops::BiasAdd with_bias = ops::BiasAdd(
root.WithOpName("with_bias"), matmul,
ops::Const(root.WithOpName("bias"), Input::Initializer(bias_data)));
if (activation_type == "Relu") {
ops::Relu(root.WithOpName("with_activation"), with_bias);
} else if (activation_type == "Relu6") {
ops::Relu6(root.WithOpName("with_activation"), with_bias);
} else if (activation_type == "Elu") {
ops::Elu(root.WithOpName("with_activation"), with_bias);
} else if (activation_type == "LeakyRelu") {
ops::internal::LeakyRelu(root.WithOpName("with_activation"), with_bias);
} else if (activation_type == "GeluExact") {
VLOG(0) << "ERROR: GeluExact is yet not available!!";
ops::Identity(root.WithOpName("with_activation"), with_bias);
} else if (activation_type == "Sigmoid") {
ops::Sigmoid(root.WithOpName("with_activation"), with_bias);
} else if (activation_type == "Tanh") {
ops::Tanh(root.WithOpName("with_activation"), with_bias);
} else {
ops::Identity(root.WithOpName("with_activation"), with_bias);
}
RunAndFetch(root, "with_activation", output, allow_gpu_device);
}
void RunFusedMatMulOp(const Tensor& lhs_data, const Tensor& rhs_data,
const std::vector<Tensor>& args_data,
const std::vector<string>& fused_ops, bool transpose_a,
bool transpose_b, Tensor* output,
bool allow_gpu_device = false,
bool* test_skipped = nullptr) {
Scope root = tensorflow::Scope::NewRootScope();
DataType dtype = DataTypeToEnum<T>::v();
int num_args = static_cast<int>(args_data.size());
Output lhs =
ops::Const(root.WithOpName("lhs"), Input::Initializer(lhs_data));
Output rhs =
ops::Const(root.WithOpName("rhs"), Input::Initializer(rhs_data));
std::vector<NodeDefBuilder::NodeOut> args;
for (int i = 0; i < num_args; ++i) {
Output arg = ops::Const(root.WithOpName(absl::StrCat("arg", i)),
Input::Initializer(args_data[i]));
args.emplace_back(arg.name(), 0, dtype);
}
NodeDef fused_matmul;
TF_EXPECT_OK(NodeDefBuilder("fused_matmul", "_FusedMatMul")
.Input({lhs.name(), 0, dtype})
.Input({rhs.name(), 0, dtype})
.Input(args)
.Attr("num_args", num_args)
.Attr("T", dtype)
.Attr("fused_ops", fused_ops)
.Attr("transpose_a", transpose_a)
.Attr("transpose_b", transpose_b)
.Finalize(&fused_matmul));
absl::Status last_status;
RunAndFetch(root, fused_matmul.name(), output, allow_gpu_device,
&fused_matmul, &last_status);
std::string what = "No algorithm worked!";
bool skip = absl::StrContains(last_status.message(), what);
if (test_skipped != nullptr) {
*test_skipped = skip;
}
if (skip) {
GTEST_SKIP() << what;
} else {
TF_ASSERT_OK(last_status);
}
}
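  // Generates random lhs/rhs/bias of the requested shape, runs both graph
  // variants, and checks the outputs are numerically close (looser tolerance
  // for fp16).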
void VerifyBiasAddTensorsNear(int m, int k, int n, bool transpose_a,
bool transpose_b,
const BiasAddGraphRunner& run_default,
const BiasAddGraphRunner& run_fused) {
DataType dtype = DataTypeToEnum<T>::v();
Tensor lhs(dtype, {transpose_a ? k : m, transpose_a ? m : k});
lhs.flat<T>() = lhs.flat<T>().setRandom();
Tensor rhs(dtype, {transpose_b ? n : k, transpose_b ? k : n});
rhs.flat<T>() = rhs.flat<T>().setRandom();
rhs.flat<T>() -= rhs.flat<T>().constant(static_cast<T>(0.5f));
const int bias_size = n;
Tensor bias(dtype, {bias_size});
bias.flat<T>() = bias.flat<T>().setRandom();
bias.flat<T>() += bias.flat<T>().constant(static_cast<T>(0.5f));
Tensor matmul;
Tensor fused_matmul;
run_default(lhs, rhs, bias, &matmul);
bool skipped = run_fused(lhs, rhs, bias, &fused_matmul);
if (!skipped) {
ASSERT_EQ(matmul.dtype(), fused_matmul.dtype());
ASSERT_EQ(matmul.shape(), fused_matmul.shape());
double atol = this->kTValueType == DT_HALF ? 1e-3 : 1e-5;
double rtol = this->kTValueType == DT_HALF ? 1e-3 : -1.0;
test::ExpectClose(matmul, fused_matmul, atol, rtol);
}
}
void VerifyMatMulWithBias(int m, int k, int n, bool transpose_a,
bool transpose_b) {
VLOG(2) << "=== VerifyMatMulWithBias (" << m << ", " << k << ", " << n
<< ", " << (int)transpose_a << ", " << (int)transpose_b << ") ===";
const BiasAddGraphRunner run_default =
[&](const Tensor& input_data, const Tensor& filter_data,
const Tensor& bias_data, Tensor* out) {
RunMatMulWithBias(input_data, filter_data, bias_data, transpose_a,
transpose_b, out, true);
return false;
};
const BiasAddGraphRunner run_fused =
[&](const Tensor& input_data, const Tensor& filter_data,
const Tensor& bias_data, Tensor* out) {
bool skipped = false;
RunFusedMatMulOp(input_data, filter_data, {bias_data}, {"BiasAdd"},
transpose_a, transpose_b, out,
true, &skipped);
return skipped;
};
VerifyBiasAddTensorsNear(m, k, n, transpose_a, transpose_b, run_default,
run_fused);
}
void VerifyConv2DWithBiasAndActivation(int m, int k, int n, bool transpose_a,
bool transpose_b,
const string& activation) {
bool use_gpu_device =
activation == "Relu" || (this->kTValueType == DT_HALF);
const BiasAddGraphRunner run_default =
[&](const Tensor& input_data, const Tensor& filter_data,
const Tensor& bias_data, Tensor* out) {
RunMatMulWithBiasAndActivation(input_data, filter_data, bias_data,
transpose_a, transpose_b, activation,
out, use_gpu_device);
return false;
};
const BiasAddGraphRunner run_fused =
[&](const Tensor& input_data, const Tensor& filter_data,
const Tensor& bias_data, Tensor* out) {
bool skipped = false;
RunFusedMatMulOp(input_data, filter_data, {bias_data},
{"BiasAdd", activation}, transpose_a, transpose_b,
out, use_gpu_device, &skipped);
return skipped;
};
VerifyBiasAddTensorsNear(m, k, n, transpose_a, transpose_b, run_default,
run_fused);
}
};
template <typename T>
class FusedMatMulWithBiasOpTest : public FusedMatMulOpTest<T> {};
TYPED_TEST_SUITE_P(FusedMatMulWithBiasOpTest);
TYPED_TEST_P(FusedMatMulWithBiasOpTest, MatMul256x128x64) {
this->VerifyMatMulWithBias(256, 128, 64, false, false);
this->VerifyMatMulWithBias(256, 128, 64, true, false);
this->VerifyMatMulWithBias(256, 128, 64, false, true);
this->VerifyMatMulWithBias(256, 128, 64, true, true);
}
TYPED_TEST_P(FusedMatMulWithBiasOpTest, MatMul1x256x256) {
this->VerifyMatMulWithBias(1, 256, 256, false, false);
this->VerifyMatMulWithBias(1, 256, 256, true, false);
this->VerifyMatMulWithBias(1, 256, 256, false, true);
this->VerifyMatMulWithBias(1, 256, 256, true, true);
}
TYPED_TEST_P(FusedMatMulWithBiasOpTest, MatMul256x256x1) {
this->VerifyMatMulWithBias(256, 256, 1, false, false);
this->VerifyMatMulWithBias(256, 256, 1, true, false);
this->VerifyMatMulWithBias(256, 256, 1, false, true);
this->VerifyMatMulWithBias(256, 256, 1, true, true);
}
TYPED_TEST_P(FusedMatMulWithBiasOpTest, MatMul1x256x1) {
this->VerifyMatMulWithBias(1, 256, 1, false, false);
}
static auto GetActivations(DataType dtype) {
switch (dtype) {
case DT_HALF:
return std::vector{ "Tanh", "Sigmoid"};
default:
return std::vector{"Relu", "Relu6", "Elu", "LeakyRelu"};
}
}
TYPED_TEST_P(FusedMatMulWithBiasOpTest, MatMul256x128x64WithActivation) {
for (const string& activation : GetActivations(this->kTValueType)) {
this->VerifyConv2DWithBiasAndActivation(256, 128, 64, false, false,
activation);
this->VerifyConv2DWithBiasAndActivation(256, 128, 64, true, false,
activation);
this->VerifyConv2DWithBiasAndActivation(256, 128, 64, false, true,
activation);
this->VerifyConv2DWithBiasAndActivation(256, 128, 64, true, true,
activation);
}
}
TYPED_TEST_P(FusedMatMulWithBiasOpTest, MatMul1x256x256WithActivation) {
for (const string& activation : GetActivations(this->kTValueType)) {
this->VerifyConv2DWithBiasAndActivation(1, 256, 256, false, false,
activation);
}
}
TYPED_TEST_P(FusedMatMulWithBiasOpTest, MatMul256x256x1WithActivation) {
for (const string& activation : GetActivations(this->kTValueType)) {
this->VerifyConv2DWithBiasAndActivation(256, 256, 1, false, false,
activation);
}
}
TYPED_TEST_P(FusedMatMulWithBiasOpTest, MatMul1x256x1WithActivation) {
for (const string& activation : GetActivations(this->kTValueType)) {
this->VerifyConv2DWithBiasAndActivation(1, 256, 1, false, false,
activation);
}
}
REGISTER_TYPED_TEST_SUITE_P(FusedMatMulWithBiasOpTest,
MatMul256x128x64,
MatMul1x256x256,
MatMul256x256x1,
MatMul1x256x1,
MatMul256x128x64WithActivation,
MatMul1x256x256WithActivation,
MatMul256x256x1WithActivation,
MatMul1x256x1WithActivation);
using FusedBiasAddDataTypes = ::testing::Types<float, Eigen::half>;
INSTANTIATE_TYPED_TEST_SUITE_P(Test, FusedMatMulWithBiasOpTest,
FusedBiasAddDataTypes);
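// Performance benchmarks. Matmul() builds a graph containing a single MatMul
// node fed by random constant inputs of the requested shape.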
template <typename T>
static Graph* Matmul(int m, int k, int n, bool transpose_a, bool transpose_b,
DataType type) {
Graph* g = new Graph(OpRegistry::Global());
Tensor in0(type, transpose_a ? TensorShape({k, m}) : TensorShape({m, k}));
in0.flat<T>().setRandom();
Tensor in1(type, transpose_b ? TensorShape({n, k}) : TensorShape({k, n}));
in1.flat<T>().setRandom();
test::graph::Matmul(g, test::graph::Constant(g, in0),
test::graph::Constant(g, in1), transpose_a, transpose_b);
return g;
}
#define BM_MatmulDev(M, K, N, TA, TB, T, TFTYPE, DEVICE) \
static void BM_Matmul##_##M##_##K##_##N##_##TA##_##TB##_##TFTYPE##_##DEVICE( \
::testing::benchmark::State& state) { \
test::Benchmark(#DEVICE, Matmul<T>(M, K, N, TA, TB, TFTYPE)).Run(state); \
state.SetItemsProcessed(state.iterations() * M * K * N * 2); \
} \
BENCHMARK(BM_Matmul##_##M##_##K##_##N##_##TA##_##TB##_##TFTYPE##_##DEVICE) \
->MeasureProcessCPUTime();
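// BM_Matmul instantiates the benchmark for CPU, and additionally for GPU when
// built with CUDA support.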
#ifdef GOOGLE_CUDA
#define BM_Matmul(M, K, N, TA, TB) \
BM_MatmulDev(M, K, N, TA, TB, float, DT_FLOAT, cpu); \
BM_MatmulDev(M, K, N, TA, TB, std::complex<float>, DT_COMPLEX64, cpu); \
BM_MatmulDev(M, K, N, TA, TB, float, DT_FLOAT, gpu); \
  BM_MatmulDev(M, K, N, TA, TB, std::complex<float>, DT_COMPLEX64, gpu);
#else
#define BM_Matmul(M, K, N, TA, TB) \
BM_MatmulDev(M, K, N, TA, TB, float, DT_FLOAT, cpu); \
BM_MatmulDev(M, K, N, TA, TB, std::complex<float>, DT_COMPLEX64, cpu);
#endif
BM_Matmul(1, 512, 512, false, false);
BM_Matmul(8, 512, 512, false, false);
BM_Matmul(16, 512, 512, false, false);
BM_Matmul(128, 512, 512, false, false);
BM_Matmul(1, 1024, 1024, false, false);
BM_Matmul(8, 1024, 1024, false, false);
BM_Matmul(16, 1024, 1024, false, false);
BM_Matmul(128, 1024, 1024, false, false);
BM_Matmul(4096, 4096, 4096, false, false);
BM_Matmul(1, 1024, 1024, false, true);
BM_Matmul(8, 1024, 1024, false, true);
BM_Matmul(16, 1024, 1024, false, true);
BM_Matmul(128, 1024, 1024, false, true);
BM_Matmul(1, 200, 10000, false, false);
BM_Matmul(8, 200, 10000, false, false);
BM_Matmul(20, 200, 10000, false, false);
BM_Matmul(20, 200, 20000, false, false);
BM_Matmul(1, 10000, 200, false, true);
BM_Matmul(1, 10000, 200, false, false);
BM_Matmul(8, 10000, 200, false, true);
BM_Matmul(20, 10000, 200, false, true);
BM_Matmul(20, 20000, 200, false, true);
BM_Matmul(50, 50, 1, false, false);
BM_Matmul(50, 50, 1, true, false);
BM_Matmul(50, 50, 1, false, true);
BM_Matmul(50, 50, 1, true, true);
BM_Matmul(500, 500, 1, false, false);
BM_Matmul(500, 500, 1, true, false);
BM_Matmul(500, 500, 1, false, true);
BM_Matmul(500, 500, 1, true, true);
BM_Matmul(2000, 2000, 1, false, false);
BM_Matmul(2000, 2000, 1, true, false);
BM_Matmul(2000, 2000, 1, false, true);
BM_Matmul(2000, 2000, 1, true, true);
BM_Matmul(1, 50, 50, false, false);
BM_Matmul(1, 50, 50, true, false);
BM_Matmul(1, 50, 50, false, true);
BM_Matmul(1, 50, 50, true, true);
BM_Matmul(1, 500, 500, false, false);
BM_Matmul(1, 500, 500, true, false);
BM_Matmul(1, 500, 500, false, true);
BM_Matmul(1, 500, 500, true, true);
BM_Matmul(1, 2000, 2000, false, false);
BM_Matmul(1, 2000, 2000, true, false);
BM_Matmul(1, 2000, 2000, false, true);
BM_Matmul(1, 2000, 2000, true, true);
BM_Matmul(50, 1, 50, false, false);
BM_Matmul(50, 1, 50, true, false);
BM_Matmul(50, 1, 50, false, true);
BM_Matmul(50, 1, 50, true, true);
BM_Matmul(500, 1, 500, false, false);
BM_Matmul(500, 1, 500, true, false);
BM_Matmul(500, 1, 500, false, true);
BM_Matmul(500, 1, 500, true, true);
BM_Matmul(2000, 1, 2000, false, false);
BM_Matmul(2000, 1, 2000, true, false);
BM_Matmul(2000, 1, 2000, false, true);
BM_Matmul(2000, 1, 2000, true, true);
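// Helpers that build BroadcastTo and BatchMatMulV2 nodes for the batched
// benchmarks below.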
Node* BroadcastTo(Graph* g, Node* input, Node* shape) {
Node* ret;
TF_CHECK_OK(NodeBuilder(g->NewName("n"), "BroadcastTo")
.Input(input)
.Input(shape)
.Finalize(g, &ret));
return ret;
}
Node* BatchMatmulV2(Graph* g, Node* in0, Node* in1, bool adj_x, bool adj_y) {
Node* ret;
TF_CHECK_OK(NodeBuilder(g->NewName("n"), "BatchMatMulV2")
.Input(in0)
.Input(in1)
.Attr("adj_x", adj_x)
.Attr("adj_y", adj_y)
.Finalize(g, &ret));
return ret;
}
template <typename T>
static Graph* BatchMatmul(int b, int m, int k, int n, bool adjoint_a,
bool adjoint_b, DataType type) {
Graph* g = new Graph(OpRegistry::Global());
Tensor in0(type, adjoint_a ? TensorShape({b, k, m}) : TensorShape({b, m, k}));
in0.flat<T>().setRandom();
Tensor in1(type, adjoint_b ? TensorShape({b, n, k}) : TensorShape({b, k, n}));
in1.flat<T>().setRandom();
test::graph::BatchMatmul(g, test::graph::Constant(g, in0),
test::graph::Constant(g, in1), adjoint_a, adjoint_b);
return g;
}
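// Builds a BatchMatMulV2 benchmark graph. With manual_broadcast, the batch
// dimension is expanded explicitly through BroadcastTo nodes; otherwise the
// op's implicit broadcasting is exercised.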
template <typename T>
static Graph* BatchMatmulWithBroadcast(int b0, int b1, int m, int k, int n,
bool manual_broadcast, DataType type) {
Graph* g = new Graph(OpRegistry::Global());
Tensor in0(type, TensorShape({b0, m, k}));
in0.flat<T>().setRandom();
Tensor in1(type, TensorShape({b1, k, n}));
in1.flat<T>().setRandom();
Tensor broadcasted_in0_shape(DT_INT64, TensorShape({3}));
Tensor broadcasted_in1_shape(DT_INT64, TensorShape({3}));
Node* in0_node = nullptr;
Node* in1_node = nullptr;
if (manual_broadcast) {
for (int i = 0; i < 3; ++i) {
auto vec0 = broadcasted_in0_shape.vec<int64_t>();
auto vec1 = broadcasted_in1_shape.vec<int64_t>();
vec0(i) = (i == 0 ? std::max(b0, b1) : in0.shape().dim_size(i));
vec1(i) = (i == 0 ? std::max(b0, b1) : in1.shape().dim_size(i));
}
in0_node = BroadcastTo(g, test::graph::Constant(g, in0),
test::graph::Constant(g, broadcasted_in0_shape));
in1_node = BroadcastTo(g, test::graph::Constant(g, in1),
test::graph::Constant(g, broadcasted_in1_shape));
} else {
in0_node = test::graph::Constant(g, in0);
in1_node = test::graph::Constant(g, in1);
}
BatchMatmulV2(g, in0_node, in1_node, false, false);
return g;
}
#define BM_BatchMatmulDev(B, M, K, N, TA, TB, T, TFTYPE, DEVICE) \
static void \
BM_BatchMatmul##_##B##_##M##_##K##_##N##_##TA##_##TB##_##TFTYPE##_##DEVICE( \
::testing::benchmark::State& state) { \
test::Benchmark(#DEVICE, BatchMatmul<T>(B, M, K, N, TA, TB, TFTYPE), \
false) \
.Run(state); \
state.SetItemsProcessed(state.iterations() * B * M * K * N * 2); \
} \
BENCHMARK( \
BM_BatchMatmul##_##B##_##M##_##K##_##N##_##TA##_##TB##_##TFTYPE##_##DEVICE) \
->MeasureProcessCPUTime();
#define BM_BatchMatmul(B, M, K, N, TA, TB) \
BM_BatchMatmulDev(B, M, K, N, TA, TB, float, DT_FLOAT, cpu);
#define BM_BatchMatmulBCastDev(B1, B2, M, K, N, MB, T, TT, D) \
static void \
BM_BatchMatmulBCast##_##B1##_##B2##_##M##_##K##_##N##_##MB##_##TT##_##D( \
::testing::benchmark::State& state) { \
test::Benchmark(#D, BatchMatmulWithBroadcast<T>(B1, B2, M, K, N, MB, TT), \
false) \
.Run(state); \
state.SetItemsProcessed(state.iterations() * std::max(B1, B2) * M * K * \
N * 2); \
} \
BENCHMARK( \
BM_BatchMatmulBCast##_##B1##_##B2##_##M##_##K##_##N##_##MB##_##TT##_##D) \
->MeasureProcessCPUTime();
#define BM_BatchMatmulBCast(B1, B2, M, K, N, MB) \
BM_BatchMatmulBCastDev(B1, B2, M, K, N, MB, float, DT_FLOAT, cpu);
BM_BatchMatmulBCast(1, 128, 1, 1024, 1024, true);
BM_BatchMatmulBCast(1, 128, 1, 1024, 1024, false);
BM_BatchMatmulBCast(128, 1, 1, 1024, 1024, true);
BM_BatchMatmulBCast(128, 1, 1, 1024, 1024, false);
BM_BatchMatmulBCast(1, 128, 128, 1024, 1024, true);
BM_BatchMatmulBCast(1, 128, 128, 1024, 1024, false);
BM_BatchMatmulBCast(128, 1, 128, 1024, 1024, true);
BM_BatchMatmulBCast(128, 1, 128, 1024, 1024, false);
BM_BatchMatmulBCast(1, 128, 512, 512, 512, true);
BM_BatchMatmulBCast(1, 128, 512, 512, 512, false);
BM_BatchMatmulBCast(128, 1, 512, 512, 512, true);
BM_BatchMatmulBCast(128, 1, 512, 512, 512, false);
BM_BatchMatmulBCast(1, 128, 1024, 1024, 1024, true);
BM_BatchMatmulBCast(1, 128, 1024, 1024, 1024, false);
BM_BatchMatmulBCast(128, 1, 1024, 1024, 1024, true);
BM_BatchMatmulBCast(128, 1, 1024, 1024, 1024, false);
BM_BatchMatmulBCast(1, 128, 10000, 200, 1, true);
BM_BatchMatmulBCast(1, 128, 10000, 200, 1, false);
BM_BatchMatmulBCast(128, 1, 10000, 200, 1, true);
BM_BatchMatmulBCast(128, 1, 10000, 200, 1, false);
BM_BatchMatmulBCast(1, 128, 1, 200, 10000, true);
BM_BatchMatmulBCast(1, 128, 1, 200, 10000, false);
BM_BatchMatmulBCast(128, 1, 1, 200, 10000, true);
BM_BatchMatmulBCast(128, 1, 1, 200, 10000, false);
BM_BatchMatmul(1, 1, 1024, 1024, false, false);
BM_BatchMatmul(1, 8, 1024, 1024, false, false);
BM_BatchMatmul(1, 16, 1024, 1024, false, false);
BM_BatchMatmul(1, 128, 1024, 1024, false, false);
BM_BatchMatmul(2, 1, 1024, 1024, false, false);
BM_BatchMatmul(2, 8, 1024, 1024, false, false);
BM_BatchMatmul(2, 16, 1024, 1024, false, false);
BM_BatchMatmul(2, 128, 1024, 1024, false, false);
BM_BatchMatmul(8, 1, 1024, 1024, false, false);
BM_BatchMatmul(8, 8, 1024, 1024, false, false);
BM_BatchMatmul(8, 16, 1024, 1024, false, false);
BM_BatchMatmul(8, 128, 1024, 1024, false, false);
BM_BatchMatmul(32, 1, 1024, 1024, false, false);
BM_BatchMatmul(32, 8, 1024, 1024, false, false);
BM_BatchMatmul(32, 16, 1024, 1024, false, false);
BM_BatchMatmul(32, 128, 1024, 1024, false, false);
BM_BatchMatmul(1, 32, 32, 32, false, false);
BM_BatchMatmul(1, 128, 128, 128, false, false);
BM_BatchMatmul(1, 256, 256, 256, false, false);
BM_BatchMatmul(1, 1024, 1024, 1024, false, false);
BM_BatchMatmul(1, 2048, 2048, 2048, false, false);
BM_BatchMatmul(2, 32, 32, 32, false, false);
BM_BatchMatmul(2, 128, 128, 128, false, false);
BM_BatchMatmul(2, 256, 256, 256, false, false);
BM_BatchMatmul(2, 1024, 1024, 1024, false, false);
BM_BatchMatmul(2, 2048, 2048, 2048, false, false);
BM_BatchMatmul(4, 32, 32, 32, false, false);
BM_BatchMatmul(4, 128, 128, 128, false, false);
BM_BatchMatmul(4, 256, 256, 256, false, false);
BM_BatchMatmul(4, 1024, 1024, 1024, false, false);
BM_BatchMatmul(4, 2048, 2048, 2048, false, false);
BM_BatchMatmul(8, 32, 32, 32, false, false);
BM_BatchMatmul(8, 128, 128, 128, false, false);
BM_BatchMatmul(8, 256, 256, 256, false, false);
BM_BatchMatmul(8, 1024, 1024, 1024, false, false);
BM_BatchMatmul(8, 2048, 2048, 2048, false, false);
BM_BatchMatmul(32, 32, 32, 32, false, false);
BM_BatchMatmul(32, 128, 128, 128, false, false);
BM_BatchMatmul(32, 256, 256, 256, false, false);
BM_BatchMatmul(32, 1024, 1024, 1024, false, false);
BM_BatchMatmul(32, 2048, 2048, 2048, false, false);
BM_BatchMatmul(1, 10000, 200, 1, false, false);
BM_BatchMatmul(8, 10000, 200, 1, false, false);
BM_BatchMatmul(32, 10000, 200, 1, false, false);
BM_BatchMatmul(1, 10000, 200, 1, true, false);
BM_BatchMatmul(8, 10000, 200, 1, true, false);
BM_BatchMatmul(32, 10000, 200, 1, true, false);
BM_BatchMatmul(1, 10000, 200, 1, false, true);
BM_BatchMatmul(8, 10000, 200, 1, false, true);
BM_BatchMatmul(32, 10000, 200, 1, false, true);
BM_BatchMatmul(1, 10000, 200, 1, true, true);
BM_BatchMatmul(8, 10000, 200, 1, true, true);
BM_BatchMatmul(32, 10000, 200, 1, true, true);
BM_BatchMatmul(1, 1, 200, 10000, false, false);
BM_BatchMatmul(8, 1, 200, 10000, false, false);
BM_BatchMatmul(32, 1, 200, 10000, false, false);
BM_BatchMatmul(1, 1, 200, 10000, true, false);
BM_BatchMatmul(8, 1, 200, 10000, true, false);
BM_BatchMatmul(32, 1, 200, 10000, true, false);
BM_BatchMatmul(1, 1, 200, 10000, false, true);
BM_BatchMatmul(8, 1, 200, 10000, false, true);
BM_BatchMatmul(32, 1, 200, 10000, false, true);
BM_BatchMatmul(1, 1, 200, 10000, true, true);
BM_BatchMatmul(8, 1, 200, 10000, true, true);
BM_BatchMatmul(32, 1, 200, 10000, true, true);
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2xla/kernels/matmul_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/matmul_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
8adb0ed0-9542-4a16-afc5-37b3e581de81 | cpp | tensorflow/tensorflow | pjrt_compiler | third_party/xla/xla/python/pjrt_ifrt/pjrt_compiler.cc | third_party/xla/xla/pjrt/pjrt_compiler_test.cc | #include "xla/python/pjrt_ifrt/pjrt_compiler.h"
#include <memory>
#include <optional>
#include <utility>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "llvm/Support/Casting.h"
#include "xla/pjrt/pjrt_client.h"
#include "xla/pjrt/pjrt_compiler.h"
#include "xla/python/ifrt/compiler.h"
#include "xla/python/ifrt/executable.h"
#include "xla/python/ifrt/hlo/hlo_program.h"
#include "xla/python/ifrt/program.h"
#include "xla/python/ifrt/topology.h"
#include "xla/python/pjrt_ifrt/pjrt_client.h"
#include "xla/python/pjrt_ifrt/pjrt_executable.h"
#include "xla/python/pjrt_ifrt/pjrt_topology.h"
#include "xla/python/pjrt_ifrt/xla_compiler.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace ifrt {
char PjRtCompiler::ID = 0;
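// Compiles an HloProgram for this compiler's client and loads the resulting
// executable, after unwrapping the IFRT options into XLA compile options.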
absl::StatusOr<std::unique_ptr<LoadedExecutable>> PjRtCompiler::Compile(
std::unique_ptr<Program> program, std::unique_ptr<CompileOptions> options) {
DCHECK(this);
const auto* xla_program = llvm::dyn_cast<HloProgram>(program.get());
if (xla_program == nullptr) {
return absl::InvalidArgumentError("PjRtCompiler requires an HloProgram");
}
TF_ASSIGN_OR_RETURN(auto xla_compile_options,
GetXlaCompileOptions(std::move(options)));
return PjRtLoadedExecutable::Create(
client_, xla_program->mlir_module,
std::move(xla_compile_options->compile_options),
std::move(xla_compile_options->loaded_host_callbacks));
}
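// Ahead-of-time variant: compiles against an explicit PjRtTopology and
// returns an unloaded executable.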
absl::StatusOr<std::unique_ptr<Executable>> PjRtCompiler::Compile(
std::unique_ptr<Program> program, const Topology& topology,
std::unique_ptr<CompileOptions> options) {
DCHECK(this);
const auto* xla_program = llvm::dyn_cast<HloProgram>(program.get());
if (xla_program == nullptr) {
return absl::InvalidArgumentError("PjRtCompiler requires an HloProgram");
}
TF_ASSIGN_OR_RETURN(auto xla_compile_options,
GetXlaCompileOptions(std::move(options)));
const auto* pjrt_topology = llvm::dyn_cast<PjRtTopology>(&topology);
if (pjrt_topology == nullptr) {
return absl::InvalidArgumentError("PjRtCompiler requires a PjRtTopology");
}
TF_ASSIGN_OR_RETURN(
auto executable,
PjRtCompile(xla_compile_options->compile_options,
xla_program->mlir_module, *pjrt_topology->description()));
return PjRtExecutable::Create(std::move(executable),
std::move(xla_compile_options));
}
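// Rehydrates a previously serialized executable through the underlying PjRt
// client.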
absl::StatusOr<std::unique_ptr<LoadedExecutable>>
PjRtCompiler::DeserializeLoadedExecutable(
absl::string_view serialized,
std::unique_ptr<DeserializeExecutableOptions> options) {
DCHECK(this);
TF_ASSIGN_OR_RETURN(auto xla_deserialize_options,
GetXlaDeserializeExecutableOptions(std::move(options)));
TF_ASSIGN_OR_RETURN(
auto pjrt_loaded_executable,
client_->pjrt_client()->DeserializeExecutable(
serialized, std::move(xla_deserialize_options->compile_options)));
return PjRtLoadedExecutable::Create(
client_,
std::shared_ptr<xla::PjRtLoadedExecutable>(
std::move(pjrt_loaded_executable)),
std::move(xla_deserialize_options->loaded_host_callbacks));
}
}
} | #include "xla/pjrt/pjrt_compiler.h"
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/container/flat_hash_map.h"
#include "absl/log/log.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/client/xla_computation.h"
#include "xla/pjrt/metrics.h"
#include "xla/pjrt/pjrt_client.h"
#include "xla/pjrt/pjrt_device_description.h"
#include "xla/tsl/lib/monitoring/cell_reader.h"
#include "tsl/platform/status_matchers.h"
namespace xla {
using metrics::kPjrtCompilerCompileComputationMetricName;
using metrics::kPjrtCompilerCompileModuleMetricName;
using ::tsl::monitoring::testing::CellReader;
using ::tsl::testing::StatusIs;
namespace {
class PjRtTestTopology : public PjRtTopologyDescription {
public:
PjRtPlatformId platform_id() const override { return 0; }
absl::string_view platform_name() const override { return "not_registered"; }
absl::string_view platform_version() const override { return "test"; }
std::vector<std::unique_ptr<const PjRtDeviceDescription>> DeviceDescriptions()
const override {
LOG(FATAL) << "Unused";
}
absl::StatusOr<std::string> Serialize() const override { return "test_topo"; }
const absl::flat_hash_map<std::string, PjRtDeviceAttribute>& Attributes()
const override {
LOG(FATAL) << "Unused";
}
absl::StatusOr<Layout> GetDefaultLayout(
PrimitiveType element_type,
absl::Span<const int64_t> dims) const override {
return Unimplemented("TestTopology does not support GetDefaultLayout");
}
};
TEST(PjRtCompilerTest, CompilerNotRegistered) {
PjRtTestTopology topology;
CompileOptions options;
XlaComputation computation;
auto res = PjRtCompile(options, computation, topology);
EXPECT_TRUE(tsl::errors::IsNotFound(res.status()));
}
TEST(PjRtCompilerTest, CompilerRegistered) {
class PjRtTestTopology : public PjRtTopologyDescription {
public:
PjRtPlatformId platform_id() const override { return 0; }
absl::string_view platform_name() const override { return "registered"; }
absl::string_view platform_version() const override { return "test"; }
std::vector<std::unique_ptr<const PjRtDeviceDescription>>
DeviceDescriptions() const override {
LOG(FATAL) << "Unused";
}
absl::StatusOr<std::string> Serialize() const override {
return "test_topo";
}
const absl::flat_hash_map<std::string, PjRtDeviceAttribute>& Attributes()
const override {
LOG(FATAL) << "Unused";
}
absl::StatusOr<Layout> GetDefaultLayout(
PrimitiveType element_type,
absl::Span<const int64_t> dims) const override {
return Unimplemented("TestTopology does not support GetDefaultLayout");
}
};
PjRtTestTopology topology;
class PjRtTestCompiler : public PjRtCompiler {
public:
absl::StatusOr<std::unique_ptr<PjRtExecutable>> Compile(
CompileOptions options, const XlaComputation& computation,
const PjRtTopologyDescription& topology, PjRtClient* client) override {
return tsl::errors::Unimplemented("test compiler!");
}
absl::StatusOr<std::unique_ptr<PjRtExecutable>> Compile(
CompileOptions options, mlir::ModuleOp module,
const PjRtTopologyDescription& topology, PjRtClient* client) override {
return tsl::errors::Unimplemented("test compiler!");
}
};
std::unique_ptr<PjRtCompiler> compiler = std::make_unique<PjRtTestCompiler>();
PjRtRegisterCompiler(topology.platform_name(), std::move(compiler));
CompileOptions options;
XlaComputation computation;
auto res = PjRtCompile(options, computation, topology);
EXPECT_TRUE(tsl::errors::IsUnimplemented(res.status()));
}
TEST(PjRtCompilerTest, PjrtCompileComputationMetric) {
PjRtTestTopology topology;
xla::CompileOptions compile_options;
XlaComputation xla_computation;
CellReader<bool> metric_reader(
std::string{kPjrtCompilerCompileComputationMetricName});
  EXPECT_THAT(
      PjRtCompile(compile_options, xla_computation, topology, nullptr),
      StatusIs(tensorflow::error::NOT_FOUND));
EXPECT_FALSE(metric_reader.Read());
}
TEST(PjRtCompilerTest, PjrtCompileModuleMetric) {
PjRtTestTopology topology;
xla::CompileOptions compile_options;
mlir::ModuleOp module;
CellReader<bool> metric_reader(
std::string{kPjrtCompilerCompileModuleMetricName});
  EXPECT_THAT(PjRtCompile(compile_options, module, topology, nullptr),
              StatusIs(tensorflow::error::NOT_FOUND));
EXPECT_FALSE(metric_reader.Read());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/python/pjrt_ifrt/pjrt_compiler.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/pjrt/pjrt_compiler_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
22374c95-74ad-4522-a47e-9eb6cff3e0a9 | cpp | abseil/abseil-cpp | mock_distributions | absl/random/mock_distributions.h | absl/random/mock_distributions_test.cc | #ifndef ABSL_RANDOM_MOCK_DISTRIBUTIONS_H_
#define ABSL_RANDOM_MOCK_DISTRIBUTIONS_H_
#include "absl/base/config.h"
#include "absl/random/bernoulli_distribution.h"
#include "absl/random/beta_distribution.h"
#include "absl/random/distributions.h"
#include "absl/random/exponential_distribution.h"
#include "absl/random/gaussian_distribution.h"
#include "absl/random/internal/mock_overload_set.h"
#include "absl/random/internal/mock_validators.h"
#include "absl/random/log_uniform_int_distribution.h"
#include "absl/random/mocking_bit_gen.h"
#include "absl/random/poisson_distribution.h"
#include "absl/random/zipf_distribution.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
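// MockUniform is an overload set that lets tests set expectations on
// absl::Uniform<R>() calls made against an absl::MockingBitGen; mocked
// results are checked by UniformDistributionValidator to lie within the
// requested interval.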
template <typename R>
using MockUniform = random_internal::MockOverloadSetWithValidator<
random_internal::UniformDistributionWrapper<R>,
random_internal::UniformDistributionValidator<R>,
R(IntervalClosedOpenTag, MockingBitGen&, R, R),
R(IntervalClosedClosedTag, MockingBitGen&, R, R),
R(IntervalOpenOpenTag, MockingBitGen&, R, R),
R(IntervalOpenClosedTag, MockingBitGen&, R, R), R(MockingBitGen&, R, R),
R(MockingBitGen&)>;
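// The aliases below mock the corresponding absl::<Distribution> free
// functions (Bernoulli, Beta, Exponential, Gaussian, LogUniform, Poisson,
// Zipf) when invoked with a MockingBitGen.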
using MockBernoulli =
random_internal::MockOverloadSet<absl::bernoulli_distribution,
bool(MockingBitGen&, double)>;
template <typename RealType>
using MockBeta =
random_internal::MockOverloadSet<absl::beta_distribution<RealType>,
RealType(MockingBitGen&, RealType,
RealType)>;
template <typename RealType>
using MockExponential =
random_internal::MockOverloadSet<absl::exponential_distribution<RealType>,
RealType(MockingBitGen&, RealType)>;
template <typename RealType>
using MockGaussian =
random_internal::MockOverloadSet<absl::gaussian_distribution<RealType>,
RealType(MockingBitGen&, RealType,
RealType)>;
template <typename IntType>
using MockLogUniform = random_internal::MockOverloadSet<
absl::log_uniform_int_distribution<IntType>,
IntType(MockingBitGen&, IntType, IntType, IntType)>;
template <typename IntType>
using MockPoisson =
random_internal::MockOverloadSet<absl::poisson_distribution<IntType>,
IntType(MockingBitGen&, double)>;
template <typename IntType>
using MockZipf =
random_internal::MockOverloadSet<absl::zipf_distribution<IntType>,
IntType(MockingBitGen&, IntType, double,
double)>;
ABSL_NAMESPACE_END
}
#endif | #include "absl/random/mock_distributions.h"
#include <cmath>
#include <limits>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/numeric/int128.h"
#include "absl/random/distributions.h"
#include "absl/random/mocking_bit_gen.h"
#include "absl/random/random.h"
namespace {
using ::testing::Return;
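// Each block below first shows the unmocked draw is (very likely) different
// from the target value, then mocks the distribution's overload set and
// checks the forced result.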
TEST(MockDistributions, Examples) {
absl::MockingBitGen gen;
EXPECT_NE(absl::Uniform<int>(gen, 1, 1000000), 20);
EXPECT_CALL(absl::MockUniform<int>(), Call(gen, 1, 1000000))
.WillOnce(Return(20));
EXPECT_EQ(absl::Uniform<int>(gen, 1, 1000000), 20);
EXPECT_NE(absl::Uniform<double>(gen, 0.0, 100.0), 5.0);
EXPECT_CALL(absl::MockUniform<double>(), Call(gen, 0.0, 100.0))
.WillOnce(Return(5.0));
EXPECT_EQ(absl::Uniform<double>(gen, 0.0, 100.0), 5.0);
EXPECT_NE(absl::Exponential<double>(gen, 1.0), 42);
EXPECT_CALL(absl::MockExponential<double>(), Call(gen, 1.0))
.WillOnce(Return(42));
EXPECT_EQ(absl::Exponential<double>(gen, 1.0), 42);
EXPECT_NE(absl::Poisson<int>(gen, 1.0), 500);
EXPECT_CALL(absl::MockPoisson<int>(), Call(gen, 1.0)).WillOnce(Return(500));
EXPECT_EQ(absl::Poisson<int>(gen, 1.0), 500);
EXPECT_NE(absl::Bernoulli(gen, 0.000001), true);
EXPECT_CALL(absl::MockBernoulli(), Call(gen, 0.000001))
.WillOnce(Return(true));
EXPECT_EQ(absl::Bernoulli(gen, 0.000001), true);
EXPECT_NE(absl::Beta<double>(gen, 3.0, 2.0), 0.567);
EXPECT_CALL(absl::MockBeta<double>(), Call(gen, 3.0, 2.0))
.WillOnce(Return(0.567));
EXPECT_EQ(absl::Beta<double>(gen, 3.0, 2.0), 0.567);
EXPECT_NE(absl::Zipf<int>(gen, 1000000, 2.0, 1.0), 1221);
EXPECT_CALL(absl::MockZipf<int>(), Call(gen, 1000000, 2.0, 1.0))
.WillOnce(Return(1221));
EXPECT_EQ(absl::Zipf<int>(gen, 1000000, 2.0, 1.0), 1221);
EXPECT_NE(absl::Gaussian<double>(gen, 0.0, 1.0), 0.001);
EXPECT_CALL(absl::MockGaussian<double>(), Call(gen, 0.0, 1.0))
.WillOnce(Return(0.001));
EXPECT_EQ(absl::Gaussian<double>(gen, 0.0, 1.0), 0.001);
EXPECT_NE(absl::LogUniform<int>(gen, 0, 1000000, 2), 2040);
EXPECT_CALL(absl::MockLogUniform<int>(), Call(gen, 0, 1000000, 2))
.WillOnce(Return(2040));
EXPECT_EQ(absl::LogUniform<int>(gen, 0, 1000000, 2), 2040);
}
TEST(MockUniform, OutOfBoundsIsAllowed) {
absl::UnvalidatedMockingBitGen gen;
EXPECT_CALL(absl::MockUniform<int>(), Call(gen, 1, 100)).WillOnce(Return(0));
EXPECT_EQ(absl::Uniform<int>(gen, 1, 100), 0);
}
TEST(ValidatedMockDistributions, UniformUInt128Works) {
absl::MockingBitGen gen;
EXPECT_CALL(absl::MockUniform<absl::uint128>(), Call(gen))
.WillOnce(Return(absl::Uint128Max()));
EXPECT_EQ(absl::Uniform<absl::uint128>(gen), absl::Uint128Max());
}
TEST(ValidatedMockDistributions, UniformDoubleBoundaryCases) {
absl::MockingBitGen gen;
EXPECT_CALL(absl::MockUniform<double>(), Call(gen, 1.0, 10.0))
.WillOnce(Return(
std::nextafter(10.0, -std::numeric_limits<double>::infinity())));
EXPECT_EQ(absl::Uniform<double>(gen, 1.0, 10.0),
std::nextafter(10.0, -std::numeric_limits<double>::infinity()));
EXPECT_CALL(absl::MockUniform<double>(),
Call(absl::IntervalOpen, gen, 1.0, 10.0))
.WillOnce(Return(
std::nextafter(10.0, -std::numeric_limits<double>::infinity())));
EXPECT_EQ(absl::Uniform<double>(absl::IntervalOpen, gen, 1.0, 10.0),
std::nextafter(10.0, -std::numeric_limits<double>::infinity()));
EXPECT_CALL(absl::MockUniform<double>(),
Call(absl::IntervalOpen, gen, 1.0, 10.0))
.WillOnce(
Return(std::nextafter(1.0, std::numeric_limits<double>::infinity())));
EXPECT_EQ(absl::Uniform<double>(absl::IntervalOpen, gen, 1.0, 10.0),
std::nextafter(1.0, std::numeric_limits<double>::infinity()));
}
TEST(ValidatedMockDistributions, UniformDoubleEmptyRangeCases) {
absl::MockingBitGen gen;
ON_CALL(absl::MockUniform<double>(), Call(absl::IntervalOpen, gen, 1.0, 1.0))
.WillByDefault(Return(1.0));
EXPECT_EQ(absl::Uniform<double>(absl::IntervalOpen, gen, 1.0, 1.0), 1.0);
ON_CALL(absl::MockUniform<double>(),
Call(absl::IntervalOpenClosed, gen, 1.0, 1.0))
.WillByDefault(Return(1.0));
EXPECT_EQ(absl::Uniform<double>(absl::IntervalOpenClosed, gen, 1.0, 1.0),
1.0);
ON_CALL(absl::MockUniform<double>(),
Call(absl::IntervalClosedOpen, gen, 1.0, 1.0))
.WillByDefault(Return(1.0));
EXPECT_EQ(absl::Uniform<double>(absl::IntervalClosedOpen, gen, 1.0, 1.0),
1.0);
}
TEST(ValidatedMockDistributions, UniformIntEmptyRangeCases) {
absl::MockingBitGen gen;
ON_CALL(absl::MockUniform<int>(), Call(absl::IntervalOpen, gen, 1, 1))
.WillByDefault(Return(1));
EXPECT_EQ(absl::Uniform<int>(absl::IntervalOpen, gen, 1, 1), 1);
ON_CALL(absl::MockUniform<int>(), Call(absl::IntervalOpenClosed, gen, 1, 1))
.WillByDefault(Return(1));
EXPECT_EQ(absl::Uniform<int>(absl::IntervalOpenClosed, gen, 1, 1), 1);
ON_CALL(absl::MockUniform<int>(), Call(absl::IntervalClosedOpen, gen, 1, 1))
.WillByDefault(Return(1));
EXPECT_EQ(absl::Uniform<int>(absl::IntervalClosedOpen, gen, 1, 1), 1);
}
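// With a validating MockingBitGen, a mocked result outside the requested
// interval is a fatal error; these death tests pin down the failure message.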
TEST(ValidatedMockUniformDeathTest, Examples) {
absl::MockingBitGen gen;
EXPECT_DEATH_IF_SUPPORTED(
{
EXPECT_CALL(absl::MockUniform<int>(), Call(gen, 1, 100))
.WillOnce(Return(0));
absl::Uniform<int>(gen, 1, 100);
},
" 0 is not in \\[1, 100\\)");
EXPECT_DEATH_IF_SUPPORTED(
{
EXPECT_CALL(absl::MockUniform<int>(), Call(gen, 1, 100))
.WillOnce(Return(101));
absl::Uniform<int>(gen, 1, 100);
},
" 101 is not in \\[1, 100\\)");
EXPECT_DEATH_IF_SUPPORTED(
{
EXPECT_CALL(absl::MockUniform<int>(), Call(gen, 1, 100))
.WillOnce(Return(100));
absl::Uniform<int>(gen, 1, 100);
},
" 100 is not in \\[1, 100\\)");
EXPECT_DEATH_IF_SUPPORTED(
{
EXPECT_CALL(absl::MockUniform<int>(),
Call(absl::IntervalOpen, gen, 1, 100))
.WillOnce(Return(1));
absl::Uniform<int>(absl::IntervalOpen, gen, 1, 100);
},
" 1 is not in \\(1, 100\\)");
EXPECT_DEATH_IF_SUPPORTED(
{
EXPECT_CALL(absl::MockUniform<int>(),
Call(absl::IntervalOpen, gen, 1, 100))
.WillOnce(Return(101));
absl::Uniform<int>(absl::IntervalOpen, gen, 1, 100);
},
" 101 is not in \\(1, 100\\)");
EXPECT_DEATH_IF_SUPPORTED(
{
EXPECT_CALL(absl::MockUniform<int>(),
Call(absl::IntervalOpen, gen, 1, 100))
.WillOnce(Return(100));
absl::Uniform<int>(absl::IntervalOpen, gen, 1, 100);
},
" 100 is not in \\(1, 100\\)");
EXPECT_DEATH_IF_SUPPORTED(
{
EXPECT_CALL(absl::MockUniform<int>(),
Call(absl::IntervalOpenClosed, gen, 1, 100))
.WillOnce(Return(1));
absl::Uniform<int>(absl::IntervalOpenClosed, gen, 1, 100);
},
" 1 is not in \\(1, 100\\]");
EXPECT_DEATH_IF_SUPPORTED(
{
EXPECT_CALL(absl::MockUniform<int>(),
Call(absl::IntervalOpenClosed, gen, 1, 100))
.WillOnce(Return(101));
absl::Uniform<int>(absl::IntervalOpenClosed, gen, 1, 100);
},
" 101 is not in \\(1, 100\\]");
EXPECT_DEATH_IF_SUPPORTED(
{
EXPECT_CALL(absl::MockUniform<int>(),
Call(absl::IntervalOpenClosed, gen, 1, 100))
.WillOnce(Return(0));
absl::Uniform<int>(absl::IntervalOpenClosed, gen, 1, 100);
},
" 0 is not in \\(1, 100\\]");
EXPECT_DEATH_IF_SUPPORTED(
{
EXPECT_CALL(absl::MockUniform<int>(),
Call(absl::IntervalOpenClosed, gen, 1, 100))
.WillOnce(Return(101));
absl::Uniform<int>(absl::IntervalOpenClosed, gen, 1, 100);
},
" 101 is not in \\(1, 100\\]");
EXPECT_DEATH_IF_SUPPORTED(
{
EXPECT_CALL(absl::MockUniform<int>(),
Call(absl::IntervalClosed, gen, 1, 100))
.WillOnce(Return(0));
absl::Uniform<int>(absl::IntervalClosed, gen, 1, 100);
},
" 0 is not in \\[1, 100\\]");
EXPECT_DEATH_IF_SUPPORTED(
{
EXPECT_CALL(absl::MockUniform<int>(),
Call(absl::IntervalClosed, gen, 1, 100))
.WillOnce(Return(101));
absl::Uniform<int>(absl::IntervalClosed, gen, 1, 100);
},
" 101 is not in \\[1, 100\\]");
}
TEST(ValidatedMockUniformDeathTest, DoubleBoundaryCases) {
absl::MockingBitGen gen;
EXPECT_DEATH_IF_SUPPORTED(
{
EXPECT_CALL(absl::MockUniform<double>(), Call(gen, 1.0, 10.0))
.WillOnce(Return(10.0));
EXPECT_EQ(absl::Uniform<double>(gen, 1.0, 10.0), 10.0);
},
" 10 is not in \\[1, 10\\)");
EXPECT_DEATH_IF_SUPPORTED(
{
EXPECT_CALL(absl::MockUniform<double>(),
Call(absl::IntervalOpen, gen, 1.0, 10.0))
.WillOnce(Return(10.0));
EXPECT_EQ(absl::Uniform<double>(absl::IntervalOpen, gen, 1.0, 10.0),
10.0);
},
" 10 is not in \\(1, 10\\)");
EXPECT_DEATH_IF_SUPPORTED(
{
EXPECT_CALL(absl::MockUniform<double>(),
Call(absl::IntervalOpen, gen, 1.0, 10.0))
.WillOnce(Return(1.0));
EXPECT_EQ(absl::Uniform<double>(absl::IntervalOpen, gen, 1.0, 10.0),
1.0);
},
" 1 is not in \\(1, 10\\)");
}
} | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/random/mock_distributions.h | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/random/mock_distributions_test.cc | 03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4 |
8c6606ee-28ef-4e3b-be17-ef3a8e2c4e7f | cpp | google/cel-cpp | native_type | common/native_type.cc | common/native_type_test.cc | #include "common/native_type.h"
#include <cstddef>
#include <cstdint>
#include <cstdlib>
#include <memory>
#include <string>
#include "absl/base/casts.h"
#include "absl/strings/str_cat.h"
#ifdef CEL_INTERNAL_HAVE_RTTI
#ifdef _WIN32
extern "C" char* __unDName(char*, const char*, int, void* (*)(size_t),
void (*)(void*), int);
#else
#include <cxxabi.h>
#endif
#endif
namespace cel {
namespace {
#ifdef CEL_INTERNAL_HAVE_RTTI
struct FreeDeleter {
void operator()(char* ptr) const { std::free(ptr); }
};
#endif
}
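// Produces a human-readable name for the type: the demangled RTTI name when
// RTTI is available (__unDName on Windows, __cxa_demangle elsewhere),
// otherwise the hexadecimal value of the type-id pointer.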
std::string NativeTypeId::DebugString() const {
if (rep_ == nullptr) {
return std::string();
}
#ifdef CEL_INTERNAL_HAVE_RTTI
#ifdef _WIN32
std::unique_ptr<char, FreeDeleter> demangled(
__unDName(nullptr, rep_->raw_name(), 0, std::malloc, std::free, 0x2800));
if (demangled == nullptr) {
return std::string(rep_->name());
}
return std::string(demangled.get());
#else
size_t length = 0;
int status = 0;
std::unique_ptr<char, FreeDeleter> demangled(
abi::__cxa_demangle(rep_->name(), nullptr, &length, &status));
if (status != 0 || demangled == nullptr) {
return std::string(rep_->name());
}
while (length != 0 && demangled.get()[length - 1] == '\0') {
--length;
}
return std::string(demangled.get(), length);
#endif
#else
return absl::StrCat("0x", absl::Hex(absl::bit_cast<uintptr_t>(rep_)));
#endif
}
} | #include "common/native_type.h"
#include <cstring>
#include <sstream>
#include "absl/hash/hash_testing.h"
#include "internal/testing.h"
namespace cel {
namespace {
using ::testing::IsEmpty;
using ::testing::Not;
using ::testing::SizeIs;
struct Type1 {};
struct Type2 {};
struct Type3 {};
TEST(NativeTypeId, ImplementsAbslHashCorrectly) {
EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly(
{NativeTypeId(), NativeTypeId::For<Type1>(), NativeTypeId::For<Type2>(),
NativeTypeId::For<Type3>()}));
}
TEST(NativeTypeId, DebugString) {
std::ostringstream out;
out << NativeTypeId();
EXPECT_THAT(out.str(), IsEmpty());
out << NativeTypeId::For<Type1>();
auto string = out.str();
EXPECT_THAT(string, Not(IsEmpty()));
EXPECT_THAT(string, SizeIs(std::strlen(string.c_str())));
}
struct TestType {};
}
template <>
struct NativeTypeTraits<TestType> final {
static NativeTypeId Id(const TestType&) {
return NativeTypeId::For<TestType>();
}
};
namespace {
TEST(NativeTypeId, Of) {
EXPECT_EQ(NativeTypeId::Of(TestType()), NativeTypeId::For<TestType>());
}
struct TrivialObject {};
TEST(NativeType, SkipDestructorTrivial) {
EXPECT_TRUE(NativeType::SkipDestructor(TrivialObject{}));
}
struct NonTrivialObject {
~NonTrivialObject() {}
};
TEST(NativeType, SkipDestructorNonTrivial) {
EXPECT_FALSE(NativeType::SkipDestructor(NonTrivialObject{}));
}
struct SkippableDestructObject {
~SkippableDestructObject() {}
};
}
template <>
struct NativeTypeTraits<SkippableDestructObject> final {
static bool SkipDestructor(const SkippableDestructObject&) { return true; }
};
namespace {
TEST(NativeType, SkipDestructorTraits) {
EXPECT_TRUE(NativeType::SkipDestructor(SkippableDestructObject{}));
}
}
} | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/common/native_type.cc | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/common/native_type_test.cc | 4552db5798fb0853b131b783d8875794334fae7f |
288f78e0-d9c8-4919-8c6f-aad0fdd90c4a | cpp | abseil/abseil-cpp | randen_engine | absl/random/internal/randen_engine.h | absl/random/internal/randen_engine_test.cc | #ifndef ABSL_RANDOM_INTERNAL_RANDEN_ENGINE_H_
#define ABSL_RANDOM_INTERNAL_RANDEN_ENGINE_H_
#include <algorithm>
#include <cinttypes>
#include <cstdlib>
#include <cstring>
#include <iostream>
#include <iterator>
#include <limits>
#include <type_traits>
#include "absl/base/internal/endian.h"
#include "absl/meta/type_traits.h"
#include "absl/random/internal/iostream_state_saver.h"
#include "absl/random/internal/randen.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace random_internal {
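// Pseudorandom engine backed by the backtracking-resistant Randen generator.
// Satisfies the C++ random-number-engine interface for built-in unsigned
// result types; output blocks are produced by Randen::Generate and served
// from an aligned internal state buffer.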
template <typename T>
class alignas(8) randen_engine {
public:
using result_type = T;
static_assert(std::is_unsigned<result_type>::value,
"randen_engine template argument must be a built-in unsigned "
"integer type");
static constexpr result_type(min)() {
return (std::numeric_limits<result_type>::min)();
}
static constexpr result_type(max)() {
return (std::numeric_limits<result_type>::max)();
}
randen_engine() : randen_engine(0) {}
explicit randen_engine(result_type seed_value) { seed(seed_value); }
template <class SeedSequence,
typename = typename absl::enable_if_t<
!std::is_same<SeedSequence, randen_engine>::value>>
explicit randen_engine(SeedSequence&& seq) {
seed(seq);
}
randen_engine(const randen_engine& other)
: next_(other.next_), impl_(other.impl_) {
std::memcpy(state(), other.state(), kStateSizeT * sizeof(result_type));
}
randen_engine& operator=(const randen_engine& other) {
next_ = other.next_;
impl_ = other.impl_;
std::memcpy(state(), other.state(), kStateSizeT * sizeof(result_type));
return *this;
}
result_type operator()() {
auto* begin = state();
if (next_ >= kStateSizeT) {
next_ = kCapacityT;
impl_.Generate(begin);
}
return little_endian::ToHost(begin[next_++]);
}
template <class SeedSequence>
typename absl::enable_if_t<
!std::is_convertible<SeedSequence, result_type>::value>
seed(SeedSequence&& seq) {
seed();
reseed(seq);
}
void seed(result_type seed_value = 0) {
next_ = kStateSizeT;
auto* begin = state();
std::fill(begin, begin + kCapacityT, 0);
std::fill(begin + kCapacityT, begin + kStateSizeT, seed_value);
}
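  // Reseeds from a seed sequence of 32-bit values. Short sequences are
  // zero-padded and their words redistributed across the buffer before the
  // buffer is absorbed into the engine state.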
template <class SeedSequence>
void reseed(SeedSequence& seq) {
using sequence_result_type = typename SeedSequence::result_type;
static_assert(sizeof(sequence_result_type) == 4,
"SeedSequence::result_type must be 32-bit");
constexpr size_t kBufferSize =
Randen::kSeedBytes / sizeof(sequence_result_type);
alignas(16) sequence_result_type buffer[kBufferSize];
const size_t entropy_size = seq.size();
if (entropy_size < kBufferSize) {
const size_t requested_entropy = (entropy_size == 0) ? 8u : entropy_size;
std::fill(buffer + requested_entropy, buffer + kBufferSize, 0);
seq.generate(buffer, buffer + requested_entropy);
#ifdef ABSL_IS_BIG_ENDIAN
for (sequence_result_type& e : buffer) {
e = absl::little_endian::FromHost(e);
}
#endif
size_t dst = kBufferSize;
while (dst > 7) {
dst -= 4;
size_t src = dst >> 1;
std::swap(buffer[--dst], buffer[--src]);
std::swap(buffer[--dst], buffer[--src]);
std::swap(buffer[--dst], buffer[--src]);
std::swap(buffer[--dst], buffer[--src]);
}
} else {
seq.generate(buffer, buffer + kBufferSize);
}
impl_.Absorb(buffer, state());
next_ = kStateSizeT;
}
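  // Advances the engine by `count` values without returning them: consumes
  // what remains of the current block, then regenerates whole blocks as
  // needed.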
void discard(uint64_t count) {
uint64_t step = std::min<uint64_t>(kStateSizeT - next_, count);
count -= step;
constexpr uint64_t kRateT = kStateSizeT - kCapacityT;
auto* begin = state();
while (count > 0) {
next_ = kCapacityT;
impl_.Generate(*reinterpret_cast<result_type(*)[kStateSizeT]>(begin));
step = std::min<uint64_t>(kRateT, count);
count -= step;
}
next_ += step;
}
bool operator==(const randen_engine& other) const {
const auto* begin = state();
return next_ == other.next_ &&
std::equal(begin, begin + kStateSizeT, other.state());
}
bool operator!=(const randen_engine& other) const {
return !(*this == other);
}
template <class CharT, class Traits>
friend std::basic_ostream<CharT, Traits>& operator<<(
std::basic_ostream<CharT, Traits>& os,
const randen_engine<T>& engine) {
using numeric_type =
typename random_internal::stream_format_type<result_type>::type;
auto saver = random_internal::make_ostream_state_saver(os);
auto* it = engine.state();
for (auto* end = it + kStateSizeT; it < end; ++it) {
os << static_cast<numeric_type>(little_endian::FromHost(*it))
<< os.fill();
}
os << engine.next_;
return os;
}
template <class CharT, class Traits>
friend std::basic_istream<CharT, Traits>& operator>>(
std::basic_istream<CharT, Traits>& is,
randen_engine<T>& engine) {
using numeric_type =
typename random_internal::stream_format_type<result_type>::type;
result_type state[kStateSizeT];
size_t next;
for (auto& elem : state) {
numeric_type value;
is >> value;
elem = little_endian::ToHost(static_cast<result_type>(value));
}
is >> next;
if (is.fail()) {
return is;
}
std::memcpy(engine.state(), state, sizeof(state));
engine.next_ = next;
return is;
}
private:
static constexpr size_t kStateSizeT =
Randen::kStateBytes / sizeof(result_type);
static constexpr size_t kCapacityT =
Randen::kCapacityBytes / sizeof(result_type);
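  // Returns a pointer to the state aligned to 16 bytes; raw_state_ carries 8
  // spare bytes so the alignment can be corrected at runtime.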
result_type* state() {
return reinterpret_cast<result_type*>(
(reinterpret_cast<uintptr_t>(&raw_state_) & 0xf) ? (raw_state_ + 8)
: raw_state_);
}
const result_type* state() const {
return const_cast<randen_engine*>(this)->state();
}
alignas(8) char raw_state_[Randen::kStateBytes + 8];
size_t next_;
Randen impl_;
};
}
ABSL_NAMESPACE_END
}
#endif | #include "absl/random/internal/randen_engine.h"
#include <algorithm>
#include <bitset>
#include <random>
#include <sstream>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/log/log.h"
#include "absl/random/internal/explicit_seed_seq.h"
#include "absl/strings/str_cat.h"
#include "absl/time/clock.h"
#define UPDATE_GOLDEN 0
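// When UPDATE_GOLDEN is set to 1, the tests print regenerated golden values
// instead of comparing against the arrays below.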
using randen_u64 = absl::random_internal::randen_engine<uint64_t>;
using randen_u32 = absl::random_internal::randen_engine<uint32_t>;
using absl::random_internal::ExplicitSeedSeq;
namespace {
template <typename UIntType>
class RandenEngineTypedTest : public ::testing::Test {};
using UIntTypes = ::testing::Types<uint8_t, uint16_t, uint32_t, uint64_t>;
TYPED_TEST_SUITE(RandenEngineTypedTest, UIntTypes);
TYPED_TEST(RandenEngineTypedTest, VerifyReseedChangesAllValues) {
using randen = typename absl::random_internal::randen_engine<TypeParam>;
using result_type = typename randen::result_type;
const size_t kNumOutputs = (sizeof(randen) * 2 / sizeof(TypeParam)) + 1;
randen engine;
{
std::seed_seq seq1{1, 2, 3, 4, 5, 6, 7};
engine.seed(seq1);
}
result_type a[kNumOutputs];
std::generate(std::begin(a), std::end(a), std::ref(engine));
{
std::random_device rd;
std::seed_seq seq2{rd(), rd(), rd()};
engine.seed(seq2);
}
result_type b[kNumOutputs];
std::generate(std::begin(b), std::end(b), std::ref(engine));
size_t changed_bits = 0;
size_t unchanged_bits = 0;
size_t total_set = 0;
size_t total_bits = 0;
size_t equal_count = 0;
for (size_t i = 0; i < kNumOutputs; ++i) {
equal_count += (a[i] == b[i]) ? 1 : 0;
std::bitset<sizeof(result_type) * 8> bitset(a[i] ^ b[i]);
changed_bits += bitset.count();
unchanged_bits += bitset.size() - bitset.count();
std::bitset<sizeof(result_type) * 8> a_set(a[i]);
std::bitset<sizeof(result_type) * 8> b_set(b[i]);
total_set += a_set.count() + b_set.count();
total_bits += 2 * 8 * sizeof(result_type);
}
EXPECT_LE(changed_bits, 0.60 * (changed_bits + unchanged_bits));
EXPECT_GE(changed_bits, 0.40 * (changed_bits + unchanged_bits));
EXPECT_NEAR(total_set, total_bits * 0.5, 4 * std::sqrt(total_bits))
<< "@" << total_set / static_cast<double>(total_bits);
const double kExpected = kNumOutputs / (1.0 * sizeof(result_type) * 8);
EXPECT_LE(equal_count, 1.0 + kExpected);
}
constexpr size_t kTwoBufferValues = sizeof(randen_u64) / sizeof(uint16_t) + 1;
TYPED_TEST(RandenEngineTypedTest, VerifyDiscard) {
using randen = typename absl::random_internal::randen_engine<TypeParam>;
for (size_t num_used = 0; num_used < kTwoBufferValues; ++num_used) {
randen engine_used;
for (size_t i = 0; i < num_used; ++i) {
engine_used();
}
for (size_t num_discard = 0; num_discard < kTwoBufferValues;
++num_discard) {
randen engine1 = engine_used;
randen engine2 = engine_used;
for (size_t i = 0; i < num_discard; ++i) {
engine1();
}
engine2.discard(num_discard);
for (size_t i = 0; i < kTwoBufferValues; ++i) {
const auto r1 = engine1();
const auto r2 = engine2();
ASSERT_EQ(r1, r2) << "used=" << num_used << " discard=" << num_discard;
}
}
}
}
TYPED_TEST(RandenEngineTypedTest, StreamOperatorsResult) {
using randen = typename absl::random_internal::randen_engine<TypeParam>;
std::wostringstream os;
std::wistringstream is;
randen engine;
EXPECT_EQ(&(os << engine), &os);
EXPECT_EQ(&(is >> engine), &is);
}
TYPED_TEST(RandenEngineTypedTest, StreamSerialization) {
using randen = typename absl::random_internal::randen_engine<TypeParam>;
for (size_t discard = 0; discard < kTwoBufferValues; ++discard) {
ExplicitSeedSeq seed_sequence{12, 34, 56};
randen engine(seed_sequence);
engine.discard(discard);
std::stringstream stream;
stream << engine;
randen new_engine;
stream >> new_engine;
for (size_t i = 0; i < 64; ++i) {
EXPECT_EQ(engine(), new_engine()) << " " << i;
}
}
}
constexpr size_t kNumGoldenOutputs = 127;
TYPED_TEST(RandenEngineTypedTest, RandomNumberEngineInterface) {
using randen = typename absl::random_internal::randen_engine<TypeParam>;
using E = randen;
using T = typename E::result_type;
static_assert(std::is_copy_constructible<E>::value,
"randen_engine must be copy constructible");
static_assert(absl::is_copy_assignable<E>::value,
"randen_engine must be copy assignable");
static_assert(std::is_move_constructible<E>::value,
"randen_engine must be move constructible");
static_assert(absl::is_move_assignable<E>::value,
"randen_engine must be move assignable");
static_assert(std::is_same<decltype(std::declval<E>()()), T>::value,
"return type of operator() must be result_type");
E e, v;
const E x, y;
T s = 1;
std::seed_seq q{1, 2, 3};
unsigned long long z = 1;
std::wostringstream os;
std::wistringstream is;
E{};
E{x};
E{s};
E{q};
e.seed();
EXPECT_TRUE(e == x);
e.seed(q);
{
E tmp(q);
EXPECT_TRUE(e == tmp);
}
e();
{
E tmp(q);
EXPECT_TRUE(e != tmp);
}
e.discard(z);
static_assert(std::is_same<decltype(x == y), bool>::value,
"return type of operator== must be bool");
static_assert(std::is_same<decltype(x != y), bool>::value,
"return type of operator== must be bool");
}
TYPED_TEST(RandenEngineTypedTest, RandenEngineSFINAETest) {
using randen = typename absl::random_internal::randen_engine<TypeParam>;
using result_type = typename randen::result_type;
{
randen engine(result_type(1));
engine.seed(result_type(1));
}
{
result_type n = 1;
randen engine(n);
engine.seed(n);
}
{
randen engine(1);
engine.seed(1);
}
{
int n = 1;
randen engine(n);
engine.seed(n);
}
{
std::seed_seq seed_seq;
randen engine(seed_seq);
engine.seed(seed_seq);
}
{
randen engine{std::seed_seq()};
engine.seed(std::seed_seq());
}
}
TEST(RandenTest, VerifyGoldenRanden64Default) {
constexpr uint64_t kGolden[kNumGoldenOutputs] = {
0xc3c14f134e433977, 0xdda9f47cd90410ee, 0x887bf3087fd8ca10,
0xf0b780f545c72912, 0x15dbb1d37696599f, 0x30ec63baff3c6d59,
0xb29f73606f7f20a6, 0x02808a316f49a54c, 0x3b8feaf9d5c8e50e,
0x9cbf605e3fd9de8a, 0xc970ae1a78183bbb, 0xd8b2ffd356301ed5,
0xf4b327fe0fc73c37, 0xcdfd8d76eb8f9a19, 0xc3a506eb91420c9d,
0xd5af05dd3eff9556, 0x48db1bb78f83c4a1, 0x7023920e0d6bfe8c,
0x58d3575834956d42, 0xed1ef4c26b87b840, 0x8eef32a23e0b2df3,
0x497cabf3431154fc, 0x4e24370570029a8b, 0xd88b5749f090e5ea,
0xc651a582a970692f, 0x78fcec2cbb6342f5, 0x463cb745612f55db,
0x352ee4ad1816afe3, 0x026ff374c101da7e, 0x811ef0821c3de851,
0x6f7e616704c4fa59, 0xa0660379992d58fc, 0x04b0a374a3b795c7,
0x915f3445685da798, 0x26802a8ac76571ce, 0x4663352533ce1882,
0xb9fdefb4a24dc738, 0x5588ba3a4d6e6c51, 0xa2101a42d35f1956,
0x607195a5e200f5fd, 0x7e100308f3290764, 0xe1e5e03c759c0709,
0x082572cc5da6606f, 0xcbcf585399e432f1, 0xe8a2be4f8335d8f1,
0x0904469acbfee8f2, 0xf08bd31b6daecd51, 0x08e8a1f1a69da69a,
0x6542a20aad57bff5, 0x2e9705bb053d6b46, 0xda2fc9db0713c391,
0x78e3a810213b6ffb, 0xdc16a59cdd85f8a6, 0xc0932718cd55781f,
0xb9bfb29c2b20bfe5, 0xb97289c1be0f2f9c, 0xc0a2a0e403a892d4,
0x5524bb834771435b, 0x8265da3d39d1a750, 0xff4af3ab8d1b78c5,
0xf0ec5f424bcad77f, 0x66e455f627495189, 0xc82d3120b57e3270,
0x3424e47dc22596e3, 0xbc0c95129ccedcdd, 0xc191c595afc4dcbf,
0x120392bd2bb70939, 0x7f90650ea6cd6ab4, 0x7287491832695ad3,
0xa7c8fac5a7917eb0, 0xd088cb9418be0361, 0x7c1bf9839c7c1ce5,
0xe2e991fa58e1e79e, 0x78565cdefd28c4ad, 0x7351b9fef98bafad,
0x2a9eac28b08c96bf, 0x6c4f179696cb2225, 0x13a685861bab87e0,
0x64c6de5aa0501971, 0x30537425cac70991, 0x01590d9dc6c532b7,
0x7e05e3aa8ec720dc, 0x74a07d9c54e3e63f, 0x738184388f3bc1d2,
0x26ffdc5067be3acb, 0x6bcdf185561f255f, 0xa0eaf2e1cf99b1c6,
0x171df81934f68604, 0x7ea5a21665683e5a, 0x5d1cb02075ba1cea,
0x957f38cbd2123fdf, 0xba6364eff80de02f, 0x606e0a0e41d452ee,
0x892d8317de82f7a2, 0xe707b1db50f7b43e, 0x4eb28826766fcf5b,
0x5a362d56e80a0951, 0x6ee217df16527d78, 0xf6737962ba6b23dd,
0x443e63857d4076ca, 0x790d9a5f048adfeb, 0xd796b052151ee94d,
0x033ed95c12b04a03, 0x8b833ff84893da5d, 0x3d6724b1bb15eab9,
0x9877c4225061ca76, 0xd68d6810adf74fb3, 0x42e5352fe30ce989,
0x265b565a7431fde7, 0x3cdbf7e358df4b8b, 0x2922a47f6d3e8779,
0x52d2242f65b37f88, 0x5d836d6e2958d6b5, 0x29d40f00566d5e26,
0x288db0e1124b14a0, 0x6c056608b7d9c1b6, 0x0b9471bdb8f19d32,
0x8fb946504faa6c9d, 0x8943a9464540251c, 0xfd1fe27d144a09e0,
0xea6ac458da141bda, 0x8048f217633fce36, 0xfeda1384ade74d31,
0x4334b8b02ff7612f, 0xdbc8441f5227e216, 0x096d119a3605c85b,
0x2b72b31c21b7d7d0};
randen_u64 engine;
#if UPDATE_GOLDEN
(void)kGolden;
for (size_t i = 0; i < kNumGoldenOutputs; ++i) {
printf("0x%016lx, ", engine());
if (i % 3 == 2) {
printf("\n");
}
}
printf("\n\n\n");
#else
for (const auto& elem : kGolden) {
EXPECT_EQ(elem, engine());
}
engine.seed();
for (const auto& elem : kGolden) {
EXPECT_EQ(elem, engine());
}
#endif
}
TEST(RandenTest, VerifyGoldenRanden64Seeded) {
constexpr uint64_t kGolden[kNumGoldenOutputs] = {
0x83a9e58f94d3dcd5, 0x70bbdff3d97949fb, 0x0438481f7471c1b4,
0x34fdc58ee5fb5930, 0xceee4f2d2a937d17, 0xb5a26a68e432aea9,
0x8b64774a3fb51740, 0xd89ac1fc74249c74, 0x03910d1d23fc3fdf,
0xd38f630878aa897f, 0x0ee8f0f5615f7e44, 0x98f5a53df8279d52,
0xb403f52c25938d0e, 0x240072996ea6e838, 0xd3a791246190fa61,
0xaaedd3df7a7b4f80, 0xc6eacabe05deaf6e, 0xb7967dd8790edf4d,
0x9a0a8e67e049d279, 0x0494f606aebc23e7, 0x598dcd687bc3e0ee,
0x010ac81802d452a1, 0x6407c87160aa2842, 0x5a56e276486f93a0,
0xc887a399d46a8f02, 0x9e1e6100fe93b740, 0x12d02e330f8901f6,
0xc39ca52b47e790b7, 0xb0b0a2fa11e82e61, 0x1542d841a303806a,
0x1fe659fd7d6e9d86, 0xb8c90d80746541ac, 0x239d56a5669ddc94,
0xd40db57c8123d13c, 0x3abc2414153a0db0, 0x9bad665630cb8d61,
0x0bd1fb90ee3f4bbc, 0x8f0b4d7e079b4e42, 0xfa0fb0e0ee59e793,
0x51080b283e071100, 0x2c4b9e715081cc15, 0xbe10ed49de4941df,
0xf8eaac9d4b1b0d37, 0x4bcce4b54605e139, 0xa64722b76765dda6,
0xb9377d738ca28ab5, 0x779fad81a8ccc1af, 0x65cb3ee61ffd3ba7,
0xd74e79087862836f, 0xd05b9c584c3f25bf, 0x2ba93a4693579827,
0xd81530aff05420ce, 0xec06cea215478621, 0x4b1798a6796d65ad,
0xf142f3fb3a6f6fa6, 0x002b7bf7e237b560, 0xf47f2605ef65b4f8,
0x9804ec5517effc18, 0xaed3d7f8b7d481cd, 0x5651c24c1ce338d1,
0x3e7a38208bf0a3c6, 0x6796a7b614534aed, 0x0d0f3b848358460f,
0x0fa5fe7600b19524, 0x2b0cf38253faaedc, 0x10df9188233a9fd6,
0x3a10033880138b59, 0x5fb0b0d23948e80f, 0x9e76f7b02fbf5350,
0x0816052304b1a985, 0x30c9880db41fd218, 0x14aa399b65e20f28,
0xe1454a8cace787b4, 0x325ac971b6c6f0f5, 0x716b1aa2784f3d36,
0x3d5ce14accfd144f, 0x6c0c97710f651792, 0xbc5b0f59fb333532,
0x2a90a7d2140470bc, 0x8da269f55c1e1c8d, 0xcfc37143895792ca,
0xbe21eab1f30b238f, 0x8c47229dee4d65fd, 0x5743614ed1ed7d54,
0x351372a99e9c476e, 0x2bd5ea15e5db085f, 0x6925fde46e0af4ca,
0xed3eda2bdc1f45bd, 0xdef68c68d460fa6e, 0xe42a0de76253e2b5,
0x4e5176dcbc29c305, 0xbfd85fba9f810f6e, 0x76a5a2a9beb815c6,
0x01edc4ddceaf414c, 0xa4e98904b4bb3b4b, 0x00bd63ac7d2f1ddd,
0xb8491fe6e998ddbb, 0xb386a3463dda6800, 0x0081887688871619,
0x33d394b3344e9a38, 0x815dba65a3a8baf9, 0x4232f6ec02c2fd1a,
0xb5cff603edd20834, 0x580189243f687663, 0xa8d5a2cbdc27fe99,
0x725d881693fa0131, 0xa2be2c13db2c7ac5, 0x7b6a9614b509fd78,
0xb6b136d71e717636, 0x660f1a71aff046ea, 0x0ba10ae346c8ec9e,
0xe66dde53e3145b41, 0x3b18288c88c26be6, 0x4d9d9d2ff02db933,
0x4167da8c70f46e8a, 0xf183beef8c6318b4, 0x4d889e1e71eeeef1,
0x7175c71ad6689b6b, 0xfb9e42beacd1b7dd, 0xc33d0e91b29b5e0d,
0xd39b83291ce47922, 0xc4d570fb8493d12e, 0x23d5a5724f424ae6,
0x5245f161876b6616, 0x38d77dbd21ab578d, 0x9c3423311f4ecbfe,
0x76fe31389bacd9d5,
};
ExplicitSeedSeq seed_sequence{12, 34, 56};
randen_u64 engine(seed_sequence);
#if UPDATE_GOLDEN
(void)kGolden;
for (size_t i = 0; i < kNumGoldenOutputs; ++i) {
printf("0x%016lx, ", engine());
if (i % 3 == 2) {
printf("\n");
}
}
printf("\n\n\n");
#else
for (const auto& elem : kGolden) {
EXPECT_EQ(elem, engine());
}
engine.seed(seed_sequence);
for (const auto& elem : kGolden) {
EXPECT_EQ(elem, engine());
}
#endif
}
TEST(RandenTest, VerifyGoldenRanden32Default) {
constexpr uint64_t kGolden[2 * kNumGoldenOutputs] = {
0x4e433977, 0xc3c14f13, 0xd90410ee, 0xdda9f47c, 0x7fd8ca10, 0x887bf308,
0x45c72912, 0xf0b780f5, 0x7696599f, 0x15dbb1d3, 0xff3c6d59, 0x30ec63ba,
0x6f7f20a6, 0xb29f7360, 0x6f49a54c, 0x02808a31, 0xd5c8e50e, 0x3b8feaf9,
0x3fd9de8a, 0x9cbf605e, 0x78183bbb, 0xc970ae1a, 0x56301ed5, 0xd8b2ffd3,
0x0fc73c37, 0xf4b327fe, 0xeb8f9a19, 0xcdfd8d76, 0x91420c9d, 0xc3a506eb,
0x3eff9556, 0xd5af05dd, 0x8f83c4a1, 0x48db1bb7, 0x0d6bfe8c, 0x7023920e,
0x34956d42, 0x58d35758, 0x6b87b840, 0xed1ef4c2, 0x3e0b2df3, 0x8eef32a2,
0x431154fc, 0x497cabf3, 0x70029a8b, 0x4e243705, 0xf090e5ea, 0xd88b5749,
0xa970692f, 0xc651a582, 0xbb6342f5, 0x78fcec2c, 0x612f55db, 0x463cb745,
0x1816afe3, 0x352ee4ad, 0xc101da7e, 0x026ff374, 0x1c3de851, 0x811ef082,
0x04c4fa59, 0x6f7e6167, 0x992d58fc, 0xa0660379, 0xa3b795c7, 0x04b0a374,
0x685da798, 0x915f3445, 0xc76571ce, 0x26802a8a, 0x33ce1882, 0x46633525,
0xa24dc738, 0xb9fdefb4, 0x4d6e6c51, 0x5588ba3a, 0xd35f1956, 0xa2101a42,
0xe200f5fd, 0x607195a5, 0xf3290764, 0x7e100308, 0x759c0709, 0xe1e5e03c,
0x5da6606f, 0x082572cc, 0x99e432f1, 0xcbcf5853, 0x8335d8f1, 0xe8a2be4f,
0xcbfee8f2, 0x0904469a, 0x6daecd51, 0xf08bd31b, 0xa69da69a, 0x08e8a1f1,
0xad57bff5, 0x6542a20a, 0x053d6b46, 0x2e9705bb, 0x0713c391, 0xda2fc9db,
0x213b6ffb, 0x78e3a810, 0xdd85f8a6, 0xdc16a59c, 0xcd55781f, 0xc0932718,
0x2b20bfe5, 0xb9bfb29c, 0xbe0f2f9c, 0xb97289c1, 0x03a892d4, 0xc0a2a0e4,
0x4771435b, 0x5524bb83, 0x39d1a750, 0x8265da3d, 0x8d1b78c5, 0xff4af3ab,
0x4bcad77f, 0xf0ec5f42, 0x27495189, 0x66e455f6, 0xb57e3270, 0xc82d3120,
0xc22596e3, 0x3424e47d, 0x9ccedcdd, 0xbc0c9512, 0xafc4dcbf, 0xc191c595,
0x2bb70939, 0x120392bd, 0xa6cd6ab4, 0x7f90650e, 0x32695ad3, 0x72874918,
0xa7917eb0, 0xa7c8fac5, 0x18be0361, 0xd088cb94, 0x9c7c1ce5, 0x7c1bf983,
0x58e1e79e, 0xe2e991fa, 0xfd28c4ad, 0x78565cde, 0xf98bafad, 0x7351b9fe,
0xb08c96bf, 0x2a9eac28, 0x96cb2225, 0x6c4f1796, 0x1bab87e0, 0x13a68586,
0xa0501971, 0x64c6de5a, 0xcac70991, 0x30537425, 0xc6c532b7, 0x01590d9d,
0x8ec720dc, 0x7e05e3aa, 0x54e3e63f, 0x74a07d9c, 0x8f3bc1d2, 0x73818438,
0x67be3acb, 0x26ffdc50, 0x561f255f, 0x6bcdf185, 0xcf99b1c6, 0xa0eaf2e1,
0x34f68604, 0x171df819, 0x65683e5a, 0x7ea5a216, 0x75ba1cea, 0x5d1cb020,
0xd2123fdf, 0x957f38cb, 0xf80de02f, 0xba6364ef, 0x41d452ee, 0x606e0a0e,
0xde82f7a2, 0x892d8317, 0x50f7b43e, 0xe707b1db, 0x766fcf5b, 0x4eb28826,
0xe80a0951, 0x5a362d56, 0x16527d78, 0x6ee217df, 0xba6b23dd, 0xf6737962,
0x7d4076ca, 0x443e6385, 0x048adfeb, 0x790d9a5f, 0x151ee94d, 0xd796b052,
0x12b04a03, 0x033ed95c, 0x4893da5d, 0x8b833ff8, 0xbb15eab9, 0x3d6724b1,
0x5061ca76, 0x9877c422, 0xadf74fb3, 0xd68d6810, 0xe30ce989, 0x42e5352f,
0x7431fde7, 0x265b565a, 0x58df4b8b, 0x3cdbf7e3, 0x6d3e8779, 0x2922a47f,
0x65b37f88, 0x52d2242f, 0x2958d6b5, 0x5d836d6e, 0x566d5e26, 0x29d40f00,
0x124b14a0, 0x288db0e1, 0xb7d9c1b6, 0x6c056608, 0xb8f19d32, 0x0b9471bd,
0x4faa6c9d, 0x8fb94650, 0x4540251c, 0x8943a946, 0x144a09e0, 0xfd1fe27d,
0xda141bda, 0xea6ac458, 0x633fce36, 0x8048f217, 0xade74d31, 0xfeda1384,
0x2ff7612f, 0x4334b8b0, 0x5227e216, 0xdbc8441f, 0x3605c85b, 0x096d119a,
0x21b7d7d0, 0x2b72b31c};
randen_u32 engine;
#if UPDATE_GOLDEN
(void)kGolden;
for (size_t i = 0; i < 2 * kNumGoldenOutputs; ++i) {
printf("0x%08x, ", engine());
if (i % 6 == 5) {
printf("\n");
}
}
printf("\n\n\n");
#else
for (const auto& elem : kGolden) {
EXPECT_EQ(elem, engine());
}
engine.seed();
for (const auto& elem : kGolden) {
EXPECT_EQ(elem, engine());
}
#endif
}
TEST(RandenTest, VerifyGoldenRanden32Seeded) {
constexpr uint64_t kGolden[2 * kNumGoldenOutputs] = {
0x94d3dcd5, 0x83a9e58f, 0xd97949fb, 0x70bbdff3, 0x7471c1b4, 0x0438481f,
0xe5fb5930, 0x34fdc58e, 0x2a937d17, 0xceee4f2d, 0xe432aea9, 0xb5a26a68,
0x3fb51740, 0x8b64774a, 0x74249c74, 0xd89ac1fc, 0x23fc3fdf, 0x03910d1d,
0x78aa897f, 0xd38f6308, 0x615f7e44, 0x0ee8f0f5, 0xf8279d52, 0x98f5a53d,
0x25938d0e, 0xb403f52c, 0x6ea6e838, 0x24007299, 0x6190fa61, 0xd3a79124,
0x7a7b4f80, 0xaaedd3df, 0x05deaf6e, 0xc6eacabe, 0x790edf4d, 0xb7967dd8,
0xe049d279, 0x9a0a8e67, 0xaebc23e7, 0x0494f606, 0x7bc3e0ee, 0x598dcd68,
0x02d452a1, 0x010ac818, 0x60aa2842, 0x6407c871, 0x486f93a0, 0x5a56e276,
0xd46a8f02, 0xc887a399, 0xfe93b740, 0x9e1e6100, 0x0f8901f6, 0x12d02e33,
0x47e790b7, 0xc39ca52b, 0x11e82e61, 0xb0b0a2fa, 0xa303806a, 0x1542d841,
0x7d6e9d86, 0x1fe659fd, 0x746541ac, 0xb8c90d80, 0x669ddc94, 0x239d56a5,
0x8123d13c, 0xd40db57c, 0x153a0db0, 0x3abc2414, 0x30cb8d61, 0x9bad6656,
0xee3f4bbc, 0x0bd1fb90, 0x079b4e42, 0x8f0b4d7e, 0xee59e793, 0xfa0fb0e0,
0x3e071100, 0x51080b28, 0x5081cc15, 0x2c4b9e71, 0xde4941df, 0xbe10ed49,
0x4b1b0d37, 0xf8eaac9d, 0x4605e139, 0x4bcce4b5, 0x6765dda6, 0xa64722b7,
0x8ca28ab5, 0xb9377d73, 0xa8ccc1af, 0x779fad81, 0x1ffd3ba7, 0x65cb3ee6,
0x7862836f, 0xd74e7908, 0x4c3f25bf, 0xd05b9c58, 0x93579827, 0x2ba93a46,
0xf05420ce, 0xd81530af, 0x15478621, 0xec06cea2, 0x796d65ad, 0x4b1798a6,
0x3a6f6fa6, 0xf142f3fb, 0xe237b560, 0x002b7bf7, 0xef65b4f8, 0xf47f2605,
0x17effc18, 0x9804ec55, 0xb7d481cd, 0xaed3d7f8, 0x1ce338d1, 0x5651c24c,
0x8bf0a3c6, 0x3e7a3820, 0x14534aed, 0x6796a7b6, 0x8358460f, 0x0d0f3b84,
0x00b19524, 0x0fa5fe76, 0x53faaedc, 0x2b0cf382, 0x233a9fd6, 0x10df9188,
0x80138b59, 0x3a100338, 0x3948e80f, 0x5fb0b0d2, 0x2fbf5350, 0x9e76f7b0,
0x04b1a985, 0x08160523, 0xb41fd218, 0x30c9880d, 0x65e20f28, 0x14aa399b,
0xace787b4, 0xe1454a8c, 0xb6c6f0f5, 0x325ac971, 0x784f3d36, 0x716b1aa2,
0xccfd144f, 0x3d5ce14a, 0x0f651792, 0x6c0c9771, 0xfb333532, 0xbc5b0f59,
0x140470bc, 0x2a90a7d2, 0x5c1e1c8d, 0x8da269f5, 0x895792ca, 0xcfc37143,
0xf30b238f, 0xbe21eab1, 0xee4d65fd, 0x8c47229d, 0xd1ed7d54, 0x5743614e,
0x9e9c476e, 0x351372a9, 0xe5db085f, 0x2bd5ea15, 0x6e0af4ca, 0x6925fde4,
0xdc1f45bd, 0xed3eda2b, 0xd460fa6e, 0xdef68c68, 0x6253e2b5, 0xe42a0de7,
0xbc29c305, 0x4e5176dc, 0x9f810f6e, 0xbfd85fba, 0xbeb815c6, 0x76a5a2a9,
0xceaf414c, 0x01edc4dd, 0xb4bb3b4b, 0xa4e98904, 0x7d2f1ddd, 0x00bd63ac,
0xe998ddbb, 0xb8491fe6, 0x3dda6800, 0xb386a346, 0x88871619, 0x00818876,
0x344e9a38, 0x33d394b3, 0xa3a8baf9, 0x815dba65, 0x02c2fd1a, 0x4232f6ec,
0xedd20834, 0xb5cff603, 0x3f687663, 0x58018924, 0xdc27fe99, 0xa8d5a2cb,
0x93fa0131, 0x725d8816, 0xdb2c7ac5, 0xa2be2c13, 0xb509fd78, 0x7b6a9614,
0x1e717636, 0xb6b136d7, 0xaff046ea, 0x660f1a71, 0x46c8ec9e, 0x0ba10ae3,
0xe3145b41, 0xe66dde53, 0x88c26be6, 0x3b18288c, 0xf02db933, 0x4d9d9d2f,
0x70f46e8a, 0x4167da8c, 0x8c6318b4, 0xf183beef, 0x71eeeef1, 0x4d889e1e,
0xd6689b6b, 0x7175c71a, 0xacd1b7dd, 0xfb9e42be, 0xb29b5e0d, 0xc33d0e91,
0x1ce47922, 0xd39b8329, 0x8493d12e, 0xc4d570fb, 0x4f424ae6, 0x23d5a572,
0x876b6616, 0x5245f161, 0x21ab578d, 0x38d77dbd, 0x1f4ecbfe, 0x9c342331,
0x9bacd9d5, 0x76fe3138,
};
ExplicitSeedSeq seed_sequence{12, 34, 56};
randen_u32 engine(seed_sequence);
#if UPDATE_GOLDEN
(void)kGolden;
for (size_t i = 0; i < 2 * kNumGoldenOutputs; ++i) {
printf("0x%08x, ", engine());
if (i % 6 == 5) {
printf("\n");
}
}
printf("\n\n\n");
#else
for (const auto& elem : kGolden) {
EXPECT_EQ(elem, engine());
}
engine.seed(seed_sequence);
for (const auto& elem : kGolden) {
EXPECT_EQ(elem, engine());
}
#endif
}
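// An engine restored from its serialized (operator>>) state must continue
// the output stream exactly where serialization left off.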
TEST(RandenTest, VerifyGoldenFromDeserializedEngine) {
constexpr uint64_t kGolden[kNumGoldenOutputs] = {
0x067f9f9ab919657a, 0x0534605912988583, 0x8a303f72feaa673f,
0x77b7fd747909185c, 0xd9af90403c56d891, 0xd939c6cb204d14b5,
0x7fbe6b954a47b483, 0x8b31a47cc34c768d, 0x3a9e546da2701a9c,
0x5246539046253e71, 0x417191ffb2a848a1, 0x7b1c7bf5a5001d09,
0x9489b15d194f2361, 0xfcebdeea3bcd2461, 0xd643027c854cec97,
0x5885397f91e0d21c, 0x53173b0efae30d58, 0x1c9c71168449fac1,
0xe358202b711ed8aa, 0x94e3918ed1d8227c, 0x5bb4e251450144cf,
0xb5c7a519b489af3b, 0x6f8b560b1f7b3469, 0xfde11dd4a1c74eef,
0x33383d2f76457dcf, 0x3060c0ec6db9fce1, 0x18f451fcddeec766,
0xe73c5d6b9f26da2a, 0x8d4cc566671b32a4, 0xb8189b73776bc9ff,
0x497a70f9caf0bc23, 0x23afcc509791dcea, 0x18af70dc4b27d306,
0xd3853f955a0ce5b9, 0x441db6c01a0afb17, 0xd0136c3fb8e1f13f,
0x5e4fd6fc2f33783c, 0xe0d24548adb5da51, 0x0f4d8362a7d3485a,
0x9f572d68270fa563, 0x6351fbc823024393, 0xa66dbfc61810e9ab,
0x0ff17fc14b651af8, 0xd74c55dafb99e623, 0x36303bc1ad85c6c2,
0x4920cd6a2af7e897, 0x0b8848addc30fecd, 0x9e1562eda6488e93,
0x197553807d607828, 0xbef5eaeda5e21235, 0x18d91d2616aca527,
0xb7821937f5c873cd, 0x2cd4ae5650dbeefc, 0xb35a64376f75ffdf,
0x9226d414d647fe07, 0x663f3db455bbb35e, 0xa829eead6ae93247,
0x7fd69c204dd0d25f, 0xbe1411f891c9acb1, 0xd476f34a506d5f11,
0xf423d2831649c5ca, 0x1e503962951abd75, 0xeccc9e8b1e34b537,
0xb11a147294044854, 0xc4cf27f0abf4929d, 0xe9193abf6fa24c8c,
0xa94a259e3aba8808, 0x21dc414197deffa3, 0xa2ae211d1ff622ae,
0xfe3995c46be5a4f4, 0xe9984c284bf11128, 0xcb1ce9d2f0851a80,
0x42fee17971d87cd8, 0xac76a98d177adc88, 0xa0973b3dedc4af6f,
0xdf56d6bbcb1b8e86, 0xf1e6485f407b11c9, 0x2c63de4deccb15c0,
0x6fe69db32ed4fad7, 0xaa51a65f84bca1f1, 0x242f2ee81d608afc,
0x8eb88b2b69fc153b, 0x22c20098baf73fd1, 0x57759466f576488c,
0x075ca562cea1be9d, 0x9a74814d73d28891, 0x73d1555fc02f4d3d,
0xc17f8f210ee89337, 0x46cca7999eaeafd4, 0x5db8d6a327a0d8ac,
0xb79b4f93c738d7a1, 0x9994512f0036ded1, 0xd3883026f38747f4,
0xf31f7458078d097c, 0x736ce4d480680669, 0x7a496f4c7e1033e3,
0xecf85bf297fbc68c, 0x9e37e1d0f24f3c4e, 0x15b6e067ca0746fc,
0xdd4a39905c5db81c, 0xb5dfafa7bcfdf7da, 0xca6646fb6f92a276,
0x1c6b35f363ef0efd, 0x6a33d06037ad9f76, 0x45544241afd8f80f,
0x83f8d83f859c90c5, 0x22aea9c5365e8c19, 0xfac35b11f20b6a6a,
0xd1acf49d1a27dd2f, 0xf281cd09c4fed405, 0x076000a42cd38e4f,
0x6ace300565070445, 0x463a62781bddc4db, 0x1477126b46b569ac,
0x127f2bb15035fbb8, 0xdfa30946049c04a8, 0x89072a586ba8dd3e,
0x62c809582bb7e74d, 0x22c0c3641406c28b, 0x9b66e36c47ff004d,
0xb9cd2c7519653330, 0x18608d79cd7a598d, 0x92c0bd1323e53e32,
0x887ff00de8524aa5, 0xa074410b787abd10, 0x18ab41b8057a2063,
0x1560abf26bc5f987};
#if UPDATE_GOLDEN
(void)kGolden;
std::seed_seq seed_sequence{1, 2, 3, 4, 5};
randen_u64 engine(seed_sequence);
std::ostringstream stream;
stream << engine;
auto str = stream.str();
printf("%s\n\n", str.c_str());
for (size_t i = 0; i < kNumGoldenOutputs; ++i) {
printf("0x%016lx, ", engine());
if (i % 3 == 2) {
printf("\n");
}
}
printf("\n\n\n");
#else
randen_u64 engine;
std::istringstream stream(
"0 0 9824501439887287479 3242284395352394785 243836530774933777 "
"4047941804708365596 17165468127298385802 949276103645889255 "
"10659970394998657921 1657570836810929787 11697746266668051452 "
"9967209969299905230 14140390331161524430 7383014124183271684 "
"13146719127702337852 13983155220295807171 11121125587542359264 "
"195757810993252695 17138580243103178492 11326030747260920501 "
"8585097322474965590 18342582839328350995 15052982824209724634 "
"7321861343874683609 1806786911778767826 10100850842665572955 "
"9249328950653985078 13600624835326909759 11137960060943860251 "
"10208781341792329629 9282723971471525577 16373271619486811032 32");
stream >> engine;
for (const auto& elem : kGolden) {
EXPECT_EQ(elem, engine());
}
#endif
}
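// Coarse performance smoke test: logs the average cost per draw. Summing the
// outputs keeps the loop from being optimized away, and the only timing
// assertion is that 100k draws take at least 1ns each in aggregate, so the
// test cannot flake on fast machines.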
TEST(RandenTest, IsFastOrSlow) {
static constexpr size_t kCount = 100000;
randen_u64 engine;
randen_u64::result_type sum = 0;
auto start = absl::GetCurrentTimeNanos();
  for (size_t i = 0; i < kCount; i++) {
sum += engine();
}
auto duration = absl::GetCurrentTimeNanos() - start;
LOG(INFO) << static_cast<double>(duration) / static_cast<double>(kCount)
<< "ns";
EXPECT_GT(sum, 0);
EXPECT_GE(duration, kCount);
}
} | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/random/internal/randen_engine.h | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/random/internal/randen_engine_test.cc | 03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4 |
9f878577-68d2-4fd6-98e3-622434fd7322 | cpp | google/arolla | embedded_model | arolla/serving/embedded_model.h | arolla/serving/embedded_model_test.cc | #ifndef AROLLA_SERVING_EMBEDDED_MODEL_H_
#define AROLLA_SERVING_EMBEDDED_MODEL_H_
#include <functional>
#include <type_traits>
#include "absl/base/no_destructor.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
#include "arolla/util/init_arolla.h"
#include "arolla/util/meta.h"
#include "arolla/util/status_macros_backport.h"
#define AROLLA_DEFINE_EMBEDDED_MODEL_FN(fn_name, model_or) \
namespace { \
const decltype(model_or)& _arolla_embed_model_or_status_##fn_name() { \
using ModelT = decltype(model_or); \
static const absl::NoDestructor<ModelT> model(model_or); \
return *model; \
} \
} \
\
const ::arolla::meta::strip_template_t<absl::StatusOr, decltype(model_or)>& \
fn_name() { \
const auto& model = _arolla_embed_model_or_status_##fn_name(); \
\
if (!model.ok()) { \
static ::arolla::meta::strip_template_t<absl::StatusOr, \
decltype(model_or)> \
error_fn = \
[status_(model.status())](const auto&...) { return status_; }; \
return error_fn; \
} \
return *model; \
} \
\
AROLLA_INITIALIZER( \
.deps = \
{ \
"@phony/serving_compiler_optimizer", \
::arolla::initializer_dep::kOperators, \
::arolla::initializer_dep::kS11n, \
}, \
.init_fn = []() -> absl::Status { \
RETURN_IF_ERROR( \
_arolla_embed_model_or_status_##fn_name().status()) \
<< "while initializing embedded model " << #fn_name << " at " \
<< __FILE__ << ":" << __LINE__; \
return absl::OkStatus(); \
})
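// A minimal usage sketch (illustrative names only; `MyInput`, the input
// loader, and `my_expr` are assumed to exist elsewhere):
//
//   const ::arolla::ExprCompiler<MyInput, float>::Function& MyModel();
//   AROLLA_DEFINE_EMBEDDED_MODEL_FN(
//       MyModel,
//       (::arolla::ExprCompiler<MyInput, float>()
//            .SetInputLoader(CreateMyInputLoader())
//            .Compile(my_expr)));
//
// Callers evaluate with `MyModel()(input)`, which returns
// absl::StatusOr<float>; if compilation failed, every call returns the
// stored error status.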
#define AROLLA_DEFINE_EMBEDDED_MODEL_SET_FN(fn_name, model_set_or) \
namespace { \
const decltype(model_set_or)& \
_arolla_embed_model_set_or_status_##fn_name() { \
using ModelSetT = decltype(model_set_or); \
static const absl::NoDestructor<ModelSetT> model_set(model_set_or); \
return *model_set; \
} \
} \
\
absl::StatusOr<std::reference_wrapper< \
const std::decay_t<decltype(model_set_or->at(""))>>> \
fn_name(absl::string_view model_name) { \
const auto& model_set = _arolla_embed_model_set_or_status_##fn_name(); \
RETURN_IF_ERROR(model_set.status()); \
auto it = model_set->find(model_name); \
if (it == model_set->end()) { \
return absl::NotFoundError( \
absl::StrFormat("model \"%s\" not found in " #fn_name, model_name)); \
} \
return it->second; \
} \
\
AROLLA_INITIALIZER( \
.deps = \
{ \
"@phony/serving_compiler_optimizer", \
::arolla::initializer_dep::kOperators, \
::arolla::initializer_dep::kS11n, \
}, \
.init_fn = []() -> absl::Status { \
RETURN_IF_ERROR( \
_arolla_embed_model_set_or_status_##fn_name().status()) \
<< "while initializing embedded model " << #fn_name << " at " \
<< __FILE__ << ":" << __LINE__; \
return absl::OkStatus(); \
})
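// A minimal usage sketch for the set variant (illustrative names only;
// `my_name_to_expr_map` is assumed to map model names to expressions):
//
//   AROLLA_DEFINE_EMBEDDED_MODEL_SET_FN(
//       MyModels,
//       ::arolla::CompileExprSet(
//           ::arolla::ExprCompiler<MyInput, float>(),
//           my_name_to_expr_map));
//
// `MyModels("some_model")` returns a StatusOr-wrapped reference to the
// compiled function, or a NotFoundError naming the missing model.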
#endif | #include "arolla/serving/embedded_model.h"
#include <functional>
#include <memory>
#include <optional>
#include <string>
#include <type_traits>
#include "benchmark/benchmark.h"
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/base/no_destructor.h"
#include "absl/container/flat_hash_map.h"
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/status/status_matchers.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "arolla/expr/eval/eval.h"
#include "arolla/expr/expr.h"
#include "arolla/expr/expr_node.h"
#include "arolla/expr/testing/testing.h"
#include "arolla/io/accessors_input_loader.h"
#include "arolla/io/accessors_slot_listener.h"
#include "arolla/io/input_loader.h"
#include "arolla/io/slot_listener.h"
#include "arolla/qexpr/evaluation_engine.h"
#include "arolla/qtype/qtype_traits.h"
#include "arolla/serving/expr_compiler.h"
#include "arolla/util/init_arolla.h"
#include "arolla/util/status_macros_backport.h"
namespace {
using ::absl_testing::IsOk;
using ::absl_testing::IsOkAndHolds;
using ::absl_testing::StatusIs;
using ::testing::Eq;
struct TestInput {
float x;
float y;
};
struct TestSideOutput {
std::optional<float> subtract;
};
absl::StatusOr<std::unique_ptr<arolla::InputLoader<TestInput>>>
CreateInputLoader() {
return ::arolla::CreateAccessorsInputLoader<TestInput>(
"x", [](const auto& x) { return x.x; },
"y", [](const auto& x) { return x.y; });
}
absl::StatusOr<std::unique_ptr<::arolla::SlotListener<TestSideOutput>>>
CreateSlotListener() {
return ::arolla::CreateAccessorsSlotListener<TestSideOutput>(
"subtract", [](float x, TestSideOutput* out) { out->subtract = x; });
}
absl::StatusOr<::arolla::expr::ExprNodePtr> CreateExpr() {
using ::arolla::expr::CallOp;
using ::arolla::expr::Leaf;
using ::arolla::testing::WithExportValueAnnotation;
ASSIGN_OR_RETURN(auto add_expr, CallOp("math.add", {Leaf("x"), Leaf("y")}));
ASSIGN_OR_RETURN(auto subtract_expr,
CallOp("math.subtract", {Leaf("x"), Leaf("y")}));
return WithExportValueAnnotation(add_expr, "subtract", subtract_expr);
}
absl::StatusOr<std::unique_ptr<::arolla::CompiledExpr>> CreateCompiledExpr() {
using ::arolla::GetQType;
using ::arolla::expr::CallOp;
using ::arolla::expr::Leaf;
ASSIGN_OR_RETURN(auto add_expr, CallOp("math.add", {Leaf("x"), Leaf("y")}));
ASSIGN_OR_RETURN(auto subtract_expr,
CallOp("math.subtract", {Leaf("x"), Leaf("y")}));
return ::arolla::expr::CompileForDynamicEvaluation(
::arolla::expr::DynamicEvaluationEngineOptions(), add_expr,
{{"x", GetQType<float>()}, {"y", GetQType<float>()}},
{{"subtract", subtract_expr}});
}
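// Both model factories above build the same computation: x + y as the main
// output, with x - y exported through the "subtract" side output. The tests
// below exercise the dynamic-expression and precompiled embedding paths.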
namespace test_namespace {
const ::arolla::ExprCompiler<TestInput, std::optional<float>>::Function&
MyDynamicEmbeddedModel();
AROLLA_DEFINE_EMBEDDED_MODEL_FN(
MyDynamicEmbeddedModel,
(::arolla::ExprCompiler<TestInput, std::optional<float>>()
.SetInputLoader(CreateInputLoader())
.AllowOutputCasting()
.Compile(CreateExpr().value())));
}
TEST(ExprCompilerTest, UseDynamicEmbeddedExpr) {
auto model = ::test_namespace::MyDynamicEmbeddedModel();
static_assert(
std::is_same_v<std::decay_t<decltype(model)>,
std::function<absl::StatusOr<std::optional<float>>(
const TestInput&)>>);
TestInput input{.x = 28, .y = 29};
EXPECT_THAT(model(input), IsOkAndHolds(57));
}
namespace test_namespace {
const ::arolla::ExprCompiler<TestInput, std::optional<float>,
TestSideOutput>::Function&
MyCompiledEmbeddedModel();
AROLLA_DEFINE_EMBEDDED_MODEL_FN(
MyCompiledEmbeddedModel,
(::arolla::ExprCompiler<TestInput, std::optional<float>, TestSideOutput>()
.SetInputLoader(CreateInputLoader())
.SetSlotListener(CreateSlotListener())
.AllowOutputCasting()
.Compile(*CreateCompiledExpr().value())));
}
TEST(ExprCompilerTest, UseCompiledEmbeddedExprWithSideOutput) {
auto model = ::test_namespace::MyCompiledEmbeddedModel();
static_assert(
std::is_same_v<std::decay_t<decltype(model)>,
std::function<absl::StatusOr<std::optional<float>>(
const TestInput&, TestSideOutput*)>>);
TestInput input{.x = 28, .y = 29};
EXPECT_THAT(model(input, nullptr), IsOkAndHolds(57));
TestSideOutput side_output;
EXPECT_THAT(model(input, &side_output), IsOkAndHolds(57));
EXPECT_THAT(side_output.subtract, Eq(-1));
}
namespace test_namespace {
absl::flat_hash_map<std::string, absl::StatusOr<::arolla::expr::ExprNodePtr>>
CreateExprSet() {
return {{"first_expr", CreateExpr()}, {"second_expr", CreateExpr()}};
}
absl::StatusOr<std::reference_wrapper<
const ::arolla::ExprCompiler<TestInput, std::optional<float>>::Function>>
MyDynamicEmbeddedExprSet(absl::string_view);
AROLLA_DEFINE_EMBEDDED_MODEL_SET_FN(
MyDynamicEmbeddedExprSet,
::arolla::CompileExprSet(
::arolla::ExprCompiler<TestInput, std::optional<float>>()
.SetInputLoader(CreateInputLoader())
.AllowOutputCasting(),
CreateExprSet()));
}
TEST(ExprCompilerTest, UseDynamicEmbeddedExprSet) {
ASSERT_OK_AND_ASSIGN(auto model,
test_namespace::MyDynamicEmbeddedExprSet("first_expr"));
static_assert(
std::is_same_v<decltype(model),
std::reference_wrapper<const std::function<absl::StatusOr<
std::optional<float>>(const TestInput&)>>>);
TestInput input{.x = 28, .y = 29};
EXPECT_THAT(model(input), IsOkAndHolds(57));
EXPECT_THAT(test_namespace::MyDynamicEmbeddedExprSet("second_expr"), IsOk());
EXPECT_THAT(
test_namespace::MyDynamicEmbeddedExprSet("missing_expr"),
StatusIs(absl::StatusCode::kNotFound,
"model \"missing_expr\" not found in MyDynamicEmbeddedExprSet"));
}
namespace test_namespace {
absl::flat_hash_map<std::string,
std::reference_wrapper<const ::arolla::CompiledExpr>>
CreateCompiledExprSet() {
static const absl::NoDestructor<std::unique_ptr<::arolla::CompiledExpr>>
compiled_expr(CreateCompiledExpr().value());
return absl::flat_hash_map<
std::string, std::reference_wrapper<const ::arolla::CompiledExpr>>{
{"first_expr", **compiled_expr}, {"second_expr", **compiled_expr}};
}
absl::StatusOr<std::reference_wrapper<
const ::arolla::ExprCompiler<TestInput, std::optional<float>>::Function>>
MyCompiledEmbeddedExprSet(absl::string_view);
AROLLA_DEFINE_EMBEDDED_MODEL_SET_FN(
MyCompiledEmbeddedExprSet,
::arolla::CompileExprSet(
::arolla::ExprCompiler<TestInput, std::optional<float>>()
.SetInputLoader(CreateInputLoader())
.AllowOutputCasting(),
CreateCompiledExprSet()));
}
TEST(ExprCompilerTest, UseCompiledEmbeddedExprSet) {
ASSERT_OK_AND_ASSIGN(auto model,
test_namespace::MyCompiledEmbeddedExprSet("first_expr"));
static_assert(
std::is_same_v<decltype(model),
std::reference_wrapper<const std::function<absl::StatusOr<
std::optional<float>>(const TestInput&)>>>);
TestInput input{.x = 28, .y = 29};
EXPECT_THAT(model(input), IsOkAndHolds(57));
EXPECT_THAT(test_namespace::MyCompiledEmbeddedExprSet("second_expr"), IsOk());
EXPECT_THAT(
test_namespace::MyCompiledEmbeddedExprSet("missing_expr"),
StatusIs(
absl::StatusCode::kNotFound,
"model \"missing_expr\" not found in MyCompiledEmbeddedExprSet"));
}
void BM_MyDynamicEmbeddedModel_Request(benchmark::State& state) {
arolla::InitArolla();
TestInput input{.x = 28, .y = 29};
for (auto _ : state) {
benchmark::DoNotOptimize(input);
CHECK_EQ(**test_namespace::MyDynamicEmbeddedModel()(input), 57);
}
}
void BM_MyDynamicEmbeddedModel_ConstructOutOfTheLoop(benchmark::State& state) {
arolla::InitArolla();
TestInput input{.x = 28, .y = 29};
auto model = test_namespace::MyDynamicEmbeddedModel();
for (auto _ : state) {
benchmark::DoNotOptimize(input);
CHECK_EQ(*model(input).value(), 57);
}
}
BENCHMARK(BM_MyDynamicEmbeddedModel_Request);
BENCHMARK(BM_MyDynamicEmbeddedModel_ConstructOutOfTheLoop);
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/serving/embedded_model.h | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/serving/embedded_model_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
4fffa125-f742-4a2d-bc4b-9936af28e8c4 | cpp | google/quiche | unknown_payload_decoder | quiche/http2/decoder/payload_decoders/unknown_payload_decoder.cc | quiche/http2/decoder/payload_decoders/unknown_payload_decoder_test.cc | #include "quiche/http2/decoder/payload_decoders/unknown_payload_decoder.h"
#include <stddef.h>
#include "quiche/http2/decoder/decode_buffer.h"
#include "quiche/http2/decoder/http2_frame_decoder_listener.h"
#include "quiche/http2/http2_constants.h"
#include "quiche/http2/http2_structures.h"
#include "quiche/common/platform/api/quiche_logging.h"
namespace http2 {
DecodeStatus UnknownPayloadDecoder::StartDecodingPayload(
FrameDecoderState* state, DecodeBuffer* db) {
const Http2FrameHeader& frame_header = state->frame_header();
QUICHE_DVLOG(2) << "UnknownPayloadDecoder::StartDecodingPayload: "
<< frame_header;
QUICHE_DCHECK(!IsSupportedHttp2FrameType(frame_header.type)) << frame_header;
QUICHE_DCHECK_LE(db->Remaining(), frame_header.payload_length);
state->InitializeRemainders();
state->listener()->OnUnknownStart(frame_header);
return ResumeDecodingPayload(state, db);
}
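// Unknown frames are streamed through verbatim: each call forwards whatever
// payload bytes are currently buffered and reports kDecodeDone only once the
// frame's full declared payload length has been consumed.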
DecodeStatus UnknownPayloadDecoder::ResumeDecodingPayload(
FrameDecoderState* state, DecodeBuffer* db) {
QUICHE_DVLOG(2) << "UnknownPayloadDecoder::ResumeDecodingPayload "
<< "remaining_payload=" << state->remaining_payload()
<< "; db->Remaining=" << db->Remaining();
QUICHE_DCHECK(!IsSupportedHttp2FrameType(state->frame_header().type))
<< state->frame_header();
QUICHE_DCHECK_LE(state->remaining_payload(),
state->frame_header().payload_length);
QUICHE_DCHECK_LE(db->Remaining(), state->remaining_payload());
size_t avail = db->Remaining();
if (avail > 0) {
state->listener()->OnUnknownPayload(db->cursor(), avail);
db->AdvanceCursor(avail);
state->ConsumePayload(avail);
}
if (state->remaining_payload() == 0) {
state->listener()->OnUnknownEnd();
return DecodeStatus::kDecodeDone;
}
return DecodeStatus::kDecodeInProgress;
}
} | #include "quiche/http2/decoder/payload_decoders/unknown_payload_decoder.h"
#include <stddef.h>
#include <string>
#include <type_traits>
#include "quiche/http2/decoder/http2_frame_decoder_listener.h"
#include "quiche/http2/http2_constants.h"
#include "quiche/http2/http2_structures.h"
#include "quiche/http2/test_tools/frame_parts.h"
#include "quiche/http2/test_tools/frame_parts_collector.h"
#include "quiche/http2/test_tools/http2_random.h"
#include "quiche/http2/test_tools/payload_decoder_base_test_util.h"
#include "quiche/http2/test_tools/random_decoder_test_base.h"
#include "quiche/common/platform/api/quiche_logging.h"
#include "quiche/common/platform/api/quiche_test.h"
namespace http2 {
namespace test {
namespace {
Http2FrameType g_unknown_frame_type;
}  // namespace
class UnknownPayloadDecoderPeer {
public:
static Http2FrameType FrameType() { return g_unknown_frame_type; }
static constexpr uint8_t FlagsAffectingPayloadDecoding() { return 0; }
};
namespace {
struct Listener : public FramePartsCollector {
void OnUnknownStart(const Http2FrameHeader& header) override {
QUICHE_VLOG(1) << "OnUnknownStart: " << header;
StartFrame(header)->OnUnknownStart(header);
}
void OnUnknownPayload(const char* data, size_t len) override {
QUICHE_VLOG(1) << "OnUnknownPayload: len=" << len;
CurrentFrame()->OnUnknownPayload(data, len);
}
void OnUnknownEnd() override {
QUICHE_VLOG(1) << "OnUnknownEnd";
EndFrame()->OnUnknownEnd();
}
};
constexpr bool SupportedFrameType = false;
class UnknownPayloadDecoderTest
: public AbstractPayloadDecoderTest<UnknownPayloadDecoder,
UnknownPayloadDecoderPeer, Listener,
SupportedFrameType>,
public ::testing::WithParamInterface<uint32_t> {
protected:
UnknownPayloadDecoderTest() : length_(GetParam()) {
QUICHE_VLOG(1) << "################ length_=" << length_
<< " ################";
do {
g_unknown_frame_type = static_cast<Http2FrameType>(Random().Rand8());
} while (IsSupportedHttp2FrameType(g_unknown_frame_type));
}
const uint32_t length_;
};
INSTANTIATE_TEST_SUITE_P(VariousLengths, UnknownPayloadDecoderTest,
::testing::Values(0, 1, 2, 3, 255, 256));
TEST_P(UnknownPayloadDecoderTest, ValidLength) {
std::string unknown_payload = Random().RandString(length_);
Http2FrameHeader frame_header(length_, g_unknown_frame_type, Random().Rand8(),
RandStreamId());
set_frame_header(frame_header);
FrameParts expected(frame_header, unknown_payload);
EXPECT_TRUE(DecodePayloadAndValidateSeveralWays(unknown_payload, expected));
}
}  // namespace
}  // namespace test
}  // namespace http2
84de1a0c-21bf-43f6-a9d7-127424df4d8d | cpp | tensorflow/tensorflow | bucketize | tensorflow/lite/kernels/bucketize.cc | tensorflow/lite/kernels/bucketize_test.cc | #include <stdint.h>
#include <algorithm>
#include "flatbuffers/flatbuffers.h"
#include "tensorflow/lite/core/c/builtin_op_data.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/tensor.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace bucketize {
namespace {
constexpr int kInputTensor = 0;
constexpr int kOutputTensor = 0;
struct OpData {
const float* boundaries;
int num_boundaries;
};
void* Init(TfLiteContext* context, const char* buffer, size_t length) {
auto* op_data = new OpData();
const auto* params = reinterpret_cast<const TfLiteBucketizeParams*>(buffer);
if (!FLATBUFFERS_LITTLEENDIAN) {
    // Boundaries are stored little-endian in the flatbuffer; on big-endian
    // hosts, byte-swap each float in place via its int32 bit pattern.
    int32_t* p =
        reinterpret_cast<int32_t*>(const_cast<float*>(params->boundaries));
    for (int i = 0; i < params->num_boundaries; i++, p++) {
      *p = flatbuffers::EndianSwap(*p);
    }
}
op_data->boundaries = params->boundaries;
op_data->num_boundaries = params->num_boundaries;
return op_data;
}
void Free(TfLiteContext* context, void* buffer) {
delete reinterpret_cast<OpData*>(buffer);
}
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
OpData* opdata = reinterpret_cast<OpData*>(node->user_data);
if (!std::is_sorted(opdata->boundaries,
opdata->boundaries + opdata->num_boundaries)) {
TF_LITE_KERNEL_LOG(context, "Expected sorted boundaries");
return kTfLiteError;
}
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
if (input->type != kTfLiteInt32 && input->type != kTfLiteFloat32 &&
input->type != kTfLiteInt64 && input->type != kTfLiteFloat64) {
TF_LITE_KERNEL_LOG(context, "Type '%s' is not supported by bucketize.",
TfLiteTypeGetName(input->type));
return kTfLiteError;
}
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
output->type = kTfLiteInt32;
TfLiteIntArray* output_shape = TfLiteIntArrayCopy(input->dims);
return context->ResizeTensor(context, output, output_shape);
}
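// Each output element is the index of the first boundary strictly greater
// than the input (std::upper_bound), i.e. the number of boundaries to its
// left. With boundaries {0, 10, 100}: -5 -> 0, 5 -> 1, 10 -> 2, 150 -> 3.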
template <typename T>
inline void Bucketize(const RuntimeShape& input_shape, const T* input_data,
const float* boundaries, int num_boundaries,
const RuntimeShape& output_shape, int32_t* output_data) {
const int flat_size = MatchingFlatSize(input_shape, output_shape);
for (int i = 0; i < flat_size; i++) {
auto first_bigger_it = std::upper_bound(
boundaries, boundaries + num_boundaries, input_data[i]);
output_data[i] = first_bigger_it - boundaries;
}
}
template <typename T>
TfLiteStatus BucketizeImpl(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
OpData* opdata = reinterpret_cast<OpData*>(node->user_data);
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
TF_LITE_ENSURE_TYPES_EQ(context, output->type, kTfLiteInt32);
Bucketize<T>(GetTensorShape(input), GetTensorData<T>(input),
opdata->boundaries, opdata->num_boundaries,
GetTensorShape(output), GetTensorData<int32_t>(output));
return kTfLiteOk;
}
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
switch (input->type) {
case kTfLiteFloat32: {
return BucketizeImpl<float>(context, node);
}
case kTfLiteFloat64: {
return BucketizeImpl<double>(context, node);
}
case kTfLiteInt32: {
return BucketizeImpl<int32_t>(context, node);
}
case kTfLiteInt64: {
return BucketizeImpl<int64_t>(context, node);
}
default: {
TF_LITE_KERNEL_LOG(context, "Type '%s' is not supported by bucketize.",
TfLiteTypeGetName(input->type));
return kTfLiteError;
}
}
}
}  // namespace
}  // namespace bucketize
TfLiteRegistration* Register_BUCKETIZE() {
static TfLiteRegistration r = {bucketize::Init, bucketize::Free,
bucketize::Prepare, bucketize::Eval};
return &r;
}
}  // namespace builtin
}  // namespace ops
} | #include <stdint.h>
#include <vector>
#include <gtest/gtest.h>
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace {
using ::testing::ElementsAre;
using ::testing::ElementsAreArray;
template <typename T>
class BucketizeOpModel : public SingleOpModel {
public:
BucketizeOpModel(const TensorData& input,
const std::vector<float>& boundaries) {
input_ = AddInput(input);
boundaries_ = boundaries;
output_ = AddOutput({TensorType_INT32, input.shape});
SetBuiltinOp(BuiltinOperator_BUCKETIZE, BuiltinOptions_BucketizeOptions,
CreateBucketizeOptions(
builder_, builder_.CreateVector<float>(boundaries_))
.Union());
BuildInterpreter({GetShape(input_)});
}
int input() { return input_; }
const std::vector<float>& boundaries() { return boundaries_; }
std::vector<int> GetOutput() { return ExtractVector<int>(output_); }
std::vector<int> GetOutputShape() { return GetTensorShape(output_); }
private:
int input_;
std::vector<float> boundaries_;
int output_;
};
TEST(BucketizeOpTest, Float) {
BucketizeOpModel<float> model(
{TensorType_FLOAT32, {3, 2}},
{0.0f, 10.0f, 100.0f});
model.PopulateTensor<float>(model.input(),
{-5.0f, 10000.0f, 150.0f, 10.0f, 5.0f, 100.0f});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAre(3, 2));
EXPECT_THAT(model.GetOutput(), ElementsAreArray({0, 3, 3, 2, 1, 3}));
}
TEST(BucketizeOpTest, Int32) {
BucketizeOpModel<int32_t> model(
{TensorType_INT32, {3, 2}},
{0, 10, 100});
model.PopulateTensor<int32_t>(model.input(), {-5, 10000, 150, 10, 5, 100});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAre(3, 2));
EXPECT_THAT(model.GetOutput(), ElementsAreArray({0, 3, 3, 2, 1, 3}));
}
#if GTEST_HAS_DEATH_TEST
TEST(BucketizeOpTest, UnsortedBuckets) {
EXPECT_DEATH(BucketizeOpModel<float>(
{TensorType_INT32, {3, 2}},
{0, 10, -10}),
"Expected sorted boundaries");
}
#endif
}  // namespace
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/bucketize.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/bucketize_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
3fd2b1bc-61fe-4b2b-8e06-c2efe6bb07af | cpp | tensorflow/tensorflow | trace_viewer_visibility | tensorflow/core/profiler/convert/trace_viewer/trace_viewer_visibility.cc | tensorflow/core/profiler/convert/trace_viewer/trace_viewer_visibility_test.cc | #include "tensorflow/core/profiler/convert/trace_viewer/trace_viewer_visibility.h"
#include <cstdint>
#include "absl/log/check.h"
#include "xla/tsl/profiler/utils/timespan.h"
#include "tensorflow/core/profiler/protobuf/trace_events.pb.h"
namespace tensorflow {
namespace profiler {
TraceViewerVisibility::TraceViewerVisibility(
tsl::profiler::Timespan visible_span, uint64_t resolution_ps)
: visible_span_(visible_span), resolution_ps_(resolution_ps) {}
bool TraceViewerVisibility::Visible(const TraceEvent& event) {
if (visible_span_.Instant()) return true;
tsl::profiler::Timespan span(event.timestamp_ps(), event.duration_ps());
if (!visible_span_.Overlaps(span)) return false;
if (resolution_ps_ == 0) return true;
return VisibleAtResolution(event);
}
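// Downsampling rule: an event stays visible if it is at least resolution_ps_
// wide, or if it begins at least resolution_ps_ after the last visible event
// at the same nesting depth of its row. Flow chains are kept or dropped as a
// unit so arrows never dangle. (The counter-event branch below is currently
// compiled out.)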
bool TraceViewerVisibility::VisibleAtResolution(const TraceEvent& event) {
DCHECK_NE(resolution_ps_, 0);
if (!event.has_resource_id()) {
#if 1
return true;
#else
CounterRowId counter_row_id(event.device_id(), event.name());
auto iter = last_counter_timestamp_ps_.find(counter_row_id);
bool found = (iter != last_counter_timestamp_ps_.end());
bool visible =
!found || ((event.timestamp_ps() - iter->second) >= resolution_ps_);
if (visible) {
if (found) {
iter->second = event.timestamp_ps();
} else {
last_counter_timestamp_ps_.emplace(counter_row_id,
event.timestamp_ps());
}
}
return visible;
#endif
}
tsl::profiler::Timespan span(event.timestamp_ps(), event.duration_ps());
bool visible = (span.duration_ps() >= resolution_ps_);
auto& row = rows_[RowId(event.device_id(), event.resource_id())];
size_t depth = row.Depth(span.begin_ps());
if (!visible) {
auto last_end_timestamp_ps = row.LastEndTimestampPs(depth);
visible = !last_end_timestamp_ps ||
(span.begin_ps() - *last_end_timestamp_ps >= resolution_ps_);
}
if (event.has_flow_id()) {
auto result = flows_.try_emplace(event.flow_id(), visible);
if (!visible) {
if (result.second) {
auto last_flow_timestamp_ps = row.LastFlowTimestampPs();
result.first->second =
!last_flow_timestamp_ps ||
(span.end_ps() - *last_flow_timestamp_ps >= resolution_ps_);
}
visible = result.first->second;
}
if (event.flow_entry_type() == TraceEvent::FLOW_END) {
flows_.erase(result.first);
}
if (visible) {
row.SetLastFlowTimestampPs(span.end_ps());
}
}
if (visible) {
row.SetLastEndTimestampPs(depth, span.end_ps());
}
return visible;
}
void TraceViewerVisibility::SetVisibleAtResolution(const TraceEvent& event) {
DCHECK_NE(resolution_ps_, 0);
if (!event.has_resource_id()) {
CounterRowId counter_row_id(event.device_id(), event.name());
last_counter_timestamp_ps_.insert_or_assign(counter_row_id,
event.timestamp_ps());
} else {
tsl::profiler::Timespan span(event.timestamp_ps(), event.duration_ps());
auto& row = rows_[RowId(event.device_id(), event.resource_id())];
if (event.has_flow_id()) {
if (event.flow_entry_type() == TraceEvent::FLOW_END) {
flows_.erase(event.flow_id());
} else {
flows_.try_emplace(event.flow_id(), true);
}
row.SetLastFlowTimestampPs(span.end_ps());
}
size_t depth = row.Depth(span.begin_ps());
row.SetLastEndTimestampPs(depth, span.end_ps());
}
}
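// Depth is the number of still-open events (those whose recorded end
// timestamps lie past the new begin timestamp) that enclose the event; rows
// keep one last-end timestamp per nesting level.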
size_t TraceViewerVisibility::RowVisibility::Depth(
uint64_t begin_timestamp_ps) const {
size_t depth = 0;
for (; depth < last_end_timestamp_ps_.size(); ++depth) {
if (last_end_timestamp_ps_[depth] <= begin_timestamp_ps) break;
}
return depth;
}
}  // namespace profiler
} | #include "tensorflow/core/profiler/convert/trace_viewer/trace_viewer_visibility.h"
#include <cstdint>
#include "xla/tsl/profiler/utils/timespan.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/profiler/protobuf/trace_events.pb.h"
namespace tensorflow {
namespace profiler {
namespace {
using tsl::profiler::Timespan;
constexpr uint32_t kDeviceId = 10;
constexpr uint32_t kResourceId = 1;
constexpr uint32_t kSrcResourceId = 2;
constexpr uint32_t kDstResourceId = 4;
TraceEvent Complete(Timespan span, uint32_t resource_id = kResourceId) {
TraceEvent event;
event.set_device_id(kDeviceId);
event.set_resource_id(resource_id);
event.set_timestamp_ps(span.begin_ps());
event.set_duration_ps(span.duration_ps());
return event;
}
TraceEvent Counter(uint64_t time_ps) {
TraceEvent event;
event.set_device_id(kDeviceId);
event.set_timestamp_ps(time_ps);
return event;
}
TraceEvent Flow(Timespan span, uint64_t flow_id, uint32_t resource_id) {
TraceEvent event;
event.set_flow_id(flow_id);
event.set_device_id(kDeviceId);
event.set_resource_id(resource_id);
event.set_timestamp_ps(span.begin_ps());
event.set_duration_ps(span.duration_ps());
return event;
}
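// With no downsampling (resolution 0), visibility is plain overlap with the
// visible span; events touching the span's endpoints still count.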
TEST(TraceViewerVisibilityTest, VisibilityNoDownsampling) {
TraceViewerVisibility v(Timespan(1000, 1000));
EXPECT_FALSE(v.Visible(Complete(Timespan(999))));
EXPECT_TRUE(v.Visible(Complete(Timespan(1000))));
EXPECT_TRUE(v.Visible(Complete(Timespan(1500))));
EXPECT_TRUE(v.Visible(Complete(Timespan(2000))));
EXPECT_FALSE(v.Visible(Complete(Timespan(2001))));
EXPECT_FALSE(v.Visible(Complete(Timespan(900, 99))));
EXPECT_TRUE(v.Visible(Complete(Timespan(900, 100))));
EXPECT_TRUE(v.Visible(Complete(Timespan(1450, 100))));
EXPECT_TRUE(v.Visible(Complete(Timespan(2000, 50))));
EXPECT_FALSE(v.Visible(Complete(Timespan(2001, 50))));
}
TEST(TraceViewerVisibilityTest, DISABLED_CounterEventsDownsampling) {
TraceViewerVisibility v(Timespan(1000, 1000), 100);
EXPECT_FALSE(v.Visible(Counter(999)));
EXPECT_TRUE(v.Visible(Counter(1000)));
EXPECT_FALSE(v.Visible(Counter(1099)));
EXPECT_TRUE(v.Visible(Counter(1100)));
EXPECT_TRUE(v.Visible(Counter(2000)));
EXPECT_FALSE(v.Visible(Counter(2001)));
}
TEST(TraceViewerVisibilityTest, CompleteEventsDownsampling) {
TraceViewerVisibility v(Timespan(1000, 1000), 100);
EXPECT_TRUE(v.Visible(Complete(Timespan(950, 50))));
EXPECT_FALSE(v.Visible(Complete(Timespan(1050, 50))));
EXPECT_TRUE(v.Visible(Complete(Timespan(1055, 200))));
EXPECT_TRUE(v.Visible(Complete(Timespan(1355, 50))));
}
TEST(TraceViewerVisibilityTest, CompleteNestedEventsDownsampling) {
TraceViewerVisibility v(Timespan(1000, 1000), 100);
EXPECT_TRUE(v.Visible(Complete(Timespan(1000, 200))));
EXPECT_TRUE(v.Visible(Complete(Timespan(1200, 190))));
EXPECT_TRUE(v.Visible(Complete(Timespan(1250, 20))));
EXPECT_FALSE(v.Visible(Complete(Timespan(1270, 20))));
EXPECT_TRUE(v.Visible(Complete(Timespan(1290, 100))));
}
TEST(TraceViewerVisibilityTest, FlowEventsDownsampling) {
TraceViewerVisibility v(Timespan(1000, 1000), 100);
EXPECT_TRUE(v.Visible(Flow(Timespan(1000, 50), 1, kSrcResourceId)));
EXPECT_FALSE(v.Visible(Flow(Timespan(1050, 50), 2, kSrcResourceId)));
EXPECT_TRUE(v.Visible(Flow(Timespan(1100, 50), 3, kSrcResourceId)));
EXPECT_TRUE(v.Visible(Flow(Timespan(1100, 50), 1, kDstResourceId)));
EXPECT_FALSE(v.Visible(Flow(Timespan(1200, 52), 2, kDstResourceId)));
EXPECT_TRUE(v.Visible(Flow(Timespan(1252, 10), 3, kDstResourceId)));
EXPECT_TRUE(v.Visible(Complete(Timespan(1300, 50))));
EXPECT_FALSE(v.Visible(Complete(Timespan(1350, 50))));
EXPECT_FALSE(v.Visible(Complete(Timespan(1400, 50))));
EXPECT_TRUE(v.Visible(Flow(Timespan(1600, 50), 4, kResourceId)));
EXPECT_TRUE(v.Visible(Flow(Timespan(1700, 52), 5, kResourceId)));
EXPECT_FALSE(v.Visible(Flow(Timespan(1752, 10), 6, kResourceId)));
}
}  // namespace
}  // namespace profiler
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/profiler/convert/trace_viewer/trace_viewer_visibility.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/profiler/convert/trace_viewer/trace_viewer_visibility_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
453427bf-9eea-4191-ab2c-2632dbdaa00c | cpp | tensorflow/tensorflow | converter | tensorflow/lite/delegates/gpu/cl/kernels/converter.cc | tensorflow/lite/delegates/gpu/gl/kernels/converter_test.cc | #include "tensorflow/lite/delegates/gpu/cl/kernels/converter.h"
#include <algorithm>
#include <array>
#include <memory>
#include <string>
#include <utility>
#include <variant>
#include <vector>
#include "absl/strings/substitute.h"
#include "tensorflow/lite/delegates/gpu/cl/buffer.h"
#include "tensorflow/lite/delegates/gpu/cl/cl_arguments.h"
#include "tensorflow/lite/delegates/gpu/cl/cl_command_queue.h"
#include "tensorflow/lite/delegates/gpu/cl/cl_errors.h"
#include "tensorflow/lite/delegates/gpu/cl/tensor.h"
#include "tensorflow/lite/delegates/gpu/cl/tensor_type_util.h"
#include "tensorflow/lite/delegates/gpu/common/data_type.h"
#include "tensorflow/lite/delegates/gpu/common/task/arguments.h"
#include "tensorflow/lite/delegates/gpu/common/task/gpu_operation.h"
#include "tensorflow/lite/delegates/gpu/common/task/tensor_desc.h"
#include "tensorflow/lite/delegates/gpu/common/task/util.h"
#include "tensorflow/lite/delegates/gpu/common/task/work_group_picking.h"
#include "tensorflow/lite/delegates/gpu/common/tasks/conversion.h"
#include "tensorflow/lite/delegates/gpu/common/util.h"
namespace tflite {
namespace gpu {
namespace cl {
namespace {
class OpenClConverterImpl : public TensorObjectConverter {
public:
virtual absl::Status Init(const TensorObjectDef& input_def,
const TensorObjectDef& output_def,
Environment* environment) = 0;
void SetGpuInfo(const GpuInfo& info) { gpu_info_ = info; }
protected:
absl::Status DispatchKernel(Buffer* buffer, Tensor* tensor) {
RETURN_IF_ERROR(cl_args_.SetObjectRef("buffer", buffer));
RETURN_IF_ERROR(cl_args_.SetObjectRef("tensor", tensor));
RETURN_IF_ERROR(cl_args_.Bind(kernel_.kernel()));
const int3 grid = int3(tensor->Width() * tensor->Batch(), tensor->Height(),
tensor->Slices());
std::vector<int3> work_groups;
GetPossibleWorkGroupsConv(TuningType::kFast, gpu_info_, kernel_.info_, grid,
&work_groups);
const int3 work_group_size = work_groups[0];
const int3 work_groups_count = GetWorkGroupsCount(grid, work_group_size);
return queue_->Dispatch(kernel_, work_groups_count, work_group_size);
}
CLArguments cl_args_;
BHWC shape_;
CLKernel kernel_;
TensorDescriptor tensor_descriptor_;
GpuInfo gpu_info_;
CLCommandQueue* queue_ = nullptr;
const CLContext* context_ = nullptr;
};
bool IsSupportedDataType(DataType type) {
return type == DataType::FLOAT16 || type == DataType::FLOAT32 ||
type == DataType::INT32 || type == DataType::BOOL;
}
bool IsBHWCOpenCLBuffer(const ObjectDef& def) {
return IsSupportedDataType(def.data_type) &&
def.object_type == ObjectType::OPENCL_BUFFER &&
def.data_layout == DataLayout::BHWC;
}
bool IsOpenCLTensor(const ObjectDef& def) {
const bool is_buffer_tensor = def.object_type == ObjectType::OPENCL_BUFFER &&
def.data_layout == DataLayout::DHWC4;
const bool is_image2d_tensor =
def.object_type == ObjectType::OPENCL_TEXTURE &&
def.data_layout == DataLayout::HDWC4;
const bool is_image2d_array_tensor =
def.object_type == ObjectType::OPENCL_TEXTURE &&
def.data_layout == DataLayout::DHWC4;
const bool is_single_image_tensor =
def.object_type == ObjectType::OPENCL_TEXTURE &&
def.data_layout == DataLayout::BHWC;
return IsSupportedDataType(def.data_type) &&
(is_buffer_tensor || is_image2d_tensor || is_image2d_array_tensor ||
is_single_image_tensor);
}
absl::Status GetOpenCLMemory(const TensorObject& obj, cl_mem* memory) {
auto texture = std::get_if<OpenClTexture>(&obj);
auto buffer = std::get_if<OpenClBuffer>(&obj);
if (texture && texture->memobj) {
*memory = texture->memobj;
} else if (buffer && buffer->memobj) {
*memory = buffer->memobj;
} else {
return absl::InvalidArgumentError("Missing OpenCL object.");
}
return absl::OkStatus();
}
class TensorToTensorConverter : public OpenClConverterImpl {
public:
static bool IsSupported(const ObjectDef& input, const ObjectDef& output) {
return IsOpenCLTensor(input) && IsOpenCLTensor(output);
}
absl::Status Init(const TensorObjectDef& input_def,
const TensorObjectDef& output_def,
Environment* environment) final {
src_tensor_descriptor_ =
TensorDescriptor(input_def.object_def.data_type,
ToTensorStorageType(input_def.object_def.object_type,
input_def.object_def.data_layout),
Layout::BHWC);
dst_tensor_descriptor_ =
TensorDescriptor(output_def.object_def.data_type,
ToTensorStorageType(output_def.object_def.object_type,
output_def.object_def.data_layout),
Layout::BHWC);
GPUOperation gpu_op =
CreateTensorToTensorOp(environment->GetDevicePtr()->GetInfo(),
src_tensor_descriptor_, dst_tensor_descriptor_);
gpu_op.code_ =
"#define MAIN_FUNCTION __kernel void tensor_to_tensor\n" + gpu_op.code_;
const bool need_fp16_support =
input_def.object_def.data_type == DataType::FLOAT16 ||
output_def.object_def.data_type == DataType::FLOAT16;
if (need_fp16_support) {
gpu_op.code_ =
"#pragma OPENCL EXTENSION cl_khr_fp16 : enable\n" + gpu_op.code_;
}
queue_ = environment->queue();
context_ = &environment->context();
shape_ = BHWC(input_def.dimensions.b, input_def.dimensions.h,
input_def.dimensions.w, input_def.dimensions.c);
RETURN_IF_ERROR(gpu_op.AssembleCode(environment->device().GetInfo()));
RETURN_IF_ERROR(cl_args_.Init(environment->device().GetInfo(), nullptr,
&gpu_op.args_, &gpu_op.code_));
return environment->program_cache()->GetOrCreateCLKernel(
gpu_op.code_, "tensor_to_tensor", environment->context(),
environment->device(), &kernel_);
}
absl::Status Convert(const TensorObject& input_obj,
const TensorObject& output_obj) override {
cl_mem in_memory;
RETURN_IF_ERROR(GetOpenCLMemory(input_obj, &in_memory));
cl_mem out_memory;
RETURN_IF_ERROR(GetOpenCLMemory(output_obj, &out_memory));
Tensor src_tensor;
TensorDescriptor descriptor_with_shape = src_tensor_descriptor_;
descriptor_with_shape.SetBHWCShape(shape_);
RETURN_IF_ERROR(CreateTensorShared(*context_, in_memory,
descriptor_with_shape, &src_tensor));
Tensor dst_tensor;
descriptor_with_shape = dst_tensor_descriptor_;
descriptor_with_shape.SetBHWCShape(shape_);
RETURN_IF_ERROR(CreateTensorShared(*context_, out_memory,
descriptor_with_shape, &dst_tensor));
RETURN_IF_ERROR(cl_args_.SetObjectRef("src_tensor", &src_tensor));
RETURN_IF_ERROR(cl_args_.SetObjectRef("dst_tensor", &dst_tensor));
RETURN_IF_ERROR(cl_args_.Bind(kernel_.kernel()));
const int3 grid = int3(dst_tensor.Width() * dst_tensor.Batch(),
dst_tensor.Height(), dst_tensor.Slices());
const int3 work_group_size = {16, 8, 1};
const int3 work_groups_count = GetWorkGroupsCount(grid, work_group_size);
return queue_->Dispatch(kernel_, work_groups_count, work_group_size);
}
private:
TensorDescriptor src_tensor_descriptor_;
TensorDescriptor dst_tensor_descriptor_;
};
class TensorToBHWCBufferConverter : public OpenClConverterImpl {
public:
static bool IsSupported(const ObjectDef& input, const ObjectDef& output) {
return IsOpenCLTensor(input) && IsBHWCOpenCLBuffer(output);
}
absl::Status Init(const TensorObjectDef& input_def,
const TensorObjectDef& output_def,
Environment* environment) final {
TensorStorageType src_tensor_type = ToTensorStorageType(
input_def.object_def.object_type, input_def.object_def.data_layout);
tensor_descriptor_ = TensorDescriptor(input_def.object_def.data_type,
src_tensor_type, Layout::BHWC);
BufferDescriptor buffer_desc;
buffer_desc.element_type = output_def.object_def.data_type;
buffer_desc.element_size = 1;
buffer_desc.memory_type = MemoryType::GLOBAL;
GPUOperation gpu_op =
CreateTensorToBhwcBufferOp(environment->GetDevicePtr()->GetInfo(),
tensor_descriptor_, buffer_desc);
gpu_op.code_ =
"#define MAIN_FUNCTION __kernel void tensor_to_bhwc\n" + gpu_op.code_;
if (output_def.object_def.data_type == DataType::BOOL ||
input_def.object_def.data_type == DataType::BOOL) {
gpu_op.code_ =
"#define convert_bool4(value) (convert_uchar4((value) != 0) & "
"(uchar4) 1)\n#define bool4 uchar4\n" +
gpu_op.code_;
}
const bool need_fp16_support =
input_def.object_def.data_type == DataType::FLOAT16 ||
output_def.object_def.data_type == DataType::FLOAT16;
if (need_fp16_support) {
gpu_op.code_ =
"#pragma OPENCL EXTENSION cl_khr_fp16 : enable\n" + gpu_op.code_;
}
queue_ = environment->queue();
context_ = &environment->context();
shape_ = BHWC(input_def.dimensions.b, input_def.dimensions.h,
input_def.dimensions.w, input_def.dimensions.c);
RETURN_IF_ERROR(gpu_op.AssembleCode(environment->device().GetInfo()));
RETURN_IF_ERROR(cl_args_.Init(environment->device().GetInfo(), nullptr,
&gpu_op.args_, &gpu_op.code_));
return environment->program_cache()->GetOrCreateCLKernel(
gpu_op.code_, "tensor_to_bhwc", environment->context(),
environment->device(), &kernel_);
}
absl::Status Convert(const TensorObject& input_obj,
const TensorObject& output_obj) override {
auto output = std::get_if<OpenClBuffer>(&output_obj);
if (!output || !output->memobj) {
return absl::InvalidArgumentError(
"Missing output in tensor_to_bhwc converter");
}
cl_mem in_memory;
RETURN_IF_ERROR(GetOpenCLMemory(input_obj, &in_memory));
Tensor tensor;
TensorDescriptor descriptor_with_shape = tensor_descriptor_;
descriptor_with_shape.SetBHWCShape(shape_);
RETURN_IF_ERROR(CreateTensorShared(*context_, in_memory,
descriptor_with_shape, &tensor));
Buffer buffer = CreateBufferShared(output->memobj);
return DispatchKernel(&buffer, &tensor);
}
};
class BHWCBufferToTensorConverter : public OpenClConverterImpl {
public:
static bool IsSupported(const ObjectDef& input, const ObjectDef& output) {
return IsBHWCOpenCLBuffer(input) && IsOpenCLTensor(output);
}
absl::Status Init(const TensorObjectDef& input_def,
const TensorObjectDef& output_def,
Environment* environment) final {
TensorStorageType dst_tensor_type = ToTensorStorageType(
output_def.object_def.object_type, output_def.object_def.data_layout);
tensor_descriptor_ = TensorDescriptor(output_def.object_def.data_type,
dst_tensor_type, Layout::BHWC);
BufferDescriptor buffer_desc;
buffer_desc.element_type = input_def.object_def.data_type;
buffer_desc.element_size = 1;
buffer_desc.memory_type = MemoryType::GLOBAL;
GPUOperation gpu_op =
CreateBhwcBufferToTensorOp(environment->GetDevicePtr()->GetInfo(),
buffer_desc, tensor_descriptor_);
gpu_op.code_ =
"#define MAIN_FUNCTION __kernel void bhwc_to_tensor\n" + gpu_op.code_;
if (output_def.object_def.data_type == DataType::BOOL ||
input_def.object_def.data_type == DataType::BOOL) {
gpu_op.code_ =
"#define convert_bool4(value) (convert_uchar4((value) != 0) & "
"(uchar4) 1)\n#define bool4 uchar4\n" +
gpu_op.code_;
}
const bool need_fp16_support =
input_def.object_def.data_type == DataType::FLOAT16 ||
output_def.object_def.data_type == DataType::FLOAT16;
if (need_fp16_support) {
gpu_op.code_ =
"#pragma OPENCL EXTENSION cl_khr_fp16 : enable\n" + gpu_op.code_;
}
queue_ = environment->queue();
context_ = &environment->context();
shape_ = BHWC(output_def.dimensions.b, output_def.dimensions.h,
output_def.dimensions.w, output_def.dimensions.c);
RETURN_IF_ERROR(gpu_op.AssembleCode(environment->device().GetInfo()));
RETURN_IF_ERROR(cl_args_.Init(environment->device().GetInfo(), nullptr,
&gpu_op.args_, &gpu_op.code_));
return environment->program_cache()->GetOrCreateCLKernel(
gpu_op.code_, "bhwc_to_tensor", environment->context(),
environment->device(), &kernel_);
}
absl::Status Convert(const TensorObject& input_obj,
const TensorObject& output_obj) override {
auto input = std::get_if<OpenClBuffer>(&input_obj);
if (!input || !input->memobj) {
return absl::InvalidArgumentError(
"Missing input in bhwc_to_tensor converter");
}
cl_mem out_memory;
RETURN_IF_ERROR(GetOpenCLMemory(output_obj, &out_memory));
Tensor tensor;
TensorDescriptor descriptor_with_shape = tensor_descriptor_;
descriptor_with_shape.SetBHWCShape(shape_);
RETURN_IF_ERROR(CreateTensorShared(*context_, out_memory,
descriptor_with_shape, &tensor));
Buffer buffer = CreateBufferShared(input->memobj);
return DispatchKernel(&buffer, &tensor);
}
};
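// Maps a tensor definition to the {width, height, depth} region used by the
// image copy/read/write calls: SINGLE_TEXTURE_2D uses one slice, TEXTURE_2D
// stacks the depth slices along the height axis, and TEXTURE_ARRAY keeps
// depth as the third image dimension.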
std::array<size_t, 3> CalculateTextureRegion(const TensorObjectDef& def) {
const auto& dims = def.dimensions;
std::array<size_t, 3> region = {0, 0, 1};
switch (ToTensorStorageType(def.object_def.object_type,
def.object_def.data_layout)) {
case TensorStorageType::SINGLE_TEXTURE_2D:
region[0] = static_cast<size_t>(dims.w * dims.b);
region[1] = static_cast<size_t>(dims.h);
break;
case TensorStorageType::TEXTURE_2D:
region[0] = static_cast<size_t>(dims.w * dims.b);
region[1] = static_cast<size_t>(dims.h * dims.d());
break;
case TensorStorageType::TEXTURE_ARRAY:
region[0] = static_cast<size_t>(dims.w * dims.b);
region[1] = static_cast<size_t>(dims.h);
region[2] = static_cast<size_t>(dims.d());
break;
default:
break;
}
return region;
}
bool IsOpenClTextureOrBuffer(ObjectType type) {
return type == ObjectType::OPENCL_BUFFER ||
type == ObjectType::OPENCL_TEXTURE;
}
class TrivialCopier : public OpenClConverterImpl {
public:
static bool IsSupported(const ObjectDef& input, const ObjectDef& output) {
return IsOpenClTextureOrBuffer(input.object_type) &&
input.data_type == output.data_type &&
input.object_type == output.object_type &&
input.data_layout == output.data_layout;
}
absl::Status Init(const TensorObjectDef& input_def,
const TensorObjectDef& output_def,
Environment* environment) final {
shape_ = BHWC(input_def.dimensions.b, input_def.dimensions.h,
input_def.dimensions.w, input_def.dimensions.c);
data_type_ = input_def.object_def.data_type;
queue_ = environment->queue();
region_ = CalculateTextureRegion(output_def);
return absl::OkStatus();
}
absl::Status Convert(const TensorObject& input_obj,
const TensorObject& output_obj) override {
auto texture_input = std::get_if<OpenClTexture>(&input_obj);
auto texture_output = std::get_if<OpenClTexture>(&output_obj);
if (texture_input && texture_output) {
return Copy(*texture_input, *texture_output);
}
auto buffer_input = std::get_if<OpenClBuffer>(&input_obj);
auto buffer_output = std::get_if<OpenClBuffer>(&output_obj);
if (buffer_input && buffer_output) {
return Copy(*buffer_input, *buffer_output);
}
return absl::InternalError("Unexpected object");
}
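  // The byte count below assumes the DHWC4 buffer layout: channels are
  // padded up to a multiple of 4 (AlignByN) before multiplying the extents.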
absl::Status Copy(const OpenClBuffer& input, const OpenClBuffer& output) {
if (input.memobj == output.memobj) {
return absl::OkStatus();
}
return GetOpenCLError(
clEnqueueCopyBuffer(queue_->queue(), input.memobj, output.memobj, 0, 0,
SizeOf(data_type_) * shape_.w * shape_.h *
AlignByN(shape_.c, 4) * shape_.b,
0, nullptr, nullptr));
}
absl::Status Copy(const OpenClTexture& input, const OpenClTexture& output) {
if (input.memobj == output.memobj) {
return absl::OkStatus();
}
size_t origin[3] = {0, 0, 0};
return GetOpenCLError(
clEnqueueCopyImage(queue_->queue(), input.memobj, output.memobj, origin,
origin, region_.data(), 0, nullptr, nullptr));
}
private:
DataType data_type_ = DataType::UNKNOWN;
std::array<size_t, 3> region_;
};
class CpuCopier : public OpenClConverterImpl {
public:
explicit CpuCopier(bool asynchronous = false) : async_(asynchronous) {}
static bool IsSupported(const ObjectDef& input, const ObjectDef& output) {
return input.data_type == output.data_type &&
input.data_layout == output.data_layout &&
((input.object_type == ObjectType::CPU_MEMORY &&
IsOpenClTextureOrBuffer(output.object_type)) ||
(output.object_type == ObjectType::CPU_MEMORY &&
IsOpenClTextureOrBuffer(input.object_type)));
}
absl::Status Init(const TensorObjectDef& input_def,
const TensorObjectDef& output_def,
Environment* environment) final {
region_ = CalculateTextureRegion(
input_def.object_def.object_type == ObjectType::CPU_MEMORY ? output_def
: input_def);
input_data_type_ = input_def.object_def.data_type;
output_data_type_ = output_def.object_def.data_type;
queue_ = environment->queue();
return absl::OkStatus();
}
absl::Status Convert(const TensorObject& input_obj,
const TensorObject& output_obj) override {
auto cpu_input = std::get_if<CpuMemory>(&input_obj);
auto cpu_output = std::get_if<CpuMemory>(&output_obj);
if (cpu_input) {
if (output_data_type_ == DataType::BOOL) {
return CopyFromBoolCpu(cpu_input, output_obj);
}
auto texture_output = std::get_if<OpenClTexture>(&output_obj);
if (texture_output) {
return queue_->EnqueueWriteImage(
texture_output->memobj, int3(region_[0], region_[1], region_[2]),
cpu_input->data, async_);
}
auto buffer_output = std::get_if<OpenClBuffer>(&output_obj);
if (buffer_output) {
return queue_->EnqueueWriteBuffer(buffer_output->memobj,
cpu_input->size_bytes,
cpu_input->data, async_);
}
} else if (cpu_output) {
if (input_data_type_ == DataType::BOOL) {
return CopyToBoolCpu(input_obj, cpu_output);
}
auto texture_input = std::get_if<OpenClTexture>(&input_obj);
if (texture_input) {
return queue_->EnqueueReadImage(
texture_input->memobj, int3(region_[0], region_[1], region_[2]),
cpu_output->data, async_);
}
auto buffer_input = std::get_if<OpenClBuffer>(&input_obj);
if (buffer_input) {
return queue_->EnqueueReadBuffer(buffer_input->memobj,
cpu_output->size_bytes,
cpu_output->data, async_);
}
}
return absl::InternalError("Unexpected object");
}
private:
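  // Bool tensors take a staging path: device memory holds one byte per
  // element, so values are copied through a uint8_t scratch buffer instead
  // of aliasing C++ bool storage directly.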
absl::Status CopyToBoolCpu(const TensorObject& tensor_obj,
const CpuMemory* cpu_memory) {
const size_t num_elements = cpu_memory->size_bytes;
std::vector<uint8_t> tmp_data(num_elements);
auto texture_input = std::get_if<OpenClTexture>(&tensor_obj);
if (texture_input) {
RETURN_IF_ERROR(queue_->EnqueueReadImage(
texture_input->memobj, int3(region_[0], region_[1], region_[2]),
tmp_data.data(), false));
} else {
auto buffer_input = std::get_if<OpenClBuffer>(&tensor_obj);
if (!buffer_input) {
return absl::InternalError("Unexpected object");
}
RETURN_IF_ERROR(queue_->EnqueueReadBuffer(
buffer_input->memobj, tmp_data.size(), tmp_data.data(), false));
}
bool* output_data = reinterpret_cast<bool*>(cpu_memory->data);
    for (size_t i = 0; i < num_elements; ++i) {
output_data[i] = tmp_data[i];
}
return absl::OkStatus();
}
absl::Status CopyFromBoolCpu(const CpuMemory* cpu_memory,
const TensorObject& tensor_obj) {
const size_t num_elements = cpu_memory->size_bytes;
    const bool* bool_data = reinterpret_cast<const bool*>(cpu_memory->data);
    // Stage the values as bytes; the buffer is a member so that it outlives
    // an asynchronous enqueue below.
    tmp_bool_data_ = std::make_unique<std::vector<uint8_t>>();
    tmp_bool_data_->reserve(num_elements);
    for (size_t i = 0; i < num_elements; ++i) {
      tmp_bool_data_->push_back(bool_data[i]);
}
auto texture_output = std::get_if<OpenClTexture>(&tensor_obj);
if (texture_output) {
return queue_->EnqueueWriteImage(texture_output->memobj,
int3(region_[0], region_[1], region_[2]),
tmp_bool_data_->data(), async_);
}
auto buffer_output = std::get_if<OpenClBuffer>(&tensor_obj);
if (buffer_output) {
return queue_->EnqueueWriteBuffer(buffer_output->memobj,
tmp_bool_data_->size(),
tmp_bool_data_->data(), async_);
}
return absl::InternalError("Unexpected object");
}
std::array<size_t, 3> region_;
bool async_;
DataType input_data_type_;
DataType output_data_type_;
std::unique_ptr<std::vector<uint8_t>> tmp_bool_data_;
};
class OpenClTensorConverterBuilder : public TensorObjectConverterBuilder {
public:
explicit OpenClTensorConverterBuilder(Environment* environment)
: environment_(environment) {}
bool IsSupported(const TensorObjectDef& input,
const TensorObjectDef& output) const final {
const auto& input_def = input.object_def;
const auto& output_def = output.object_def;
return input.dimensions == output.dimensions &&
(TrivialCopier::IsSupported(input_def, output_def) ||
TensorToTensorConverter::IsSupported(input_def, output_def) ||
CpuCopier::IsSupported(input_def, output_def) ||
TensorToBHWCBufferConverter::IsSupported(input_def, output_def) ||
BHWCBufferToTensorConverter::IsSupported(input_def, output_def));
}
absl::Status MakeConverter(
const TensorObjectDef& input, const TensorObjectDef& output,
std::unique_ptr<TensorObjectConverter>* converter) final {
std::unique_ptr<OpenClConverterImpl> impl;
const auto& input_def = input.object_def;
const auto& output_def = output.object_def;
if (TrivialCopier::IsSupported(input_def, output_def)) {
impl = std::make_unique<TrivialCopier>();
} else if (TensorToTensorConverter::IsSupported(input_def, output_def)) {
impl = std::make_unique<TensorToTensorConverter>();
} else if (CpuCopier::IsSupported(input_def, output_def)) {
impl = std::make_unique<CpuCopier>( true);
} else if (TensorToBHWCBufferConverter::IsSupported(input_def,
output_def)) {
impl = std::make_unique<TensorToBHWCBufferConverter>();
} else if (BHWCBufferToTensorConverter::IsSupported(input_def,
output_def)) {
impl = std::make_unique<BHWCBufferToTensorConverter>();
} else {
return absl::UnimplementedError("Unsupported conversion");
}
RETURN_IF_ERROR(impl->Init(input, output, environment_));
impl->SetGpuInfo(environment_->GetDevicePtr()->GetInfo());
*converter = std::move(impl);
return absl::OkStatus();
}
Environment* environment_;
};
}  // namespace
std::unique_ptr<TensorObjectConverterBuilder> NewConverterBuilder(
Environment* environment) {
return std::make_unique<OpenClTensorConverterBuilder>(environment);
}
}  // namespace cl
}  // namespace gpu
} | #include "tensorflow/lite/delegates/gpu/gl/kernels/converter.h"
#include <algorithm>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/types/span.h"
#include "tensorflow/lite/delegates/gpu/common/convert.h"
#include "tensorflow/lite/delegates/gpu/common/shape.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
#include "tensorflow/lite/delegates/gpu/gl/egl_environment.h"
#include "tensorflow/lite/delegates/gpu/gl/gl_buffer.h"
#include "tensorflow/lite/delegates/gpu/gl/portable_gl31.h"
namespace tflite {
namespace gpu {
namespace gl {
namespace {
inline std::vector<float> GenerateFloats(float multiplier, int size) {
std::vector<float> v(size);
for (int i = 0; i < size; ++i) {
v[i] = multiplier * i * (i % 2 == 0 ? -1 : 1);
}
return v;
}
Dimensions ToDimensions(const BHWC& shape) {
return Dimensions(shape.b, shape.h, shape.w, shape.c);
}
absl::Status RunFromTensorTest(const BHWC& shape) {
std::vector<float> input =
GenerateFloats(0.01, GetElementsSizeForPHWC4(shape));
std::vector<float> output(shape.DimensionsProduct(), 0);
RETURN_IF_ERROR(
ConvertFromPHWC4(absl::MakeConstSpan(input.data(), input.size()), shape,
absl::MakeSpan(output.data(), output.size())));
std::unique_ptr<EglEnvironment> env;
RETURN_IF_ERROR(EglEnvironment::NewEglEnvironment(&env));
GlBuffer input_buffer;
RETURN_IF_ERROR(CreateReadOnlyShaderStorageBuffer(
absl::MakeConstSpan(input.data(), input.size()), &input_buffer));
GlBuffer output_buffer;
RETURN_IF_ERROR(CreateReadWriteShaderStorageBuffer<float>(
shape.DimensionsProduct(), &output_buffer));
auto builder = NewConverterBuilder(nullptr);
TensorObjectDef input_def;
input_def.object_def.data_type = DataType::FLOAT32;
input_def.object_def.data_layout = DataLayout::DHWC4;
input_def.object_def.object_type = ObjectType::OPENGL_SSBO;
input_def.dimensions = ToDimensions(shape);
TensorObjectDef output_def = input_def;
output_def.object_def.data_layout = DataLayout::BHWC;
std::unique_ptr<TensorObjectConverter> converter;
RETURN_IF_ERROR(builder->MakeConverter(input_def, output_def, &converter));
RETURN_IF_ERROR(converter->Convert(OpenGlBuffer{input_buffer.id()},
OpenGlBuffer{output_buffer.id()}));
std::vector<float> converted_output(output.size(), 0);
RETURN_IF_ERROR(output_buffer.Read(
absl::MakeSpan(converted_output.data(), converted_output.size())));
if (output != converted_output) {
return absl::InternalError("Outputs don't match");
}
return absl::OkStatus();
}
TEST(FromTensor, Smoke) {
for (int32_t h : {1, 2, 3, 7, 20}) {
for (int32_t w : {1, 2, 4, 5, 11}) {
for (int32_t c : {1, 2, 4, 5, 8, 9}) {
BHWC shape(1, h, w, c);
auto status = RunFromTensorTest(shape);
EXPECT_TRUE(status.ok()) << status << ", shape = " << shape.h << " "
<< shape.w << " " << shape.c;
}
}
}
}
absl::Status RunToTensorTest(const BHWC& shape) {
std::vector<float> input = GenerateFloats(0.01, shape.DimensionsProduct());
std::vector<float> output(GetElementsSizeForPHWC4(shape), 0);
RETURN_IF_ERROR(
ConvertToPHWC4(absl::MakeConstSpan(input.data(), input.size()), shape,
absl::MakeSpan(output.data(), output.size())));
std::unique_ptr<EglEnvironment> env;
RETURN_IF_ERROR(EglEnvironment::NewEglEnvironment(&env));
GlBuffer input_buffer;
RETURN_IF_ERROR(CreateReadOnlyShaderStorageBuffer(
absl::MakeConstSpan(input.data(), input.size()), &input_buffer));
GlBuffer output_buffer;
RETURN_IF_ERROR(CreateReadWriteShaderStorageBuffer<float>(
GetElementsSizeForPHWC4(shape), &output_buffer));
auto builder = NewConverterBuilder(nullptr);
TensorObjectDef input_def;
input_def.object_def.data_type = DataType::FLOAT32;
input_def.object_def.data_layout = DataLayout::BHWC;
input_def.object_def.object_type = ObjectType::OPENGL_SSBO;
input_def.dimensions = ToDimensions(shape);
TensorObjectDef output_def = input_def;
output_def.object_def.data_layout = DataLayout::DHWC4;
std::unique_ptr<TensorObjectConverter> converter;
RETURN_IF_ERROR(builder->MakeConverter(input_def, output_def, &converter));
RETURN_IF_ERROR(converter->Convert(OpenGlBuffer{input_buffer.id()},
OpenGlBuffer{output_buffer.id()}));
std::vector<float> converted_output(output.size(), 0);
RETURN_IF_ERROR(output_buffer.Read(
absl::MakeSpan(converted_output.data(), converted_output.size())));
if (output != converted_output) {
return absl::InternalError("Outputs don't match");
}
return absl::OkStatus();
}
TEST(ToTensor, Smoke) {
for (int32_t h : {1, 2, 3, 7, 20}) {
for (int32_t w : {1, 2, 4, 5, 11}) {
for (int32_t c : {1, 2, 4, 5, 8, 9}) {
BHWC shape(1, h, w, c);
auto status = RunToTensorTest(shape);
EXPECT_TRUE(status.ok()) << status << ", shape = " << shape.h << " "
<< shape.w << " " << shape.c;
}
}
}
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/cl/kernels/converter.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/gl/kernels/converter_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
1b92c6bd-6885-430a-9126-bdb0a1b79412 | cpp | tensorflow/tensorflow | cancellation | third_party/xla/xla/tsl/framework/cancellation.cc | third_party/xla/xla/tsl/framework/cancellation_test.cc | #include "xla/tsl/framework/cancellation.h"
#include <forward_list>
#include "absl/memory/memory.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/status.h"
namespace tsl {
const CancellationToken CancellationManager::kInvalidToken = -1;
CancellationManager::CancellationManager()
: is_cancelling_(false),
is_cancelled_(false),
next_cancellation_token_(0) {}
CancellationManager::CancellationManager(CancellationManager* parent)
: is_cancelling_(false), next_cancellation_token_(0), parent_(parent) {
is_cancelled_ = parent->RegisterChild(this);
}
void CancellationManager::StartCancel() {
StartCancelWithStatus(absl::OkStatus());
}
void CancellationManager::StartCancelWithStatus(const absl::Status& status) {
gtl::FlatMap<CancellationToken, CallbackConfiguration> callbacks_to_run;
std::forward_list<CancellationManager*> children_to_cancel;
Notification* cancelled_notification = nullptr;
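  // Phase 1: under the lock, take ownership of the registered callbacks and
  // detach child managers so both can be processed below without holding mu_.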
{
mutex_lock l(mu_);
if (is_cancelled_.load(std::memory_order_relaxed) || is_cancelling_) {
return;
}
is_cancelling_ = true;
if (state_) {
std::swap(state_->callbacks, callbacks_to_run);
CancellationManager* child = state_->first_child;
while (child != nullptr) {
children_to_cancel.push_front(child);
child->is_removed_from_parent_ = true;
child = child->next_sibling_;
}
state_->first_child = nullptr;
cancelled_notification = &state_->cancelled_notification;
}
}
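  // Phase 2: run callbacks and cancel detached children outside the lock to
  // avoid deadlocks with concurrent (de)registration.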
for (auto key_and_value : callbacks_to_run) {
CallbackConfiguration& config = key_and_value.second;
if (!status.ok() && config.log_error) {
LOG(WARNING) << "Cancellation callback \"" << config.name
<< "\" is triggered due to a "
<< (StatusGroup::IsDerived(status) ? "derived" : "root")
<< " error: " << status.ToString();
}
config.callback();
}
for (CancellationManager* child : children_to_cancel) {
child->StartCancelWithStatus(status);
}
{
mutex_lock l(mu_);
is_cancelling_ = false;
is_cancelled_.store(true, std::memory_order_release);
}
if (cancelled_notification) {
cancelled_notification->Notify();
}
}
bool CancellationManager::RegisterCallback(CancellationToken token,
CancelCallback callback) {
  return RegisterCallbackConfig(
      token, CallbackConfiguration{callback, /*name=*/"", /*log_error=*/false});
}
bool CancellationManager::RegisterCallbackWithErrorLogging(
CancellationToken token, CancelCallback callback,
absl::string_view callback_name) {
return RegisterCallbackConfig(
token, CallbackConfiguration{callback, std::string(callback_name), true});
}
bool CancellationManager::RegisterCallbackConfig(CancellationToken token,
CallbackConfiguration config) {
DCHECK_LT(token, next_cancellation_token_) << "Invalid cancellation token";
mutex_lock l(mu_);
bool should_register = !is_cancelled_ && !is_cancelling_;
if (should_register) {
if (!state_) {
state_ = absl::make_unique<State>();
}
std::swap(state_->callbacks[token], config);
}
return should_register;
}
bool CancellationManager::DeregisterCallback(CancellationToken token) {
mu_.lock();
if (is_cancelled_) {
mu_.unlock();
return false;
} else if (is_cancelling_) {
Notification* cancelled_notification =
state_ ? &state_->cancelled_notification : nullptr;
mu_.unlock();
if (cancelled_notification) {
cancelled_notification->WaitForNotification();
}
return false;
} else {
if (state_) {
state_->callbacks.erase(token);
}
mu_.unlock();
return true;
}
}
bool CancellationManager::RegisterChild(CancellationManager* child) {
mutex_lock l(mu_);
if (is_cancelled_.load(std::memory_order_relaxed) || is_cancelling_) {
child->is_removed_from_parent_ = true;
return true;
}
if (!state_) {
state_ = absl::make_unique<State>();
}
CancellationManager* current_head = state_->first_child;
state_->first_child = child;
child->prev_sibling_ = nullptr;
child->next_sibling_ = current_head;
if (current_head) {
current_head->prev_sibling_ = child;
}
return false;
}
void CancellationManager::DeregisterChild(CancellationManager* child) {
DCHECK_EQ(child->parent_, this);
Notification* cancelled_notification = nullptr;
{
mutex_lock l(mu_);
if (!child->is_removed_from_parent_) {
DCHECK(state_);
if (child->prev_sibling_ == nullptr) {
DCHECK_EQ(state_->first_child, child);
state_->first_child = child->next_sibling_;
} else {
child->prev_sibling_->next_sibling_ = child->next_sibling_;
}
if (child->next_sibling_ != nullptr) {
child->next_sibling_->prev_sibling_ = child->prev_sibling_;
}
child->is_removed_from_parent_ = true;
}
if (is_cancelling_) {
cancelled_notification = &state_->cancelled_notification;
}
}
if (cancelled_notification) {
cancelled_notification->WaitForNotification();
}
}
bool CancellationManager::TryDeregisterCallback(CancellationToken token) {
mutex_lock lock(mu_);
if (is_cancelled_ || is_cancelling_) {
return false;
} else {
if (state_) {
state_->callbacks.erase(token);
}
return true;
}
}
CancellationManager::~CancellationManager() {
if (parent_) {
parent_->DeregisterChild(this);
}
if (state_) {
StartCancel();
}
}
bool CancellationManager::IsCancelling() {
mutex_lock lock(mu_);
return is_cancelling_;
}
absl::Status RegisterCancellationCallback(
CancellationManager* cancellation_manager, CancelCallback callback,
std::function<void()>* deregister_fn) {
if (cancellation_manager) {
CancellationToken token = cancellation_manager->get_cancellation_token();
if (!cancellation_manager->RegisterCallback(token, std::move(callback))) {
return errors::Cancelled("Operation was cancelled");
}
*deregister_fn = [cancellation_manager, token]() {
cancellation_manager->DeregisterCallback(token);
};
} else {
VLOG(1) << "Cancellation manager is not set. Cancellation callback will "
"not be registered.";
*deregister_fn = []() {};
}
return absl::OkStatus();
}
} | #include "xla/tsl/framework/cancellation.h"
#include <algorithm>
#include <memory>
#include <numeric>
#include <random>
#include <vector>
#include "tsl/platform/notification.h"
#include "tsl/platform/status.h"
#include "tsl/platform/test.h"
#include "tsl/platform/threadpool.h"
namespace tsl {
TEST(Cancellation, SimpleNoCancel) {
bool is_cancelled = false;
CancellationManager* manager = new CancellationManager();
auto token = manager->get_cancellation_token();
bool registered = manager->RegisterCallback(
token, [&is_cancelled]() { is_cancelled = true; });
EXPECT_TRUE(registered);
bool deregistered = manager->DeregisterCallback(token);
EXPECT_TRUE(deregistered);
delete manager;
EXPECT_FALSE(is_cancelled);
}
TEST(Cancellation, SimpleCancel) {
bool is_cancelled = false;
CancellationManager* manager = new CancellationManager();
auto token = manager->get_cancellation_token();
bool registered = manager->RegisterCallback(
token, [&is_cancelled]() { is_cancelled = true; });
EXPECT_TRUE(registered);
manager->StartCancel();
EXPECT_TRUE(is_cancelled);
delete manager;
}
TEST(Cancellation, StartCancelTriggersAllCallbacks) {
bool is_cancelled_1 = false;
bool is_cancelled_2 = false;
auto manager = std::make_unique<CancellationManager>();
auto token_1 = manager->get_cancellation_token();
EXPECT_TRUE(manager->RegisterCallbackWithErrorLogging(
token_1, [&is_cancelled_1]() { is_cancelled_1 = true; }, "TestCallback"));
auto token_2 = manager->get_cancellation_token();
EXPECT_TRUE(manager->RegisterCallback(
token_2, [&is_cancelled_2]() { is_cancelled_2 = true; }));
manager->StartCancel();
EXPECT_TRUE(is_cancelled_1);
EXPECT_TRUE(is_cancelled_2);
}
TEST(Cancellation, StartCancelWithStatusTriggersAllCallbacks) {
bool is_cancelled_1 = false;
bool is_cancelled_2 = false;
auto manager = std::make_unique<CancellationManager>();
auto token_1 = manager->get_cancellation_token();
EXPECT_TRUE(manager->RegisterCallbackWithErrorLogging(
token_1, [&is_cancelled_1]() { is_cancelled_1 = true; }, "TestCallback"));
auto token_2 = manager->get_cancellation_token();
EXPECT_TRUE(manager->RegisterCallback(
token_2, [&is_cancelled_2]() { is_cancelled_2 = true; }));
manager->StartCancelWithStatus(absl::OkStatus());
EXPECT_TRUE(is_cancelled_1);
EXPECT_TRUE(is_cancelled_2);
}
TEST(Cancellation, CancelBeforeRegister) {
auto manager = std::make_unique<CancellationManager>();
auto token = manager->get_cancellation_token();
manager->StartCancel();
bool registered = manager->RegisterCallback(token, nullptr);
EXPECT_FALSE(registered);
}
TEST(Cancellation, DeregisterAfterCancel) {
bool is_cancelled = false;
auto manager = std::make_unique<CancellationManager>();
auto token = manager->get_cancellation_token();
bool registered = manager->RegisterCallback(
token, [&is_cancelled]() { is_cancelled = true; });
EXPECT_TRUE(registered);
manager->StartCancel();
EXPECT_TRUE(is_cancelled);
bool deregistered = manager->DeregisterCallback(token);
EXPECT_FALSE(deregistered);
}
TEST(Cancellation, CancelMultiple) {
bool is_cancelled_1 = false, is_cancelled_2 = false, is_cancelled_3 = false;
auto manager = std::make_unique<CancellationManager>();
auto token_1 = manager->get_cancellation_token();
bool registered_1 = manager->RegisterCallback(
token_1, [&is_cancelled_1]() { is_cancelled_1 = true; });
EXPECT_TRUE(registered_1);
auto token_2 = manager->get_cancellation_token();
bool registered_2 = manager->RegisterCallback(
token_2, [&is_cancelled_2]() { is_cancelled_2 = true; });
EXPECT_TRUE(registered_2);
EXPECT_FALSE(is_cancelled_1);
EXPECT_FALSE(is_cancelled_2);
manager->StartCancel();
EXPECT_TRUE(is_cancelled_1);
EXPECT_TRUE(is_cancelled_2);
EXPECT_FALSE(is_cancelled_3);
auto token_3 = manager->get_cancellation_token();
bool registered_3 = manager->RegisterCallback(
token_3, [&is_cancelled_3]() { is_cancelled_3 = true; });
EXPECT_FALSE(registered_3);
EXPECT_FALSE(is_cancelled_3);
}
TEST(Cancellation, IsCancelled) {
auto cm = std::make_unique<CancellationManager>();
thread::ThreadPool w(Env::Default(), "test", 4);
std::vector<Notification> done(8);
for (size_t i = 0; i < done.size(); ++i) {
Notification* n = &done[i];
w.Schedule([n, &cm]() {
while (!cm->IsCancelled()) {
}
ASSERT_FALSE(cm->IsCancelling());
n->Notify();
});
}
  Env::Default()->SleepForMicroseconds(1000000);  // 1 second
cm->StartCancel();
for (size_t i = 0; i < done.size(); ++i) {
done[i].WaitForNotification();
}
}
TEST(Cancellation, IsCancelling) {
CancellationManager cm;
Notification started_cancelling;
Notification can_finish_cancel;
Notification cancel_done;
thread::ThreadPool w(Env::Default(), "test", 1);
auto token = cm.get_cancellation_token();
ASSERT_TRUE(
cm.RegisterCallback(token, [&started_cancelling, &can_finish_cancel]() {
started_cancelling.Notify();
can_finish_cancel.WaitForNotification();
}));
w.Schedule([&cm, &cancel_done]() {
cm.StartCancel();
cancel_done.Notify();
});
started_cancelling.WaitForNotification();
ASSERT_TRUE(cm.IsCancelling());
can_finish_cancel.Notify();
cancel_done.WaitForNotification();
ASSERT_FALSE(cm.IsCancelling());
ASSERT_TRUE(cm.IsCancelled());
}
TEST(Cancellation, TryDeregisterWithoutCancel) {
bool is_cancelled = false;
auto manager = std::make_unique<CancellationManager>();
auto token = manager->get_cancellation_token();
bool registered = manager->RegisterCallback(
token, [&is_cancelled]() { is_cancelled = true; });
EXPECT_TRUE(registered);
bool deregistered = manager->TryDeregisterCallback(token);
EXPECT_TRUE(deregistered);
EXPECT_FALSE(is_cancelled);
}
TEST(Cancellation, TryDeregisterAfterCancel) {
bool is_cancelled = false;
auto manager = std::make_unique<CancellationManager>();
auto token = manager->get_cancellation_token();
bool registered = manager->RegisterCallback(
token, [&is_cancelled]() { is_cancelled = true; });
EXPECT_TRUE(registered);
manager->StartCancel();
EXPECT_TRUE(is_cancelled);
bool deregistered = manager->TryDeregisterCallback(token);
EXPECT_FALSE(deregistered);
}
TEST(Cancellation, TryDeregisterDuringCancel) {
Notification cancel_started, finish_callback, cancel_complete;
auto manager = std::make_unique<CancellationManager>();
auto token = manager->get_cancellation_token();
bool registered = manager->RegisterCallback(token, [&]() {
cancel_started.Notify();
finish_callback.WaitForNotification();
});
EXPECT_TRUE(registered);
thread::ThreadPool w(Env::Default(), "test", 1);
w.Schedule([&]() {
manager->StartCancel();
cancel_complete.Notify();
});
cancel_started.WaitForNotification();
bool deregistered = manager->TryDeregisterCallback(token);
EXPECT_FALSE(deregistered);
finish_callback.Notify();
cancel_complete.WaitForNotification();
}
TEST(Cancellation, Parent_CancelManyChildren) {
CancellationManager parent;
std::vector<std::unique_ptr<CancellationManager>> children;
for (size_t i = 0; i < 5; ++i) {
children.push_back(absl::make_unique<CancellationManager>(&parent));
EXPECT_FALSE(children.back()->IsCancelled());
}
parent.StartCancel();
for (auto& child : children) {
EXPECT_TRUE(child->IsCancelled());
}
}
TEST(Cancellation, Parent_NotCancelled) {
CancellationManager parent;
{
CancellationManager child(&parent);
child.StartCancel();
EXPECT_TRUE(child.IsCancelled());
}
EXPECT_FALSE(parent.IsCancelled());
}
TEST(Cancellation, Parent_AlreadyCancelled) {
CancellationManager parent;
parent.StartCancel();
EXPECT_TRUE(parent.IsCancelled());
CancellationManager child(&parent);
EXPECT_TRUE(child.IsCancelled());
}
TEST(Cancellation, Parent_RandomDestructionOrder) {
CancellationManager parent;
std::random_device rd;
std::mt19937 g(rd());
for (int rounds = 0; rounds < 100; ++rounds) {
std::vector<std::unique_ptr<CancellationManager>> children;
std::uniform_int_distribution<int> dist(1, 9);
const size_t round_size = dist(rd);
for (size_t i = 0; i < round_size; ++i) {
children.push_back(absl::make_unique<CancellationManager>(&parent));
EXPECT_FALSE(children.back()->IsCancelled());
}
std::vector<size_t> destruction_order(round_size);
std::iota(destruction_order.begin(), destruction_order.end(), 0);
std::shuffle(destruction_order.begin(), destruction_order.end(), g);
for (size_t index : destruction_order) {
children[index].reset();
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/framework/cancellation.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/framework/cancellation_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
c83ba0db-1096-4396-a51b-56cd13874e1c | cpp | tensorflow/tensorflow | attr_builder | tensorflow/core/common_runtime/eager/attr_builder.cc | tensorflow/core/common_runtime/eager/attr_builder_test.cc | #include "tensorflow/core/common_runtime/eager/attr_builder.h"
#include <memory>
#include "absl/status/status.h"
#include "tensorflow/core/common_runtime/device_factory.h"
#include "tensorflow/core/common_runtime/rendezvous_mgr.h"
#include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/attr_value_util.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/gtl/map_util.h"
#include "tensorflow/core/platform/fingerprint.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/public/version.h"
#include "tensorflow/core/util/tensor_slice_reader_cache.h"
namespace tensorflow {
namespace {
mutex g_op_name_to_attr_type_map_lock(LINKER_INITIALIZED);
tensorflow::gtl::FlatMap<string, const AttrTypeMap*>* OpNameToAttrTypeMap() {
static auto* const m =
new tensorflow::gtl::FlatMap<string, const AttrTypeMap*>;
return m;
}
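// The high bit of each cached entry marks a list-valued attribute; the
// remaining bits store the TF_AttrType.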
const uint32 kIsList = 1U << 31;
AttrTypeMap* DefaultFunctionAttrTypeMap() {
AttrTypeMap* map = new AttrTypeMap();
(*map)["executor_type"] = TF_ATTR_STRING;
(*map)["config_proto"] = TF_ATTR_STRING;
return map;
}
const AttrTypeMap* GetDefaultFunctionAttrTypeMap() {
static const AttrTypeMap* map = DefaultFunctionAttrTypeMap();
return map;
}
}
Status OpDefForOp(const string& op_name, const OpDef** op_def) {
const OpRegistrationData* op_reg_data = nullptr;
Status s = OpRegistry::Global()->LookUp(op_name, &op_reg_data);
if (s.ok()) {
*op_def = &op_reg_data->op_def;
}
return s;
}
Status AttrTypeMapForOp(const char* op_name, const AttrTypeMap** out,
bool* is_function) {
{
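    // Fast path: consult the cached map under a shared (reader) lock.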
tf_shared_lock l(g_op_name_to_attr_type_map_lock);
*is_function = false;
*out = gtl::FindPtrOrNull(*OpNameToAttrTypeMap(), op_name);
if (*out != nullptr) return absl::OkStatus();
}
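  // Slow path: re-check under the exclusive lock before building and caching
  // the map (double-checked locking).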
mutex_lock l(g_op_name_to_attr_type_map_lock);
*out = gtl::FindPtrOrNull(*OpNameToAttrTypeMap(), op_name);
if (*out != nullptr) return absl::OkStatus();
const OpDef* op_def = nullptr;
Status s = OpDefForOp(op_name, &op_def);
if (absl::IsNotFound(s)) {
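    // An op name absent from the registry is assumed to be a function, which
    // uses a shared default attribute map.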
*out = GetDefaultFunctionAttrTypeMap();
*is_function = true;
return absl::OkStatus();
} else if (!s.ok()) {
return s;
}
std::unique_ptr<AttrTypeMap> m(new AttrTypeMap);
for (const auto& attr : op_def->attr()) {
string type = attr.type();
const bool is_list = (type.length() > 6 && type.compare(0, 4, "list") == 0);
if (is_list) {
type = type.substr(5, type.length() - 6);
}
uint32 t = is_list ? kIsList : 0;
if (type == "string") {
t |= TF_ATTR_STRING;
} else if (type == "int") {
t |= TF_ATTR_INT;
} else if (type == "float") {
t |= TF_ATTR_FLOAT;
} else if (type == "bool") {
t |= TF_ATTR_BOOL;
} else if (type == "type") {
t |= TF_ATTR_TYPE;
} else if (type == "shape") {
t |= TF_ATTR_SHAPE;
} else if (type == "tensor") {
t |= TF_ATTR_TENSOR;
} else if (type == "func") {
t |= TF_ATTR_FUNC;
} else {
return errors::Unimplemented(
"TODO(agarwal): Enable support for ops with attributes of type '",
type, "'");
}
gtl::InsertIfNotPresent(m.get(), attr.name(), t);
}
*out = m.get();
auto r = OpNameToAttrTypeMap()->emplace(op_name, m.release());
DCHECK(r.second) << "AttrTypeMap already exists for " << op_name;
return absl::OkStatus();
}
#define DEFINE_GET_ATTR(TYPE, FIELD, ATTR_TYPE) \
template <> \
Status AttrBuilder::Get(StringPiece attr_name, TYPE* value) const { \
auto it = encoded_attrs_.find(string(attr_name)); \
if (it == encoded_attrs_.end()) { \
return errors::NotFound("No attr named '", attr_name, \
"' found in AttrBuilder for ", op_name_); \
} \
attr_tmp_.ParseFromString(it->second); \
TF_RETURN_IF_ERROR(AttrValueHasType(attr_tmp_, ATTR_TYPE)); \
*value = attr_tmp_.FIELD(); \
return OkStatus(); \
}
DEFINE_GET_ATTR(float, f, "float");
DEFINE_GET_ATTR(int, i, "int");
DEFINE_GET_ATTR(int64_t, i, "int");
DEFINE_GET_ATTR(bool, b, "bool");
DEFINE_GET_ATTR(tensorflow::DataType, type, "type");
#undef DEFINE_GET_ATTR
template <>
Status AttrBuilder::Get(StringPiece attr_name,
absl::InlinedVector<DataType, 4>* value) const {
auto it = encoded_attrs_.find(string(attr_name));
if (it == encoded_attrs_.end()) {
return errors::NotFound("No attr named '", attr_name,
"' found in AttrBuilder for ", op_name_);
}
attr_tmp_.ParseFromString(it->second);
TF_RETURN_IF_ERROR(AttrValueHasType(attr_tmp_, "list(type)"));
  for (int i = 0; i < attr_tmp_.list().type_size(); ++i) {
value->push_back(attr_tmp_.list().type(i));
}
return absl::OkStatus();
}
AttrBuilder& AttrBuilder::NumInputs(int n) {
num_inputs_ = n;
node_def_finalized_ = false;
return *this;
}
void AttrBuilder::FillAttrValueMap(AttrValueMap* m) const {
for (auto& entry : encoded_attrs_) {
attr_tmp_.ParseFromString(entry.second);
m->insert(AttrValueMap::value_type(entry.first, attr_tmp_));
}
const OpDef* op_def = nullptr;
Status s = OpDefForOp(op_name().c_str(), &op_def);
if (!s.ok()) return;
DCHECK(op_def);
for (const auto& attr_def : op_def->attr()) {
if (attr_def.has_default_value() && !m->count(attr_def.name())) {
SetInAttrValueMap(m, attr_def.name(), attr_def.default_value());
}
}
}
namespace {
bool ValueMatchesDefault(const OpDef* op_def, const string& attr_name,
const AttrValue& attr_value) {
for (const OpDef::AttrDef& attr_def : op_def->attr()) {
if (attr_def.name() == attr_name && attr_def.has_default_value() &&
AreAttrValuesEqual(attr_def.default_value(), attr_value)) {
return true;
}
}
return false;
}
}
void AttrBuilder::FillAttrValueMapWithoutDefaults(AttrValueMap* m) const {
const OpDef* op_def = nullptr;
Status s = OpDefForOp(op_name().c_str(), &op_def);
for (auto& entry : encoded_attrs_) {
attr_tmp_.ParseFromString(entry.second);
if (!s.ok() || !ValueMatchesDefault(op_def, entry.first, attr_tmp_)) {
m->insert(AttrValueMap::value_type(entry.first, attr_tmp_));
}
}
}
void AttrBuilder::AddAttrIfNotPresent(StringPiece attr_name,
const AttrValue& value) {
encoded_attrs_.emplace(string(attr_name), value.SerializeAsString());
}
const NodeDef& AttrBuilder::BuildNodeDef() {
if (node_def_finalized_) return node_def_;
node_def_.Clear();
node_def_.set_name(op_name_);
node_def_.set_op(op_name_);
for (int i = 0; i < num_inputs_; ++i) {
node_def_.add_input("dummy_input");
}
FillAttrValueMap(node_def_.mutable_attr());
node_def_finalized_ = true;
return node_def_;
}
void AttrBuilder::CopyAttributes(const AttrBuilder& other) {
encoded_attrs_.insert(other.encoded_attrs_.begin(),
other.encoded_attrs_.end());
}
Status AttrTypeByName(const AttrTypeMap& m, const string& attr_name,
TF_AttrType* out, unsigned char* is_list) {
auto* t = gtl::FindOrNull(m, attr_name);
if (t == nullptr) {
return errors::InvalidArgument("Attribute '", attr_name,
"' does not exist for this operation");
}
*out = static_cast<TF_AttrType>(*t & ~kIsList);
if (*t & kIsList) {
*is_list = 1;
} else {
*is_list = 0;
}
return absl::OkStatus();
}
namespace {
void CombineUnordered(const tensorflow::Fprint128& a,
tensorflow::Fprint128* b) {
b->low64 += a.low64;
b->high64 += a.high64;
}
inline tensorflow::Fprint128 CacheKeyHelper(StringPiece s,
const tensorflow::Fprint128& b) {
tensorflow::Fprint128 a = tensorflow::Fingerprint128(s);
return FingerprintCat128(a, b);
}
inline tensorflow::Fprint128 CacheKeyHelper(StringPiece s, uint64 b) {
return CacheKeyHelper(s, {b, b});
}
}
tensorflow::Fprint128 AttrBuilder::CacheKey(const StringPiece device) {
if (!cached_cache_key_ || device != device_for_cached_cache_key_) {
cached_cache_key_ = BuildCacheKeyForDevice(device);
device_for_cached_cache_key_ = string(device);
}
return *cached_cache_key_;
}
tensorflow::Fprint128 AttrBuilder::BuildCacheKeyForDevice(
const StringPiece device) const {
tensorflow::Fprint128 f = tensorflow::Fingerprint128(op_name());
f = tsl::FingerprintCat128(f, tensorflow::Fingerprint128(device));
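  // CombineUnordered is commutative, so the resulting fingerprint does not
  // depend on the attr map's iteration order.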
for (const auto& p : encoded_attrs_) {
CombineUnordered(
CacheKeyHelper(p.first, tensorflow::Fingerprint128(p.second)), &f);
}
return f;
}
void AttrBuilder::GetNameAttrList(
tensorflow::NameAttrList* name_and_attrs) const {
FillAttrValueMap(name_and_attrs->mutable_attr());
name_and_attrs->set_name(op_name());
}
Status AttrBuilder::GetTypeList(
absl::string_view attr_name,
absl::InlinedVector<DataType, 4>* type_list) const {
return Get(attr_name, type_list);
}
bool AttrBuilder::GetInt(absl::string_view attr_name, int64_t* result) const {
Status s = Get(attr_name, result);
return s.ok();
}
bool AttrBuilder::GetFloat(absl::string_view attr_name, float* result) const {
Status s = Get(attr_name, result);
return s.ok();
}
bool AttrBuilder::GetBool(absl::string_view attr_name, bool* result) const {
Status s = Get(attr_name, result);
return s.ok();
}
bool AttrBuilder::GetType(absl::string_view attr_name,
tensorflow::DataType* result) const {
Status s = Get(attr_name, result);
return s.ok();
}
} | #include "tensorflow/core/common_runtime/eager/attr_builder.h"
#include <memory>
#include <vector>
#include "tensorflow/c/c_api.h"
#include "tensorflow/cc/client/client_session.h"
#include "tensorflow/cc/framework/ops.h"
#include "tensorflow/cc/framework/scope.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/common_runtime/device_factory.h"
#include "tensorflow/core/common_runtime/device_mgr.h"
#include "tensorflow/core/common_runtime/function.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/lib/gtl/array_slice.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
#include "tensorflow/core/public/version.h"
namespace tensorflow {
namespace {
TEST(AttrTypeMap, Lookup) {
const AttrTypeMap* m = nullptr;
bool is_function = false;
Status s = AttrTypeMapForOp("SomeFunctionName", &m, &is_function);
EXPECT_TRUE(s.ok());
EXPECT_TRUE(is_function);
ASSERT_NE(m->end(), m->find("executor_type"));
EXPECT_EQ(TF_ATTR_STRING, m->find("executor_type")->second);
ASSERT_NE(m->end(), m->find("config_proto"));
EXPECT_EQ(TF_ATTR_STRING, m->find("config_proto")->second);
is_function = true;
s = AttrTypeMapForOp("MatMul", &m, &is_function);
EXPECT_FALSE(is_function);
ASSERT_TRUE(s.ok()) << s;
TF_AttrType t;
unsigned char is_list = 1;
s = AttrTypeByName(*m, "ThisAttribyteCannotPossiblyExist", &t, &is_list);
EXPECT_FALSE(s.ok());
EXPECT_NE(is_list, 0);
s = AttrTypeByName(*m, "transpose_a", &t, &is_list);
ASSERT_TRUE(s.ok()) << s;
EXPECT_EQ(TF_ATTR_BOOL, t);
EXPECT_EQ(is_list, 0);
s = AttrTypeMapForOp("Squeeze", &m, &is_function);
ASSERT_TRUE(s.ok()) << s;
s = AttrTypeByName(*m, "squeeze_dims", &t, &is_list);
ASSERT_TRUE(s.ok()) << s;
EXPECT_EQ(TF_ATTR_INT, t);
EXPECT_NE(is_list, 0);
}
TEST(AttrTypeMap, CacheKey) {
AttrBuilder a("op_name");
a.NumInputs(2);
a.Set("T", TF_FLOAT);
tensorflow::Fprint128 cache_key = a.CacheKey("cpu:0");
ASSERT_FALSE(cache_key == a.CacheKey("cpu:1"));
ASSERT_TRUE(cache_key == a.CacheKey("cpu:0"));
a.Set("x", 1.0);
ASSERT_FALSE(cache_key == a.CacheKey("cpu:0"));
}
string ToString(const AttrValueMap& m) {
std::vector<string> strs;
for (const auto& e : m) {
strs.push_back(absl::StrCat(e.first, " -> ", e.second.DebugString()));
}
return absl::StrJoin(strs, "\n");
}
TEST(AttrBuilder, FillAttrValueMapWithoutDefaults_MatMul) {
AttrBuilder a("MatMul");
a.Set("transpose_a", true);
a.Set("transpose_b", false);
AttrValueMap m;
a.FillAttrValueMapWithoutDefaults(&m);
ASSERT_EQ(1, m.size()) << ToString(m);
ASSERT_EQ(true, m["transpose_a"].b()) << ToString(m);
}
TEST(AttrBuilder, FillAttrValueMapWithoutDefaults_UnknownOp) {
AttrBuilder a("SomeUnknownOp");
a.Set("transpose_a", true);
a.Set("transpose_b", false);
AttrValueMap m;
a.FillAttrValueMapWithoutDefaults(&m);
ASSERT_EQ(2, m.size()) << ToString(m);
ASSERT_EQ(true, m["transpose_a"].b()) << ToString(m);
ASSERT_EQ(false, m["transpose_b"].b()) << ToString(m);
}
TEST(AttrBuilder, GetTypeAndNumber) {
AttrBuilder a("Concat");
a.Set("T", DT_FLOAT);
a.Set("N", 2);
DataType type;
ASSERT_TRUE(a.GetType("T", &type));
ASSERT_EQ(DT_FLOAT, type);
int64_t num;
ASSERT_TRUE(a.GetInt("N", &num));
ASSERT_EQ(2, num);
}
TEST(AttrBuilder, GetTypeList) {
AttrBuilder a("IdentityN");
a.Set("T", absl::Span<const DataType>({DT_FLOAT, DT_INT64}));
absl::InlinedVector<DataType, 4> type_list;
Status s = a.GetTypeList("T", &type_list);
ASSERT_TRUE(s.ok()) << s;
ASSERT_EQ(2, type_list.size()) << type_list.size();
ASSERT_EQ(DT_FLOAT, type_list[0]) << type_list[0];
ASSERT_EQ(DT_INT64, type_list[1]) << type_list[1];
}
TEST(AttrBuilder, BuildNodeDef) {
AttrBuilder a("MatMul");
a.Set("transpose_a", true);
a.Set("transpose_b", false);
a.NumInputs(2);
const NodeDef& node_def = a.BuildNodeDef();
auto attrs = node_def.attr();
EXPECT_EQ(node_def.name(), "MatMul");
ASSERT_NE(attrs.find("transpose_a"), attrs.end());
EXPECT_EQ(attrs.find("transpose_a")->second.b(), true);
ASSERT_NE(attrs.find("transpose_b"), attrs.end());
EXPECT_EQ(attrs.find("transpose_b")->second.b(), false);
EXPECT_EQ(node_def.input_size(), 2);
}
TEST(AttrBuilder, BuildNodeDef_Modified) {
AttrBuilder a("MatMul");
a.Set("transpose_a", true);
a.Set("transpose_b", false);
a.Set("grad_x", true);
a.Set("grad_y", false);
a.NumInputs(2);
const NodeDef& node_def = a.BuildNodeDef();
EXPECT_EQ(node_def.attr().size(), 6);
a.Set("new_attr", 15);
a.NumInputs(3);
const NodeDef& node_def2 = a.BuildNodeDef();
auto attrs = node_def2.attr();
EXPECT_EQ(attrs.size(), 7);
ASSERT_NE(attrs.find("transpose_a"), attrs.end());
EXPECT_EQ(attrs.find("transpose_a")->second.b(), true);
ASSERT_NE(attrs.find("transpose_b"), attrs.end());
EXPECT_EQ(attrs.find("transpose_b")->second.b(), false);
ASSERT_NE(attrs.find("grad_x"), attrs.end());
EXPECT_EQ(attrs.find("grad_x")->second.b(), true);
ASSERT_NE(attrs.find("grad_y"), attrs.end());
EXPECT_EQ(attrs.find("grad_y")->second.b(), false);
ASSERT_NE(attrs.find("new_attr"), attrs.end());
EXPECT_EQ(attrs.find("new_attr")->second.i(), 15);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/eager/attr_builder.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/eager/attr_builder_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
33c10622-4577-400a-a33f-12d488fbe8c1 | cpp | google/tensorstore | compressed_tuple | tensorstore/internal/container/compressed_tuple.h | tensorstore/internal/container/compressed_tuple_test.cc | #ifndef TENSORSTORE_INTERNAL_CONTAINER_COMPRESSED_TUPLE_H_
#define TENSORSTORE_INTERNAL_CONTAINER_COMPRESSED_TUPLE_H_
#include <stddef.h>
#include <initializer_list>
#include <tuple>
#include <type_traits>
#include <utility>
#if defined(_MSC_VER) && !defined(__NVCC__)
#define TENSORSTORE_INTERNAL_COMPRESSED_TUPLE_DECLSPEC __declspec(empty_bases)
#else
#define TENSORSTORE_INTERNAL_COMPRESSED_TUPLE_DECLSPEC
#endif
namespace tensorstore {
namespace internal_container {
template <typename... Ts>
class CompressedTuple;
namespace internal_compressed_tuple {
template <typename D, size_t I>
struct Elem;
template <typename... B, size_t I>
struct Elem<CompressedTuple<B...>, I>
: std::tuple_element<I, std::tuple<B...>> {};
template <typename D, size_t I>
using ElemT = typename Elem<D, I>::type;
struct uses_inheritance {};
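// An empty, non-final class can be inherited from rather than stored as a
// member, letting the empty base optimization (EBO) eliminate its storage.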
template <typename T>
constexpr bool ShouldUseBase() {
return std::is_class<T>::value && std::is_empty<T>::value &&
!std::is_final<T>::value &&
!std::is_base_of<uses_inheritance, T>::value;
}
template <typename T, size_t I, bool UseBase = ShouldUseBase<T>()>
struct Storage {
T value;
constexpr Storage() = default;
template <typename V>
explicit constexpr Storage(std::in_place_t, V&& v)
: value(std::forward<V>(v)) {}
constexpr const T& get() const& { return value; }
T& get() & { return value; }
constexpr const T&& get() const&& { return std::move(*this).value; }
T&& get() && { return std::move(*this).value; }
};
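// Specialization for empty types: storing T as a base class lets it occupy
// zero bytes.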
template <typename T, size_t I>
struct TENSORSTORE_INTERNAL_COMPRESSED_TUPLE_DECLSPEC Storage<T, I, true> : T {
constexpr Storage() = default;
template <typename V>
explicit constexpr Storage(std::in_place_t, V&& v) : T(std::forward<V>(v)) {}
constexpr const T& get() const& { return *this; }
T& get() & { return *this; }
constexpr const T&& get() const&& { return std::move(*this); }
T&& get() && { return std::move(*this); }
};
template <typename D, typename I, bool ShouldAnyUseBase>
struct TENSORSTORE_INTERNAL_COMPRESSED_TUPLE_DECLSPEC CompressedTupleImpl;
template <typename... Ts, size_t... I, bool ShouldAnyUseBase>
struct TENSORSTORE_INTERNAL_COMPRESSED_TUPLE_DECLSPEC CompressedTupleImpl<
CompressedTuple<Ts...>, std::index_sequence<I...>, ShouldAnyUseBase>
: uses_inheritance,
Storage<Ts, std::integral_constant<size_t, I>::value>... {
constexpr CompressedTupleImpl() = default;
template <typename... Vs>
explicit constexpr CompressedTupleImpl(std::in_place_t, Vs&&... args)
: Storage<Ts, I>(std::in_place, std::forward<Vs>(args))... {}
friend CompressedTuple<Ts...>;
};
template <typename... Ts, size_t... I>
struct TENSORSTORE_INTERNAL_COMPRESSED_TUPLE_DECLSPEC CompressedTupleImpl<
CompressedTuple<Ts...>, std::index_sequence<I...>, false>
: Storage<Ts, std::integral_constant<size_t, I>::value, false>... {
constexpr CompressedTupleImpl() = default;
template <typename... Vs>
explicit constexpr CompressedTupleImpl(std::in_place_t, Vs&&... args)
: Storage<Ts, I, false>(std::in_place, std::forward<Vs>(args))... {}
friend CompressedTuple<Ts...>;
};
std::false_type Or(std::initializer_list<std::false_type>);
std::true_type Or(std::initializer_list<bool>);
template <typename... Ts>
constexpr bool ShouldAnyUseBase() {
return decltype(Or(
{std::integral_constant<bool, ShouldUseBase<Ts>()>()...})){};
}
template <typename T, typename V>
using TupleElementMoveConstructible =
typename std::conditional<std::is_reference<T>::value,
std::is_convertible<V, T>,
std::is_constructible<T, V&&>>::type;
template <bool SizeMatches, class T, class... Vs>
struct TupleMoveConstructible : std::false_type {};
template <class... Ts, class... Vs>
struct TupleMoveConstructible<true, CompressedTuple<Ts...>, Vs...>
: std::integral_constant<
bool,
std::conjunction<TupleElementMoveConstructible<Ts, Vs&&>...>::value> {
};
template <typename T>
struct compressed_tuple_size;
template <typename... Es>
struct compressed_tuple_size<CompressedTuple<Es...>>
: public std::integral_constant<size_t, sizeof...(Es)> {};
template <class T, class... Vs>
struct TupleItemsMoveConstructible
: std::integral_constant<
bool, TupleMoveConstructible<compressed_tuple_size<T>::value ==
sizeof...(Vs),
T, Vs...>::value> {};
}
template <typename... Ts>
class TENSORSTORE_INTERNAL_COMPRESSED_TUPLE_DECLSPEC CompressedTuple
: private internal_compressed_tuple::CompressedTupleImpl<
CompressedTuple<Ts...>, std::index_sequence_for<Ts...>,
internal_compressed_tuple::ShouldAnyUseBase<Ts...>()> {
private:
template <int I>
using ElemT = internal_compressed_tuple::ElemT<CompressedTuple, I>;
template <int I>
using StorageT = internal_compressed_tuple::Storage<ElemT<I>, I>;
public:
#if defined(_MSC_VER)
constexpr CompressedTuple() : CompressedTuple::CompressedTupleImpl() {}
#else
constexpr CompressedTuple() = default;
#endif
explicit constexpr CompressedTuple(const Ts&... base)
: CompressedTuple::CompressedTupleImpl(std::in_place, base...) {}
template <typename First, typename... Vs,
std::enable_if_t<
std::conjunction<
std::negation<std::is_same<void(CompressedTuple),
void(std::decay_t<First>)>>,
internal_compressed_tuple::TupleItemsMoveConstructible<
CompressedTuple<Ts...>, First, Vs...>>::value,
bool> = true>
explicit constexpr CompressedTuple(First&& first, Vs&&... base)
: CompressedTuple::CompressedTupleImpl(std::in_place,
std::forward<First>(first),
std::forward<Vs>(base)...) {}
template <int I>
ElemT<I>& get() & {
return StorageT<I>::get();
}
template <int I>
constexpr const ElemT<I>& get() const& {
return StorageT<I>::get();
}
template <int I>
ElemT<I>&& get() && {
return std::move(*this).StorageT<I>::get();
}
template <int I>
constexpr const ElemT<I>&& get() const&& {
return std::move(*this).StorageT<I>::get();
}
};
template <>
class TENSORSTORE_INTERNAL_COMPRESSED_TUPLE_DECLSPEC CompressedTuple<> {};
}
}
#undef TENSORSTORE_INTERNAL_COMPRESSED_TUPLE_DECLSPEC
#endif | #include "tensorstore/internal/container/compressed_tuple.h"
#include <any>
#include <memory>
#include <optional>
#include <set>
#include <string>
#include <type_traits>
#include <utility>
#include <gtest/gtest.h>
using tensorstore::internal_container::CompressedTuple;
namespace {
struct CopyableMovableInstance {
explicit CopyableMovableInstance(int x) : value_(x) { ++num_instances; }
CopyableMovableInstance(const CopyableMovableInstance& rhs) {
value_ = rhs.value_;
++num_copies;
}
CopyableMovableInstance(CopyableMovableInstance&& rhs) {
value_ = rhs.value_;
++num_moves;
}
CopyableMovableInstance& operator=(const CopyableMovableInstance& rhs) {
value_ = rhs.value_;
++num_copies;
return *this;
}
CopyableMovableInstance& operator=(CopyableMovableInstance&& rhs) {
value_ = rhs.value_;
++num_moves;
return *this;
}
int value() const& { return value_; }
int value() const&& { return value_; }
int value_;
static void Reset() {
num_instances = 0;
num_moves = 0;
num_copies = 0;
num_swaps = 0;
}
static int num_instances;
static int num_moves;
static int num_copies;
static int num_swaps;
};
int CopyableMovableInstance::num_instances{0};
int CopyableMovableInstance::num_moves{0};
int CopyableMovableInstance::num_copies{0};
int CopyableMovableInstance::num_swaps{0};
enum class CallType { kConstRef, kConstMove };
template <int>
struct Empty {
constexpr CallType value() const& { return CallType::kConstRef; }
constexpr CallType value() const&& { return CallType::kConstMove; }
};
template <typename T>
struct NotEmpty {
T value;
};
template <typename T, typename U>
struct TwoValues {
T value1;
U value2;
};
TEST(CompressedTupleTest, Sizeof) {
EXPECT_EQ(sizeof(int), sizeof(CompressedTuple<int>));
EXPECT_EQ(sizeof(int), sizeof(CompressedTuple<int, Empty<0>>));
EXPECT_EQ(sizeof(int), sizeof(CompressedTuple<int, Empty<0>, Empty<1>>));
EXPECT_EQ(sizeof(int),
sizeof(CompressedTuple<int, Empty<0>, Empty<1>, Empty<2>>));
EXPECT_EQ(sizeof(TwoValues<int, double>),
sizeof(CompressedTuple<int, NotEmpty<double>>));
EXPECT_EQ(sizeof(TwoValues<int, double>),
sizeof(CompressedTuple<int, Empty<0>, NotEmpty<double>>));
EXPECT_EQ(sizeof(TwoValues<int, double>),
sizeof(CompressedTuple<int, Empty<0>, NotEmpty<double>, Empty<1>>));
}
TEST(CompressedTupleTest, OneMoveOnRValueConstructionTemp) {
CopyableMovableInstance::Reset();
CompressedTuple<CopyableMovableInstance> x1(CopyableMovableInstance(1));
EXPECT_EQ(CopyableMovableInstance::num_instances, 1);
EXPECT_EQ(CopyableMovableInstance::num_copies, 0);
EXPECT_LE(CopyableMovableInstance::num_moves, 1);
EXPECT_EQ(x1.get<0>().value(), 1);
}
TEST(CompressedTupleTest, OneMoveOnRValueConstructionMove) {
CopyableMovableInstance::Reset();
CopyableMovableInstance i1(1);
CompressedTuple<CopyableMovableInstance> x1(std::move(i1));
EXPECT_EQ(CopyableMovableInstance::num_instances, 1);
EXPECT_EQ(CopyableMovableInstance::num_copies, 0);
EXPECT_LE(CopyableMovableInstance::num_moves, 1);
EXPECT_EQ(x1.get<0>().value(), 1);
}
TEST(CompressedTupleTest, OneMoveOnRValueConstructionMixedTypes) {
CopyableMovableInstance::Reset();
CopyableMovableInstance i1(1);
CopyableMovableInstance i2(2);
Empty<0> empty;
CompressedTuple<CopyableMovableInstance, CopyableMovableInstance&, Empty<0>>
x1(std::move(i1), i2, empty);
EXPECT_EQ(x1.get<0>().value(), 1);
EXPECT_EQ(x1.get<1>().value(), 2);
EXPECT_EQ(CopyableMovableInstance::num_copies, 0);
EXPECT_EQ(CopyableMovableInstance::num_moves, 1);
}
struct IncompleteType;
CompressedTuple<CopyableMovableInstance, IncompleteType&, Empty<0>>
MakeWithIncomplete(CopyableMovableInstance i1,
IncompleteType& t,
Empty<0> empty) {
return CompressedTuple<CopyableMovableInstance, IncompleteType&, Empty<0>>{
std::move(i1), t, empty};
}
struct IncompleteType {};
TEST(CompressedTupleTest, OneMoveOnRValueConstructionWithIncompleteType) {
CopyableMovableInstance::Reset();
CopyableMovableInstance i1(1);
Empty<0> empty;
struct DerivedType : IncompleteType {
int value = 0;
};
DerivedType fd;
fd.value = 7;
CompressedTuple<CopyableMovableInstance, IncompleteType&, Empty<0>> x1 =
MakeWithIncomplete(std::move(i1), fd, empty);
EXPECT_EQ(x1.get<0>().value(), 1);
EXPECT_EQ(static_cast<DerivedType&>(x1.get<1>()).value, 7);
EXPECT_EQ(CopyableMovableInstance::num_copies, 0);
EXPECT_EQ(CopyableMovableInstance::num_moves, 2);
}
TEST(CompressedTupleTest,
OneMoveOnRValueConstructionMixedTypes_BraceInitPoisonPillExpected) {
CopyableMovableInstance::Reset();
CopyableMovableInstance i1(1);
CopyableMovableInstance i2(2);
CompressedTuple<CopyableMovableInstance, CopyableMovableInstance&, Empty<0>>
x1(std::move(i1), i2, {});
EXPECT_EQ(x1.get<0>().value(), 1);
EXPECT_EQ(x1.get<1>().value(), 2);
EXPECT_EQ(CopyableMovableInstance::num_instances, 2);
EXPECT_EQ(CopyableMovableInstance::num_copies, 1);
EXPECT_EQ(CopyableMovableInstance::num_moves, 0);
}
TEST(CompressedTupleTest, OneCopyOnLValueConstruction) {
CopyableMovableInstance::Reset();
CopyableMovableInstance i1(1);
CompressedTuple<CopyableMovableInstance> x1(i1);
EXPECT_EQ(CopyableMovableInstance::num_copies, 1);
EXPECT_EQ(CopyableMovableInstance::num_moves, 0);
CopyableMovableInstance::Reset();
CopyableMovableInstance i2(2);
const CopyableMovableInstance& i2_ref = i2;
CompressedTuple<CopyableMovableInstance> x2(i2_ref);
EXPECT_EQ(CopyableMovableInstance::num_copies, 1);
EXPECT_EQ(CopyableMovableInstance::num_moves, 0);
}
TEST(CompressedTupleTest, OneMoveOnRValueAccess) {
CopyableMovableInstance i1(1);
CompressedTuple<CopyableMovableInstance> x(std::move(i1));
CopyableMovableInstance::Reset();
CopyableMovableInstance i2 = std::move(x).get<0>();
EXPECT_EQ(CopyableMovableInstance::num_copies, 0);
EXPECT_EQ(CopyableMovableInstance::num_moves, 1);
EXPECT_EQ(i2.value(), 1);
}
TEST(CompressedTupleTest, OneCopyOnLValueAccess) {
CopyableMovableInstance::Reset();
CompressedTuple<CopyableMovableInstance> x(CopyableMovableInstance(0));
EXPECT_EQ(CopyableMovableInstance::num_copies, 0);
EXPECT_EQ(CopyableMovableInstance::num_moves, 1);
CopyableMovableInstance t = x.get<0>();
EXPECT_EQ(CopyableMovableInstance::num_copies, 1);
EXPECT_EQ(CopyableMovableInstance::num_moves, 1);
EXPECT_EQ(t.value(), 0);
}
TEST(CompressedTupleTest, ZeroCopyOnRefAccess) {
CopyableMovableInstance::Reset();
CompressedTuple<CopyableMovableInstance> x(CopyableMovableInstance(0));
EXPECT_EQ(CopyableMovableInstance::num_copies, 0);
EXPECT_EQ(CopyableMovableInstance::num_moves, 1);
CopyableMovableInstance& t1 = x.get<0>();
const CopyableMovableInstance& t2 = x.get<0>();
EXPECT_EQ(CopyableMovableInstance::num_copies, 0);
EXPECT_EQ(CopyableMovableInstance::num_moves, 1);
EXPECT_EQ(t1.value(), 0);
EXPECT_EQ(t2.value(), 0);
}
TEST(CompressedTupleTest, Access) {
struct S {
std::string x;
};
CompressedTuple<int, Empty<0>, S> x(7, {}, S{"ABC"});
EXPECT_EQ(sizeof(x), sizeof(TwoValues<int, S>));
EXPECT_EQ(7, x.get<0>());
EXPECT_EQ("ABC", x.get<2>().x);
}
TEST(CompressedTupleTest, NonClasses) {
CompressedTuple<int, const char*> x(7, "ABC");
EXPECT_EQ(7, x.get<0>());
EXPECT_STREQ("ABC", x.get<1>());
}
TEST(CompressedTupleTest, MixClassAndNonClass) {
CompressedTuple<int, const char*, Empty<0>, NotEmpty<double>> x(7, "ABC", {},
{1.25});
struct Mock {
int v;
const char* p;
double d;
};
EXPECT_EQ(sizeof(x), sizeof(Mock));
EXPECT_EQ(7, x.get<0>());
EXPECT_STREQ("ABC", x.get<1>());
EXPECT_EQ(1.25, x.get<3>().value);
}
TEST(CompressedTupleTest, Nested) {
CompressedTuple<int, CompressedTuple<int>,
CompressedTuple<int, CompressedTuple<int>>>
x(1, CompressedTuple<int>(2),
CompressedTuple<int, CompressedTuple<int>>(3, CompressedTuple<int>(4)));
EXPECT_EQ(1, x.get<0>());
EXPECT_EQ(2, x.get<1>().get<0>());
EXPECT_EQ(3, x.get<2>().get<0>());
EXPECT_EQ(4, x.get<2>().get<1>().get<0>());
CompressedTuple<Empty<0>, Empty<0>,
CompressedTuple<Empty<0>, CompressedTuple<Empty<0>>>>
y;
std::set<Empty<0>*> empties{&y.get<0>(), &y.get<1>(), &y.get<2>().get<0>(),
&y.get<2>().get<1>().get<0>()};
#ifdef _MSC_VER
int expected = 1;
#else
int expected = 4;
#endif
EXPECT_EQ(expected, sizeof(y));
EXPECT_EQ(expected, empties.size());
EXPECT_EQ(sizeof(y), sizeof(Empty<0>) * empties.size());
EXPECT_EQ(4 * sizeof(char),
sizeof(CompressedTuple<CompressedTuple<char, char>,
CompressedTuple<char, char>>));
EXPECT_TRUE((std::is_empty<CompressedTuple<Empty<0>, Empty<1>>>::value));
struct CT_Empty : CompressedTuple<Empty<0>> {};
CompressedTuple<Empty<0>, CT_Empty> nested_empty;
auto contained = nested_empty.get<0>();
auto nested = nested_empty.get<1>().get<0>();
EXPECT_TRUE((std::is_same<decltype(contained), decltype(nested)>::value));
}
TEST(CompressedTupleTest, Reference) {
int i = 7;
std::string s = "Very long string that goes in the heap";
CompressedTuple<int, int&, std::string, std::string&> x(i, i, s, s);
EXPECT_EQ(s, "Very long string that goes in the heap");
EXPECT_EQ(x.get<0>(), x.get<1>());
EXPECT_NE(&x.get<0>(), &x.get<1>());
EXPECT_EQ(&x.get<1>(), &i);
EXPECT_EQ(x.get<2>(), x.get<3>());
EXPECT_NE(&x.get<2>(), &x.get<3>());
EXPECT_EQ(&x.get<3>(), &s);
}
TEST(CompressedTupleTest, NoElements) {
CompressedTuple<> x;
static_cast<void>(x);
EXPECT_TRUE(std::is_empty<CompressedTuple<>>::value);
}
TEST(CompressedTupleTest, MoveOnlyElements) {
CompressedTuple<std::unique_ptr<std::string>> str_tup(
std::make_unique<std::string>("str"));
CompressedTuple<CompressedTuple<std::unique_ptr<std::string>>,
std::unique_ptr<int>>
x(std::move(str_tup), std::make_unique<int>(5));
EXPECT_EQ(*x.get<0>().get<0>(), "str");
EXPECT_EQ(*x.get<1>(), 5);
std::unique_ptr<std::string> x0 = std::move(x.get<0>()).get<0>();
std::unique_ptr<int> x1 = std::move(x).get<1>();
EXPECT_EQ(*x0, "str");
EXPECT_EQ(*x1, 5);
}
TEST(CompressedTupleTest, MoveConstructionMoveOnlyElements) {
CompressedTuple<std::unique_ptr<std::string>> base(
std::make_unique<std::string>("str"));
EXPECT_EQ(*base.get<0>(), "str");
CompressedTuple<std::unique_ptr<std::string>> copy(std::move(base));
EXPECT_EQ(*copy.get<0>(), "str");
}
TEST(CompressedTupleTest, AnyElements) {
std::any a(std::string("str"));
CompressedTuple<std::any, std::any&> x(std::any(5), a);
EXPECT_EQ(std::any_cast<int>(x.get<0>()), 5);
EXPECT_EQ(std::any_cast<std::string>(x.get<1>()), "str");
a = 0.5f;
EXPECT_EQ(std::any_cast<float>(x.get<1>()), 0.5);
}
TEST(CompressedTupleTest, Constexpr) {
struct NonTrivialStruct {
constexpr NonTrivialStruct() = default;
constexpr int value() const { return v; }
int v = 5;
};
struct TrivialStruct {
TrivialStruct() = default;
constexpr int value() const { return v; }
int v;
};
constexpr CompressedTuple<int, double, CompressedTuple<int>, Empty<0>> x(
7, 1.25, CompressedTuple<int>(5), {});
constexpr int x0 = x.get<0>();
constexpr double x1 = x.get<1>();
constexpr int x2 = x.get<2>().get<0>();
constexpr CallType x3 = x.get<3>().value();
EXPECT_EQ(x0, 7);
EXPECT_EQ(x1, 1.25);
EXPECT_EQ(x2, 5);
EXPECT_EQ(x3, CallType::kConstRef);
#if !defined(__GNUC__) || defined(__clang__) || __GNUC__ > 4
constexpr CompressedTuple<Empty<0>, TrivialStruct, int> trivial = {};
constexpr CallType trivial0 = trivial.get<0>().value();
constexpr int trivial1 = trivial.get<1>().value();
constexpr int trivial2 = trivial.get<2>();
EXPECT_EQ(trivial0, CallType::kConstRef);
EXPECT_EQ(trivial1, 0);
EXPECT_EQ(trivial2, 0);
#endif
constexpr CompressedTuple<Empty<0>, NonTrivialStruct, std::optional<int>>
non_trivial = {};
constexpr CallType non_trivial0 = non_trivial.get<0>().value();
constexpr int non_trivial1 = non_trivial.get<1>().value();
constexpr std::optional<int> non_trivial2 = non_trivial.get<2>();
EXPECT_EQ(non_trivial0, CallType::kConstRef);
EXPECT_EQ(non_trivial1, 5);
EXPECT_EQ(non_trivial2, std::nullopt);
static constexpr char data[] = "DEF";
constexpr CompressedTuple<const char*> z(data);
constexpr const char* z1 = z.get<0>();
EXPECT_EQ(std::string(z1), std::string(data));
#if defined(__clang__)
constexpr int x2m = std::move(x.get<2>()).get<0>();
constexpr CallType x3m = std::move(x).get<3>().value();
EXPECT_EQ(x2m, 5);
EXPECT_EQ(x3m, CallType::kConstMove);
#endif
}
#if defined(__clang__) || defined(__GNUC__)
TEST(CompressedTupleTest, EmptyFinalClass) {
struct S final {
int f() const { return 5; }
};
CompressedTuple<S> x;
EXPECT_EQ(x.get<0>().f(), 5);
}
#endif
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/container/compressed_tuple.h | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/container/compressed_tuple_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
865578b9-155a-44ae-b3c1-91ffd8aa63d5 | cpp | google/tsl | hash | tsl/platform/hash.cc | tsl/platform/hash_test.cc | #include "tsl/platform/hash.h"
#include <string.h>
#include "tsl/platform/macros.h"
#include "tsl/platform/raw_coding.h"
#include "tsl/platform/types.h"
namespace tsl {
static inline uint32 ByteAs32(char c) { return static_cast<uint32>(c) & 0xff; }
static inline uint64 ByteAs64(char c) { return static_cast<uint64>(c) & 0xff; }
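// 32-bit MurmurHash2, mixing 4 bytes of input per iteration.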
uint32 Hash32(const char* data, size_t n, uint32 seed) {
const uint32 m = 0x5bd1e995;
const int r = 24;
uint32 h = seed ^ n;
while (n >= 4) {
uint32 k = core::DecodeFixed32(data);
k *= m;
k ^= k >> r;
k *= m;
h *= m;
h ^= k;
data += 4;
n -= 4;
}
switch (n) {
case 3:
h ^= ByteAs32(data[2]) << 16;
TF_FALLTHROUGH_INTENDED;
case 2:
h ^= ByteAs32(data[1]) << 8;
TF_FALLTHROUGH_INTENDED;
case 1:
h ^= ByteAs32(data[0]);
h *= m;
}
h ^= h >> 13;
h *= m;
h ^= h >> 15;
return h;
}
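// 64-bit MurmurHash64A, mixing 8 bytes of input per iteration.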
uint64 Hash64(const char* data, size_t n, uint64 seed) {
const uint64 m = 0xc6a4a7935bd1e995;
const int r = 47;
uint64 h = seed ^ (n * m);
while (n >= 8) {
uint64 k = core::DecodeFixed64(data);
data += 8;
n -= 8;
k *= m;
k ^= k >> r;
k *= m;
h ^= k;
h *= m;
}
switch (n) {
case 7:
h ^= ByteAs64(data[6]) << 48;
TF_FALLTHROUGH_INTENDED;
case 6:
h ^= ByteAs64(data[5]) << 40;
TF_FALLTHROUGH_INTENDED;
case 5:
h ^= ByteAs64(data[4]) << 32;
TF_FALLTHROUGH_INTENDED;
case 4:
h ^= ByteAs64(data[3]) << 24;
TF_FALLTHROUGH_INTENDED;
case 3:
h ^= ByteAs64(data[2]) << 16;
TF_FALLTHROUGH_INTENDED;
case 2:
h ^= ByteAs64(data[1]) << 8;
TF_FALLTHROUGH_INTENDED;
case 1:
h ^= ByteAs64(data[0]);
h *= m;
}
h ^= h >> r;
h *= m;
h ^= h >> r;
return h;
}
} | #include <map>
#include <unordered_map>
#include <vector>
#include "tsl/platform/hash.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/test.h"
#include "tsl/platform/test_benchmark.h"
namespace tsl {
TEST(Hash, SignedUnsignedIssue) {
const unsigned char d1[1] = {0x62};
const unsigned char d2[2] = {0xc3, 0x97};
const unsigned char d3[3] = {0xe2, 0x99, 0xa5};
const unsigned char d4[4] = {0xe1, 0x80, 0xb9, 0x32};
const unsigned char d5[48] = {
0x01, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00,
0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x18, 0x28, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};
struct Case {
uint32 hash32;
uint64 hash64;
const unsigned char* data;
size_t size;
uint32 seed;
};
for (Case c : std::vector<Case>{
{0x471a8188u, 0x4c61ea3eeda4cb87ull, nullptr, 0, 0xbc9f1d34},
{0xd615eba5u, 0x091309f7ef916c8aull, d1, sizeof(d1), 0xbc9f1d34},
{0x0c3cccdau, 0xa815bcdf1d1af01cull, d2, sizeof(d2), 0xbc9f1d34},
{0x3ba37e0eu, 0x02167564e4d06430ull, d3, sizeof(d3), 0xbc9f1d34},
{0x16174eb3u, 0x8f7ed82ffc21071full, d4, sizeof(d4), 0xbc9f1d34},
{0x98b1926cu, 0xce196580c97aff1eull, d5, sizeof(d5), 0x12345678},
}) {
EXPECT_EQ(c.hash32,
Hash32(reinterpret_cast<const char*>(c.data), c.size, c.seed));
EXPECT_EQ(c.hash64,
Hash64(reinterpret_cast<const char*>(c.data), c.size, c.seed));
for (int align = 1; align <= 7; align++) {
std::string input(align, 'x');
input.append(reinterpret_cast<const char*>(c.data), c.size);
EXPECT_EQ(c.hash32, Hash32(&input[align], c.size, c.seed));
EXPECT_EQ(c.hash64, Hash64(&input[align], c.size, c.seed));
}
}
}
TEST(Hash, HashPtrIsNotIdentityFunction) {
int* ptr = reinterpret_cast<int*>(0xcafe0000);
EXPECT_NE(hash<int*>()(ptr), size_t{0xcafe0000});
}
static void BM_Hash32(::testing::benchmark::State& state) {
int len = state.range(0);
std::string input(len, 'x');
uint32 h = 0;
for (auto s : state) {
h = Hash32(input.data(), len, 1);
}
state.SetBytesProcessed(state.iterations() * len);
VLOG(1) << h;
}
BENCHMARK(BM_Hash32)->Range(1, 1024);
TEST(StringPieceHasher, Equality) {
StringPieceHasher hasher;
absl::string_view s1("foo");
absl::string_view s2("bar");
absl::string_view s3("baz");
absl::string_view s4("zot");
EXPECT_TRUE(hasher(s1) != hasher(s2));
EXPECT_TRUE(hasher(s1) != hasher(s3));
EXPECT_TRUE(hasher(s1) != hasher(s4));
EXPECT_TRUE(hasher(s2) != hasher(s3));
EXPECT_TRUE(hasher(s2) != hasher(s4));
EXPECT_TRUE(hasher(s3) != hasher(s4));
EXPECT_TRUE(hasher(s1) == hasher(s1));
EXPECT_TRUE(hasher(s2) == hasher(s2));
EXPECT_TRUE(hasher(s3) == hasher(s3));
EXPECT_TRUE(hasher(s4) == hasher(s4));
}
TEST(StringPieceHasher, HashMap) {
string s1("foo");
string s2("bar");
string s3("baz");
absl::string_view p1(s1);
absl::string_view p2(s2);
absl::string_view p3(s3);
std::unordered_map<absl::string_view, int, StringPieceHasher> map;
map.insert(std::make_pair(p1, 0));
map.insert(std::make_pair(p2, 1));
map.insert(std::make_pair(p3, 2));
EXPECT_EQ(map.size(), 3);
bool found[3] = {false, false, false};
for (auto const& val : map) {
int x = val.second;
EXPECT_TRUE(x >= 0 && x < 3);
EXPECT_TRUE(!found[x]);
found[x] = true;
}
EXPECT_EQ(found[0], true);
EXPECT_EQ(found[1], true);
EXPECT_EQ(found[2], true);
auto new_iter = map.find("zot");
EXPECT_TRUE(new_iter == map.end());
new_iter = map.find("bar");
EXPECT_TRUE(new_iter != map.end());
map.erase(new_iter);
EXPECT_EQ(map.size(), 2);
found[0] = false;
found[1] = false;
found[2] = false;
for (const auto& iter : map) {
int x = iter.second;
EXPECT_TRUE(x >= 0 && x < 3);
EXPECT_TRUE(!found[x]);
found[x] = true;
}
EXPECT_EQ(found[0], true);
EXPECT_EQ(found[1], false);
EXPECT_EQ(found[2], true);
}
} | https://github.com/google/tsl/blob/6d708fdcdd4f40537b7fa273371215a6fa3d4423/tsl/platform/hash.cc | https://github.com/google/tsl/blob/6d708fdcdd4f40537b7fa273371215a6fa3d4423/tsl/platform/hash_test.cc | 6d708fdcdd4f40537b7fa273371215a6fa3d4423 |
c181e2d3-689a-486d-aee3-8f9ceee8eb9e | cpp | tensorflow/tensorflow | parsing_ops | tensorflow/core/ops/parsing_ops.cc | tensorflow/core/ops/parsing_ops_test.cc | #include "tensorflow/core/framework/common_shape_fns.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/shape_inference.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/util/example_proto_helper.h"
namespace tensorflow {
using shape_inference::DimensionHandle;
using shape_inference::InferenceContext;
using shape_inference::ShapeHandle;
namespace {
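// Shape-inference helpers shared by the Parse*Example ops below.
// Appends one output shape per dense feature: the batch `prefix` concatenated
// with that feature's configured dense shape.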
template <typename TensorShapeType>
Status AddDenseOutputShapes(const std::vector<TensorShapeType>& dense_shapes,
const ShapeHandle& prefix, InferenceContext* c,
int* output_idx) {
for (const auto& dense_shape : dense_shapes) {
ShapeHandle s;
TF_RETURN_IF_ERROR(c->MakeShapeFromPartialTensorShape(dense_shape, &s));
TF_RETURN_IF_ERROR(c->Concatenate(prefix, s, &s));
c->set_output((*output_idx)++, s);
}
return absl::OkStatus();
}
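// Appends the sparse-tensor output triple (indices, values, dense_shape) for
// each of `num_sparse` features; the dense rank is the input rank plus
// `rank_delta` when the input rank is known, and unknown otherwise.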
void AddSparseOutputShapes(int num_sparse, const ShapeHandle input_shape,
int64_t rank_delta, InferenceContext* c,
int* output_idx) {
shape_inference::DimensionOrConstant rank(c->UnknownDim());
if (c->RankKnown(input_shape)) {
rank = c->Rank(input_shape) + rank_delta;
}
for (int i = 0; i < num_sparse; ++i) {
c->set_output((*output_idx)++, c->Matrix(c->UnknownDim(), rank));
}
for (int i = 0; i < num_sparse; ++i) {
c->set_output((*output_idx)++, c->Vector(c->UnknownDim()));
}
for (int i = 0; i < num_sparse; ++i) {
c->set_output((*output_idx)++, c->Vector(rank));
}
}
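// Appends ragged outputs: an unknown-length values vector per feature, a
// row-splits vector of length num_examples + 1 per feature, and, when
// `ragged_rank_2` is set, an additional inner-splits vector of unknown length.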
Status AddRaggedOutputShapes(int num_ragged, bool ragged_rank_2,
const DimensionHandle& num_examples,
InferenceContext* c, int* output_idx) {
DimensionHandle num_splits;
TF_RETURN_IF_ERROR(c->Add(num_examples, 1, &num_splits));
for (int i = 0; i < num_ragged; ++i) {
c->set_output((*output_idx)++, c->Vector(c->UnknownDim()));
}
for (int i = 0; i < num_ragged; ++i) {
c->set_output((*output_idx)++, c->Vector(num_splits));
}
if (ragged_rank_2) {
for (int i = 0; i < num_ragged; ++i) {
c->set_output((*output_idx)++, c->Vector(c->UnknownDim()));
}
}
return absl::OkStatus();
}
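// Appends `num_dense` copies of `shape`, used for the per-example
// feature_list_dense_lengths outputs.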
void AddDenseLengthsShapes(int num_dense, const ShapeHandle& shape,
InferenceContext* c, int* output_idx) {
for (int i = 0; i < num_dense; ++i) {
c->set_output((*output_idx)++, shape);
}
}
}
REGISTER_OP("DecodeRaw")
.Input("bytes: string")
.Output("output: out_type")
.Attr(
"out_type: "
"{half,float,double,int32,uint16,uint8,int16,int8,int64,complex64,"
"complex128,bool,bfloat16}")
.Attr("little_endian: bool = true")
.SetShapeFn([](InferenceContext* c) {
ShapeHandle out;
TF_RETURN_IF_ERROR(c->Concatenate(
c->input(0), c->Vector(InferenceContext::kUnknownDim), &out));
c->set_output(0, out);
return absl::OkStatus();
});
REGISTER_OP("DecodePaddedRaw")
.Input("input_bytes: string")
.Input("fixed_length: int32")
.Output("output: out_type")
.Attr(
"out_type: {half,float,double,int32,uint16,uint8,int16,int8,int64,"
"bfloat16}")
.Attr("little_endian: bool = true")
.SetShapeFn([](InferenceContext* c) {
DimensionHandle fixed_length;
TF_RETURN_IF_ERROR(c->MakeDimForScalarInput(1, &fixed_length));
DataType out_type;
TF_RETURN_IF_ERROR(c->GetAttr("out_type", &out_type));
int32_t data_type_size = DataTypeSize(out_type);
DimensionHandle width;
TF_RETURN_IF_ERROR(c->Divide(fixed_length, data_type_size, true, &width));
ShapeHandle out;
TF_RETURN_IF_ERROR(c->Concatenate(c->input(0), c->Vector(width), &out));
c->set_output(0, out);
return absl::OkStatus();
});
REGISTER_OP("DecodeCompressed")
.Input("bytes: string")
.Output("output: string")
.Attr("compression_type: string = ''")
.SetShapeFn(shape_inference::UnchangedShape);
REGISTER_OP("ParseExample")
.Input("serialized: string")
.Input("names: string")
.Input("sparse_keys: Nsparse * string")
.Input("dense_keys: Ndense * string")
.Input("dense_defaults: Tdense")
.Output("sparse_indices: Nsparse * int64")
.Output("sparse_values: sparse_types")
.Output("sparse_shapes: Nsparse * int64")
.Output("dense_values: Tdense")
.Attr("Nsparse: int >= 0")
.Attr("Ndense: int >= 0")
.Attr("sparse_types: list({float,int64,string}) >= 0")
.Attr("Tdense: list({float,int64,string}) >= 0")
.Attr("dense_shapes: list(shape) >= 0")
.SetShapeFn([](InferenceContext* c) {
ParseExampleAttrs attrs;
TF_RETURN_IF_ERROR(attrs.Init(c, 1));
ShapeHandle input;
TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 1, &input));
ShapeHandle names;
TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 1, &names));
int output_idx = 0;
AddSparseOutputShapes(attrs.num_sparse, input, 1, c, &output_idx);
TF_RETURN_IF_ERROR(
AddDenseOutputShapes(attrs.dense_shapes, input, c, &output_idx));
return absl::OkStatus();
});
REGISTER_OP("ParseExampleV2")
.Input("serialized: string")
.Input("names: string")
.Input("sparse_keys: string")
.Input("dense_keys: string")
.Input("ragged_keys: string")
.Input("dense_defaults: Tdense")
.Output("sparse_indices: num_sparse * int64")
.Output("sparse_values: sparse_types")
.Output("sparse_shapes: num_sparse * int64")
.Output("dense_values: Tdense")
.Output("ragged_values: ragged_value_types")
.Output("ragged_row_splits: ragged_split_types")
.Attr("Tdense: list({float,int64,string}) >= 0")
.Attr("num_sparse: int >= 0")
.Attr("sparse_types: list({float,int64,string}) >= 0")
.Attr("ragged_value_types: list({float,int64,string}) >= 0")
.Attr("ragged_split_types: list({int32,int64}) >= 0")
.Attr("dense_shapes: list(shape) >= 0")
.SetShapeFn([](InferenceContext* c) {
ParseExampleAttrs attrs;
TF_RETURN_IF_ERROR(attrs.Init(c, 2));
ShapeHandle input;
TF_RETURN_IF_ERROR(c->WithRankAtMost(c->input(0), 1, &input));
ShapeHandle names;
TF_RETURN_IF_ERROR(c->WithRankAtMost(c->input(1), 1, &names));
DimensionHandle num_examples = c->UnknownDim();
if (c->RankKnown(input) && c->Rank(input) == 1) {
num_examples = c->Dim(input, 0);
}
int output_idx = 0;
AddSparseOutputShapes(attrs.num_sparse, input, 1, c, &output_idx);
TF_RETURN_IF_ERROR(
AddDenseOutputShapes(attrs.dense_shapes, input, c, &output_idx));
TF_RETURN_IF_ERROR(AddRaggedOutputShapes(attrs.num_ragged, false,
num_examples, c, &output_idx));
return absl::OkStatus();
});
REGISTER_OP("ParseSingleExample")
.Input("serialized: string")
.Input("dense_defaults: Tdense")
.Output("sparse_indices: num_sparse * int64")
.Output("sparse_values: sparse_types")
.Output("sparse_shapes: num_sparse * int64")
.Output("dense_values: Tdense")
.Attr("num_sparse: int >= 0")
.Attr("sparse_keys: list(string) >= 0")
.Attr("dense_keys: list(string) >= 0")
.Attr("sparse_types: list({float,int64,string}) >= 0")
.Attr("Tdense: list({float,int64,string}) >= 0")
.Attr("dense_shapes: list(shape) >= 0")
.SetShapeFn([](InferenceContext* c) {
ParseSingleExampleAttrs attrs;
TF_RETURN_IF_ERROR(attrs.Init(c));
ShapeHandle input;
TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 0, &input));
int output_idx = 0;
AddSparseOutputShapes(attrs.sparse_keys.size(), input, 1, c, &output_idx);
TF_RETURN_IF_ERROR(
AddDenseOutputShapes(attrs.dense_shapes, input, c, &output_idx));
return absl::OkStatus();
});
REGISTER_OP("ParseSequenceExample")
.Input("serialized: string")
.Input("debug_name: string")
.Input("context_dense_defaults: Tcontext_dense")
.Output("context_sparse_indices: Ncontext_sparse * int64")
.Output("context_sparse_values: context_sparse_types")
.Output("context_sparse_shapes: Ncontext_sparse * int64")
.Output("context_dense_values: Tcontext_dense")
.Output("feature_list_sparse_indices: Nfeature_list_sparse * int64")
.Output("feature_list_sparse_values: feature_list_sparse_types")
.Output("feature_list_sparse_shapes: Nfeature_list_sparse * int64")
.Output("feature_list_dense_values: feature_list_dense_types")
.Output("feature_list_dense_lengths: Nfeature_list_dense * int64")
.Attr("feature_list_dense_missing_assumed_empty: list(string) >= 0")
.Attr("context_sparse_keys: list(string) >= 0")
.Attr("context_dense_keys: list(string) >= 0")
.Attr("feature_list_sparse_keys: list(string) >= 0")
.Attr("feature_list_dense_keys: list(string) >= 0")
.Attr("Ncontext_sparse: int >= 0 = 0")
.Attr("Ncontext_dense: int >= 0 = 0")
.Attr("Nfeature_list_sparse: int >= 0 = 0")
.Attr("Nfeature_list_dense: int >= 0 = 0")
.Attr("context_sparse_types: list({float,int64,string}) >= 0 = []")
.Attr("Tcontext_dense: list({float,int64,string}) >= 0 = []")
.Attr("feature_list_dense_types: list({float,int64,string}) >= 0 = []")
.Attr("context_dense_shapes: list(shape) >= 0 = []")
.Attr("feature_list_sparse_types: list({float,int64,string}) >= 0 = []")
.Attr("feature_list_dense_shapes: list(shape) >= 0 = []")
.SetShapeFn([](InferenceContext* c) {
ParseSequenceExampleAttrs attrs;
TF_RETURN_IF_ERROR(attrs.Init(c));
ShapeHandle input;
TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 1, &input));
ShapeHandle names;
TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 1, &names));
DimensionHandle num_examples = c->Dim(input, 0);
ShapeHandle feature_list_dense_prefix =
c->Matrix(num_examples, c->UnknownDim());
int output_idx = 0;
AddSparseOutputShapes(attrs.num_context_sparse, input, 1, c, &output_idx);
TF_RETURN_IF_ERROR(AddDenseOutputShapes(attrs.context_dense_shapes, input,
c, &output_idx));
AddSparseOutputShapes(attrs.num_feature_list_sparse, input, 2, c,
&output_idx);
TF_RETURN_IF_ERROR(AddDenseOutputShapes(attrs.feature_list_dense_shapes,
feature_list_dense_prefix, c,
&output_idx));
AddDenseLengthsShapes(attrs.num_feature_list_dense, input, c,
&output_idx);
return absl::OkStatus();
});
REGISTER_OP("ParseSequenceExampleV2")
.Input("serialized: string")
.Input("debug_name: string")
.Input("context_sparse_keys: string")
.Input("context_dense_keys: string")
.Input("context_ragged_keys: string")
.Input("feature_list_sparse_keys: string")
.Input("feature_list_dense_keys: string")
.Input("feature_list_ragged_keys: string")
.Input("feature_list_dense_missing_assumed_empty: bool")
.Input("context_dense_defaults: Tcontext_dense")
.Output("context_sparse_indices: Ncontext_sparse * int64")
.Output("context_sparse_values: context_sparse_types")
.Output("context_sparse_shapes: Ncontext_sparse * int64")
.Output("context_dense_values: Tcontext_dense")
.Output("context_ragged_values: context_ragged_value_types")
.Output("context_ragged_row_splits: context_ragged_split_types")
.Output("feature_list_sparse_indices: Nfeature_list_sparse * int64")
.Output("feature_list_sparse_values: feature_list_sparse_types")
.Output("feature_list_sparse_shapes: Nfeature_list_sparse * int64")
.Output("feature_list_dense_values: feature_list_dense_types")
.Output("feature_list_dense_lengths: Nfeature_list_dense * int64")
.Output("feature_list_ragged_values: feature_list_ragged_value_types")
.Output("feature_list_ragged_outer_splits: feature_list_ragged_split_types")
.Output("feature_list_ragged_inner_splits: feature_list_ragged_split_types")
.Attr("Ncontext_sparse: int >= 0 = 0")
.Attr("Tcontext_dense: list({float,int64,string}) >= 0 = []")
.Attr("context_sparse_types: list({float,int64,string}) >= 0 = []")
.Attr("context_ragged_value_types: list({float,int64,string}) >= 0 = []")
.Attr("context_ragged_split_types: list({int32,int64}) >= 0 = []")
.Attr("context_dense_shapes: list(shape) >= 0 = []")
.Attr("Nfeature_list_sparse: int >= 0 = 0")
.Attr("Nfeature_list_dense: int >= 0 = 0")
.Attr("feature_list_dense_types: list({float,int64,string}) >= 0 = []")
.Attr("feature_list_sparse_types: list({float,int64,string}) >= 0 = []")
.Attr(
"feature_list_ragged_value_types: list({float,int64,string}) >= 0 = []")
.Attr("feature_list_ragged_split_types: list({int32,int64}) >= 0 = []")
.Attr("feature_list_dense_shapes: list(shape) >= 0 = []")
.SetShapeFn([](InferenceContext* c) {
ParseSequenceExampleAttrs attrs;
TF_RETURN_IF_ERROR(attrs.Init(c, 2));
ShapeHandle input;
TF_RETURN_IF_ERROR(c->WithRankAtMost(c->input(0), 1, &input));
ShapeHandle names;
TF_RETURN_IF_ERROR(c->WithRankAtMost(c->input(1), 1, &names));
ShapeHandle feature_list_dense_prefix;
TF_RETURN_IF_ERROR(c->Concatenate(input, c->UnknownShapeOfRank(1),
&feature_list_dense_prefix));
DimensionHandle num_examples = c->UnknownDim();
if (c->RankKnown(input) && c->Rank(input) == 1) {
num_examples = c->Dim(input, 0);
}
int output_idx = 0;
AddSparseOutputShapes(attrs.num_context_sparse, input, 1, c, &output_idx);
TF_RETURN_IF_ERROR(AddDenseOutputShapes(attrs.context_dense_shapes, input,
c, &output_idx));
TF_RETURN_IF_ERROR(AddRaggedOutputShapes(attrs.num_context_ragged, false,
num_examples, c, &output_idx));
AddSparseOutputShapes(attrs.num_feature_list_sparse, input, 2, c,
&output_idx);
TF_RETURN_IF_ERROR(AddDenseOutputShapes(attrs.feature_list_dense_shapes,
feature_list_dense_prefix, c,
&output_idx));
AddDenseLengthsShapes(attrs.num_feature_list_dense, input, c,
&output_idx);
TF_RETURN_IF_ERROR(AddRaggedOutputShapes(
attrs.num_feature_list_ragged, true, num_examples, c, &output_idx));
return absl::OkStatus();
});
REGISTER_OP("ParseSingleSequenceExample")
.Input("serialized: string")
.Input("feature_list_dense_missing_assumed_empty: string")
.Input("context_sparse_keys: Ncontext_sparse * string")
.Input("context_dense_keys: Ncontext_dense * string")
.Input("feature_list_sparse_keys: Nfeature_list_sparse * string")
.Input("feature_list_dense_keys: Nfeature_list_dense * string")
.Input("context_dense_defaults: Tcontext_dense")
.Input("debug_name: string")
.Output("context_sparse_indices: Ncontext_sparse * int64")
.Output("context_sparse_values: context_sparse_types")
.Output("context_sparse_shapes: Ncontext_sparse * int64")
.Output("context_dense_values: Tcontext_dense")
.Output("feature_list_sparse_indices: Nfeature_list_sparse * int64")
.Output("feature_list_sparse_values: feature_list_sparse_types")
.Output("feature_list_sparse_shapes: Nfeature_list_sparse * int64")
.Output("feature_list_dense_values: feature_list_dense_types")
.Attr("Ncontext_sparse: int >= 0 = 0")
.Attr("Ncontext_dense: int >= 0 = 0")
.Attr("Nfeature_list_sparse: int >= 0 = 0")
.Attr("Nfeature_list_dense: int >= 0 = 0")
.Attr("context_sparse_types: list({float,int64,string}) >= 0 = []")
.Attr("Tcontext_dense: list({float,int64,string}) >= 0 = []")
.Attr("feature_list_dense_types: list({float,int64,string}) >= 0 = []")
.Attr("context_dense_shapes: list(shape) >= 0 = []")
.Attr("feature_list_sparse_types: list({float,int64,string}) >= 0 = []")
.Attr("feature_list_dense_shapes: list(shape) >= 0 = []")
.SetShapeFn([](InferenceContext* c) {
ShapeHandle unused;
ParseSingleSequenceExampleAttrs attrs;
TF_RETURN_IF_ERROR(attrs.Init(c));
ShapeHandle input;
TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 0, &input));
TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 1, &unused));
int output_idx = 0;
AddSparseOutputShapes(attrs.num_context_sparse, input, 1, c, &output_idx);
TF_RETURN_IF_ERROR(AddDenseOutputShapes(attrs.context_dense_shapes, input,
c, &output_idx));
AddSparseOutputShapes(attrs.num_feature_list_sparse, input, 2, c,
&output_idx);
TF_RETURN_IF_ERROR(AddDenseOutputShapes(attrs.feature_list_dense_shapes,
c->UnknownShapeOfRank(1), c,
&output_idx));
return absl::OkStatus();
});
REGISTER_OP("ParseTensor")
.Input("serialized: string")
.Output("output: out_type")
.Attr("out_type: type")
.SetShapeFn(shape_inference::UnknownShape);
REGISTER_OP("SerializeTensor")
.Input("tensor: T")
.Output("serialized: string")
.Attr("T: type")
.SetShapeFn(shape_inference::ScalarShape);
REGISTER_OP("DecodeJSONExample")
.Input("json_examples: string")
.Output("binary_examples: string")
.SetShapeFn(shape_inference::UnchangedShape);
REGISTER_OP("DecodeCSV")
.Input("records: string")
.Input("record_defaults: OUT_TYPE")
.Output("output: OUT_TYPE")
.Attr("OUT_TYPE: list({float,double,int32,int64,string})")
.Attr("field_delim: string = ','")
.Attr("use_quote_delim: bool = true")
.Attr("na_value: string = ''")
.Attr("select_cols: list(int) = []")
.SetShapeFn([](InferenceContext* c) {
for (int i = 1; i < c->num_inputs(); ++i) {
ShapeHandle v;
TF_RETURN_IF_ERROR(c->WithRankAtMost(c->input(i), 1, &v));
if (c->Rank(c->input(i)) == 1 && c->Value(c->Dim(v, 0)) > 1) {
return errors::InvalidArgument(
"Shape of a default must be a length-0 or length-1 vector, or a "
"scalar.");
}
}
for (int i = 0; i < c->num_outputs(); ++i) c->set_output(i, c->input(0));
return absl::OkStatus();
});
REGISTER_OP("StringToNumber")
.Input("string_tensor: string")
.Output("output: out_type")
.Attr("out_type: {float, double, int32, int64, uint32, uint64} = DT_FLOAT")
.SetShapeFn(shape_inference::UnchangedShape);
} | #include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/shape_inference_testutil.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
TEST(ParsingOpsTest, DecodeRaw_ShapeFn) {
ShapeInferenceTestOp op("DecodeRaw");
INFER_OK(op, "?", "?");
INFER_OK(op, "[?,?,?]", "[d0_0,d0_1,d0_2,?]");
}
TEST(ParsingOpsTest, DecodeCSV_ShapeFn) {
ShapeInferenceTestOp op("DecodeCSV");
auto set_n_outputs = [&op](int n) {
std::vector<NodeDefBuilder::NodeOut> src_list;
std::vector<DataType> out_types;
for (int i = 0; i < n; ++i) {
src_list.emplace_back("b", 0, DT_FLOAT);
out_types.push_back(DT_FLOAT);
}
TF_ASSERT_OK(NodeDefBuilder("test", "DecodeCSV")
.Input("a", 0, DT_STRING)
.Input(src_list)
.Attr("OUT_TYPE", out_types)
.Finalize(&op.node_def));
};
set_n_outputs(2);
INFER_OK(op, "?;?;?", "in0;in0");
INFER_OK(op, "[1,2,?,4];?;?", "in0;in0");
INFER_OK(op, "[1,2,?,4];[?];[?]", "in0;in0");
INFER_OK(op, "?;?;[]", "in0;in0");
INFER_ERROR("must be at most rank 1 but is rank 2", op, "?;?;[1,2]");
INFER_ERROR("must be at most rank 1 but is rank 2", op, "?;[3,4];?");
INFER_ERROR("Shape of a default must be", op, "?;?;[2]");
INFER_ERROR("Shape of a default must be", op, "?;[2];?");
}
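// Builds dense_shapes attr values of the form [1], [1,2], [1,2,3], ...,
// optionally prefixed by `unknown_outer_dims` unknown dimensions. When
// `add_extra_shape` is true an extra scalar shape is appended, deliberately
// mismatching the key/type counts to exercise validation errors.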
static std::vector<PartialTensorShape> MakeDenseShapes(int size,
bool add_extra_shape,
int unknown_outer_dims) {
std::vector<PartialTensorShape> shapes(size);
for (int i = 0; i < size; ++i) {
if (i == 0) {
shapes[i].Clear();
for (int d = 0; d < unknown_outer_dims; ++d) {
shapes[i].AddDim(-1);
}
} else {
shapes[i] = shapes[i - 1];
}
shapes[i].AddDim(i + 1);
}
if (add_extra_shape) shapes.push_back(PartialTensorShape({}));
return shapes;
}
TEST(ParsingOpsTest, ParseExample_ShapeFn) {
ShapeInferenceTestOp op("ParseExample");
auto set_outputs = [&op](int num_sparse, int num_dense,
bool add_extra_shape = false,
int unknown_outer_dims = 0) {
using NodeOutList = std::vector<NodeDefBuilder::NodeOut>;
using DataTypeList = std::vector<DataType>;
NodeDefBuilder::NodeOut string_in{"a", 0, DT_STRING};
TF_ASSERT_OK(
NodeDefBuilder("test", "ParseExample")
.Input("serialized", 0, DT_STRING)
.Input("names", 0, DT_STRING)
.Input(NodeOutList(num_sparse, string_in))
.Input(NodeOutList(num_dense, string_in))
.Input(NodeOutList(num_dense, string_in))
.Attr("sparse_types", DataTypeList(num_sparse, DT_FLOAT))
.Attr("dense_shapes", MakeDenseShapes(num_dense, add_extra_shape,
unknown_outer_dims))
.Finalize(&op.node_def));
};
  set_outputs(/*num_sparse=*/0, /*num_dense=*/0);
INFER_OK(op, "?;?", "");
INFER_OK(op, "[10];[20]", "");
INFER_ERROR("must be rank 1", op, "[1,2];?");
INFER_ERROR("must be rank 1", op, "?;[2,3]");
  set_outputs(/*num_sparse=*/2, /*num_dense=*/3);
INFER_OK(op, "?;?;?;?;?;?;?;?;?;?",
("[?,2];[?,2];[?];[?];[2];[2];"
"[?,1];[?,1,2];[?,1,2,3]"));
INFER_OK(op, "[10];?;?;?;?;?;?;?;?;?",
("[?,2];[?,2];[?];[?];[2];[2];"
"[d0_0,1];[d0_0,1,2];[d0_0,1,2,3]"));
  set_outputs(2, 3, /*add_extra_shape=*/true);
INFER_ERROR("len(dense_keys) != len(dense_shapes)", op,
"?;?;?;?;?;?;?;?;?;?");
  set_outputs(2, 3, /*add_extra_shape=*/false, /*unknown_outer_dims=*/1);
INFER_OK(op, "?;?;?;?;?;?;?;?;?;?",
("[?,2];[?,2];[?];[?];[2];[2];"
"[?,?,1];[?,?,1,2];[?,?,1,2,3]"));
INFER_OK(op, "[?];?;?;?;?;?;?;?;?;?",
("[?,2];[?,2];[?];[?];[2];[2];"
"[d0_0,?,1];[d0_0,?,1,2];[d0_0,?,1,2,3]"));
INFER_OK(op, "[10];?;?;?;?;?;?;?;?;?",
("[?,2];[?,2];[?];[?];[2];[2];"
"[d0_0,?,1];[d0_0,?,1,2];[d0_0,?,1,2,3]"));
  set_outputs(2, 3, /*add_extra_shape=*/true, /*unknown_outer_dims=*/1);
INFER_ERROR("len(dense_keys) != len(dense_shapes)", op,
"?;?;?;?;?;?;?;?;?;?");
  set_outputs(2, 3, /*add_extra_shape=*/false, /*unknown_outer_dims=*/2);
INFER_ERROR("shapes[0] has unknown rank or unknown inner dimensions", op,
"?;?;?;?;?;?;?;?;?;?");
}
TEST(ParsingOpsTest, ParseSequenceExample_ShapeFn) {
ShapeInferenceTestOp op("ParseSequenceExample");
auto set_outputs = [&op](int num_context_sparse, int num_context_dense,
int num_feature_list_sparse,
int num_feature_list_dense,
bool add_extra_shape = false) {
using NodeOutList = std::vector<NodeDefBuilder::NodeOut>;
using DataTypeList = std::vector<DataType>;
string string_in("test");
NodeDefBuilder::NodeOut node_in{"a", 0, DT_STRING};
TF_ASSERT_OK(
NodeDefBuilder("test", "ParseSequenceExample")
.Input("serialized", 0, DT_STRING)
.Input("debug_name", 0, DT_STRING)
.Input(NodeOutList(num_context_dense, node_in))
.Attr("Ncontext_sparse", num_context_sparse)
.Attr("Ncontext_dense", num_context_dense)
.Attr("Nfeature_list_sparse", num_feature_list_sparse)
.Attr("Nfeature_list_dense", num_feature_list_dense)
.Attr("feature_list_dense_missing_assumed_empty",
std::vector<string>(num_feature_list_dense, string_in))
.Attr("context_sparse_keys",
std::vector<string>(num_context_sparse, string_in))
.Attr("context_dense_keys",
std::vector<string>(num_context_dense, string_in))
.Attr("feature_list_sparse_keys",
std::vector<string>(num_feature_list_sparse, string_in))
.Attr("feature_list_dense_keys",
std::vector<string>(num_feature_list_dense, string_in))
.Attr("context_sparse_types",
DataTypeList(num_context_sparse, DT_FLOAT))
.Attr("context_dense_types",
DataTypeList(num_context_dense, DT_FLOAT))
.Attr("context_dense_shapes",
MakeDenseShapes(num_context_dense, add_extra_shape, 0))
.Attr("feature_list_sparse_types",
DataTypeList(num_feature_list_sparse, DT_FLOAT))
.Attr("feature_list_dense_types",
DataTypeList(num_feature_list_dense, DT_FLOAT))
.Attr("feature_list_dense_shapes",
MakeDenseShapes(num_feature_list_dense, add_extra_shape, 0))
.Finalize(&op.node_def));
};
set_outputs(0, 0, 0, 0);
INFER_OK(op, "[?];[?]", "");
INFER_OK(op, "[8];[8]", "");
INFER_ERROR("must be rank 1", op, "[];[?]");
INFER_ERROR("must be rank 1", op, "[?];[]");
  set_outputs(/*num_context_sparse=*/2, /*num_context_dense=*/3, 0, 0);
INFER_OK(op, "[?];[?];?;?;?",
("[?,2];[?,2];[?];[?];[2];[2];"
"[d0_0,1];[d0_0,1,2];[d0_0,1,2,3]"));
  set_outputs(0, 0, /*num_feature_list_sparse=*/2,
              /*num_feature_list_dense=*/3);
INFER_OK(op, "[?];[?]",
("[?,3];[?,3];[?];[?];[3];[3];"
"[d0_0,?,1];[d0_0,?,1,2];[d0_0,?,1,2,3];"
"in0;in0;in0"));
set_outputs(2, 3, 2, 3);
INFER_OK(op, "[7];[7];?;?;?",
("[?,2];[?,2];[?];[?];[2];[2];"
"[d0_0,1];[d0_0,1,2];[d0_0,1,2,3];"
"[?,3];[?,3];[?];[?];[3];[3];"
"[d0_0,?,1];[d0_0,?,1,2];[d0_0,?,1,2,3];"
"in0;in0;in0"));
  set_outputs(1, 1, 1, 1, /*add_extra_shape=*/true);
INFER_ERROR(
"num_context_dense (1) must match the size of "
"context_dense_types (1) and context_dense_shapes (2)",
op, "[?];[?];?");
}
TEST(ParsingOpsTest, ParseSingleSequenceExample_ShapeFn) {
ShapeInferenceTestOp op("ParseSingleSequenceExample");
auto set_outputs = [&op](int num_context_sparse, int num_context_dense,
int num_feature_list_sparse,
int num_feature_list_dense,
bool add_extra_shape = false) {
using NodeOutList = std::vector<NodeDefBuilder::NodeOut>;
using DataTypeList = std::vector<DataType>;
NodeDefBuilder::NodeOut string_in{"a", 0, DT_STRING};
TF_ASSERT_OK(
NodeDefBuilder("test", "ParseSingleSequenceExample")
.Input("serialized", 0, DT_STRING)
.Input("feature_list_dense_missing_assumed_empty", 0, DT_STRING)
.Input(NodeOutList(num_context_sparse, string_in))
.Input(NodeOutList(num_context_dense, string_in))
.Input(NodeOutList(num_feature_list_sparse, string_in))
.Input(NodeOutList(num_feature_list_dense, string_in))
.Input(NodeOutList(num_context_dense, string_in))
.Input("debug_name", 0, DT_STRING)
.Attr("context_sparse_types",
DataTypeList(num_context_sparse, DT_FLOAT))
.Attr("context_dense_types",
DataTypeList(num_context_dense, DT_FLOAT))
.Attr("context_dense_shapes",
MakeDenseShapes(num_context_dense, add_extra_shape, 0))
.Attr("feature_list_sparse_types",
DataTypeList(num_feature_list_sparse, DT_FLOAT))
.Attr("feature_list_dense_types",
DataTypeList(num_feature_list_dense, DT_FLOAT))
.Attr("feature_list_dense_shapes",
MakeDenseShapes(num_feature_list_dense, add_extra_shape, 0))
.Finalize(&op.node_def));
};
set_outputs(0, 0, 0, 0);
INFER_OK(op, "?;?;?", "");
INFER_OK(op, "[];[20];?", "");
INFER_ERROR("must be rank 0", op, "[1];?;?");
INFER_ERROR("must be rank 1", op, "?;[2,3];?");
  set_outputs(/*num_context_sparse=*/2, /*num_context_dense=*/3, 0, 0);
INFER_OK(op, "?;?;?;?;?;?;?;?;?;?;?",
("[?,1];[?,1];[?];[?];[1];[1];"
"[1];[1,2];[1,2,3]"));
  set_outputs(0, 0, /*num_feature_list_sparse=*/2,
              /*num_feature_list_dense=*/3);
INFER_OK(op, "?;?;?;?;?;?;?;?",
("[?,2];[?,2];[?];[?];[2];[2];"
"[?,1];[?,1,2];[?,1,2,3]"));
set_outputs(2, 3, 2, 3);
INFER_OK(op, "?;?;?;?;?;?;?;?;?;?;?;?;?;?;?;?",
("[?,1];[?,1];[?];[?];[1];[1];"
"[1];[1,2];[1,2,3];"
"[?,2];[?,2];[?];[?];[2];[2];"
"[?,1];[?,1,2];[?,1,2,3]"));
  set_outputs(1, 1, 1, 1, /*add_extra_shape=*/true);
INFER_ERROR("len(context_dense_keys) != len(context_dense_shapes)", op,
"?;?;?;?;?;?;?;?");
}
TEST(ParsingOpsTest, ParseExampleV2_ShapeFn) {
ShapeInferenceTestOp op("ParseExampleV2");
auto set_outputs = [&op](int num_sparse, int num_dense, int num_ragged,
bool add_extra_shape = false,
int unknown_outer_dims = 0) {
using NodeOutList = std::vector<NodeDefBuilder::NodeOut>;
using DataTypeList = std::vector<DataType>;
NodeDefBuilder::NodeOut string_in{"a", 0, DT_STRING};
TF_ASSERT_OK(
NodeDefBuilder("test", "ParseExampleV2")
.Input("serialized", 0, DT_STRING)
.Input("names", 0, DT_STRING)
.Input("sparse_keys", 0, DT_STRING)
.Input("dense_keys", 0, DT_STRING)
.Input("ragged_keys", 0, DT_STRING)
.Input(NodeOutList(num_dense, string_in))
.Attr("num_sparse", num_sparse)
.Attr("sparse_types", DataTypeList(num_sparse, DT_FLOAT))
.Attr("ragged_value_types", DataTypeList(num_ragged, DT_FLOAT))
.Attr("ragged_split_types", DataTypeList(num_ragged, DT_INT32))
.Attr("dense_shapes", MakeDenseShapes(num_dense, add_extra_shape,
unknown_outer_dims))
.Finalize(&op.node_def));
};
  set_outputs(/*num_sparse=*/0, /*num_dense=*/0, /*num_ragged=*/0);
INFER_OK(op, "?;?;[0];[0];[0]", "");
INFER_OK(op, "[10];[10];[0];[0];[0]", "");
INFER_OK(op, "[];[];[0];[0];[0]", "");
INFER_ERROR("must be at most rank 1", op, "[1,2];?;?;?;?");
INFER_ERROR("must be at most rank 1", op, "?;[2,3];?;?;?");
  set_outputs(/*num_sparse=*/2, /*num_dense=*/3, /*num_ragged=*/4);
INFER_OK(op, "[?];?;?;?;?;?;?;?",
("[?,2];[?,2];"
"[?];[?];"
"[2];[2];"
"[d0_0,1];[d0_0,1,2];[d0_0,1,2,3];"
"[?];[?];[?];[?];"
"[?];[?];[?];[?]"));
INFER_OK(op, "[10];?;?;?;?;?;?;?",
("[?,2];[?,2];"
"[?];[?];"
"[2];[2];"
"[d0_0,1];[d0_0,1,2];[d0_0,1,2,3];"
"[?];[?];[?];[?];"
"[11];[11];[11];[11]"));
INFER_OK(op, "[];?;?;?;?;?;?;?",
("[?,1];[?,1];"
"[?];[?];"
"[1];[1];"
"[1];[1,2];[1,2,3];"
"[?];[?];[?];[?];"
"[?];[?];[?];[?]"));
INFER_OK(op, "?;?;?;?;?;?;?;?",
("[?,?];[?,?];"
"[?];[?];"
"[?];[?];"
"?;?;?;"
"[?];[?];[?];[?];"
"[?];[?];[?];[?]"));
  set_outputs(2, 3, 0, /*add_extra_shape=*/true);
INFER_ERROR("len(dense_keys) != len(dense_shapes)", op, "?;?;?;?;?;?;?;?");
  set_outputs(2, 3, 0, /*add_extra_shape=*/true, /*unknown_outer_dims=*/1);
INFER_ERROR("len(dense_keys) != len(dense_shapes)", op, "?;?;?;?;?;?;?;?");
  set_outputs(2, 3, 0, /*add_extra_shape=*/false, /*unknown_outer_dims=*/1);
INFER_OK(op, "[?];?;?;?;?;?;?;?",
("[?,2];[?,2];[?];[?];[2];[2];"
"[d0_0,?,1];[d0_0,?,1,2];[d0_0,?,1,2,3]"));
INFER_OK(op, "[10];?;?;?;?;?;?;?",
("[?,2];[?,2];[?];[?];[2];[2];"
"[d0_0,?,1];[d0_0,?,1,2];[d0_0,?,1,2,3]"));
  set_outputs(2, 3, 0, /*add_extra_shape=*/false, /*unknown_outer_dims=*/2);
INFER_ERROR("shapes[0] has unknown rank or unknown inner dimensions", op,
"?;?;?;?;?;?;?;?");
}
TEST(ParsingOpsTest, ParseSequenceExampleV2_ShapeFn) {
ShapeInferenceTestOp op("ParseSequenceExampleV2");
auto set_outputs = [&op](int num_context_sparse, int num_context_dense,
int num_context_ragged, int num_feature_list_sparse,
int num_feature_list_dense,
int num_feature_list_ragged,
bool add_extra_shape = false) {
using NodeOutList = std::vector<NodeDefBuilder::NodeOut>;
using DataTypeList = std::vector<DataType>;
string string_in("test");
NodeDefBuilder::NodeOut node_in{"a", 0, DT_STRING};
TF_ASSERT_OK(
NodeDefBuilder("test", "ParseSequenceExampleV2")
.Input("serialized", 0, DT_STRING)
.Input("debug_name", 0, DT_STRING)
.Input("context_sparse_keys", 0, DT_STRING)
.Input("context_dense_keys", 0, DT_STRING)
.Input("context_ragged_keys", 0, DT_STRING)
.Input("feature_list_sparse_keys", 0, DT_STRING)
.Input("feature_list_dense_keys", 0, DT_STRING)
.Input("feature_list_ragged_keys", 0, DT_STRING)
.Input("feature_list_dense_missing_assumed_empty", 0, DT_BOOL)
.Input(NodeOutList(num_context_dense, node_in))
.Attr("Ncontext_sparse", num_context_sparse)
.Attr("Nfeature_list_sparse", num_feature_list_sparse)
.Attr("Nfeature_list_dense", num_feature_list_dense)
.Attr("context_sparse_types",
DataTypeList(num_context_sparse, DT_FLOAT))
.Attr("context_dense_types",
DataTypeList(num_context_dense, DT_FLOAT))
.Attr("context_dense_shapes",
MakeDenseShapes(num_context_dense, add_extra_shape, 0))
.Attr("feature_list_sparse_types",
DataTypeList(num_feature_list_sparse, DT_FLOAT))
.Attr("feature_list_dense_types",
DataTypeList(num_feature_list_dense, DT_FLOAT))
.Attr("feature_list_dense_shapes",
MakeDenseShapes(num_feature_list_dense, add_extra_shape, 0))
.Attr("context_ragged_value_types",
DataTypeList(num_context_ragged, DT_FLOAT))
.Attr("context_ragged_split_types",
DataTypeList(num_context_ragged, DT_INT32))
.Attr("feature_list_ragged_value_types",
DataTypeList(num_feature_list_ragged, DT_FLOAT))
.Attr("feature_list_ragged_split_types",
DataTypeList(num_feature_list_ragged, DT_INT32))
.Finalize(&op.node_def));
};
set_outputs(0, 0, 0, 0, 0, 0);
INFER_OK(op, "?;[?];?;?;?;?;?;?;?", "");
INFER_OK(op, "[?];[?];?;?;?;?;?;?;?", "");
INFER_OK(op, "[8];[8];?;?;?;?;?;?;?", "");
INFER_OK(op, "[];[];?;?;?;?;?;?;?", "");
INFER_ERROR("must be at most rank 1", op, "[1,2];?;?;?;?;?;?;?;?");
INFER_ERROR("must be at most rank 1", op, "?;[2,3];?;?;?;?;?;?;?");
  set_outputs(/*num_context_sparse=*/2, /*num_context_dense=*/3,
              /*num_context_ragged=*/4, 0, 0, 0);
INFER_OK(op, "[?];[?];?;?;?;?;?;?;?;?;?;?",
("[?,2];[?,2];"
"[?];[?];"
"[2];[2];"
"[d0_0,1];[d0_0,1,2];[d0_0,1,2,3];"
"[?];[?];[?];[?];"
"[?];[?];[?];[?]"));
INFER_OK(op, "[5];[?];?;?;?;?;?;?;?;?;?;?",
("[?,2];[?,2];"
"[?];[?];"
"[2];[2];"
"[d0_0,1];[d0_0,1,2];[d0_0,1,2,3];"
"[?];[?];[?];[?];"
"[6];[6];[6];[6]"));
INFER_OK(op, "[];[?];?;?;?;?;?;?;?;?;?;?",
("[?,1];[?,1];"
"[?];[?];"
"[1];[1];"
"[1];[1,2];[1,2,3];"
"[?];[?];[?];[?];"
"[?];[?];[?];[?]"));
INFER_OK(op, "?;[?];?;?;?;?;?;?;?;?;?;?",
("[?,?];[?,?];"
"[?];[?];"
"[?];[?];"
"?;?;?;"
"[?];[?];[?];[?];"
"[?];[?];[?];[?]"));
  set_outputs(0, 0, 0, /*num_feature_list_sparse=*/2,
              /*num_feature_list_dense=*/3, /*num_feature_list_ragged=*/4);
INFER_OK(op, "[?];[?];?;?;?;?;?;?;?",
("[?,3];[?,3];"
"[?];[?];"
"[3];[3];"
"[d0_0,?,1];[d0_0,?,1,2];"
"[d0_0,?,1,2,3];"
"in0;in0;in0;"
"[?];[?];[?];[?];"
"[?];[?];[?];[?];"
"[?];[?];[?];[?]"));
INFER_OK(op, "[5];[?];?;?;?;?;?;?;?",
("[?,3];[?,3];"
"[?];[?];"
"[3];[3];"
"[d0_0,?,1];[d0_0,?,1,2];"
"[d0_0,?,1,2,3];"
"in0;in0;in0;"
"[?];[?];[?];[?];"
"[6];[6];[6];[6];"
"[?];[?];[?];[?]"));
INFER_OK(op, "[];[?];?;?;?;?;?;?;?",
("[?,2];[?,2];"
"[?];[?];"
"[2];[2];"
"[?,1];[?,1,2];[?,1,2,3];"
"in0;in0;in0;"
"[?];[?];[?];[?];"
"[?];[?];[?];[?];"
"[?];[?];[?];[?]"));
INFER_OK(op, "?;[?];?;?;?;?;?;?;?",
("[?,?];[?,?];"
"[?];[?];"
"[?];[?];"
"?;?;?;"
"in0;in0;in0;"
"[?];[?];[?];[?];"
"[?];[?];[?];[?];"
"[?];[?];[?];[?]"));
  set_outputs(/*num_context_sparse=*/2, /*num_context_dense=*/3,
              /*num_context_ragged=*/4, /*num_feature_list_sparse=*/2,
              /*num_feature_list_dense=*/3, /*num_feature_list_ragged=*/4);
INFER_OK(op, "[?];[?];?;?;?;?;?;?;?;?;?;?",
("[?,2];[?,2];"
"[?];[?];"
"[2];[2];"
"[d0_0,1];[d0_0,1,2];[d0_0,1,2,3];"
"[?];[?];[?];[?];"
"[?];[?];[?];[?];"
"[?,3];[?,3];"
"[?];[?];"
"[3];[3];"
"[d0_0,?,1];[d0_0,?,1,2];"
"[d0_0,?,1,2,3];"
"in0;in0;in0;"
"[?];[?];[?];[?];"
"[?];[?];[?];[?];"
"[?];[?];[?];[?]"));
INFER_OK(op, "[5];[?];?;?;?;?;?;?;?;?;?;?",
("[?,2];[?,2];"
"[?];[?];"
"[2];[2];"
"[d0_0,1];[d0_0,1,2];[d0_0,1,2,3];"
"[?];[?];[?];[?];"
"[6];[6];[6];[6];"
"[?,3];[?,3];"
"[?];[?];"
"[3];[3];"
"[d0_0,?,1];[d0_0,?,1,2];"
"[d0_0,?,1,2,3];"
"in0;in0;in0;"
"[?];[?];[?];[?];"
"[6];[6];[6];[6];"
"[?];[?];[?];[?]"));
INFER_OK(op, "[];[?];?;?;?;?;?;?;?;?;?;?",
("[?,1];[?,1];"
"[?];[?];"
"[1];[1];"
"[1];[1,2];[1,2,3];"
"[?];[?];[?];[?];"
"[?];[?];[?];[?];"
"[?,2];[?,2];"
"[?];[?];"
"[2];[2];"
"[?,1];[?,1,2];[?,1,2,3];"
"in0;in0;in0;"
"[?];[?];[?];[?];"
"[?];[?];[?];[?];"
"[?];[?];[?];[?]"));
INFER_OK(op, "?;[?];?;?;?;?;?;?;?;?;?;?",
("[?,?];[?,?];"
"[?];[?];"
"[?];[?];"
"?;?;?;"
"[?];[?];[?];[?];"
"[?];[?];[?];[?];"
"[?,?];[?,?];"
"[?];[?];"
"[?];[?];"
"?;?;?;"
"in0;in0;in0;"
"[?];[?];[?];[?];"
"[?];[?];[?];[?];"
"[?];[?];[?];[?]"));
  set_outputs(1, 1, 1, 1, 1, 1, /*add_extra_shape=*/true);
INFER_ERROR(
"num_context_dense (1) must match the size of "
"context_dense_types (1) and context_dense_shapes (2)",
op, "[?];[?];?;?;?;?;?;?;?;?");
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/ops/parsing_ops.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/ops/parsing_ops_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
7e2c7a0e-34b4-43d0-83f0-bea87162eae3 | cpp | google/tensorstore | downsample | tensorstore/driver/downsample/downsample.cc | tensorstore/driver/downsample/downsample_test.cc | #include "tensorstore/driver/downsample/downsample.h"
#include <stddef.h>
#include <algorithm>
#include <cassert>
#include <mutex>
#include <utility>
#include <vector>
#include "absl/base/thread_annotations.h"
#include "absl/container/inlined_vector.h"
#include "absl/status/status.h"
#include "absl/synchronization/mutex.h"
#include "tensorstore/array.h"
#include "tensorstore/array_storage_statistics.h"
#include "tensorstore/box.h"
#include "tensorstore/chunk_layout.h"
#include "tensorstore/codec_spec.h"
#include "tensorstore/context.h"
#include "tensorstore/contiguous_layout.h"
#include "tensorstore/data_type.h"
#include "tensorstore/downsample_method.h"
#include "tensorstore/driver/chunk.h"
#include "tensorstore/driver/downsample/downsample_array.h"
#include "tensorstore/driver/downsample/downsample_method_json_binder.h"
#include "tensorstore/driver/downsample/downsample_nditerable.h"
#include "tensorstore/driver/downsample/downsample_util.h"
#include "tensorstore/driver/downsample/grid_occupancy_map.h"
#include "tensorstore/driver/driver.h"
#include "tensorstore/driver/driver_handle.h"
#include "tensorstore/driver/driver_spec.h"
#include "tensorstore/driver/read.h"
#include "tensorstore/driver/registry.h"
#include "tensorstore/index.h"
#include "tensorstore/index_space/dim_expression.h"
#include "tensorstore/index_space/dimension_units.h"
#include "tensorstore/index_space/index_domain.h"
#include "tensorstore/index_space/index_domain_builder.h"
#include "tensorstore/index_space/index_transform.h"
#include "tensorstore/index_space/index_transform_builder.h"
#include "tensorstore/index_space/transformed_array.h"
#include "tensorstore/internal/arena.h"
#include "tensorstore/internal/intrusive_ptr.h"
#include "tensorstore/internal/json_binding/json_binding.h"
#include "tensorstore/internal/json_binding/std_array.h"
#include "tensorstore/internal/lock_collection.h"
#include "tensorstore/internal/nditerable_transformed_array.h"
#include "tensorstore/json_serialization_options.h"
#include "tensorstore/kvstore/kvstore.h"
#include "tensorstore/kvstore/spec.h"
#include "tensorstore/open_mode.h"
#include "tensorstore/open_options.h"
#include "tensorstore/rank.h"
#include "tensorstore/resize_options.h"
#include "tensorstore/schema.h"
#include "tensorstore/serialization/std_vector.h"
#include "tensorstore/spec.h"
#include "tensorstore/transaction.h"
#include "tensorstore/util/execution/execution.h"
#include "tensorstore/util/execution/sender_util.h"
#include "tensorstore/util/executor.h"
#include "tensorstore/util/future.h"
#include "tensorstore/util/garbage_collection/std_vector.h"
#include "tensorstore/util/iterate.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
namespace internal_downsample {
namespace {
using ::tensorstore::internal::DriverPtr;
using ::tensorstore::internal::IntrusivePtr;
using ::tensorstore::internal::LockCollection;
using ::tensorstore::internal::NDIterable;
using ::tensorstore::internal::OpenTransactionPtr;
using ::tensorstore::internal::ReadChunk;
using ::tensorstore::internal::TransformedDriverSpec;
namespace jb = tensorstore::internal_json_binding;
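// Computes a constraint on the base (undownsampled) domain from a constraint
// on the downsampled domain. Dimensions with a downsample factor of 1 pass
// through unchanged; for all other dimensions the exact base bounds cannot be
// recovered, so they become unbounded with implicit bounds.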
Result<IndexDomain<>> GetBaseDomainConstraintFromDownsampledDomain(
IndexDomain<> downsampled_domain,
tensorstore::span<const Index> downsample_factors) {
assert(downsampled_domain.valid());
const DimensionIndex rank = downsampled_domain.rank();
assert(rank == downsample_factors.size());
IndexDomainBuilder builder(rank);
builder.labels(downsampled_domain.labels());
auto& implicit_lower_bounds = builder.implicit_lower_bounds();
auto& implicit_upper_bounds = builder.implicit_upper_bounds();
auto origin = builder.origin();
auto shape = builder.shape();
for (DimensionIndex i = 0; i < rank; ++i) {
if (downsample_factors[i] != 1) {
implicit_lower_bounds[i] = true;
implicit_upper_bounds[i] = true;
origin[i] = -kInfIndex;
shape[i] = kInfSize;
} else {
implicit_lower_bounds[i] = downsampled_domain.implicit_lower_bounds()[i];
implicit_upper_bounds[i] = downsampled_domain.implicit_upper_bounds()[i];
origin[i] = downsampled_domain.origin()[i];
shape[i] = downsampled_domain.shape()[i];
}
}
return builder.Finalize();
}
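// Maps a transform on the downsampled view to the corresponding transform on
// the base TensorStore. For DownsampleMethod::kStride this is an exact
// composition with a striding expression; for all other methods the
// downsampling is propagated through the transform instead.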
Result<IndexTransform<>> GetBaseTransformForDownsampledTransform(
IndexTransformView<> base_transform,
IndexTransformView<> downsampled_transform,
tensorstore::span<const Index> downsample_factors,
DownsampleMethod downsample_method) {
if (downsample_method == DownsampleMethod::kStride) {
return base_transform | tensorstore::AllDims().Stride(downsample_factors) |
downsampled_transform;
}
PropagatedIndexTransformDownsampling propagated;
TENSORSTORE_RETURN_IF_ERROR(
internal_downsample::PropagateAndComposeIndexTransformDownsampling(
downsampled_transform, base_transform, downsample_factors,
propagated));
return std::move(propagated.transform);
}
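// Spec for the "downsample" driver: a base driver spec plus per-dimension
// downsample factors and a downsample method. Opening is read-only.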
class DownsampleDriverSpec
: public internal::RegisteredDriverSpec<DownsampleDriverSpec,
internal::DriverSpec> {
public:
constexpr static char id[] = "downsample";
TransformedDriverSpec base;
std::vector<Index> downsample_factors;
DownsampleMethod downsample_method;
constexpr static auto ApplyMembers = [](auto& x, auto f) {
return f(internal::BaseCast<internal::DriverSpec>(x), x.base,
x.downsample_factors, x.downsample_method);
};
absl::Status InitializeFromBase() {
TENSORSTORE_RETURN_IF_ERROR(
this->schema.Set(RankConstraint{internal::GetRank(this->base)}));
TENSORSTORE_RETURN_IF_ERROR(
this->schema.Set(this->base.driver_spec->schema.dtype()));
return absl::OkStatus();
}
absl::Status ValidateDownsampleFactors() {
TENSORSTORE_RETURN_IF_ERROR(
this->schema.Set(RankConstraint(this->downsample_factors.size())));
return absl::OkStatus();
}
absl::Status ValidateDownsampleMethod() {
auto dtype = this->schema.dtype();
if (!dtype.valid()) return absl::OkStatus();
return internal_downsample::ValidateDownsampleMethod(
dtype, this->downsample_method);
}
OpenMode open_mode() const override { return base.driver_spec->open_mode(); }
absl::Status ApplyOptions(SpecOptions&& options) override {
TENSORSTORE_RETURN_IF_ERROR(schema.Set(options.dtype()));
TENSORSTORE_RETURN_IF_ERROR(schema.Set(options.rank()));
auto transform = base.transform;
if (!transform.valid()) {
transform = tensorstore::IdentityTransform(downsample_factors.size());
}
if (options.domain().valid()) {
TENSORSTORE_RETURN_IF_ERROR(schema.Set(options.domain()));
TENSORSTORE_ASSIGN_OR_RETURN(auto base_domain,
GetBaseDomainConstraintFromDownsampledDomain(
options.domain(), downsample_factors));
TENSORSTORE_RETURN_IF_ERROR(options.Override(std::move(base_domain)));
}
TENSORSTORE_ASSIGN_OR_RETURN(
transform, transform | AllDims().Stride(downsample_factors));
TENSORSTORE_RETURN_IF_ERROR(options.TransformInputSpaceSchema(transform));
return internal::TransformAndApplyOptions(base, std::move(options));
}
constexpr static auto default_json_binder = jb::Object(
jb::Member("base",
[](auto is_loading, const auto& options, auto* obj, auto* j) {
return jb::Projection<&DownsampleDriverSpec::base>()(
is_loading,
JsonSerializationOptions(options, obj->schema.dtype(),
obj->schema.rank()),
obj, j);
}),
jb::Initialize([](auto* obj) { return obj->InitializeFromBase(); }),
jb::Member("downsample_factors",
jb::Validate(
[](const auto& options, auto* obj) {
return obj->ValidateDownsampleFactors();
},
jb::Projection<&DownsampleDriverSpec::downsample_factors>(
jb::Array(jb::Integer<Index>(1))))),
jb::Member(
"downsample_method",
jb::Validate(
[](const auto& options, auto* obj) {
return obj->ValidateDownsampleMethod();
},
jb::Projection<&DownsampleDriverSpec::downsample_method>())),
jb::Initialize([](auto* obj) {
SpecOptions base_options;
static_cast<Schema&>(base_options) = std::exchange(obj->schema, {});
return obj->ApplyOptions(std::move(base_options));
}));
Result<IndexDomain<>> GetDomain() const override {
TENSORSTORE_ASSIGN_OR_RETURN(auto domain,
internal::GetEffectiveDomain(base));
if (!domain.valid()) {
return schema.domain();
}
if (domain.rank() != downsample_factors.size()) {
return absl::InternalError(tensorstore::StrCat(
"Domain of base TensorStore has rank (", domain.rank(),
") but expected ", downsample_factors.size()));
}
auto downsampled_domain = internal_downsample::DownsampleDomain(
domain, downsample_factors, downsample_method);
return MergeIndexDomains(std::move(downsampled_domain), schema.domain());
}
Result<ChunkLayout> GetChunkLayout() const override {
return internal::GetEffectiveChunkLayout(base) |
AllDims().Stride(downsample_factors);
}
Result<CodecSpec> GetCodec() const override {
return internal::GetEffectiveCodec(base);
}
Result<SharedArray<const void>> GetFillValue(
IndexTransformView<> transform) const override {
return {std::in_place};
}
Result<DimensionUnitsVector> GetDimensionUnits() const override {
TENSORSTORE_ASSIGN_OR_RETURN(auto dimension_units,
internal::GetEffectiveDimensionUnits(base));
if (!dimension_units.empty()) {
tensorstore::span<const Index> downsample_factors =
this->downsample_factors;
TENSORSTORE_ASSIGN_OR_RETURN(
auto transform,
tensorstore::IdentityTransform(downsample_factors.size()) |
tensorstore::AllDims().Stride(downsample_factors));
dimension_units =
TransformOutputDimensionUnits(transform, std::move(dimension_units));
}
return dimension_units;
}
kvstore::Spec GetKvstore() const override {
return base.driver_spec->GetKvstore();
}
Result<TransformedDriverSpec> GetBase(
IndexTransformView<> transform) const override {
TransformedDriverSpec new_base;
new_base.driver_spec = base.driver_spec;
if (transform.valid()) {
TENSORSTORE_ASSIGN_OR_RETURN(
new_base.transform,
GetBaseTransformForDownsampledTransform(
base.transform.valid()
? base.transform
: tensorstore::IdentityTransform(downsample_factors.size()),
transform, downsample_factors, downsample_method));
}
return new_base;
}
Future<internal::Driver::Handle> Open(
internal::DriverOpenRequest request) const override {
if (!!(request.read_write_mode & ReadWriteMode::write)) {
return absl::InvalidArgumentError("only reading is supported");
}
request.read_write_mode = ReadWriteMode::read;
return MapFutureValue(
InlineExecutor{},
[spec = internal::DriverSpec::PtrT<const DownsampleDriverSpec>(this)](
internal::Driver::Handle handle)
-> Result<internal::Driver::Handle> {
TENSORSTORE_ASSIGN_OR_RETURN(
auto downsampled_handle,
MakeDownsampleDriver(std::move(handle), spec->downsample_factors,
spec->downsample_method));
if (auto domain = spec->schema.domain(); domain.valid()) {
TENSORSTORE_RETURN_IF_ERROR(
MergeIndexDomains(domain,
downsampled_handle.transform.domain()),
tensorstore::MaybeAnnotateStatus(
_, "downsampled domain does not match domain in schema"));
}
return downsampled_handle;
},
internal::OpenDriver(base, std::move(request)));
}
};
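// Read-only driver adapter presenting a downsampled view of `base_driver_`
// composed with `base_transform_`.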
class DownsampleDriver
: public internal::RegisteredDriver<DownsampleDriver,
internal::Driver> {
public:
Result<TransformedDriverSpec> GetBoundSpec(
internal::OpenTransactionPtr transaction,
IndexTransformView<> transform) override {
auto driver_spec = internal::DriverSpec::Make<DownsampleDriverSpec>();
driver_spec->context_binding_state_ = ContextBindingState::bound;
TENSORSTORE_ASSIGN_OR_RETURN(
driver_spec->base,
base_driver_->GetBoundSpec(std::move(transaction), base_transform_));
driver_spec->downsample_factors = downsample_factors_;
driver_spec->downsample_method = downsample_method_;
TENSORSTORE_RETURN_IF_ERROR(driver_spec->InitializeFromBase());
TransformedDriverSpec spec;
spec.transform = transform;
spec.driver_spec = std::move(driver_spec);
return spec;
}
Result<ChunkLayout> GetChunkLayout(IndexTransformView<> transform) override {
TENSORSTORE_ASSIGN_OR_RETURN(auto strided_base_transform,
GetStridedBaseTransform());
return base_driver_->GetChunkLayout(strided_base_transform) | transform;
}
Result<CodecSpec> GetCodec() override { return base_driver_->GetCodec(); }
Result<SharedArray<const void>> GetFillValue(
IndexTransformView<> transform) override {
if (downsample_method_ == DownsampleMethod::kStride) {
TENSORSTORE_ASSIGN_OR_RETURN(auto strided_transform,
GetStridedBaseTransform() | transform);
return base_driver_->GetFillValue(strided_transform);
}
PropagatedIndexTransformDownsampling propagated;
TENSORSTORE_RETURN_IF_ERROR(
internal_downsample::PropagateAndComposeIndexTransformDownsampling(
transform, base_transform_, downsample_factors_, propagated));
TENSORSTORE_ASSIGN_OR_RETURN(
auto fill_value, base_driver_->GetFillValue(propagated.transform));
if (!fill_value.valid()) return {std::in_place};
TENSORSTORE_ASSIGN_OR_RETURN(
auto broadcast_fill_value,
BroadcastArray(std::move(fill_value),
propagated.transform.domain().box()));
TENSORSTORE_ASSIGN_OR_RETURN(
auto downsampled_fill_value,
internal_downsample::DownsampleArray(
broadcast_fill_value, propagated.input_downsample_factors,
downsample_method_));
return UnbroadcastArray(downsampled_fill_value);
}
Result<DimensionUnitsVector> GetDimensionUnits() override {
TENSORSTORE_ASSIGN_OR_RETURN(auto dimension_units,
base_driver_->GetDimensionUnits());
TENSORSTORE_ASSIGN_OR_RETURN(auto strided_base_transform,
GetStridedBaseTransform());
return TransformOutputDimensionUnits(strided_base_transform,
std::move(dimension_units));
}
KvStore GetKvstore(const Transaction& transaction) override {
return base_driver_->GetKvstore(transaction);
}
Result<internal::DriverHandle> GetBase(
ReadWriteMode read_write_mode, IndexTransformView<> transform,
const Transaction& transaction) override {
internal::DriverHandle base_handle;
base_handle.driver = base_driver_;
base_handle.driver.set_read_write_mode(read_write_mode);
base_handle.transaction = transaction;
TENSORSTORE_ASSIGN_OR_RETURN(base_handle.transform,
GetBaseTransformForDownsampledTransform(
base_transform_, transform,
downsample_factors_, downsample_method_));
return base_handle;
}
Future<ArrayStorageStatistics> GetStorageStatistics(
GetStorageStatisticsRequest request) override;
explicit DownsampleDriver(DriverPtr base, IndexTransform<> base_transform,
tensorstore::span<const Index> downsample_factors,
DownsampleMethod downsample_method)
: base_driver_(std::move(base)),
base_transform_(std::move(base_transform)),
downsample_factors_(downsample_factors.begin(),
downsample_factors.end()),
downsample_method_(downsample_method) {}
DataType dtype() override { return base_driver_->dtype(); }
DimensionIndex rank() override { return base_transform_.input_rank(); }
Executor data_copy_executor() override {
return base_driver_->data_copy_executor();
}
void Read(ReadRequest request, ReadChunkReceiver receiver) override;
Result<IndexTransform<>> GetStridedBaseTransform() {
return base_transform_ | tensorstore::AllDims().Stride(downsample_factors_);
}
Future<IndexTransform<>> ResolveBounds(ResolveBoundsRequest request) override;
constexpr static auto ApplyMembers = [](auto&& x, auto f) {
return f(x.base_driver_, x.base_transform_, x.downsample_factors_,
x.downsample_method_);
};
DriverPtr base_driver_;
IndexTransform<> base_transform_;
std::vector<Index> downsample_factors_;
DownsampleMethod downsample_method_;
};
Future<IndexTransform<>> DownsampleDriver::ResolveBounds(
ResolveBoundsRequest request) {
return MapFutureValue(
InlineExecutor{},
[self = IntrusivePtr<DownsampleDriver>(this),
transform = std::move(request.transform)](
IndexTransform<> base_transform) -> Result<IndexTransform<>> {
Box<dynamic_rank(internal::kNumInlinedDims)> downsampled_bounds(
base_transform.input_rank());
internal_downsample::DownsampleBounds(
base_transform.domain().box(), downsampled_bounds,
self->downsample_factors_, self->downsample_method_);
return tensorstore::PropagateBoundsToTransform(
downsampled_bounds, base_transform.implicit_lower_bounds(),
base_transform.implicit_upper_bounds(), std::move(transform));
},
base_driver_->ResolveBounds({std::move(request.transaction),
base_transform_,
std::move(request.options)}));
}
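// Shared state for one downsampled read. Base chunks whose regions align
// with whole downsampling blocks are emitted independently; everything else
// is copied into `data_buffer_`, and once `remaining_elements_` reaches zero
// the regions not already emitted (tracked by
// `independently_emitted_chunks_`) are emitted from the buffer.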
struct ReadState : public internal::AtomicReferenceCount<ReadState> {
IntrusivePtr<DownsampleDriver> self_;
internal::ReadChunkReceiver receiver_;
absl::Mutex mutex_;
SharedOffsetArray<void> data_buffer_;
Index remaining_elements_;
internal_downsample::GridOccupancyTracker independently_emitted_chunks_;
absl::InlinedVector<Index, internal::kNumInlinedDims> downsample_factors_;
DimensionIndex original_input_rank_;
IndexDomain<> base_transform_domain_;
AnyCancelReceiver on_cancel_;
absl::Status error_;
bool done_signal_received_ = false;
bool done_sent_ = false;
bool canceled_ = false;
size_t chunks_in_progress_ = 0;
void Cancel() {
std::lock_guard<ReadState> guard(*this);
canceled_ = true;
}
void lock() ABSL_NO_THREAD_SAFETY_ANALYSIS { mutex_.Lock(); }
void unlock() ABSL_NO_THREAD_SAFETY_ANALYSIS {
bool has_error = !error_.ok();
bool send_done = !done_sent_ && chunks_in_progress_ == 0 &&
(done_signal_received_ || has_error);
if (send_done) done_sent_ = true;
AnyCancelReceiver on_cancel;
if (canceled_ && on_cancel_) {
on_cancel = std::move(on_cancel_);
}
mutex_.Unlock();
if (on_cancel) on_cancel();
if (!send_done) return;
if (has_error) {
execution::set_error(receiver_, error_);
} else {
execution::set_done(receiver_);
}
execution::set_stopping(receiver_);
}
void SetError(absl::Status error, size_t decrement_chunks_in_progress = 0) {
std::lock_guard<ReadState> guard(*this);
chunks_in_progress_ -= decrement_chunks_in_progress;
if (!error_.ok()) return;
error_ = std::move(error);
canceled_ = true;
}
void EmitBufferedChunkForBox(BoxView<> base_domain);
void EmitBufferedChunks();
};
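// ReadChunk implementation backed by the shared `data_buffer_`; the buffered
// base-resolution data is downsampled on the fly when the chunk is read.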
struct BufferedReadChunkImpl {
internal::IntrusivePtr<ReadState> state_;
absl::Status operator()(LockCollection& lock_collection) const {
return absl::OkStatus();
}
Result<NDIterable::Ptr> operator()(internal::ReadChunk::BeginRead,
IndexTransform<> chunk_transform,
internal::Arena* arena) const {
TENSORSTORE_ASSIGN_OR_RETURN(
auto propagated,
internal_downsample::PropagateIndexTransformDownsampling(
chunk_transform, state_->data_buffer_.domain(),
state_->downsample_factors_));
TENSORSTORE_ASSIGN_OR_RETURN(
auto transformed_array,
MakeTransformedArray(state_->data_buffer_,
std::move(propagated.transform)));
TENSORSTORE_ASSIGN_OR_RETURN(
auto base_nditerable,
GetTransformedArrayNDIterable(transformed_array, arena));
return internal_downsample::DownsampleNDIterable(
std::move(base_nditerable), transformed_array.domain().box(),
propagated.input_downsample_factors, state_->self_->downsample_method_,
chunk_transform.input_rank(), arena);
}
};
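// Builds the transform handed to the receiver for an emitted chunk: its
// input domain is `base_domain` downsampled, with an identity mapping onto
// the first `request_rank` output dimensions.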
IndexTransform<> GetDownsampledRequestIdentityTransform(
BoxView<> base_domain, tensorstore::span<const Index> downsample_factors,
DownsampleMethod downsample_method, DimensionIndex request_rank) {
assert(base_domain.rank() == downsample_factors.size());
assert(request_rank <= base_domain.rank());
IndexTransformBuilder builder(base_domain.rank(), request_rank);
internal_downsample::DownsampleBounds(base_domain, builder.input_bounds(),
downsample_factors, downsample_method);
builder.output_identity_transform();
return builder.Finalize().value();
}
void ReadState::EmitBufferedChunkForBox(BoxView<> base_domain) {
auto request_transform = GetDownsampledRequestIdentityTransform(
base_domain, downsample_factors_, self_->downsample_method_,
original_input_rank_);
ReadChunk downsampled_chunk;
downsampled_chunk.transform =
IdentityTransform(request_transform.domain().box());
downsampled_chunk.impl = BufferedReadChunkImpl{IntrusivePtr<ReadState>(this)};
execution::set_value(receiver_, std::move(downsampled_chunk),
std::move(request_transform));
}
void ReadState::EmitBufferedChunks() {
if (independently_emitted_chunks_.occupied_chunks.empty()) {
EmitBufferedChunkForBox(base_transform_domain_.box());
} else {
internal_downsample::GridOccupancyMap emitted_chunk_map(
std::move(independently_emitted_chunks_), base_transform_domain_.box());
const DimensionIndex rank = emitted_chunk_map.rank();
Index grid_cell[kMaxRank];
tensorstore::span<Index> grid_cell_span(&grid_cell[0], rank);
Box<dynamic_rank(internal::kNumInlinedDims)> grid_cell_domain;
grid_cell_domain.set_rank(rank);
emitted_chunk_map.InitializeCellIterator(grid_cell_span);
do {
if (!emitted_chunk_map.GetGridCellDomain(grid_cell_span,
grid_cell_domain)) {
continue;
}
EmitBufferedChunkForBox(grid_cell_domain);
} while (emitted_chunk_map.AdvanceCellIterator(grid_cell_span));
}
{
std::lock_guard<ReadState> guard(*this);
--chunks_in_progress_;
}
}
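// ReadChunk implementation wrapping a base chunk whose region can be
// downsampled independently; the base data is downsampled on the fly without
// passing through the shared buffer.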
struct IndependentReadChunkImpl {
internal::IntrusivePtr<ReadState> state_;
internal::ReadChunk base_chunk_;
absl::Status operator()(LockCollection& lock_collection) {
return base_chunk_.impl(lock_collection);
}
Result<NDIterable::Ptr> operator()(internal::ReadChunk::BeginRead,
IndexTransform<> chunk_transform,
internal::Arena* arena) {
TENSORSTORE_ASSIGN_OR_RETURN(
auto propagated,
internal_downsample::PropagateIndexTransformDownsampling(
chunk_transform, state_->base_transform_domain_.box(),
state_->downsample_factors_));
TENSORSTORE_ASSIGN_OR_RETURN(
auto base_transform,
ComposeTransforms(base_chunk_.transform, propagated.transform));
TENSORSTORE_ASSIGN_OR_RETURN(
auto base_nditerable,
base_chunk_.impl(internal::ReadChunk::BeginRead{},
std::move(base_transform), arena));
return internal_downsample::DownsampleNDIterable(
std::move(base_nditerable), propagated.transform.domain().box(),
propagated.input_downsample_factors, state_->self_->downsample_method_,
chunk_transform.input_rank(), arena);
}
};
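// Attempts to emit `base_chunk` directly as a downsampled chunk, returning
// false if its region cannot be downsampled independently (the caller then
// falls back to buffering). On success the region is recorded as
// independently emitted, and if this was the last outstanding data, emission
// of the buffered remainder is scheduled on the data-copy executor.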
bool MaybeEmitIndependentReadChunk(
ReadState& state, ReadChunk& base_chunk,
IndexTransformView<> base_request_transform) {
if (!internal_downsample::CanDownsampleIndexTransform(
base_request_transform, state.base_transform_domain_.box(),
state.downsample_factors_)) {
return false;
}
TENSORSTORE_ASSIGN_OR_RETURN(auto inverse_request_transform,
InverseTransform(base_request_transform), false);
TENSORSTORE_ASSIGN_OR_RETURN(
base_chunk.transform,
ComposeTransforms(base_chunk.transform, inverse_request_transform),
false);
const Index num_elements = base_chunk.transform.domain().num_elements();
bool emit_buffered_chunk;
{
absl::MutexLock lock(&state.mutex_);
bool has_data_buffer =
state.data_buffer_.byte_strided_origin_pointer() != nullptr;
bool remaining_data = (state.remaining_elements_ -= num_elements) != 0;
emit_buffered_chunk = (!remaining_data && has_data_buffer);
if (has_data_buffer || remaining_data) {
state.independently_emitted_chunks_.MarkOccupied(
base_chunk.transform.domain().box());
}
}
internal::ReadChunk downsampled_chunk;
auto request_transform = GetDownsampledRequestIdentityTransform(
base_chunk.transform.domain().box(), state.downsample_factors_,
state.self_->downsample_method_, state.original_input_rank_);
downsampled_chunk.impl = IndependentReadChunkImpl{
internal::IntrusivePtr<ReadState>(&state), std::move(base_chunk)};
downsampled_chunk.transform =
IdentityTransform(request_transform.domain().box());
execution::set_value(state.receiver_, std::move(downsampled_chunk),
request_transform);
if (emit_buffered_chunk) {
state.self_->data_copy_executor()(
[state = internal::IntrusivePtr<ReadState>(&state)] {
state->EmitBufferedChunks();
});
} else {
std::lock_guard<ReadState> guard(state);
--state.chunks_in_progress_;
}
return true;
}
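// Receiver passed to the base driver's Read: each chunk is either emitted
// independently via MaybeEmitIndependentReadChunk or copied into the shared
// buffer on the data-copy executor; the final copy triggers
// EmitBufferedChunks.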
struct ReadReceiverImpl {
internal::IntrusivePtr<ReadState> state_;
void set_starting(AnyCancelReceiver on_cancel) {
{
absl::MutexLock lock(&state_->mutex_);
if (!state_->canceled_) {
state_->on_cancel_ = std::move(on_cancel);
return;
}
}
if (on_cancel) on_cancel();
}
void set_value(ReadChunk chunk, IndexTransform<> cell_transform) {
if (cell_transform.domain().box().is_empty()) return;
{
absl::MutexLock lock(&state_->mutex_);
if (state_->canceled_) return;
++state_->chunks_in_progress_;
}
if (MaybeEmitIndependentReadChunk(*state_, chunk, cell_transform)) return;
state_->self_->data_copy_executor()([state = state_,
chunk = std::move(chunk),
cell_transform = std::move(
cell_transform)]() mutable {
const Index num_elements = cell_transform.domain().num_elements();
{
std::lock_guard<ReadState> guard(*state);
if (state->canceled_) {
--state->chunks_in_progress_;
return;
}
if (state->data_buffer_.byte_strided_origin_pointer() == nullptr) {
state->data_buffer_ =
AllocateArray(state->base_transform_domain_.box(), c_order,
default_init, state->self_->base_driver_->dtype());
}
}
TENSORSTORE_ASSIGN_OR_RETURN(
auto transformed_data_buffer,
MakeTransformedArray(state->data_buffer_, std::move(cell_transform)),
state->SetError(_, 1));
TENSORSTORE_RETURN_IF_ERROR(
internal::CopyReadChunk(chunk.impl, chunk.transform,
transformed_data_buffer),
state->SetError(_, 1));
{
std::lock_guard<ReadState> guard(*state);
bool elements_done = (state->remaining_elements_ -= num_elements) == 0;
if (state->canceled_ || !elements_done) {
--state->chunks_in_progress_;
return;
}
}
state->EmitBufferedChunks();
});
}
void set_error(absl::Status status) { state_->SetError(std::move(status)); }
void set_done() {
std::lock_guard<ReadState> guard(*state_);
state_->done_signal_received_ = true;
}
void set_stopping() {
absl::MutexLock lock(&state_->mutex_);
state_->on_cancel_ = {};
}
};
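// For `kStride`, downsampling is expressible purely as an index transform,
// so the read is forwarded directly to the base driver.  For all other
// methods, the base bounds are resolved first, the request transform is
// propagated through the downsample factors, and the base read is driven
// through a `ReadState`.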
void DownsampleDriver::Read(ReadRequest request, ReadChunkReceiver receiver) {
if (downsample_method_ == DownsampleMethod::kStride) {
TENSORSTORE_ASSIGN_OR_RETURN(
request.transform, GetStridedBaseTransform() | request.transform,
execution::set_error(FlowSingleReceiver{std::move(receiver)}, _));
base_driver_->Read(std::move(request), std::move(receiver));
return;
}
auto base_resolve_future = base_driver_->ResolveBounds(
{request.transaction, base_transform_, {fix_resizable_bounds}});
auto state = internal::MakeIntrusivePtr<ReadState>();
state->self_.reset(this);
state->original_input_rank_ = request.transform.input_rank();
state->receiver_ = std::move(receiver);
execution::set_starting(state->receiver_,
[state = state.get()] { state->Cancel(); });
std::move(base_resolve_future)
.ExecuteWhenReady([state = std::move(state),
request = std::move(request)](
ReadyFuture<IndexTransform<>> future) mutable {
auto& r = future.result();
if (!r.ok()) {
state->SetError(std::move(r.status()));
return;
}
IndexTransform<> base_transform = std::move(*r);
PropagatedIndexTransformDownsampling propagated;
TENSORSTORE_RETURN_IF_ERROR(
internal_downsample::PropagateAndComposeIndexTransformDownsampling(
request.transform, base_transform,
state->self_->downsample_factors_, propagated),
state->SetError(_));
state->remaining_elements_ =
propagated.transform.domain().num_elements();
state->downsample_factors_ =
std::move(propagated.input_downsample_factors);
state->base_transform_domain_ = propagated.transform.domain();
auto* state_ptr = state.get();
request.transform = std::move(propagated.transform);
state_ptr->self_->base_driver_->Read(
std::move(request), ReadReceiverImpl{std::move(state)});
});
}
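// Storage statistics are computed by propagating the request transform to
// the base domain and querying the base driver, since the downsampled view
// is backed by exactly the corresponding region of the base TensorStore.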
Future<ArrayStorageStatistics> DownsampleDriver::GetStorageStatistics(
GetStorageStatisticsRequest request) {
if (downsample_method_ == DownsampleMethod::kStride) {
TENSORSTORE_ASSIGN_OR_RETURN(request.transform,
GetStridedBaseTransform() | request.transform);
return base_driver_->GetStorageStatistics(std::move(request));
}
auto [promise, future] = PromiseFuturePair<ArrayStorageStatistics>::Make();
auto base_resolve_future = base_driver_->ResolveBounds(
{request.transaction, base_transform_, {fix_resizable_bounds}});
LinkValue(WithExecutor(
data_copy_executor(),
[self = IntrusivePtr<DownsampleDriver>(this),
request = std::move(request)](
Promise<ArrayStorageStatistics> promise,
ReadyFuture<IndexTransform<>> future) mutable {
IndexTransform<> base_transform = std::move(future.value());
PropagatedIndexTransformDownsampling propagated;
TENSORSTORE_RETURN_IF_ERROR(
internal_downsample::
PropagateAndComposeIndexTransformDownsampling(
request.transform, base_transform,
self->downsample_factors_, propagated),
static_cast<void>(promise.SetResult(_)));
request.transform = std::move(propagated.transform);
LinkResult(std::move(promise),
self->base_driver_->GetStorageStatistics(
std::move(request)));
}),
std::move(promise), std::move(base_resolve_future));
return std::move(future);
}
const internal::DriverRegistration<DownsampleDriverSpec> driver_registration;
}
}
namespace internal {
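// Validates the downsample factors and method against `base` and wraps the
// base driver in a read-only `DownsampleDriver` whose domain is the
// downsampled domain of `base`.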
Result<Driver::Handle> MakeDownsampleDriver(
Driver::Handle base, tensorstore::span<const Index> downsample_factors,
DownsampleMethod downsample_method) {
if (downsample_factors.size() != base.transform.input_rank()) {
return absl::InvalidArgumentError(tensorstore::StrCat(
"Number of downsample factors (", downsample_factors.size(),
") does not match TensorStore rank (", base.transform.input_rank(),
")"));
}
if (!(base.driver.read_write_mode() & ReadWriteMode::read)) {
return absl::InvalidArgumentError(
"Cannot downsample write-only TensorStore");
}
if (std::any_of(downsample_factors.begin(), downsample_factors.end(),
[](Index factor) { return factor < 1; })) {
return absl::InvalidArgumentError(tensorstore::StrCat(
"Downsample factors ", downsample_factors, " are not all positive"));
}
TENSORSTORE_RETURN_IF_ERROR(internal_downsample::ValidateDownsampleMethod(
base.driver->dtype(), downsample_method));
auto downsampled_domain =
internal_downsample::GetDownsampledDomainIdentityTransform(
base.transform.domain(), downsample_factors, downsample_method);
base.driver =
internal::MakeReadWritePtr<internal_downsample::DownsampleDriver>(
ReadWriteMode::read, std::move(base.driver),
std::move(base.transform), downsample_factors, downsample_method);
base.transform = std::move(downsampled_domain);
return base;
}
}
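// Spec-level counterpart of `MakeDownsampleDriver`: wraps `base_spec` in a
// `DownsampleDriverSpec`, validates the factors and method, and, if the
// base spec has a valid transform, sets the downsampled domain transform.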
Result<Spec> Downsample(const Spec& base_spec,
tensorstore::span<const Index> downsample_factors,
DownsampleMethod downsample_method) {
using internal_spec::SpecAccess;
Spec downsampled_spec;
auto& impl = SpecAccess::impl(downsampled_spec);
auto driver_spec =
internal::DriverSpec::Make<internal_downsample::DownsampleDriverSpec>();
driver_spec->context_binding_state_ = base_spec.context_binding_state();
driver_spec->base = SpecAccess::impl(base_spec);
TENSORSTORE_RETURN_IF_ERROR(driver_spec->InitializeFromBase());
driver_spec->downsample_factors.assign(downsample_factors.begin(),
downsample_factors.end());
driver_spec->downsample_method = downsample_method;
TENSORSTORE_RETURN_IF_ERROR(driver_spec->ValidateDownsampleFactors());
TENSORSTORE_RETURN_IF_ERROR(driver_spec->ValidateDownsampleMethod());
impl.driver_spec = std::move(driver_spec);
if (base_spec.transform().valid()) {
impl.transform = internal_downsample::GetDownsampledDomainIdentityTransform(
base_spec.transform().domain(), downsample_factors, downsample_method);
}
return downsampled_spec;
}
} | #include "tensorstore/downsample.h"
#include <stdint.h>
#include <memory>
#include <string>
#include <utility>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include <nlohmann/json.hpp>
#include "tensorstore/array.h"
#include "tensorstore/chunk_layout.h"
#include "tensorstore/context.h"
#include "tensorstore/data_type.h"
#include "tensorstore/downsample_method.h"
#include "tensorstore/driver/array/array.h"
#include "tensorstore/driver/driver_testutil.h"
#include "tensorstore/driver/read.h"
#include "tensorstore/index.h"
#include "tensorstore/index_space/dim_expression.h"
#include "tensorstore/index_space/index_transform.h"
#include "tensorstore/index_space/transformed_array.h"
#include "tensorstore/internal/global_initializer.h"
#include "tensorstore/internal/json_gtest.h"
#include "tensorstore/open.h"
#include "tensorstore/open_mode.h"
#include "tensorstore/schema.h"
#include "tensorstore/spec.h"
#include "tensorstore/static_cast.h"
#include "tensorstore/tensorstore.h"
#include "tensorstore/util/execution/execution.h"
#include "tensorstore/util/execution/sender_util.h"
#include "tensorstore/util/status_testutil.h"
#include "tensorstore/util/str_cat.h"
#include "tensorstore/util/unit.h"
namespace {
using ::tensorstore::BoxView;
using ::tensorstore::ChunkLayout;
using ::tensorstore::Context;
using ::tensorstore::DimensionIndex;
using ::tensorstore::DownsampleMethod;
using ::tensorstore::Index;
using ::tensorstore::MakeArray;
using ::tensorstore::MakeOffsetArray;
using ::tensorstore::MatchesJson;
using ::tensorstore::MatchesStatus;
using ::tensorstore::ReadWriteMode;
using ::tensorstore::Spec;
using ::tensorstore::TensorStore;
using ::tensorstore::internal::CollectReadChunks;
using ::tensorstore::internal::MakeArrayBackedReadChunk;
using ::tensorstore::internal::MockDriver;
using ::tensorstore::internal::ReadAsIndividualChunks;
using ::tensorstore::internal::TestSpecSchema;
using ::tensorstore::internal::TestTensorStoreCreateCheckSchema;
using ::testing::Optional;
using ::testing::Pair;
TEST(DownsampleTest, Rank1Mean) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto store, tensorstore::FromArray(MakeArray<float>({1, 2, 5, 7})));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto downsampled_store,
tensorstore::Downsample(store, {2}, DownsampleMethod::kMean));
EXPECT_THAT(tensorstore::Read(downsampled_store).result(),
Optional(MakeArray<float>({1.5, 6})));
}
TEST(DownsampleTest, Rank1Min) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto store, tensorstore::FromArray(MakeArray<float>({1, 2, 5, 7})));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto downsampled_store,
tensorstore::Downsample(store, {2}, DownsampleMethod::kMin));
EXPECT_THAT(tensorstore::Read(downsampled_store).result(),
Optional(MakeArray<float>({1, 5})));
}
TEST(DownsampleTest, Rank3Empty) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto store,
tensorstore::FromArray(tensorstore::AllocateArray<float>({2, 0, 3})));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto downsampled_store,
tensorstore::Downsample(store, {2, 3, 2}, DownsampleMethod::kMean));
EXPECT_THAT(tensorstore::Read(downsampled_store).result(),
Optional(tensorstore::AllocateArray<float>({1, 0, 2})));
}
TEST(DownsampleTest, Rank1MeanTranslated) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto store,
tensorstore::FromArray(MakeOffsetArray<float>({1}, {1, 2, 5, 7})));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto downsampled_store,
tensorstore::Downsample(store, {2}, DownsampleMethod::kMean));
EXPECT_THAT(tensorstore::Read(downsampled_store).result(),
Optional(MakeArray<float>({1, 3.5, 7})));
}
TEST(DownsampleTest, Rank1Stride) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto store, tensorstore::FromArray(MakeArray<float>({1, 2, 5, 7})));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto downsampled_store,
tensorstore::Downsample(store, {2}, DownsampleMethod::kStride));
EXPECT_THAT(tensorstore::Read(downsampled_store).result(),
Optional(MakeArray<float>({1, 5})));
}
TEST(DownsampleTest, Rank1MeanChunked) {
::nlohmann::json base_spec{{"driver", "n5"},
{"kvstore", {{"driver", "memory"}}},
{"metadata",
{{"dataType", "uint8"},
{"dimensions", {11}},
{"blockSize", {3}},
{"compression", {{"type", "raw"}}}}}};
auto context = Context::Default();
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto base_store,
tensorstore::Open(base_spec, context, tensorstore::OpenMode::create)
.result());
TENSORSTORE_ASSERT_OK(tensorstore::Write(
MakeArray<uint8_t>({0, 2, 3, 9, 1, 5, 7, 3, 4, 0, 5}), base_store));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto downsampled_store, tensorstore::Open({{"driver", "downsample"},
{"base", base_spec},
{"downsample_factors", {2}},
{"downsample_method", "mean"}},
context)
.result());
EXPECT_THAT(tensorstore::Read(downsampled_store).result(),
Optional(MakeArray<uint8_t>({1, 6, 3, 5, 2, 5})));
}
TEST(DownsampleTest, Rank1MeanChunkedTranslated) {
::nlohmann::json base_spec{{"driver", "n5"},
{"kvstore", {{"driver", "memory"}}},
{"metadata",
{{"dataType", "uint8"},
{"dimensions", {11}},
{"blockSize", {3}},
{"compression", {{"type", "raw"}}}}},
{"transform",
{
{"input_inclusive_min", {1}},
{"input_exclusive_max", {12}},
{"output",
{
{{"input_dimension", 0}, {"offset", -1}},
}},
}}};
auto context = Context::Default();
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto base_store,
tensorstore::Open(base_spec, context, tensorstore::OpenMode::create)
.result());
TENSORSTORE_ASSERT_OK(tensorstore::Write(
MakeArray<uint8_t>({0, 2, 3, 9, 1, 5, 7, 3, 4, 0, 5}), base_store));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto downsampled_store, tensorstore::Open({{"driver", "downsample"},
{"base", base_spec},
{"downsample_factors", {2}},
{"downsample_method", "mean"}},
context)
.result());
EXPECT_THAT(ReadAsIndividualChunks(downsampled_store).result(),
Optional(::testing::UnorderedElementsAre(
Pair(MakeOffsetArray<uint8_t>({0}, {0, 2}),
IdentityTransform(BoxView({0}, {2}))),
Pair(MakeOffsetArray<uint8_t>({5}, {2}),
IdentityTransform(BoxView({5}, {1}))),
Pair(MakeOffsetArray<uint8_t>({2}, {5, 6, 4}),
IdentityTransform(BoxView({2}, {3}))))));
EXPECT_THAT(tensorstore::Read(downsampled_store).result(),
Optional(MakeArray<uint8_t>({0, 2, 5, 6, 4, 2})));
}
TEST(DownsampleTest, Rank1MeanChunkedIndexArray) {
::nlohmann::json base_spec{{"driver", "n5"},
{"kvstore", {{"driver", "memory"}}},
{"metadata",
{{"dataType", "uint8"},
{"dimensions", {11}},
{"blockSize", {3}},
{"compression", {{"type", "raw"}}}}}};
auto context = Context::Default();
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto base_store,
tensorstore::Open(base_spec, context, tensorstore::OpenMode::create)
.result());
TENSORSTORE_ASSERT_OK(tensorstore::Write(
MakeArray<uint8_t>({0, 2, 3, 9, 1, 5, 7, 3, 4, 0, 5}), base_store));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto downsampled_store, tensorstore::Open({{"driver", "downsample"},
{"base", base_spec},
{"downsample_factors", {2}},
{"downsample_method", "mean"}},
context)
.result());
EXPECT_THAT(tensorstore::Read(downsampled_store |
tensorstore::Dims(0).IndexArraySlice(
MakeArray<Index>({0, 3, 2})))
.result(),
Optional(MakeArray<uint8_t>({1, 5, 3})));
}
TEST(DownsampleTest, JsonSpecArray) {
::nlohmann::json base_spec{
{"driver", "array"},
{"dtype", "float32"},
{"array", {1, 2, 3, 4}},
};
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto store, tensorstore::Open({{"driver", "downsample"},
{"base", base_spec},
{"downsample_factors", {2}},
{"downsample_method", "mean"}})
.result());
EXPECT_THAT(tensorstore::Read(store).result(),
Optional(MakeArray<float>({1.5, 3.5})));
}
TEST(DownsampleTest, JsonSpecArrayRank0) {
::nlohmann::json base_spec{
{"driver", "array"},
{"dtype", "float32"},
{"array", 42},
};
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto store,
tensorstore::Open({{"driver", "downsample"},
{"base", base_spec},
{"downsample_factors", ::nlohmann::json::array_t{}},
{"downsample_method", "mean"}})
.result());
EXPECT_THAT(tensorstore::Read(store).result(),
Optional(tensorstore::MakeScalarArray<float>(42)));
}
TEST(DownsampleTest, JsonSpecErrorMissingBase) {
EXPECT_THAT(
tensorstore::Open({
{"driver", "downsample"},
{"downsample_factors", {2}},
{"downsample_method", "mean"},
})
.result(),
MatchesStatus(absl::StatusCode::kInvalidArgument, ".*\"base\".*"));
}
TEST(DownsampleTest, JsonSpecErrorMissingDownsampleFactors) {
::nlohmann::json base_spec{
{"driver", "array"},
{"dtype", "float32"},
{"array", {1, 2, 3, 4}},
};
EXPECT_THAT(tensorstore::Open({
{"driver", "downsample"},
{"base", base_spec},
{"downsample_method", "mean"},
})
.result(),
MatchesStatus(absl::StatusCode::kInvalidArgument,
".*\"downsample_factors\".*"));
}
TEST(DownsampleTest, JsonSpecErrorDownsampleFactorsInvalidRank) {
::nlohmann::json base_spec{
{"driver", "array"},
{"dtype", "float32"},
{"array", {1, 2, 3, 4}},
};
EXPECT_THAT(tensorstore::Open({
{"driver", "downsample"},
{"base", base_spec},
{"downsample_method", "mean"},
{"downsample_factors", {2, 3}},
})
.result(),
MatchesStatus(absl::StatusCode::kInvalidArgument,
".*\"downsample_factors\": .*rank.*"));
}
TEST(DownsampleTest, JsonSpecErrorDownsampleFactorsZero) {
::nlohmann::json base_spec{
{"driver", "array"},
{"dtype", "float32"},
{"array", {1, 2, 3, 4}},
};
EXPECT_THAT(
tensorstore::Open({
{"driver", "downsample"},
{"base", base_spec},
{"downsample_method", "mean"},
{"downsample_factors", {0}},
})
.result(),
MatchesStatus(absl::StatusCode::kInvalidArgument,
".*\"downsample_factors\":.*Expected .*, but received: 0"));
}
TEST(DownsampleTest, JsonSpecErrorDownsampleFactorsNegative) {
::nlohmann::json base_spec{
{"driver", "array"},
{"dtype", "float32"},
{"array", {1, 2, 3, 4}},
};
EXPECT_THAT(tensorstore::Open({
{"driver", "downsample"},
{"base", base_spec},
{"downsample_method", "mean"},
{"downsample_factors", {-2}},
})
.result(),
MatchesStatus(
absl::StatusCode::kInvalidArgument,
".*\"downsample_factors\":.*Expected .*, but received: -2"));
}
TEST(DownsampleTest, JsonSpecErrorMissingDownsampleMethod) {
::nlohmann::json base_spec{
{"driver", "array"},
{"dtype", "float32"},
{"array", {1, 2, 3, 4}},
};
EXPECT_THAT(tensorstore::Open({
{"driver", "downsample"},
{"base", base_spec},
{"downsample_factors", {2}},
})
.result(),
MatchesStatus(absl::StatusCode::kInvalidArgument,
".*\"downsample_method\".*"));
}
TEST(DownsampleTest, JsonSpecErrorInvalidDownsampleMethod) {
::nlohmann::json base_spec{
{"driver", "array"},
{"dtype", "float32"},
{"array", {1, 2, 3, 4}},
};
EXPECT_THAT(tensorstore::Open({
{"driver", "downsample"},
{"base", base_spec},
{"downsample_factors", {2}},
{"downsample_method", 42},
})
.result(),
MatchesStatus(absl::StatusCode::kInvalidArgument,
".*\"downsample_method\".*42.*"));
}
TEST(DownsampleTest, ErrorOpenWriteOnly) {
::nlohmann::json base_spec{
{"driver", "array"},
{"dtype", "float32"},
{"array", {1, 2, 3, 4}},
};
for (auto mode : {ReadWriteMode::write, ReadWriteMode::read_write}) {
SCOPED_TRACE(tensorstore::StrCat("mode=", mode));
EXPECT_THAT(tensorstore::Open(
{
{"driver", "downsample"},
{"base", base_spec},
{"downsample_factors", {2}},
{"downsample_method", "mean"},
},
mode)
.result(),
MatchesStatus(absl::StatusCode::kInvalidArgument,
".*: only reading is supported"));
}
}
TEST(DownsampleTest, AdapterErrorNegativeDownsampleFactor) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto store, tensorstore::FromArray(MakeArray<float>({1, 2, 5, 7})));
EXPECT_THAT(
tensorstore::Downsample(store, {-2}, DownsampleMethod::kMean),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Downsample factors \\{-2\\} are not all positive"));
}
TEST(DownsampleTest, AdapterErrorZeroDownsampleFactor) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto store, tensorstore::FromArray(MakeArray<float>({1, 2, 5, 7})));
EXPECT_THAT(tensorstore::Downsample(store, {0}, DownsampleMethod::kMean),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Downsample factors \\{0\\} are not all positive"));
}
TEST(DownsampleTest, AdapterErrorDownsampleFactorsRankMismatch) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
TensorStore<float> store,
tensorstore::FromArray(MakeArray<float>({1, 2, 5, 7})));
EXPECT_THAT(
tensorstore::Downsample(store, {2, 2}, DownsampleMethod::kMean),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Number of downsample factors \\(2\\) does not match "
"TensorStore rank \\(1\\)"));
}
TEST(DownsampleTest, AdapterErrorDataType) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto store,
tensorstore::FromArray(MakeArray<std::string>({"a", "b", "c"})));
TENSORSTORE_EXPECT_OK(
tensorstore::Downsample(store, {2}, DownsampleMethod::kStride));
EXPECT_THAT(tensorstore::Downsample(store, {2}, DownsampleMethod::kMean),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Downsample method \"mean\" does not support "
"data type \"string\""));
}
TEST(DownsampleTest, AdapterErrorWriteOnly) {
tensorstore::TensorStore<float, 1> store;
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
store, tensorstore::FromArray(MakeArray<float>({1, 2, 3})));
store = tensorstore::ModeCast<ReadWriteMode::write, tensorstore::unchecked>(
std::move(store));
EXPECT_THAT(tensorstore::Downsample(store, {2}, DownsampleMethod::kMean),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Cannot downsample write-only TensorStore"));
}
TEST(DownsampleTest, ReadError) {
auto mock_driver = MockDriver::Make(tensorstore::ReadWriteMode::dynamic,
tensorstore::dtype_v<float>, 1);
auto mock_store = mock_driver->Wrap(tensorstore::IdentityTransform<1>({10}));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto downsampled_store,
tensorstore::Downsample(mock_store, {2}, DownsampleMethod::kMean));
auto read_future = tensorstore::Read(downsampled_store);
{
auto read_req = mock_driver->read_requests.pop();
EXPECT_EQ(tensorstore::IdentityTransform<1>({10}), read_req.transform);
tensorstore::execution::set_error(
tensorstore::FlowSingleReceiver{std::move(read_req.receiver)},
absl::UnknownError("read error"));
}
EXPECT_THAT(read_future.result(),
MatchesStatus(absl::StatusCode::kUnknown, "read error"));
}
TEST(DownsampleTest, CancelRead) {
auto mock_driver = MockDriver::Make(tensorstore::ReadWriteMode::dynamic,
tensorstore::dtype_v<float>, 1);
auto mock_store = mock_driver->Wrap(tensorstore::IdentityTransform<1>({10}));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto downsampled_store,
tensorstore::Downsample(mock_store, {2}, DownsampleMethod::kMean));
auto read_future = tensorstore::Read(downsampled_store);
auto canceled = std::make_shared<bool>(false);
{
auto read_req = mock_driver->read_requests.pop();
tensorstore::execution::set_starting(read_req.receiver,
[canceled] { *canceled = true; });
read_future = {};
EXPECT_EQ(true, *canceled);
tensorstore::execution::set_done(read_req.receiver);
tensorstore::execution::set_stopping(read_req.receiver);
}
}
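// Exercises the mixed path: the first chunk uses an index-array transform
// and therefore must be buffered, while the second chunk covers a
// contiguous box and is emitted independently; receiving it completes the
// read and triggers emission of the buffered chunk.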
TEST(DownsampleTest, IndependentChunkCompletesBufferedChunk) {
auto mock_driver = MockDriver::Make(tensorstore::ReadWriteMode::dynamic,
tensorstore::dtype_v<float>, 1);
auto mock_store = mock_driver->Wrap(tensorstore::IdentityTransform<1>({4}));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto downsampled_store,
tensorstore::Downsample(mock_store, {2}, DownsampleMethod::kMean));
auto read_future = tensorstore::Read(downsampled_store);
{
auto read_req = mock_driver->read_requests.pop();
tensorstore::execution::set_starting(read_req.receiver, [] {});
tensorstore::execution::set_value(
read_req.receiver, MakeArrayBackedReadChunk(MakeArray<float>({0, 1})),
(tensorstore::IdentityTransform(1) |
tensorstore::Dims(0).IndexArraySlice(MakeArray<Index>({0, 1})))
.value());
tensorstore::execution::set_value(
read_req.receiver,
MakeArrayBackedReadChunk(MakeOffsetArray<float>({2}, {2, 3})),
tensorstore::IdentityTransform(BoxView<1>({2}, {2})));
tensorstore::execution::set_done(read_req.receiver);
tensorstore::execution::set_stopping(read_req.receiver);
}
ASSERT_TRUE(read_future.ready());
EXPECT_THAT(read_future.result(), Optional(MakeArray<float>({0.5, 2.5})));
}
TEST(DownsampleTest, EmptyChunk) {
auto mock_driver = MockDriver::Make(tensorstore::ReadWriteMode::dynamic,
tensorstore::dtype_v<float>, 1);
auto mock_store = mock_driver->Wrap(tensorstore::IdentityTransform<1>({10}));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto downsampled_store,
tensorstore::Downsample(mock_store, {2}, DownsampleMethod::kMean));
auto read_future = tensorstore::Read(downsampled_store);
{
auto read_req = mock_driver->read_requests.pop();
EXPECT_EQ(tensorstore::IdentityTransform<1>({10}), read_req.transform);
tensorstore::execution::set_error(
tensorstore::FlowSingleReceiver{std::move(read_req.receiver)},
absl::UnknownError("read error"));
}
EXPECT_THAT(read_future.result(),
MatchesStatus(absl::StatusCode::kUnknown, "read error"));
}
TEST(DownsampleTest, ReadChunkWithIndexTransform) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto store,
tensorstore::FromArray(MakeArray<float>({
{1, 2, 3, 4, 5},
{6, 7, 8, 9, 10},
{11, 12, 13, 14, 15},
})));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto downsampled_store,
tensorstore::Downsample(store, {2, 3}, DownsampleMethod::kMean));
EXPECT_THAT(tensorstore::Read(downsampled_store).result(),
Optional(MakeArray<float>({
{4.5, 7},
{12, 14.5},
})));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto chunks, CollectReadChunks(downsampled_store).result());
ASSERT_THAT(chunks,
::testing::ElementsAre(Pair(
::testing::_, tensorstore::IdentityTransform<2>({2, 2}))));
auto& entry = chunks[0];
{
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto transform, entry.second | tensorstore::Dims(0).IndexArraySlice(
MakeArray<Index>({0, 1, 1, 0})));
auto target_array = tensorstore::AllocateArray<float>({4, 2});
TENSORSTORE_ASSERT_OK(tensorstore::internal::CopyReadChunk(
entry.first.impl, transform,
tensorstore::TransformedArray(target_array)));
EXPECT_EQ(MakeArray<float>({
{4.5, 7},
{12, 14.5},
{12, 14.5},
{4.5, 7},
}),
target_array);
}
{
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto transform, entry.second | tensorstore::Dims(0, 1).IndexArraySlice(
MakeArray<Index>({0, 1, 1}),
MakeArray<Index>({0, 0, 1})));
auto target_array = tensorstore::AllocateArray<float>({3});
TENSORSTORE_ASSERT_OK(tensorstore::internal::CopyReadChunk(
entry.first.impl, transform,
tensorstore::TransformedArray(target_array)));
EXPECT_EQ(MakeArray<float>({4.5, 12, 14.666666666666666}), target_array);
}
}
TEST(DownsampleTest, ConvertError) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto downsampled_store,
tensorstore::Open({
{"driver", "downsample"},
{"base",
{
{"driver", "cast"},
{"base",
{
{"driver", "array"},
{"dtype", "json"},
{"array", {1, "abc", 2}},
}},
{"dtype", "uint8"},
}},
{"downsample_method", "mean"},
{"downsample_factors", {2}},
})
.result());
auto dest = tensorstore::MakeArray<uint8_t>({0, 0});
EXPECT_THAT(
tensorstore::Read(downsampled_store, dest).result(),
MatchesStatus(
absl::StatusCode::kInvalidArgument,
"Expected integer in the range \\[0, 255\\], but received: \"abc\""));
EXPECT_EQ(dest, MakeArray<uint8_t>({0, 0}));
}
TENSORSTORE_GLOBAL_INITIALIZER {
tensorstore::internal::TestTensorStoreDriverSpecRoundtripOptions options;
options.test_name = "downsample";
options.create_spec = {
{"driver", "downsample"},
{"base",
{
{"driver", "array"},
{"dtype", "float32"},
{"array", {{1, 2, 3}, {4, 5, 6}}},
}},
{"downsample_method", "mean"},
{"downsample_factors", {1, 2}},
};
options.full_spec = {
{"driver", "downsample"},
{"base",
{
{"driver", "array"},
{"array", {{1, 2, 3}, {4, 5, 6}}},
{"transform",
{{"input_inclusive_min", {0, 0}}, {"input_exclusive_max", {2, 3}}}},
}},
{"dtype", "float32"},
{"downsample_method", "mean"},
{"downsample_factors", {1, 2}},
{"transform",
{{"input_inclusive_min", {0, 0}}, {"input_exclusive_max", {2, 2}}}},
};
options.full_base_spec = {
{"driver", "array"},
{"array", {{1, 2, 3}, {4, 5, 6}}},
{"dtype", "float32"},
{"transform",
{{"input_inclusive_min", {0, 0}}, {"input_exclusive_max", {2, 3}}}},
};
options.minimal_spec = options.full_spec;
options.check_not_found_before_create = false;
options.check_not_found_before_commit = false;
options.supported_transaction_modes = {};
tensorstore::internal::RegisterTensorStoreDriverSpecRoundtripTest(
std::move(options));
}
TEST(DownsampleTest, Spec) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto spec, Spec::FromJson({
{"driver", "array"},
{"dtype", "float32"},
{"array", {1, 2, 3, 4}},
}));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto downsampled_spec,
tensorstore::Downsample(spec, {2}, DownsampleMethod::kMean));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto downsampled_store, tensorstore::Open(downsampled_spec).result());
EXPECT_THAT(tensorstore::Read(downsampled_store).result(),
Optional(MakeArray<float>({1.5, 3.5})));
EXPECT_THAT(
downsampled_spec.ToJson(),
Optional(MatchesJson(::nlohmann::json({
{"driver", "downsample"},
{"dtype", "float32"},
{"base",
{
{"driver", "array"},
{"array", {1, 2, 3, 4}},
{"transform",
{{"input_inclusive_min", {0}}, {"input_exclusive_max", {4}}}},
}},
{"downsample_factors", {2}},
{"downsample_method", "mean"},
{"transform",
{{"input_inclusive_min", {0}}, {"input_exclusive_max", {2}}}},
}))));
}
TEST(DownsampleTest, ChunkLayout) {
::nlohmann::json base_spec{
{"driver", "n5"},
{"kvstore", {{"driver", "memory"}}},
{"metadata",
{{"dataType", "uint8"},
{"dimensions", {100, 200}},
{"blockSize", {10, 21}},
{"compression", {{"type", "raw"}}}}},
};
auto context = Context::Default();
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto base_store,
tensorstore::Open(base_spec, context, tensorstore::OpenMode::create)
.result());
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto store, tensorstore::Open({{"driver", "downsample"},
{"base", base_spec},
{"downsample_factors", {2, 3}},
{"downsample_method", "mean"}},
context)
.result());
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto expected_layout,
ChunkLayout::FromJson({
{"write_chunk", {{"shape", {5, 7}}}},
{"read_chunk", {{"shape", {5, 7}}}},
{"grid_origin", {0, 0}},
{"inner_order", {1, 0}},
}));
EXPECT_THAT(store.chunk_layout(), ::testing::Optional(expected_layout));
}
TEST(SpecSchemaTest, Basic) {
TestSpecSchema(
{
{"driver", "downsample"},
{"downsample_method", "mean"},
{"downsample_factors", {1, 2}},
{"base",
{
{"driver", "array"},
{"array", {{1, 2, 3, 4}, {5, 6, 7, 8}}},
{"dtype", "float32"},
}},
{"schema", {{"dimension_units", {"4nm", "5nm"}}}},
},
{
{"rank", 2},
{"dtype", "float32"},
{"domain", {{"shape", {2, 2}}}},
{"chunk_layout", {{"grid_origin", {0, 0}}, {"inner_order", {0, 1}}}},
{"dimension_units", {"4nm", "5nm"}},
});
}
TEST(TensorStoreCreateCheckSchemaTest, Basic) {
TestTensorStoreCreateCheckSchema(
{
{"driver", "downsample"},
{"downsample_method", "mean"},
{"downsample_factors", {1, 2}},
{"base",
{
{"driver", "array"},
{"array", {{1, 2, 3, 4}, {5, 6, 7, 8}}},
{"dtype", "float32"},
}},
{"schema", {{"dimension_units", {"4nm", "5nm"}}}},
},
{
{"rank", 2},
{"dtype", "float32"},
{"domain", {{"shape", {2, 2}}}},
{"chunk_layout", {{"grid_origin", {0, 0}}, {"inner_order", {0, 1}}}},
{"dimension_units", {"4nm", "5nm"}},
});
}
TEST(DownsampleTest, DomainSpecified) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto base_spec,
tensorstore::Spec::FromJson({
{"driver", "zarr"},
{"kvstore", {{"driver", "memory"}}},
}));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto downsampled_spec,
tensorstore::Downsample(base_spec, {2, 1}, DownsampleMethod::kMean));
TENSORSTORE_ASSERT_OK(
downsampled_spec.Set(tensorstore::Schema::Shape({10, 10})));
EXPECT_THAT(downsampled_spec.ToJson(),
::testing::Optional(MatchesJson({
{"driver", "downsample"},
{"base",
{
{"driver", "zarr"},
{"kvstore", {{"driver", "memory"}}},
{"schema",
{
{"domain",
{{"inclusive_min", {{"-inf"}, 0}},
{"exclusive_max", {{"+inf"}, 10}}}},
}},
{"transform",
{
{"input_exclusive_max", {{"+inf"}, {10}}},
{"input_inclusive_min", {0, 0}},
}},
}},
{"downsample_factors", {2, 1}},
{"downsample_method", "mean"},
{"schema",
{
{"domain",
{
{"inclusive_min", {0, 0}},
{"exclusive_max", {10, 10}},
}},
}},
{"transform",
{{"input_exclusive_max", {10, 10}},
{"input_inclusive_min", {0, 0}}}},
})));
}
TEST(DownsampleTest, FillValueNotSpecified) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto base_store,
tensorstore::Open(
{
{"driver", "zarr"},
{"kvstore", {{"driver", "memory"}}},
{"metadata", {{"dtype", {{"x", "<u4", {4, 3}}}}}},
},
tensorstore::OpenMode::create,
tensorstore::Schema::Shape({100, 4, 3}))
.result());
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto store,
tensorstore::Downsample(base_store, {1, 2, 1},
tensorstore::DownsampleMethod::kMean));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto fill_value, store.fill_value());
EXPECT_FALSE(fill_value.valid());
}
TEST(DownsampleTest, FillValueSpecified) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto base_store,
tensorstore::Open(
{
{"driver", "zarr"},
{"kvstore", {{"driver", "memory"}}},
{"metadata", {{"dtype", {{"x", "<u4", {4, 3}}}}}},
},
tensorstore::OpenMode::create,
tensorstore::Schema::Shape({100, 4, 3}),
tensorstore::Schema::FillValue(tensorstore::MakeArray<uint32_t>(
{{1, 2, 3}, {40, 50, 60}, {7, 8, 9}, {100, 110, 120}})))
.result());
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto store,
tensorstore::Downsample(base_store, {1, 2, 1},
tensorstore::DownsampleMethod::kMean));
EXPECT_THAT(store.fill_value(),
::testing::Optional(tensorstore::MakeArray<uint32_t>(
{{20, 26, 32}, {54, 59, 64}})));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto transformed, store | tensorstore::Dims(2).SizedInterval(1, 2));
EXPECT_THAT(transformed.fill_value(),
::testing::Optional(
tensorstore::MakeArray<uint32_t>({{26, 32}, {59, 64}})));
}
TEST(DownsampleTest, DimensionUnits) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto base_store, tensorstore::FromArray(
tensorstore::MakeArray<int>({{1, 2, 3}, {4, 5, 6}}),
tensorstore::DimensionUnitsVector{"4nm", "5nm"}));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto store,
tensorstore::Downsample(base_store, {1, 2},
tensorstore::DownsampleMethod::kMean));
EXPECT_THAT(store.dimension_units(),
::testing::Optional(::testing::ElementsAre(
tensorstore::Unit("4nm"), tensorstore::Unit("10nm"))));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/driver/downsample/downsample.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/driver/downsample/downsample_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
02c13d34-4aaf-42ef-bae3-8c05db8365ff | cpp | tensorflow/tensorflow | lrn_op | tensorflow/core/kernels/lrn_op.cc | tensorflow/core/kernels/lrn_op_test.cc | #define EIGEN_USE_THREADS
#include "unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/core/framework/bounds_check.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/register_types.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/kernels/ops_util.h"
#include "tensorflow/core/lib/core/errors.h"
#if defined(TENSORFLOW_USE_CUSTOM_CONTRACTION_KERNEL)
#include "xla/tsl/framework/contraction/eigen_contraction_kernel.h"
#endif
#if !defined(IS_MOBILE_PLATFORM)
#include "tensorflow/core/util/work_sharder.h"
#endif
#if GOOGLE_CUDA
#include "third_party/gpus/cuda/include/cuda.h"
#endif
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
#include "tensorflow/core/kernels/conv_2d.h"
#include "tensorflow/core/kernels/gpu_utils.h"
#if TENSORFLOW_USE_ROCM
#include "tensorflow/core/kernels/conv_ops_gpu.h"
#endif
#include "tensorflow/core/platform/stream_executor.h"
#include "tensorflow/core/util/stream_executor_util.h"
#endif
namespace tensorflow {
namespace {
const int kSingleThreadedLRNDepthCutoff = 384;
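// Fills `result` with a `depth` x `depth` band matrix containing ones in a
// band of width (2 * depth_radius + 1) around the diagonal.  Contracting
// the squared input with this matrix computes, for each depth index, the
// sum of squares over its normalization window in a single matrix multiply.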
template <typename T>
void GetBandMatrix(int depth, int depth_radius,
Eigen::Tensor<T, 2, Eigen::RowMajor>* result) {
result->setZero();
for (int row = 0; row < depth; ++row) {
const int begin = std::max<int>(0, row - depth_radius);
const int end = std::min<int>(depth, row + depth_radius + 1);
Eigen::DSizes<Eigen::DenseIndex, 2> start(row, begin);
Eigen::DSizes<Eigen::DenseIndex, 2> sizes(1, end - begin);
result->slice(start, sizes).setConstant(T(1));
}
}
}
typedef Eigen::ThreadPoolDevice CPUDevice;
typedef Eigen::GpuDevice GPUDevice;
template <typename Device, typename T>
struct LaunchLRN;
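// CPU launcher: computes the windowed sum of squares either via a
// band-matrix contraction (see GetBandMatrix) or via SingleThreadedLRN,
// then applies out = in * (bias + alpha * sum)^-beta, with fast paths for
// beta == 1 and beta == 0.5.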
template <typename T>
struct LaunchLRN<CPUDevice, T> {
LaunchLRN(int depth_radius, T bias, T alpha, T beta)
: depth_radius_(depth_radius), bias_(bias), alpha_(alpha), beta_(beta) {}
void launch(OpKernelContext* context, OpKernel* kernel, const Tensor& in,
Tensor* output) {
const int batch = static_cast<int>(in.dim_size(0));
const int rows = static_cast<int>(in.dim_size(1));
const int cols = static_cast<int>(in.dim_size(2));
const int depth = static_cast<int>(in.dim_size(3));
#if defined(IS_MOBILE_PLATFORM)
SingleThreadedLRN(in, batch, rows, cols, depth, output);
#else
if (depth > kSingleThreadedLRNDepthCutoff &&
(beta_ == T(0.5) || beta_ == T(1))) {
SingleThreadedLRN(in, batch, rows, cols, depth, output);
return;
}
const int nodes = cols * rows;
auto in_shaped = in.shaped<T, 2>({nodes * batch, depth});
Eigen::Tensor<T, 2, Eigen::RowMajor> multiplier(depth, depth);
GetBandMatrix<T>(depth, depth_radius_, &multiplier);
auto out_shaped = output->shaped<T, 2>({nodes * batch, depth});
Eigen::array<DimPair, 1> dims = {{DimPair(1, 0)}};
auto tmp = in_shaped.square().contract(multiplier, dims) * alpha_ + bias_;
if (beta_ == T(1)) {
out_shaped.device(context->eigen_cpu_device()) =
in_shaped * tmp.inverse();
} else if (beta_ == T(0.5)) {
out_shaped.device(context->eigen_cpu_device()) = in_shaped * tmp.rsqrt();
} else {
out_shaped.device(context->eigen_cpu_device()) =
in_shaped * (tmp.log() * -beta_).exp();
}
#endif
}
private:
typedef typename Eigen::Tensor<T, 1, Eigen::RowMajor>::DimensionPair DimPair;
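  // LRN over the depth dimension using a sliding-window running sum of
  // squares per spatial position, avoiding the depth x depth band-matrix
  // contraction.  Used unconditionally on mobile, and on other platforms
  // for large depth when beta is 0.5 or 1.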
void SingleThreadedLRN(const Tensor& in, const int batch, const int rows,
const int cols, const int depth, Tensor* out) {
Eigen::Map<const Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic>> data_in(
in.flat<T>().data(), depth, batch * rows * cols);
Eigen::Map<Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic>> data_out(
out->flat<T>().data(), depth, batch * rows * cols);
const int double_depth_radius = depth_radius_ * 2;
Eigen::Matrix<T, Eigen::Dynamic, 1> padded_square(data_in.rows() +
double_depth_radius);
padded_square.setZero();
for (int r = 0; r < data_in.cols(); ++r) {
padded_square.block(depth_radius_, 0, data_out.rows(), 1) =
data_in.col(r).cwiseProduct(data_in.col(r)) * alpha_;
T accumulated_scale(0);
for (int i = 0; i < double_depth_radius; ++i) {
accumulated_scale += padded_square(i);
}
for (int i = 0; i < data_in.rows(); ++i) {
accumulated_scale += padded_square(i + double_depth_radius);
data_out(i, r) = bias_ + accumulated_scale;
accumulated_scale -= padded_square(i);
}
}
if (beta_ == T(1)) {
data_out.array() = data_in.array() * data_out.array().inverse();
} else if (beta_ == T(0.5)) {
data_out.array() = data_in.array() * data_out.array().rsqrt();
} else {
data_out.array() =
data_in.array() * (data_out.array().log() * -beta_).exp();
}
}
int depth_radius_;
T bias_;
T alpha_;
T beta_;
};
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
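// GPU launcher.  The CUDA path hands the NHWC input to cuDNN's LRN
// primitive directly (kBatchYXDepth layout); the ROCm path transposes to
// NCHW (kBatchDepthYX) for MIOpen and transposes the result back.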
template <typename T>
struct LaunchLRN<GPUDevice, T> {
LaunchLRN(int depth_radius, T bias, T alpha, T beta)
: depth_radius_(depth_radius), bias_(bias), alpha_(alpha), beta_(beta) {}
void launch(OpKernelContext* context, OpKernel* kernel, const Tensor& in,
Tensor* output) {
#if GOOGLE_CUDA
OP_REQUIRES(
context, beta_ >= 0.01,
errors::InvalidArgument("cuDNN requires beta >= 0.01, got: ", beta_));
OP_REQUIRES(
context, depth_radius_ > 0 && depth_radius_ <= 7,
errors::InvalidArgument("cuDNN requires depth_radius in [1, 7], got: ",
depth_radius_));
OP_REQUIRES(
context, bias_ >= 1e-5,
errors::InvalidArgument("cuDNN requires bias >= 1e-5, got: ", bias_));
const int batch = static_cast<int>(in.dim_size(0));
const int rows = static_cast<int>(in.dim_size(1));
const int cols = static_cast<int>(in.dim_size(2));
const int depth = static_cast<int>(in.dim_size(3));
se::dnn::BatchDescriptor dimensions_desc;
dimensions_desc.set_count(batch)
.set_height(rows)
.set_width(cols)
.set_feature_map_count(depth)
.set_layout(se::dnn::DataLayout::kBatchYXDepth);
se::dnn::NormalizeDescriptor normalize_desc;
normalize_desc.set_bias(bias_)
.set_range(depth_radius_)
.set_alpha(alpha_)
.set_beta(beta_);
auto input_data = StreamExecutorUtil::AsDeviceMemory<T>(in);
auto output_data = StreamExecutorUtil::AsDeviceMemory<T>(*output);
auto* stream = context->op_device_context()->stream();
OP_REQUIRES(context, stream, errors::Internal("No GPU stream available."));
auto dnn = stream->parent()->AsDnn();
OP_REQUIRES(context, dnn != nullptr,
absl::InternalError("No DNN support for stream."));
bool status = dnn->DoNormalizeWithDimensions(
stream, normalize_desc, dimensions_desc, input_data, &output_data);
OP_REQUIRES(context, status,
errors::Internal("NormalizeWithDimensions launch failed"));
#elif TENSORFLOW_USE_ROCM
const int batch = static_cast<int>(in.dim_size(0));
const int rows = static_cast<int>(in.dim_size(1));
const int cols = static_cast<int>(in.dim_size(2));
const int depth = static_cast<int>(in.dim_size(3));
Tensor transformed_input;
TensorShape transformed_input_shape;
OP_REQUIRES_OK(
context, ShapeFromFormatWithStatus(FORMAT_NCHW, in.shape(), FORMAT_NHWC,
&transformed_input_shape));
OP_REQUIRES_OK(context, context->allocate_temp(DataTypeToEnum<T>::value,
transformed_input_shape,
&transformed_input));
functor::NHWCToNCHW<GPUDevice, T, 4>()(context->eigen_device<GPUDevice>(),
in.tensor<T, 4>(),
transformed_input.tensor<T, 4>());
Tensor transformed_output;
TensorShape transformed_output_shape;
OP_REQUIRES_OK(context, ShapeFromFormatWithStatus(
FORMAT_NCHW, output->shape(), FORMAT_NHWC,
&transformed_output_shape));
OP_REQUIRES_OK(context, context->allocate_temp(DataTypeToEnum<T>::value,
transformed_output_shape,
&transformed_output));
stream_executor::dnn::BatchDescriptor dimensions_desc;
dimensions_desc.set_count(batch)
.set_height(rows)
.set_width(cols)
.set_feature_map_count(depth)
.set_layout(stream_executor::dnn::DataLayout::kBatchDepthYX);
stream_executor::dnn::NormalizeDescriptor normalize_desc;
normalize_desc.set_bias(bias_)
.set_range(depth_radius_)
.set_alpha(alpha_)
.set_beta(beta_);
auto input_data =
AsDeviceMemory(transformed_input.template flat<T>().data(),
transformed_input.template flat<T>().size());
auto output_data =
AsDeviceMemory(transformed_output.template flat<T>().data(),
transformed_output.template flat<T>().size());
auto* stream = context->op_device_context()->stream();
OP_REQUIRES(context, stream, errors::Internal("No GPU stream available."));
auto dnn = stream->parent()->AsDnn();
OP_REQUIRES(context, dnn != nullptr,
absl::InternalError("No DNN support for stream."));
bool status = dnn->DoNormalizeWithDimensions(
stream, normalize_desc, dimensions_desc, input_data, &output_data);
OP_REQUIRES(context, status,
errors::Internal("NormalizeWithDimensions launch failed"));
auto toConstTensor = [](const Tensor& x) -> const Tensor { return x; };
functor::NCHWToNHWC<GPUDevice, T, 4>()(
context->eigen_device<GPUDevice>(),
toConstTensor(transformed_output).template tensor<T, 4>(),
output->tensor<T, 4>());
#endif
}
int depth_radius_;
T bias_;
T alpha_;
T beta_;
};
#endif
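// LRN op kernel: validates that the input is 4-D NHWC and small enough to
// index with `int`, allocates the output, and dispatches to the
// device-specific `LaunchLRN`.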
template <typename Device, typename T>
class LRNOp : public OpKernel {
public:
explicit LRNOp(OpKernelConstruction* context) : OpKernel(context) {
int64_t depth_radius64;
OP_REQUIRES_OK(context, context->GetAttr("depth_radius", &depth_radius64));
OP_REQUIRES(
context,
FastBoundsCheck(depth_radius64, std::numeric_limits<int>::max()),
errors::InvalidArgument("depth_radius = ", depth_radius64,
" larger than int max"));
depth_radius_ = static_cast<int>(depth_radius64);
float tmp;
OP_REQUIRES_OK(context, context->GetAttr("bias", &tmp));
bias_ = T(tmp);
OP_REQUIRES_OK(context, context->GetAttr("alpha", &tmp));
alpha_ = T(tmp);
OP_REQUIRES_OK(context, context->GetAttr("beta", &tmp));
beta_ = T(tmp);
}
void Compute(OpKernelContext* context) override {
const Tensor& in = context->input(0);
OP_REQUIRES(context, in.dims() == 4,
errors::InvalidArgument("in must be 4-dimensional"));
OP_REQUIRES(
context,
FastBoundsCheck(in.NumElements(), std::numeric_limits<int>::max()),
errors::InvalidArgument("argument to LRN too large"));
const int batch = static_cast<int>(in.dim_size(0));
const int rows = static_cast<int>(in.dim_size(1));
const int cols = static_cast<int>(in.dim_size(2));
const int depth = static_cast<int>(in.dim_size(3));
OP_REQUIRES(context,
(depth + depth_radius_) <= std::numeric_limits<int>::max(),
errors::InvalidArgument("depth ", depth, " + depth_radius ",
depth_radius_, " exceeds int max."));
Tensor* output = nullptr;
OP_REQUIRES_OK(context,
context->allocate_output(
0, TensorShape({batch, rows, cols, depth}), &output));
LaunchLRN<Device, T> launcher(depth_radius_, bias_, alpha_, beta_);
launcher.launch(context, this, in, output);
}
private:
int depth_radius_;
T bias_;
T alpha_;
T beta_;
};
#define REGISTER_CPU(T) \
REGISTER_KERNEL_BUILDER( \
Name("LRN").Device(DEVICE_CPU).TypeConstraint<T>("T"), \
LRNOp<CPUDevice, T>);
TF_CALL_float(REGISTER_CPU);
TF_CALL_half(REGISTER_CPU);
#undef REGISTER_CPU
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
#define REGISTER_GPU(T) \
REGISTER_KERNEL_BUILDER( \
Name("LRN").Device(DEVICE_GPU).TypeConstraint<T>("T"), \
LRNOp<GPUDevice, T>);
TF_CALL_float(REGISTER_GPU);
#undef REGISTER_GPU
#endif
#if !defined(IS_MOBILE_PLATFORM)
template <typename Device, typename T>
struct LaunchLRNGrad;
template <typename T>
struct LaunchLRNGrad<CPUDevice, T> {
LaunchLRNGrad(int depth_radius, T bias, T alpha, T beta)
: depth_radius_(depth_radius),
bias_(bias),
alpha_(alpha),
beta_(beta),
alpha_beta_2_(T(-2) * alpha * beta) {}
void launch(OpKernelContext* context, OpKernel* kernel,
const Tensor& in_grads, const Tensor& in_image,
const Tensor& out_image, Tensor* output) {
const int64_t batch = in_grads.dim_size(0);
const int64_t rows = in_grads.dim_size(1);
const int64_t cols = in_grads.dim_size(2);
const int64_t depth = in_grads.dim_size(3);
const auto nodes = cols * rows;
auto grads_shaped = in_grads.shaped<T, 2>({nodes * batch, depth});
auto in_shaped = in_image.shaped<T, 2>({nodes * batch, depth});
auto activations = out_image.shaped<T, 2>({nodes * batch, depth});
auto out_shaped = output->shaped<T, 2>({nodes * batch, depth});
out_shaped.setZero();
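    // Each shard accumulates, for every input position k in the window of
    // output position j:
    //   norm   = bias + alpha * sum_{k in window(j)} in(k)^2
    //   dx(k) += grad(j) * (-2 * alpha * beta * in(k) * out(j) / norm
    //                       + (k == j ? norm^-beta : 0))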
auto shard = [this, activations, in_shaped, grads_shaped, out_shaped,
depth](int64_t begin, int64_t end) {
for (int64_t i = begin; i < end; ++i) {
for (int64_t j = 0; j < depth; ++j) {
T gs = grads_shaped(i, j);
if (gs == T(0)) continue;
int64_t depth_begin = std::max<int64_t>(0, j - depth_radius_);
int64_t depth_end = std::min<int64_t>(depth, j + depth_radius_ + 1);
T norm(0);
for (int64_t k = depth_begin; k < depth_end; ++k) {
norm += in_shaped(i, k) * in_shaped(i, k);
}
norm = alpha_ * norm + bias_;
DCHECK_GT(norm, T(1e-6));
T pre_computed_pow = Eigen::numext::pow(norm, -beta_);
T activations_ab2 = alpha_beta_2_ * activations(i, j);
for (int64_t k = depth_begin; k < depth_end; ++k) {
T dyi = in_shaped(i, k) * activations_ab2 / norm;
if (k == j) {
dyi += pre_computed_pow;
}
dyi *= gs;
const_cast<typename TTypes<T, 2>::Tensor&>(out_shaped)(i, k) += dyi;
}
}
}
};
auto worker_threads = *(context->device()->tensorflow_cpu_worker_threads());
Shard(worker_threads.num_threads, worker_threads.workers, nodes * batch,
depth * depth, shard);
}
int depth_radius_;
T bias_;
T alpha_;
T beta_;
T alpha_beta_2_;
};
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
template <typename T>
struct LaunchLRNGrad<GPUDevice, T> {
LaunchLRNGrad(int depth_radius, T bias, T alpha, T beta)
: depth_radius_(depth_radius), bias_(bias), alpha_(alpha), beta_(beta) {}
void launch(OpKernelContext* context, OpKernel* kernel,
const Tensor& in_grads, const Tensor& in_image,
const Tensor& out_image, Tensor* output) {
#if GOOGLE_CUDA
OP_REQUIRES(
context, beta_ >= 0.01,
errors::InvalidArgument("cuDNN requires beta >= 0.01, got: ", beta_));
OP_REQUIRES(
context, depth_radius_ > 0 && depth_radius_ <= 7,
errors::InvalidArgument("cuDNN requires depth_radius in [1, 7], got: ",
depth_radius_));
OP_REQUIRES(
context, bias_ >= 1e-5,
errors::InvalidArgument("cuDNN requires bias >= 1e-5, got: ", bias_));
const int64_t batch = in_grads.dim_size(0);
const int64_t rows = in_grads.dim_size(1);
const int64_t cols = in_grads.dim_size(2);
const int64_t depth = in_grads.dim_size(3);
se::dnn::BatchDescriptor dimensions_desc;
dimensions_desc.set_count(batch)
.set_height(rows)
.set_width(cols)
.set_feature_map_count(depth)
.set_layout(se::dnn::DataLayout::kBatchYXDepth);
se::dnn::NormalizeDescriptor normalize_desc;
normalize_desc.set_bias(bias_)
.set_range(depth_radius_)
.set_alpha(alpha_)
.set_beta(beta_);
auto input_grads_data = StreamExecutorUtil::AsDeviceMemory<T>(in_grads);
auto input_image_data = StreamExecutorUtil::AsDeviceMemory<T>(in_image);
auto output_image_data = StreamExecutorUtil::AsDeviceMemory<T>(out_image);
auto output_grads_data = StreamExecutorUtil::AsDeviceMemory<T>(*output);
auto* stream = context->op_device_context()->stream();
OP_REQUIRES(context, stream, errors::Internal("No GPU stream available."));
auto dnn = stream->parent()->AsDnn();
OP_REQUIRES(context, dnn != nullptr,
absl::InternalError("No DNN support for stream."));
bool status = dnn->DoNormalizeBackwardWithDimensions(
stream, normalize_desc, dimensions_desc, input_image_data,
output_image_data, input_grads_data, &output_grads_data,
nullptr);
OP_REQUIRES(
context, status,
errors::Internal("NormalizeBackwardWithDimensions launch failed"));
#elif TENSORFLOW_USE_ROCM
    const int64_t batch = in_grads.dim_size(0);
    const int64_t rows = in_grads.dim_size(1);
    const int64_t cols = in_grads.dim_size(2);
    const int64_t depth = in_grads.dim_size(3);
Tensor transformed_in_grads;
TensorShape transformed_in_grads_shape;
OP_REQUIRES_OK(context, ShapeFromFormatWithStatus(
FORMAT_NCHW, in_grads.shape(), FORMAT_NHWC,
&transformed_in_grads_shape));
OP_REQUIRES_OK(context, context->allocate_temp(DataTypeToEnum<T>::value,
transformed_in_grads_shape,
&transformed_in_grads));
functor::NHWCToNCHW<GPUDevice, T, 4>()(context->eigen_device<GPUDevice>(),
in_grads.tensor<T, 4>(),
transformed_in_grads.tensor<T, 4>());
Tensor transformed_in_image;
TensorShape transformed_in_image_shape;
OP_REQUIRES_OK(context, ShapeFromFormatWithStatus(
FORMAT_NCHW, in_image.shape(), FORMAT_NHWC,
&transformed_in_image_shape));
OP_REQUIRES_OK(context, context->allocate_temp(DataTypeToEnum<T>::value,
transformed_in_image_shape,
&transformed_in_image));
functor::NHWCToNCHW<GPUDevice, T, 4>()(context->eigen_device<GPUDevice>(),
in_image.tensor<T, 4>(),
transformed_in_image.tensor<T, 4>());
Tensor transformed_out_image;
TensorShape transformed_out_image_shape;
OP_REQUIRES_OK(context, ShapeFromFormatWithStatus(
FORMAT_NCHW, out_image.shape(), FORMAT_NHWC,
&transformed_out_image_shape));
OP_REQUIRES_OK(context, context->allocate_temp(DataTypeToEnum<T>::value,
transformed_out_image_shape,
&transformed_out_image));
functor::NHWCToNCHW<GPUDevice, T, 4>()(
context->eigen_device<GPUDevice>(), out_image.tensor<T, 4>(),
transformed_out_image.tensor<T, 4>());
Tensor transformed_output;
TensorShape transformed_output_shape;
OP_REQUIRES_OK(context, ShapeFromFormatWithStatus(
FORMAT_NCHW, output->shape(), FORMAT_NHWC,
&transformed_output_shape));
OP_REQUIRES_OK(context, context->allocate_temp(DataTypeToEnum<T>::value,
transformed_output_shape,
&transformed_output));
stream_executor::dnn::BatchDescriptor dimensions_desc;
dimensions_desc.set_count(batch)
.set_height(rows)
.set_width(cols)
.set_feature_map_count(depth)
.set_layout(stream_executor::dnn::DataLayout::kBatchDepthYX);
stream_executor::dnn::NormalizeDescriptor normalize_desc;
normalize_desc.set_bias(bias_)
.set_range(depth_radius_)
.set_alpha(alpha_)
.set_beta(beta_);
auto input_grads_data =
AsDeviceMemory(transformed_in_grads.template flat<T>().data(),
transformed_in_grads.template flat<T>().size());
auto input_image_data =
AsDeviceMemory(transformed_in_image.template flat<T>().data(),
transformed_in_image.template flat<T>().size());
auto output_image_data =
AsDeviceMemory(transformed_out_image.template flat<T>().data(),
transformed_out_image.template flat<T>().size());
auto output_grads_data =
AsDeviceMemory(transformed_output.template flat<T>().data(),
transformed_output.template flat<T>().size());
auto* stream = context->op_device_context()->stream();
OP_REQUIRES(context, stream, errors::Internal("No GPU stream available."));
    static int64_t NormalizeBackwardScratchSize = GetDnnWorkspaceLimit(
        "TF_CUDNN_WORKSPACE_LIMIT_IN_MB", 1LL << 32);  // 4GB by default
DnnScratchAllocator scratch_allocator(NormalizeBackwardScratchSize,
context);
auto dnn = stream->parent()->AsDnn();
OP_REQUIRES(context, dnn != nullptr,
absl::InternalError("No DNN support for stream."));
bool status = dnn->DoNormalizeBackwardWithDimensions(
stream, normalize_desc, dimensions_desc, input_image_data,
output_image_data, input_grads_data, &output_grads_data,
nullptr, &scratch_allocator);
OP_REQUIRES(
context, status,
errors::Internal("NormalizeBackwardWithDimensions launch failed"));
auto toConstTensor = [](const Tensor& x) -> const Tensor { return x; };
functor::NCHWToNHWC<GPUDevice, T, 4>()(
context->eigen_device<GPUDevice>(),
toConstTensor(transformed_output).template tensor<T, 4>(),
output->tensor<T, 4>());
#endif
}
int depth_radius_;
T bias_;
T alpha_;
T beta_;
};
#endif
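// Gradient kernel for LRN: given the incoming gradients and the original
// input and output images (all 4-D NHWC with matching shapes), computes
// the gradient with respect to the input.  Not compiled on mobile.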
template <typename Device, typename T>
class LRNGradOp : public OpKernel {
public:
explicit LRNGradOp(OpKernelConstruction* context) : OpKernel(context) {
int64_t depth_radius64;
OP_REQUIRES_OK(context, context->GetAttr("depth_radius", &depth_radius64));
OP_REQUIRES(
context,
FastBoundsCheck(depth_radius64, std::numeric_limits<int>::max()),
errors::InvalidArgument("depth_radius = ", depth_radius64,
" larger than int max"));
depth_radius_ = static_cast<int>(depth_radius64);
float tmp;
OP_REQUIRES_OK(context, context->GetAttr("bias", &tmp));
bias_ = T(tmp);
OP_REQUIRES_OK(context, context->GetAttr("alpha", &tmp));
alpha_ = T(tmp);
OP_REQUIRES_OK(context, context->GetAttr("beta", &tmp));
beta_ = T(tmp);
}
void Compute(OpKernelContext* context) override {
const Tensor& in_grads = context->input(0);
const Tensor& in_image = context->input(1);
const Tensor& out_image = context->input(2);
OP_REQUIRES(context, in_grads.dims() == 4 && in_image.dims() == 4,
errors::InvalidArgument("inputs must be 4-dimensional"));
const int64_t batch = in_grads.dim_size(0);
const int64_t rows = in_grads.dim_size(1);
const int64_t cols = in_grads.dim_size(2);
const int64_t depth = in_grads.dim_size(3);
OP_REQUIRES(
context,
in_image.dim_size(0) == batch && in_image.dim_size(1) == rows &&
in_image.dim_size(2) == cols && in_image.dim_size(3) == depth &&
out_image.dim_size(0) == batch && out_image.dim_size(1) == rows &&
out_image.dim_size(2) == cols && out_image.dim_size(3) == depth &&
out_image.dims() == 4,
errors::InvalidArgument(
"input_grads, input_image, and out_image should have the same "
"shape"));
Tensor* output = nullptr;
OP_REQUIRES_OK(context,
context->allocate_output(
0, TensorShape({batch, rows, cols, depth}), &output));
LaunchLRNGrad<Device, T> launcher(depth_radius_, bias_, alpha_, beta_);
launcher.launch(context, this, in_grads, in_image, out_image, output);
}
private:
int depth_radius_;
T bias_;
T alpha_;
T beta_;
};
#define REGISTER_CPU(T) \
REGISTER_KERNEL_BUILDER( \
Name("LRNGrad").Device(DEVICE_CPU).TypeConstraint<T>("T"), \
LRNGradOp<CPUDevice, T>);
TF_CALL_float(REGISTER_CPU);
TF_CALL_half(REGISTER_CPU);
#undef REGISTER_CPU
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
#define REGISTER_GPU(T) \
REGISTER_KERNEL_BUILDER( \
Name("LRNGrad").Device(DEVICE_GPU).TypeConstraint<T>("T"), \
LRNGradOp<GPUDevice, T>);
TF_CALL_float(REGISTER_GPU);
#undef REGISTER_GPU
#endif
#endif
} | #include <functional>
#include <memory>
#include "tensorflow/core/common_runtime/kernel_benchmark_testlib.h"
#include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/fake_input.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/graph/node_builder.h"
#include "tensorflow/core/kernels/ops_testutil.h"
#include "tensorflow/core/kernels/ops_util.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/random/simple_philox.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
namespace tensorflow {
static const float tol_ = 1e-4;
class LRNFloatTest : public OpsTestBase {
protected:
LRNFloatTest() : philox_(123, 17), rand_(&philox_) {}
int GetIntAttr(const string& name) {
int value;
TF_CHECK_OK(GetNodeAttr(*node_def(), name, &value));
return value;
}
float GetFloatAttr(const string& name) {
float value;
TF_CHECK_OK(GetNodeAttr(*node_def(), name, &value));
return value;
}
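  // Compares the kernel output against a direct reference implementation:
  //   out(d) = in(d) / (bias + alpha * sum_{r in [d-R, d+R]} in(r)^2)^beta
  // Returns true iff every element matches within tol_.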
bool Compare() {
const auto& input = GetInput(0);
const int64_t batch_size = input.dim_size(0);
const int64_t rows = input.dim_size(1);
const int64_t cols = input.dim_size(2);
const int64_t depth = input.dim_size(3);
const int64_t rest = cols * rows * batch_size;
const int64_t depth_radius = GetIntAttr("depth_radius");
const float bias = GetFloatAttr("bias");
const float alpha = GetFloatAttr("alpha");
const float beta = GetFloatAttr("beta");
Eigen::Tensor<float, 4, Eigen::RowMajor> expected(batch_size, rows, cols,
depth);
auto out = expected.reshape(Eigen::DSizes<Eigen::Index, 2>{rest, depth});
auto in = input.shaped<float, 2>({rest, depth});
for (int64_t i = 0; i < rest; ++i) {
Eigen::Tensor<float, 1, Eigen::RowMajor> out_col(depth);
for (int64_t d = 0; d < depth; ++d) {
float denom = 0.0f;
for (int64_t r = std::max(int64_t{0}, d - depth_radius);
r < std::min(depth, d + depth_radius + 1); ++r) {
denom += in(i, r) * in(i, r);
}
denom = std::pow(denom * alpha + bias, beta);
out_col(d) = in(i, d) / denom;
}
out.chip<0>(i) = out_col;
}
auto actual = GetOutput(0)->tensor<float, 4>();
Eigen::Tensor<float, 0, Eigen::RowMajor> sum =
((expected - actual).abs() > actual.constant(tol_))
.select(actual.constant(1), actual.constant(0))
.sum();
return sum() == 0;
}
random::PhiloxRandom philox_;
random::SimplePhilox rand_;
};
TEST_F(LRNFloatTest, Depth96) {
TF_ASSERT_OK(NodeDefBuilder("lrn_op", "LRN")
.Input(FakeInput())
.Attr("depth_radius", 5)
.Attr("bias", 1.0f)
.Attr("alpha", 0.1f)
.Attr("beta", 2.0f)
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
AddInput<float>(TensorShape({1, 1, 1, 96}),
[](int i) -> float { return i + 1; });
TF_ASSERT_OK(RunOpKernel());
auto actual = GetOutput(0)->tensor<float, 4>();
EXPECT_NEAR(1. / (10.1 * 10.1), actual(0, 0, 0, 0), tol_);
EXPECT_NEAR(6. / (51.6 * 51.6), actual(0, 0, 0, 5), tol_);
EXPECT_NEAR(64. / (2272.1 * 2272.1), actual(0, 0, 0, 63), tol_);
EXPECT_NEAR(65. / (2736.5 * 2736.5), actual(0, 0, 0, 64), tol_);
EXPECT_NEAR(96. / (5248.1 * 5248.1), actual(0, 0, 0, 95), tol_);
EXPECT_TRUE(Compare());
}
TEST_F(LRNFloatTest, Depth16) {
TF_ASSERT_OK(NodeDefBuilder("lrn_op", "LRN")
.Input(FakeInput())
.Attr("depth_radius", 5)
.Attr("bias", 1.0f)
.Attr("alpha", 0.1f)
.Attr("beta", 2.0f)
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
AddInput<float>(TensorShape({1, 1, 1, 16}),
[](int i) -> float { return i + 1; });
TF_ASSERT_OK(RunOpKernel());
auto actual = GetOutput(0)->tensor<float, 4>();
EXPECT_NEAR(1. / (10.1 * 10.1), actual(0, 0, 0, 0), tol_);
EXPECT_NEAR(6. / (51.6 * 51.6), actual(0, 0, 0, 5), tol_);
EXPECT_NEAR(16. / (112.1 * 112.1), actual(0, 0, 0, 15), tol_);
EXPECT_TRUE(Compare());
}
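// Draws a standard-normal sample via the Marsaglia polar method: rejection
// samples a point inside the unit circle, then scales it into a Gaussian.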
static double RndGaussian(random::SimplePhilox* rnd) {
double x1, x2;
double r;
do {
x1 = 2 * rnd->RandDouble() - 1;
x2 = 2 * rnd->RandDouble() - 1;
r = x1 * x1 + x2 * x2;
} while (r == 0 || r >= 1.0);
double w = sqrt(-2.0 * log(r) / r);
return x1 * w;
}
#define TCASE(NAME, DEPTH, BATCH, DEPTH_RADIUS, BIAS, ALPHA, BETA) \
TEST_F(LRNFloatTest, NAME) { \
TF_ASSERT_OK(NodeDefBuilder("lrn_op", "LRN") \
.Input(FakeInput()) \
.Attr("depth_radius", (DEPTH_RADIUS)) \
.Attr("bias", (BIAS)) \
.Attr("alpha", ((ALPHA) / 10)) \
.Attr("beta", (BETA)) \
.Finalize(node_def())); \
TF_ASSERT_OK(InitOp()); \
AddInput<float>(TensorShape({BATCH, 1, 1, DEPTH}), \
[this](int i) -> float { return RndGaussian(&rand_); }); \
TF_ASSERT_OK(RunOpKernel()); \
EXPECT_TRUE(Compare()); \
}
TCASE(T0, 4, 2, 2, 1.0f, 1.0f, 2.0f)
TCASE(T1, 16, 1, 5, 1.0f, 1.0f, 2.0f)
TCASE(T2, 16, 32, 2, 1.0f, 2.0f, 1.0f)
TCASE(T3, 128, 4, 3, 2.0f, 1.0f, 1.0f)
#undef TCASE
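// Builds a one-node graph that runs LRNGrad on random constant inputs of the
// given shape; used by the BM_LRNGrad benchmarks below.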
static Graph* MakeRNGrad(int batches, int rows, int cols, int depth,
int depth_radius) {
Graph* g = new Graph(OpRegistry::Global());
Tensor grads(DT_FLOAT, TensorShape({batches, rows, cols, depth}));
grads.flat<float>().setRandom();
Tensor in(DT_FLOAT, TensorShape({batches, rows, cols, depth}));
in.flat<float>().setRandom();
Tensor out(DT_FLOAT, TensorShape({batches, rows, cols, depth}));
Node* ret;
TF_CHECK_OK(NodeBuilder(g->NewName("lrn_grad_op"), "LRNGrad")
.Input(test::graph::Constant(g, grads))
.Input(test::graph::Constant(g, in))
.Input(test::graph::Constant(g, out))
.Attr("depth_radius", depth_radius)
.Attr("bias", 1.0f)
.Attr("alpha", 1.0f / 10)
.Attr("beta", 2.0f)
.Finalize(g, &ret));
return g;
}
#define BM_LRNGradDev(DEVICE, B, R, C, D, DR) \
static void BM_LRNGrad_##DEVICE##_##B##_##R##_##C##_##D##_##DR( \
::testing::benchmark::State& state) { \
    test::Benchmark(#DEVICE, MakeRNGrad(B, R, C, D, DR), false)               \
        .Run(state);                                                          \
state.SetItemsProcessed(static_cast<int64_t>(state.iterations()) * B * R * \
C * D * DR * 4); \
} \
BENCHMARK(BM_LRNGrad_##DEVICE##_##B##_##R##_##C##_##D##_##DR)
BM_LRNGradDev(cpu, 128, 12, 12, 64, 4);
BM_LRNGradDev(cpu, 128, 56, 56, 64, 2);
BM_LRNGradDev(cpu, 128, 27, 27, 192, 2);
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/lrn_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/lrn_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
cf68becf-c51f-4d43-90f6-818835c3a4aa | cpp | tensorflow/tensorflow | hlo_proto_to_memory_visualization_utils | tensorflow/core/profiler/convert/hlo_proto_to_memory_visualization_utils.cc | tensorflow/core/profiler/convert/hlo_proto_to_memory_visualization_utils_test.cc | #include "tensorflow/core/profiler/convert/hlo_proto_to_memory_visualization_utils.h"
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <iterator>
#include <list>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_join.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/layout_util.h"
#include "xla/service/hlo.pb.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/xla_data.pb.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/profiler/protobuf/memory_viewer_preprocess.pb.h"
namespace tensorflow {
namespace profiler {
namespace {
using ::xla::BufferAllocationProto;
using ::xla::HeapSimulatorTrace;
using ::xla::HloInstructionProto;
using ::xla::HloProto;
using ::xla::LayoutUtil;
using ::xla::LogicalBufferProto;
using ::xla::Shape;
using ::xla::ShapeUtil;
Shape ResolveShapeIndex(const xla::ShapeProto& shape_proto,
absl::Span<const int64_t> shape_index) {
if (shape_index.empty()) return Shape(shape_proto);
int64_t i = shape_index.back();
if (i >= shape_proto.tuple_shapes_size()) {
return Shape(shape_proto);
}
return Shape(shape_proto.tuple_shapes(i));
}
std::string ShapeDescription(const Shape& shape) {
return ShapeUtil::HumanStringWithLayout(shape);
}
int64_t ShapeUnpaddedSize(Shape shape) {
LayoutUtil::SetToDefaultLayout(&shape);
return ShapeUtil::ByteSizeOf(shape, sizeof(void*));
}
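// Wraps a BufferAllocationProto, caching the id of the heap simulator trace
// that touches it and deriving a category ("Parameter", "Output", etc.) and
// a human-readable description.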
class BufferAllocationStruct {
public:
explicit BufferAllocationStruct(const BufferAllocationProto& proto)
      : buffer_allocation_(proto) {}
bool IsIndefinite() const {
return buffer_allocation_.is_thread_local() ||
buffer_allocation_.is_entry_computation_parameter() ||
buffer_allocation_.is_constant() ||
buffer_allocation_.maybe_live_out();
}
const BufferAllocationProto& proto() const { return buffer_allocation_; }
size_t size() const { return buffer_allocation_.size(); }
int64_t color() const { return buffer_allocation_.color(); }
int64_t index() const { return buffer_allocation_.index(); }
std::optional<int64_t> heap_simulator_trace_id() const {
return heap_simulator_trace_id_;
}
void set_heap_simulator_trace_id(int64_t id) {
heap_simulator_trace_id_ = id;
}
std::string category() const {
if (buffer_allocation_.is_entry_computation_parameter()) {
return "Parameter";
} else if (buffer_allocation_.maybe_live_out()) {
return "Output";
} else if (buffer_allocation_.is_thread_local()) {
return "Thread-local";
} else if (buffer_allocation_.is_constant()) {
return "Constant";
} else {
return "Temporary";
}
}
std::string description() const {
return absl::StrFormat(
"buffer_allocation_id:%d\nsize:%d\nbuffer_counts:%d\n",
buffer_allocation_.index(), size(), buffer_allocation_.assigned_size());
}
private:
const BufferAllocationProto& buffer_allocation_;
std::optional<int64_t> heap_simulator_trace_id_;
};
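// Ties a LogicalBufferProto to its allocation, defining HLO instruction, and
// byte offset. The reference count and canonical_buffer pointer model buffer
// sharing introduced by SHARE_WITH heap simulator events.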
struct LogicalBufferStruct {
LogicalBufferStruct(const LogicalBufferProto& p,
const BufferAllocationStruct& b,
const ::xla::HloInstructionProto& i, uint64_t offset)
: proto(p),
buffer_allocation(b),
hlo_instruction(i),
offset(offset),
shape(ResolveShapeIndex(hlo_instruction.shape(),
proto.defined_at().shape_index())) {}
absl::string_view instruction_name() const { return hlo_instruction.name(); }
int64_t color() const { return proto.color(); }
size_t size() const { return proto.size(); }
size_t unpadded_size() const { return ShapeUnpaddedSize(shape); }
int64_t inc() {
if (canonical_buffer) return canonical_buffer->inc();
return ++ref_count;
}
int64_t dec() {
if (canonical_buffer) return canonical_buffer->dec();
return --ref_count;
}
int64_t share_with(LogicalBufferStruct* buffer) {
canonical_buffer = buffer;
return canonical_buffer->inc();
}
LogicalBufferStruct* get_canonical_buffer() {
return canonical_buffer ? canonical_buffer->get_canonical_buffer() : this;
}
std::string GetInstructionNameWithShapeIndex() const {
if (proto.defined_at().shape_index().empty()) {
return std::string(instruction_name());
} else {
return absl::StrCat(instruction_name(), "{",
absl::StrJoin(proto.defined_at().shape_index(), ","),
"}");
}
}
std::string description() const {
return absl::StrFormat(
"buffer_id:%d\nhlo_op:%s\nshape:%s\nsize:%d\nunpadded_size:%d\n"
"offset:%d\nspan:(%lld,%lld)",
proto.id(), instruction_name(), ShapeDescription(shape), size(),
unpadded_size(), offset, span ? span->first : -1,
span ? span->second : -1);
}
const LogicalBufferProto& proto;
const BufferAllocationStruct& buffer_allocation;
const ::xla::HloInstructionProto& hlo_instruction;
uint64_t offset;
std::optional<std::pair<uint64_t, uint64_t>> span;
xla::Shape shape;
int64_t ref_count = 0;
LogicalBufferStruct* canonical_buffer = nullptr;
};
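// Indexes the logical buffers and buffer allocations of an HloProto so that
// heap simulator events can be resolved to buffers by id.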
class HloProtoBufferWrapper {
public:
explicit HloProtoBufferWrapper(const ::xla::HloProto& hlo_proto)
: hlo_proto_(hlo_proto) {
Init();
}
int64_t GetHeapSimulatorTraceId(const int64_t memory_color) const {
int64_t id = GetHeapSimulatorTraceIdFromBufferAllocationIndex(memory_color);
if (id != -1) {
return id;
}
return GetHeapSimulatorTraceIdFromEvents(memory_color);
}
const ::xla::HloProto& GetHloProto() const { return hlo_proto_; }
std::vector<const BufferAllocationStruct*> GetBufferAllocations(
int64_t memory_color) const {
std::vector<const BufferAllocationStruct*> buffer_allocations;
for (const auto& iter : id_to_buffer_allocation_) {
if (iter.second->proto().color() != memory_color) continue;
buffer_allocations.push_back(iter.second.get());
}
return buffer_allocations;
}
LogicalBufferStruct* GetLogicalBuffer(int64_t logical_buffer_id) const {
if (!id_to_logical_buffer_.contains(logical_buffer_id)) {
LOG(DFATAL) << "logical_buffer_id " << logical_buffer_id << "not found.";
return nullptr;
}
return id_to_logical_buffer_.at(logical_buffer_id).get();
}
std::vector<const LogicalBufferStruct*> LogicalBuffersWithIndefiniteLifetime(
int64_t memory_color) const {
std::vector<const LogicalBufferStruct*> indefinite_logical_buffers;
for (const auto& buffer_assignment : GetBufferAllocations(memory_color)) {
if (!buffer_assignment->IsIndefinite()) continue;
if (buffer_assignment->proto().is_thread_local()) continue;
const LogicalBufferStruct* best_logical_buffer = nullptr;
size_t best_size = 0;
for (const auto& assigned : buffer_assignment->proto().assigned()) {
const LogicalBufferStruct* logical_buffer_struct =
GetLogicalBuffer(assigned.logical_buffer_id());
if (logical_buffer_struct == nullptr) continue;
if (logical_buffer_struct->size() > best_size) {
best_size = logical_buffer_struct->size();
best_logical_buffer = logical_buffer_struct;
}
}
if (best_logical_buffer) {
indefinite_logical_buffers.push_back(best_logical_buffer);
}
}
return indefinite_logical_buffers;
}
private:
void Init() {
absl::flat_hash_map<absl::string_view, const ::xla::HloInstructionProto*>
name_to_hlo;
absl::flat_hash_map<uint64_t, const ::xla::HloInstructionProto*>
unique_id_to_hlo;
for (const auto& computation : hlo_proto_.hlo_module().computations()) {
for (const auto& instruction : computation.instructions()) {
name_to_hlo[instruction.name()] = &instruction;
unique_id_to_hlo[instruction.id()] = &instruction;
}
}
absl::flat_hash_map<int64_t, const LogicalBufferProto*>
id_to_logical_buffer_proto;
for (const auto& logical_buffer :
hlo_proto_.buffer_assignment().logical_buffers()) {
id_to_logical_buffer_proto[logical_buffer.id()] = &logical_buffer;
}
for (const auto& buffer_allocation :
hlo_proto_.buffer_assignment().buffer_allocations()) {
auto& buffer_allocation_s =
id_to_buffer_allocation_[buffer_allocation.index()];
buffer_allocation_s =
std::make_unique<BufferAllocationStruct>(buffer_allocation);
for (const auto& assigned : buffer_allocation.assigned()) {
const auto id = assigned.logical_buffer_id();
if (!id_to_logical_buffer_proto.contains(id)) {
LOG(DFATAL) << "logical_buffer_id " << id << " not found.";
continue;
}
const auto* logical_buffer = id_to_logical_buffer_proto.at(id);
int64_t inst_id = logical_buffer->defined_at().instruction_id();
if (!unique_id_to_hlo.contains(inst_id)) {
LOG(DFATAL) << "instruction_id " << inst_id << " not found.";
continue;
}
const auto* instruction = unique_id_to_hlo.at(inst_id);
id_to_logical_buffer_[id] = std::make_unique<LogicalBufferStruct>(
*logical_buffer, *buffer_allocation_s, *instruction,
assigned.offset());
}
}
const auto& heap_simulator_traces =
hlo_proto_.buffer_assignment().heap_simulator_traces();
for (int64_t i = 0; i < heap_simulator_traces.size(); i++) {
if (heap_simulator_traces[i].events().empty()) continue;
int logical_buffer_id = heap_simulator_traces[i].events(0).buffer_id();
if (!id_to_logical_buffer_.contains(logical_buffer_id)) continue;
auto* logical_buffer = id_to_logical_buffer_[logical_buffer_id].get();
auto buffer_allocation_index = logical_buffer->buffer_allocation.index();
id_to_buffer_allocation_[buffer_allocation_index]
->set_heap_simulator_trace_id(i);
}
}
int64_t GetHeapSimulatorTraceIdFromEvents(const int64_t memory_color) const {
int64_t best_index = -1;
int64_t best_event_count = 0;
for (int64_t i = 0;
i < hlo_proto_.buffer_assignment().heap_simulator_traces_size(); i++) {
const auto& heap_simulator_trace =
hlo_proto_.buffer_assignment().heap_simulator_traces(i);
int64_t event_count = 0;
for (const auto& event : heap_simulator_trace.events()) {
if (!id_to_logical_buffer_.contains(event.buffer_id())) {
LOG(DFATAL) << "buffer_id " << event.buffer_id() << "not found.";
continue;
}
const auto& logical_buffer =
id_to_logical_buffer_.at(event.buffer_id());
if (logical_buffer->color() == memory_color) {
event_count++;
}
}
if (event_count > best_event_count) {
best_index = i;
best_event_count = event_count;
}
}
return best_index;
}
int64_t GetHeapSimulatorTraceIdFromBufferAllocationIndex(
const int64_t memory_color) const {
auto buffer_allocations = GetBufferAllocations(memory_color);
for (const auto* buffer_allocation : buffer_allocations) {
if (buffer_allocation->IsIndefinite()) continue;
if (buffer_allocation->heap_simulator_trace_id()) {
return *buffer_allocation->heap_simulator_trace_id();
}
}
return -1;
}
const ::xla::HloProto& hlo_proto_;
absl::flat_hash_map<int64_t, std::unique_ptr<LogicalBufferStruct>>
id_to_logical_buffer_;
absl::flat_hash_map<int64_t, std::unique_ptr<BufferAllocationStruct>>
id_to_buffer_allocation_;
};
double BytesToMiB(int64_t bytes) {
return static_cast<double>(bytes) / (1ULL << 20);
}
HeapObject MakeHeapObjectCommon(std::string label, int32_t color,
int64_t logical_buffer_id,
int64_t logical_buffer_size_bytes,
int64_t unpadded_shape_bytes) {
HeapObject result;
result.set_numbered(color);
result.set_label(std::move(label));
result.set_logical_buffer_id(logical_buffer_id);
result.set_logical_buffer_size_mib(BytesToMiB(logical_buffer_size_bytes));
result.set_unpadded_shape_mib(BytesToMiB(unpadded_shape_bytes));
return result;
}
HeapObject MakeHeapObject(const LogicalBufferStruct& logical_buffer,
int32_t color) {
const HloInstructionProto& hlo_instruction = logical_buffer.hlo_instruction;
std::string shape_string = ShapeDescription(logical_buffer.shape);
std::string label =
absl::StrFormat("%s: %s # %s", logical_buffer.instruction_name(),
shape_string, hlo_instruction.metadata().op_name());
HeapObject result = MakeHeapObjectCommon(
std::move(label), color, logical_buffer.proto.id(), logical_buffer.size(),
logical_buffer.unpadded_size());
result.set_instruction_name(
logical_buffer.GetInstructionNameWithShapeIndex());
result.set_group_name(logical_buffer.buffer_allocation.category());
result.set_tf_op_name(hlo_instruction.metadata().op_name());
result.set_shape_string(shape_string);
result.set_op_code(hlo_instruction.opcode());
return result;
}
BufferSpan MakeBufferSpan(int32 start, int32 limit) {
BufferSpan result;
result.set_start(start);
result.set_limit(limit);
return result;
}
void Convert(const xla::BufferAllocationProto_Assigned& assigned,
const HloProtoBufferWrapper& wrapper, LogicalBuffer* result) {
  result->set_id(assigned.logical_buffer_id());
  result->set_size_mib(BytesToMiB(assigned.size()));
const LogicalBufferStruct* logical_buffer =
wrapper.GetLogicalBuffer(assigned.logical_buffer_id());
if (logical_buffer == nullptr) return;
result->set_hlo_name(std::string(logical_buffer->instruction_name()));
result->mutable_shape_index()->CopyFrom(
logical_buffer->proto.defined_at().shape_index());
result->set_shape(ShapeDescription(logical_buffer->shape));
}
bool IsReusable(const BufferAllocationProto& buffer_allocation) {
return !buffer_allocation.is_thread_local() && !buffer_allocation.is_tuple();
}
void Convert(const BufferAllocationProto& proto,
const HloProtoBufferWrapper& wrapper, BufferAllocation* result) {
result->set_id(proto.index());
result->set_size_mib(BytesToMiB(proto.size()));
if (proto.is_entry_computation_parameter()) {
result->add_attributes("entry computation parameter");
}
if (proto.maybe_live_out()) {
result->add_attributes("may-be live out");
}
if (IsReusable(proto)) {
result->add_attributes("reusable");
}
for (const auto& assigned : proto.assigned()) {
Convert(assigned, wrapper, result->add_logical_buffers());
}
if (!result->logical_buffers().empty()) {
std::string common_shape = result->logical_buffers(0).shape();
for (int64_t i = 1; i < result->logical_buffers_size(); ++i) {
if (result->logical_buffers(i).shape() != common_shape) {
common_shape = "";
break;
}
}
if (!common_shape.empty()) {
result->set_common_shape(common_shape);
}
}
}
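// Tallies entry-parameter, non-reusable, maybe-live-out, and indefinite
// allocation bytes for the given memory color and records them (in MiB) on
// the result.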
void NoteSpecialAllocations(const HloProtoBufferWrapper& wrapper,
int64_t memory_color, int64_t small_buffer_size,
PreprocessResult* result) {
int64_t entry_parameters_bytes = 0;
int64_t non_reusable_bytes = 0;
int64_t maybe_live_out_bytes = 0;
int64_t total_buffer_allocation_bytes = 0;
int64_t indefinite_buffer_allocation_bytes = 0;
for (const auto* buffer_allocation_struct :
wrapper.GetBufferAllocations(memory_color)) {
const auto& buffer_allocation = buffer_allocation_struct->proto();
if (buffer_allocation.is_entry_computation_parameter()) {
entry_parameters_bytes += buffer_allocation.size();
}
if (!IsReusable(buffer_allocation)) {
non_reusable_bytes += buffer_allocation.size();
}
if (buffer_allocation.maybe_live_out()) {
if (buffer_allocation.size() > small_buffer_size) {
VLOG(1) << "Maybe live out buffer allocation: "
<< buffer_allocation.size()
<< " bytes :: " << buffer_allocation.ShortDebugString();
}
maybe_live_out_bytes += buffer_allocation.size();
}
if (buffer_allocation_struct->IsIndefinite()) {
indefinite_buffer_allocation_bytes += buffer_allocation.size();
Convert(buffer_allocation, wrapper, result->add_indefinite_lifetimes());
}
total_buffer_allocation_bytes += buffer_allocation.size();
}
result->set_entry_computation_parameters_mib(
BytesToMiB(entry_parameters_bytes));
result->set_non_reusable_mib(BytesToMiB(non_reusable_bytes));
result->set_maybe_live_out_mib(BytesToMiB(maybe_live_out_bytes));
result->set_total_buffer_allocation_mib(
BytesToMiB(total_buffer_allocation_bytes));
result->set_indefinite_buffer_allocation_mib(
BytesToMiB(indefinite_buffer_allocation_bytes));
}
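// Running statistics gathered while replaying a heap simulator trace: padded
// and unpadded heap sizes over time, the position of the peak, and the set
// of logical buffers live at that peak.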
struct HeapSimulatorStats {
explicit HeapSimulatorStats(const HloProtoBufferWrapper& wrapper)
: wrapper(wrapper) {}
void SetSimulatorTraceEventSize(int64_t size) {
simulator_trace_event_size = size;
}
void UpdateOnSimulatorEvent(const HeapSimulatorTrace::Event& event) {
heap_size_bytes_timeline.push_back(heap_size_bytes);
unpadded_heap_size_bytes_timeline.push_back(unpadded_heap_size_bytes);
const LogicalBufferStruct* logical_buffer =
wrapper.GetLogicalBuffer(event.buffer_id());
if (logical_buffer == nullptr) return;
seen_logical_buffers.insert(logical_buffer);
seen_buffer_allocations.insert(&logical_buffer->buffer_allocation.proto());
}
void IncreaseMemoryUsage(LogicalBufferStruct* canonical_logical_buffer,
bool init_buffer_span) {
logical_buffers.push_back(canonical_logical_buffer->proto.id());
heap_size_bytes += canonical_logical_buffer->size();
unpadded_heap_size_bytes += canonical_logical_buffer->unpadded_size();
int64_t prior_peak_heap_size_bytes = peak_heap_size_bytes;
peak_heap_size_bytes = std::max(peak_heap_size_bytes, heap_size_bytes);
if (prior_peak_heap_size_bytes != peak_heap_size_bytes) {
peak_heap_size_position = heap_size_bytes_timeline.size() - 1;
peak_unpadded_heap_size_bytes = unpadded_heap_size_bytes;
VLOG(1) << absl::StrFormat("New peak heap size on %d :: %d bytes",
peak_heap_size_position, peak_heap_size_bytes);
peak_logical_buffers = logical_buffers;
}
if (init_buffer_span) {
canonical_logical_buffer->span.emplace(
heap_size_bytes_timeline.size() - 1, simulator_trace_event_size - 1);
}
}
Status DecreaseMemoryUsage(LogicalBufferStruct* canonical_logical_buffer) {
int64_t canonical_buffer_id = canonical_logical_buffer->proto.id();
logical_buffers.remove(canonical_buffer_id);
heap_size_bytes -= canonical_logical_buffer->size();
if (heap_size_bytes < 0) {
return errors::InvalidArgument(absl::StrCat(
"Heap size should be non-negative, but get: ", heap_size_bytes));
}
unpadded_heap_size_bytes -= canonical_logical_buffer->unpadded_size();
if (canonical_logical_buffer->span) {
canonical_logical_buffer->span->second =
heap_size_bytes_timeline.size() - 1;
}
return absl::OkStatus();
}
Status FinalizeMemoryUsage() {
heap_size_bytes_timeline.push_back(heap_size_bytes);
unpadded_heap_size_bytes_timeline.push_back(unpadded_heap_size_bytes);
if (seen_buffer_allocations.size() != 1) {
return errors::InvalidArgument(
absl::StrCat("All heap simulation should work out of a single buffer "
"allocation, actual seen_buffer_allocations.size():",
seen_buffer_allocations.size()));
}
VLOG(1) << "Found " << peak_logical_buffers.size()
<< " logical buffers alive at point of peak heap usage.";
VLOG(1) << "Peak logical buffers: ["
<< absl::StrJoin(peak_logical_buffers, ", ") << "]";
return absl::OkStatus();
}
int64_t heap_size_bytes = 0;
int64_t unpadded_heap_size_bytes = 0;
int64_t peak_heap_size_bytes = 0;
int64_t peak_unpadded_heap_size_bytes = 0;
std::list<int64_t> logical_buffers;
std::list<int64_t> peak_logical_buffers;
std::vector<int64_t> heap_size_bytes_timeline;
std::vector<int64_t> unpadded_heap_size_bytes_timeline;
int64_t peak_heap_size_position = 0;
absl::flat_hash_set<const LogicalBufferStruct*> seen_logical_buffers;
absl::flat_hash_set<const BufferAllocationProto*> seen_buffer_allocations;
const HloProtoBufferWrapper& wrapper;
  int64_t simulator_trace_event_size = 0;
};
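// Replays the ALLOC/FREE/SHARE_WITH events of the selected heap simulator
// trace, updating `stats` and validating buffer reference counts.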
Status ProcessHeapSimulatorTrace(const HloProtoBufferWrapper& wrapper,
const int64_t memory_color,
HeapSimulatorStats* stats) {
int64_t heap_simulator_trace_id =
wrapper.GetHeapSimulatorTraceId(memory_color);
if (heap_simulator_trace_id < 0 ||
heap_simulator_trace_id >= wrapper.GetHloProto()
.buffer_assignment()
.heap_simulator_traces_size()) {
return absl::OkStatus();
}
const auto& trace =
wrapper.GetHloProto().buffer_assignment().heap_simulator_traces(
heap_simulator_trace_id);
stats->SetSimulatorTraceEventSize(trace.events_size());
for (const auto& event : trace.events()) {
stats->UpdateOnSimulatorEvent(event);
LogicalBufferStruct* logical_buffer =
wrapper.GetLogicalBuffer(event.buffer_id());
if (logical_buffer == nullptr) {
continue;
}
if (event.kind() == HeapSimulatorTrace::Event::ALLOC) {
logical_buffer->inc();
      stats->IncreaseMemoryUsage(logical_buffer, /*init_buffer_span=*/true);
} else if (event.kind() == HeapSimulatorTrace::Event::FREE) {
auto ref_count = logical_buffer->dec();
if (ref_count < 0) {
return errors::InvalidArgument(absl::StrCat(
"Buffer ", logical_buffer->proto.id(), "is freed multiple times."));
}
if (ref_count == 0) {
auto& canonical_buffer = *logical_buffer->get_canonical_buffer();
TF_RETURN_IF_ERROR(stats->DecreaseMemoryUsage(&canonical_buffer));
}
} else if (event.kind() == HeapSimulatorTrace::Event::SHARE_WITH) {
int64_t canonical_buffer_id = event.share_with_canonical_id();
LogicalBufferStruct* canonical_buffer =
wrapper.GetLogicalBuffer(canonical_buffer_id);
if (canonical_buffer == nullptr) {
continue;
}
auto ref_count = logical_buffer->share_with(canonical_buffer);
if (ref_count == 1) {
        stats->IncreaseMemoryUsage(canonical_buffer,
                                   /*init_buffer_span=*/false);
}
} else {
return errors::InvalidArgument(
absl::StrCat("Unhandled event kind: ", event.kind()));
}
}
TF_RETURN_IF_ERROR(stats->FinalizeMemoryUsage());
return absl::OkStatus();
}
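// Heap objects alive at the point of peak memory usage; buffers smaller than
// small_buffer_size are folded into a single aggregate entry.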
struct PeakUsageSnapshot {
PeakUsageSnapshot(const HloProtoBufferWrapper& wrapper,
const HeapSimulatorStats& simulator_stats,
int64_t small_buffer_size)
: wrapper(wrapper),
simulator_stats(simulator_stats),
small_buffer_size(small_buffer_size) {}
void AddHeapObject(const LogicalBufferStruct& logical_buffer) {
if (logical_buffer.size() < small_buffer_size) {
total_small_buffer_size_bytes += logical_buffer.size();
} else {
max_heap_objects.push_back(MakeHeapObject(logical_buffer, colorno++));
}
}
void FinalizeBufferUsage() {
for (const int64_t logical_buffer_id :
simulator_stats.peak_logical_buffers) {
const LogicalBufferStruct* logical_buffer =
wrapper.GetLogicalBuffer(logical_buffer_id);
if (logical_buffer == nullptr) return;
AddHeapObject(*logical_buffer);
}
if (total_small_buffer_size_bytes != 0) {
max_heap_objects.push_back(MakeHeapObjectCommon(
absl::StrFormat("small (<%d bytes)", small_buffer_size), colorno++,
          /*logical_buffer_id=*/-1, total_small_buffer_size_bytes,
          /*unpadded_shape_bytes=*/0));
}
}
std::vector<HeapObject> max_heap_objects;
int64_t indefinite_memory_usage_bytes = 0;
int64_t total_small_buffer_size_bytes = 0;
int32_t colorno = 0;
const HloProtoBufferWrapper& wrapper;
const HeapSimulatorStats& simulator_stats;
const int64_t small_buffer_size;
};
void CreatePeakUsageSnapshot(const HloProtoBufferWrapper& wrapper,
int64_t memory_color,
PeakUsageSnapshot* peak_snapshot) {
for (const auto* logical_buffer :
wrapper.LogicalBuffersWithIndefiniteLifetime(memory_color)) {
const auto& buffer_allocation = logical_buffer->buffer_allocation;
peak_snapshot->indefinite_memory_usage_bytes += buffer_allocation.size();
peak_snapshot->AddHeapObject(*logical_buffer);
}
peak_snapshot->FinalizeBufferUsage();
}
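// Emits the buffer lifetimes as a Graphviz "graph G { ... }" string: one
// background rectangle per buffer allocation plus one rectangle per logical
// buffer span, scaled to a fixed canvas.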
void ConvertAllocationTimeline(const HloProtoBufferWrapper& wrapper,
const HeapSimulatorStats& simulator_stats,
const int64_t memory_color,
PreprocessResult* result) {
const char* lb_colors[] = {
"antiquewhite3",
"aqua",
"aquamarine",
"bisque",
"blanchedalmond",
"blue",
"blueviolet",
"brown",
"burlywood",
"cadetblue",
"chartreuse",
"chocolate",
"coral",
"cornflowerblue",
"crimson",
"cyan",
"darkblue",
"darkcyan",
"darkgoldenrod",
"darkgray",
"darkgreen",
"darkkhaki",
"darkmagenta",
"darkolivegreen",
"darkorange",
"darkorchid",
"darkred",
"darksalmon",
"darkseagreen",
"darkslateblue",
"darkslategray",
"darkturquoise",
"darkviolet",
"deeppink",
"deepskyblue",
"dimgray",
"dodgerblue",
"firebrick",
"floralwhite",
"forestgreen",
"fuchsia",
"gainsboro",
"gold",
"goldenrod",
"green",
"greenyellow",
"goldenrod",
"greenyellow",
"honeydew",
"hotpink",
"indianred",
"indigo",
"ivory3",
"khaki",
"lavender",
"lavenderblush",
"lawngreen",
"lemonchiffon",
"lightblue",
"lightcoral",
"lightcyan",
"lightpink",
"limegreen",
"lightsalmon",
"lightseagreen",
"lightskyblue",
"lime",
"magenta",
"maroon",
"mediumaquamarine",
"mediumblue",
"mediumorchid",
"mediumpurple",
"midnightblue",
"mediumvioletred",
"mistyrose",
"moccasin",
"olive",
"orange",
"orangered",
"orchid",
"palegoldenrod",
"palegreen",
"paleturquoise",
"palevioletred",
"papayawhip",
"peachpuff",
"peachpuff",
"pink",
"plum",
"powderblue",
"purple",
"rebeccapurple",
"red",
"rosybrown",
"royalblue",
"salmon",
"sandybrown",
"seagreen",
"seashell",
"sienna",
"skyblue",
"tan",
"teal",
"turquoise",
"tomato",
"violet",
"violetred",
"yellow",
};
struct RenderOptions {
size_t graph_width = 2048;
size_t graph_height = 2048;
} render_options;
const char* ba_colors[] = {
"azure",
"beige",
"cornsilk",
};
int num_lb_colors = sizeof(lb_colors) / sizeof(lb_colors[0]);
int num_ba_colors = sizeof(ba_colors) / sizeof(ba_colors[0]);
std::vector<size_t> buffer_allocation_offsets;
size_t total_y_size = 0;
size_t total_x_size = 0;
std::vector<std::string> rects;
auto buffer_allocations = wrapper.GetBufferAllocations(memory_color);
const auto& heap_simulator_traces =
wrapper.GetHloProto().buffer_assignment().heap_simulator_traces();
for (const auto& buffer_allocation : buffer_allocations) {
if (buffer_allocation->IsIndefinite()) continue;
auto heap_simulator_trace_id = buffer_allocation->heap_simulator_trace_id();
if (!heap_simulator_trace_id) continue;
buffer_allocation_offsets.push_back(total_y_size);
total_y_size += buffer_allocation->size();
if (*heap_simulator_trace_id >= heap_simulator_traces.size()) {
LOG(DFATAL) << "heap_simulator_trace_id " << *heap_simulator_trace_id
<< " out of bounds.";
continue;
}
total_x_size = std::max<size_t>(
total_x_size,
heap_simulator_traces.at(*heap_simulator_trace_id).events_size());
}
if (!total_y_size || !total_x_size) return;
double scale_x =
static_cast<double>(render_options.graph_width) / total_x_size;
double scale_y =
static_cast<double>(render_options.graph_height) / total_y_size;
int node_id = 0;
auto add_rect = [&](size_t x, size_t y, size_t width, size_t height,
const string& description, const char* color) {
size_t center_x = x + (width >> 1);
size_t center_y = y + (height >> 1);
int pos_x = center_x * scale_x;
int pos_y = center_y * scale_y;
int rect_w = width * scale_x;
int rect_h = height * scale_y;
if (height * scale_y < 0.5) return;
rect_h = std::max(rect_h, 1);
std::string rect = absl::StrFormat(
R"("%d" [tooltip="%s", pos="%d,%d!", width="%d!", height="%d!", color=%s];)",
node_id++, description, pos_x, pos_y, rect_w, rect_h, color);
rects.push_back(rect);
};
int buffer_id = 0;
for (const auto& buffer_allocation : buffer_allocations) {
if (buffer_allocation->IsIndefinite()) continue;
auto buffer_allocation_offset = buffer_allocation_offsets[buffer_id++];
add_rect(0, buffer_allocation_offset, total_x_size,
buffer_allocation->size(), buffer_allocation->description(),
ba_colors[buffer_id % num_ba_colors]);
for (const auto& assigned : buffer_allocation->proto().assigned()) {
const LogicalBufferStruct* logical_buffer =
wrapper.GetLogicalBuffer(assigned.logical_buffer_id());
if (logical_buffer == nullptr) continue;
if (!logical_buffer->span || logical_buffer->canonical_buffer) continue;
size_t width = logical_buffer->span->second - logical_buffer->span->first;
size_t height = buffer_allocation_offset + logical_buffer->size();
add_rect(logical_buffer->span->first, logical_buffer->offset, width,
height, logical_buffer->description(),
lb_colors[node_id % num_lb_colors]);
}
}
VLOG(1) << "rects:" << rects.size();
result->set_allocation_timeline(
absl::StrFormat("graph G {\n node [shape=box,style=filled];\n %s\n}",
absl::StrJoin(rects, "\n")));
}
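// Assembles the final PreprocessResult: the max-heap views (in insertion
// order and sorted by size, with index maps between them), the heap size
// timelines shifted by indefinite usage, peak statistics, and per-buffer
// spans.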
void GeneratePreprocessResult(const HloProtoBufferWrapper& wrapper,
const HeapSimulatorStats& simulator_stats,
const PeakUsageSnapshot& peak_snapshot,
const int64_t memory_color,
PreprocessResult* result) {
result->set_module_name(wrapper.GetHloProto().hlo_module().name());
result->set_entry_computation_name(
wrapper.GetHloProto().hlo_module().entry_computation_name());
std::vector<const HeapObject*> max_heap_by_size;
max_heap_by_size.reserve(peak_snapshot.max_heap_objects.size());
for (const auto& object : peak_snapshot.max_heap_objects) {
max_heap_by_size.push_back(&object);
}
std::sort(max_heap_by_size.begin(), max_heap_by_size.end(),
[](const HeapObject* a, const HeapObject* b) {
return a->logical_buffer_size_mib() >
b->logical_buffer_size_mib();
});
std::vector<int> max_heap_to_by_size;
max_heap_to_by_size.reserve(max_heap_by_size.size());
for (const auto& object : peak_snapshot.max_heap_objects) {
auto it =
std::find(max_heap_by_size.begin(), max_heap_by_size.end(), &object);
int index = std::distance(max_heap_by_size.begin(), it);
max_heap_to_by_size.push_back(index);
}
std::vector<int> by_size_to_max_heap;
for (const auto* object : max_heap_by_size) {
int index = object - &peak_snapshot.max_heap_objects[0];
by_size_to_max_heap.push_back(index);
}
*result->mutable_max_heap() = {peak_snapshot.max_heap_objects.begin(),
peak_snapshot.max_heap_objects.end()};
result->mutable_max_heap_by_size()->Reserve(max_heap_by_size.size());
for (const HeapObject* o : max_heap_by_size) {
*result->add_max_heap_by_size() = *o;
}
*result->mutable_max_heap_to_by_size() = {max_heap_to_by_size.begin(),
max_heap_to_by_size.end()};
*result->mutable_by_size_to_max_heap() = {by_size_to_max_heap.begin(),
by_size_to_max_heap.end()};
size_t timeline_size = simulator_stats.heap_size_bytes_timeline.size();
double add_mib = BytesToMiB(peak_snapshot.indefinite_memory_usage_bytes);
result->mutable_heap_sizes()->Reserve(timeline_size);
result->mutable_unpadded_heap_sizes()->Reserve(timeline_size);
for (size_t i = 0; i < timeline_size; i++) {
result->add_heap_sizes(
BytesToMiB(simulator_stats.heap_size_bytes_timeline[i]) + add_mib);
result->add_unpadded_heap_sizes(
BytesToMiB(simulator_stats.unpadded_heap_size_bytes_timeline[i]) +
add_mib);
}
result->set_peak_heap_mib(BytesToMiB(simulator_stats.peak_heap_size_bytes) +
add_mib);
result->set_peak_unpadded_heap_mib(
BytesToMiB(simulator_stats.peak_unpadded_heap_size_bytes) + add_mib);
result->set_peak_heap_size_position(simulator_stats.peak_heap_size_position);
for (const auto* logical_buffer : simulator_stats.seen_logical_buffers) {
if (!logical_buffer->span) continue;
(*result->mutable_logical_buffer_spans())[logical_buffer->proto.id()] =
MakeBufferSpan(logical_buffer->span->first,
logical_buffer->span->second);
}
NoteSpecialAllocations(wrapper, memory_color, peak_snapshot.small_buffer_size,
result);
ConvertAllocationTimeline(wrapper, simulator_stats, memory_color, result);
}
}
absl::StatusOr<PreprocessResult> ConvertHloProtoToPreprocessResult(
const HloProto& hlo_proto, int64_t small_buffer_size,
int64_t memory_color) {
HloProtoBufferWrapper wrapper(hlo_proto);
HeapSimulatorStats simulator_stats(wrapper);
auto status =
ProcessHeapSimulatorTrace(wrapper, memory_color, &simulator_stats);
if (!status.ok()) {
return absl::InvalidArgumentError(absl::StrCat(
"Failed to process heap simulator trace: ", status.message()));
}
PeakUsageSnapshot peak_snapshot(wrapper, simulator_stats, small_buffer_size);
CreatePeakUsageSnapshot(wrapper, memory_color, &peak_snapshot);
PreprocessResult result;
GeneratePreprocessResult(wrapper, simulator_stats, peak_snapshot,
memory_color, &result);
return result;
}
}
} | #include "tensorflow/core/profiler/convert/hlo_proto_to_memory_visualization_utils.h"
#include <string>
#include <gmock/gmock.h>
#include "absl/strings/str_format.h"
#include "xla/service/hlo.pb.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/profiler/protobuf/memory_viewer_preprocess.pb.h"
#include "tensorflow/core/util/proto/proto_utils.h"
namespace tensorflow {
namespace profiler {
namespace {
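// Minimal HloProto text template: two 512 KiB logical buffers assigned to a
// single 1 MiB allocation, with the heap simulator trace spliced in via %s.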
static constexpr char kHLOBase[] = R"pb(
hlo_module {
name: "test_module"
entry_computation_name: "test_computation"
computations {
name: "test_computation"
instructions {
name: "fusion.1"
id: 0
shape { tuple_shapes { element_type: U64 } }
}
instructions {
name: "fusion.2"
id: 1
shape { tuple_shapes { element_type: U64 } }
}
}
}
buffer_assignment {
buffer_allocations {
index: 0
size: 1048576
color: 0
assigned { logical_buffer_id: 1 offset: 0 size: 524288 }
assigned { logical_buffer_id: 2 offset: 524288 size: 524288 }
}
logical_buffers {
id: 1
size: 524288
color: 0
defined_at { instruction_id: 0 shape_index: 0 }
}
logical_buffers {
id: 2
size: 524288
color: 0
defined_at { instruction_id: 1 shape_index: 0 }
}
heap_simulator_traces { %s }
}
)pb";
TEST(MemoryViewerTest, TestHeapSimulatorTraceShareWith_1) {
static constexpr char kHeapSimulatorTrace[] = R"pb(
events { kind: ALLOC buffer_id: 1 }
events { kind: SHARE_WITH buffer_id: 2 share_with_canonical_id: 1 }
events { kind: FREE buffer_id: 1 }
events { kind: FREE buffer_id: 2 }
)pb";
std::string hlo_string = absl::StrFormat(kHLOBase, kHeapSimulatorTrace);
xla::HloProto hlo_proto;
ASSERT_TRUE(
proto_utils::ParseTextFormatFromString(hlo_string, &hlo_proto).ok());
TF_ASSERT_OK_AND_ASSIGN(
PreprocessResult preprocess_result,
      ConvertHloProtoToPreprocessResult(hlo_proto, /*small_buffer_size=*/0));
EXPECT_EQ(preprocess_result.peak_heap_mib(), 0.5);
}
TEST(MemoryViewerTest, TestHeapSimulatorTraceShareWith_2) {
static constexpr char kHeapSimulatorTrace[] = R"pb(
events { kind: ALLOC buffer_id: 1 }
events { kind: FREE buffer_id: 1 }
events { kind: SHARE_WITH buffer_id: 2 share_with_canonical_id: 1 }
events { kind: FREE buffer_id: 2 }
)pb";
std::string hlo_string = absl::StrFormat(kHLOBase, kHeapSimulatorTrace);
xla::HloProto hlo_proto;
ASSERT_TRUE(
proto_utils::ParseTextFormatFromString(hlo_string, &hlo_proto).ok());
TF_ASSERT_OK_AND_ASSIGN(
PreprocessResult preprocess_result,
      ConvertHloProtoToPreprocessResult(hlo_proto, /*small_buffer_size=*/0));
EXPECT_EQ(preprocess_result.peak_heap_mib(), 0.5);
EXPECT_FALSE(preprocess_result.allocation_timeline().empty());
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/profiler/convert/hlo_proto_to_memory_visualization_utils.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/profiler/convert/hlo_proto_to_memory_visualization_utils_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
93cba225-8871-4b50-9f3e-4bab934cf0ac | cpp | abseil/abseil-cpp | demangle | absl/debugging/internal/demangle.cc | absl/debugging/internal/demangle_test.cc | #include "absl/debugging/internal/demangle.h"
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <limits>
#include <string>
#include "absl/base/config.h"
#include "absl/debugging/internal/demangle_rust.h"
#if ABSL_INTERNAL_HAS_CXA_DEMANGLE
#include <cxxabi.h>
#endif
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace debugging_internal {
typedef struct {
const char *abbrev;
const char *real_name;
int arity;
} AbbrevPair;
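// Itanium C++ ABI operator encodings mapped to their spellings and arities,
// e.g. "pl" -> "+" with arity 2.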
static const AbbrevPair kOperatorList[] = {
{"nw", "new", 0},
{"na", "new[]", 0},
{"dl", "delete", 1},
{"da", "delete[]", 1},
{"aw", "co_await", 1},
{"ps", "+", 1},
{"ng", "-", 1},
{"ad", "&", 1},
{"de", "*", 1},
{"co", "~", 1},
{"pl", "+", 2},
{"mi", "-", 2},
{"ml", "*", 2},
{"dv", "/", 2},
{"rm", "%", 2},
{"an", "&", 2},
{"or", "|", 2},
{"eo", "^", 2},
{"aS", "=", 2},
{"pL", "+=", 2},
{"mI", "-=", 2},
{"mL", "*=", 2},
{"dV", "/=", 2},
{"rM", "%=", 2},
{"aN", "&=", 2},
{"oR", "|=", 2},
{"eO", "^=", 2},
{"ls", "<<", 2},
{"rs", ">>", 2},
{"lS", "<<=", 2},
{"rS", ">>=", 2},
{"ss", "<=>", 2},
{"eq", "==", 2},
{"ne", "!=", 2},
{"lt", "<", 2},
{"gt", ">", 2},
{"le", "<=", 2},
{"ge", ">=", 2},
{"nt", "!", 1},
{"aa", "&&", 2},
{"oo", "||", 2},
{"pp", "++", 1},
{"mm", "--", 1},
{"cm", ",", 2},
{"pm", "->*", 2},
{"pt", "->", 0},
{"cl", "()", 0},
{"ix", "[]", 2},
{"qu", "?", 3},
{"st", "sizeof", 0},
{"sz", "sizeof", 1},
{"sZ", "sizeof...", 0},
{nullptr, nullptr, 0},
};
static const AbbrevPair kBuiltinTypeList[] = {
{"v", "void", 0},
{"w", "wchar_t", 0},
{"b", "bool", 0},
{"c", "char", 0},
{"a", "signed char", 0},
{"h", "unsigned char", 0},
{"s", "short", 0},
{"t", "unsigned short", 0},
{"i", "int", 0},
{"j", "unsigned int", 0},
{"l", "long", 0},
{"m", "unsigned long", 0},
{"x", "long long", 0},
{"y", "unsigned long long", 0},
{"n", "__int128", 0},
{"o", "unsigned __int128", 0},
{"f", "float", 0},
{"d", "double", 0},
{"e", "long double", 0},
{"g", "__float128", 0},
{"z", "ellipsis", 0},
{"De", "decimal128", 0},
{"Dd", "decimal64", 0},
{"Dc", "decltype(auto)", 0},
{"Da", "auto", 0},
{"Dn", "std::nullptr_t", 0},
{"Df", "decimal32", 0},
{"Di", "char32_t", 0},
{"Du", "char8_t", 0},
{"Ds", "char16_t", 0},
{"Dh", "float16", 0},
{nullptr, nullptr, 0},
};
static const AbbrevPair kSubstitutionList[] = {
{"St", "", 0},
{"Sa", "allocator", 0},
{"Sb", "basic_string", 0},
{"Ss", "string", 0},
{"Si", "istream", 0},
{"So", "ostream", 0},
{"Sd", "iostream", 0},
{nullptr, nullptr, 0},
};
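// The mutable part of parsing: positions in the input and output plus
// nesting bookkeeping. Kept to the size of four ints (see the static_assert
// below) so it can be cheaply snapshotted and restored when a grammar
// alternative fails.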
typedef struct {
int mangled_idx;
int out_cur_idx;
int prev_name_idx;
unsigned int prev_name_length : 16;
signed int nest_level : 15;
unsigned int append : 1;
} ParseState;
static_assert(sizeof(ParseState) == 4 * sizeof(int),
"unexpected size of ParseState");
typedef struct {
const char *mangled_begin;
char *out;
int out_end_idx;
int recursion_depth;
int steps;
ParseState parse_state;
#ifdef ABSL_INTERNAL_DEMANGLE_RECORDS_HIGH_WATER_MARK
int high_water_mark;
bool too_complex;
#endif
} State;
namespace {
#ifdef ABSL_INTERNAL_DEMANGLE_RECORDS_HIGH_WATER_MARK
void UpdateHighWaterMark(State *state) {
if (state->high_water_mark < state->parse_state.mangled_idx) {
state->high_water_mark = state->parse_state.mangled_idx;
}
}
void ReportHighWaterMark(State *state) {
const size_t input_length = std::strlen(state->mangled_begin);
if (input_length + 6 > static_cast<size_t>(state->out_end_idx) ||
state->too_complex) {
if (state->out_end_idx > 0) state->out[0] = '\0';
return;
}
const size_t high_water_mark = static_cast<size_t>(state->high_water_mark);
std::memcpy(state->out, state->mangled_begin, high_water_mark);
std::memcpy(state->out + high_water_mark, "--!--", 5);
std::memcpy(state->out + high_water_mark + 5,
state->mangled_begin + high_water_mark,
input_length - high_water_mark);
state->out[input_length + 5] = '\0';
}
#else
void UpdateHighWaterMark(State *) {}
void ReportHighWaterMark(State *) {}
#endif
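// Bounds both recursion depth and total parse steps so that pathological
// mangled names cannot overflow the stack or loop for too long.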
class ComplexityGuard {
public:
explicit ComplexityGuard(State *state) : state_(state) {
++state->recursion_depth;
++state->steps;
}
~ComplexityGuard() { --state_->recursion_depth; }
static constexpr int kRecursionDepthLimit = 256;
static constexpr int kParseStepsLimit = 1 << 17;
bool IsTooComplex() const {
if (state_->recursion_depth > kRecursionDepthLimit ||
state_->steps > kParseStepsLimit) {
#ifdef ABSL_INTERNAL_DEMANGLE_RECORDS_HIGH_WATER_MARK
state_->too_complex = true;
#endif
return true;
}
return false;
}
private:
State *state_;
};
}
static size_t StrLen(const char *str) {
size_t len = 0;
while (*str != '\0') {
++str;
++len;
}
return len;
}
static bool AtLeastNumCharsRemaining(const char *str, size_t n) {
for (size_t i = 0; i < n; ++i) {
if (str[i] == '\0') {
return false;
}
}
return true;
}
static bool StrPrefix(const char *str, const char *prefix) {
size_t i = 0;
while (str[i] != '\0' && prefix[i] != '\0' && str[i] == prefix[i]) {
++i;
}
return prefix[i] == '\0';
}
static void InitState(State* state,
const char* mangled,
char* out,
size_t out_size) {
state->mangled_begin = mangled;
state->out = out;
state->out_end_idx = static_cast<int>(out_size);
state->recursion_depth = 0;
state->steps = 0;
#ifdef ABSL_INTERNAL_DEMANGLE_RECORDS_HIGH_WATER_MARK
state->high_water_mark = 0;
state->too_complex = false;
#endif
state->parse_state.mangled_idx = 0;
state->parse_state.out_cur_idx = 0;
state->parse_state.prev_name_idx = 0;
state->parse_state.prev_name_length = 0;
state->parse_state.nest_level = -1;
state->parse_state.append = true;
}
static inline const char *RemainingInput(State *state) {
return &state->mangled_begin[state->parse_state.mangled_idx];
}
static bool ParseOneCharToken(State *state, const char one_char_token) {
ComplexityGuard guard(state);
if (guard.IsTooComplex()) return false;
if (RemainingInput(state)[0] == one_char_token) {
++state->parse_state.mangled_idx;
UpdateHighWaterMark(state);
return true;
}
return false;
}
static bool ParseTwoCharToken(State *state, const char *two_char_token) {
ComplexityGuard guard(state);
if (guard.IsTooComplex()) return false;
if (RemainingInput(state)[0] == two_char_token[0] &&
RemainingInput(state)[1] == two_char_token[1]) {
state->parse_state.mangled_idx += 2;
UpdateHighWaterMark(state);
return true;
}
return false;
}
static bool ParseThreeCharToken(State *state, const char *three_char_token) {
ComplexityGuard guard(state);
if (guard.IsTooComplex()) return false;
if (RemainingInput(state)[0] == three_char_token[0] &&
RemainingInput(state)[1] == three_char_token[1] &&
RemainingInput(state)[2] == three_char_token[2]) {
state->parse_state.mangled_idx += 3;
UpdateHighWaterMark(state);
return true;
}
return false;
}
static bool ParseLongToken(State *state, const char *long_token) {
ComplexityGuard guard(state);
if (guard.IsTooComplex()) return false;
int i = 0;
for (; long_token[i] != '\0'; ++i) {
if (RemainingInput(state)[i] != long_token[i]) return false;
}
state->parse_state.mangled_idx += i;
UpdateHighWaterMark(state);
return true;
}
static bool ParseCharClass(State *state, const char *char_class) {
ComplexityGuard guard(state);
if (guard.IsTooComplex()) return false;
if (RemainingInput(state)[0] == '\0') {
return false;
}
const char *p = char_class;
for (; *p != '\0'; ++p) {
if (RemainingInput(state)[0] == *p) {
++state->parse_state.mangled_idx;
UpdateHighWaterMark(state);
return true;
}
}
return false;
}
static bool ParseDigit(State *state, int *digit) {
char c = RemainingInput(state)[0];
if (ParseCharClass(state, "0123456789")) {
if (digit != nullptr) {
*digit = c - '0';
}
return true;
}
return false;
}
static bool Optional(bool) { return true; }
typedef bool (*ParseFunc)(State *);
static bool OneOrMore(ParseFunc parse_func, State *state) {
if (parse_func(state)) {
while (parse_func(state)) {
}
return true;
}
return false;
}
static bool ZeroOrMore(ParseFunc parse_func, State *state) {
while (parse_func(state)) {
}
return true;
}
static void Append(State *state, const char *const str, const size_t length) {
for (size_t i = 0; i < length; ++i) {
if (state->parse_state.out_cur_idx + 1 <
state->out_end_idx) {
state->out[state->parse_state.out_cur_idx++] = str[i];
} else {
state->parse_state.out_cur_idx = state->out_end_idx + 1;
break;
}
}
if (state->parse_state.out_cur_idx < state->out_end_idx) {
state->out[state->parse_state.out_cur_idx] =
'\0';
}
}
static bool IsLower(char c) { return c >= 'a' && c <= 'z'; }
static bool IsAlpha(char c) {
return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z');
}
static bool IsDigit(char c) { return c >= '0' && c <= '9'; }
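// Returns true if `str` is a sequence of GCC/Clang clone suffixes such as
// ".clone.3" or ".constprop.80": dot-separated runs of [A-Za-z_] words
// and/or digits.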
static bool IsFunctionCloneSuffix(const char *str) {
size_t i = 0;
while (str[i] != '\0') {
bool parsed = false;
if (str[i] == '.' && (IsAlpha(str[i + 1]) || str[i + 1] == '_')) {
parsed = true;
i += 2;
while (IsAlpha(str[i]) || str[i] == '_') {
++i;
}
}
if (str[i] == '.' && IsDigit(str[i + 1])) {
parsed = true;
i += 2;
while (IsDigit(str[i])) {
++i;
}
}
if (!parsed)
return false;
}
return true;
}
static bool EndsWith(State *state, const char chr) {
return state->parse_state.out_cur_idx > 0 &&
state->parse_state.out_cur_idx < state->out_end_idx &&
chr == state->out[state->parse_state.out_cur_idx - 1];
}
static void MaybeAppendWithLength(State *state, const char *const str,
const size_t length) {
if (state->parse_state.append && length > 0) {
if (str[0] == '<' && EndsWith(state, '<')) {
Append(state, " ", 1);
}
if (state->parse_state.out_cur_idx < state->out_end_idx &&
(IsAlpha(str[0]) || str[0] == '_')) {
state->parse_state.prev_name_idx = state->parse_state.out_cur_idx;
state->parse_state.prev_name_length = static_cast<unsigned int>(length);
}
Append(state, str, length);
}
}
static bool MaybeAppendDecimal(State *state, int val) {
constexpr size_t kMaxLength = 20;
char buf[kMaxLength];
if (state->parse_state.append) {
char *p = &buf[kMaxLength];
do {
*--p = static_cast<char>((val % 10) + '0');
val /= 10;
} while (p > buf && val != 0);
Append(state, p, kMaxLength - static_cast<size_t>(p - buf));
}
return true;
}
static bool MaybeAppend(State *state, const char *const str) {
if (state->parse_state.append) {
size_t length = StrLen(str);
MaybeAppendWithLength(state, str, length);
}
return true;
}
static bool EnterNestedName(State *state) {
state->parse_state.nest_level = 0;
return true;
}
static bool LeaveNestedName(State *state, int16_t prev_value) {
state->parse_state.nest_level = prev_value;
return true;
}
static bool DisableAppend(State *state) {
state->parse_state.append = false;
return true;
}
static bool RestoreAppend(State *state, bool prev_value) {
state->parse_state.append = prev_value;
return true;
}
static void MaybeIncreaseNestLevel(State *state) {
if (state->parse_state.nest_level > -1) {
++state->parse_state.nest_level;
}
}
static void MaybeAppendSeparator(State *state) {
if (state->parse_state.nest_level >= 1) {
MaybeAppend(state, "::");
}
}
static void MaybeCancelLastSeparator(State *state) {
if (state->parse_state.nest_level >= 1 && state->parse_state.append &&
state->parse_state.out_cur_idx >= 2) {
state->parse_state.out_cur_idx -= 2;
state->out[state->parse_state.out_cur_idx] = '\0';
}
}
static bool IdentifierIsAnonymousNamespace(State *state, size_t length) {
static const char anon_prefix[] = "_GLOBAL__N_";
return (length > (sizeof(anon_prefix) - 1) &&
StrPrefix(RemainingInput(state), anon_prefix));
}
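// Recursive-descent parser over the Itanium C++ ABI name-mangling grammar.
// Each function returns true on a successful parse; alternatives snapshot
// ParseState up front and restore it when a branch fails.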
static bool ParseMangledName(State *state);
static bool ParseEncoding(State *state);
static bool ParseName(State *state);
static bool ParseUnscopedName(State *state);
static bool ParseNestedName(State *state);
static bool ParsePrefix(State *state);
static bool ParseUnqualifiedName(State *state);
static bool ParseSourceName(State *state);
static bool ParseLocalSourceName(State *state);
static bool ParseUnnamedTypeName(State *state);
static bool ParseNumber(State *state, int *number_out);
static bool ParseFloatNumber(State *state);
static bool ParseSeqId(State *state);
static bool ParseIdentifier(State *state, size_t length);
static bool ParseOperatorName(State *state, int *arity);
static bool ParseConversionOperatorType(State *state);
static bool ParseSpecialName(State *state);
static bool ParseCallOffset(State *state);
static bool ParseNVOffset(State *state);
static bool ParseVOffset(State *state);
static bool ParseAbiTags(State *state);
static bool ParseCtorDtorName(State *state);
static bool ParseDecltype(State *state);
static bool ParseType(State *state);
static bool ParseCVQualifiers(State *state);
static bool ParseExtendedQualifier(State *state);
static bool ParseBuiltinType(State *state);
static bool ParseVendorExtendedType(State *state);
static bool ParseFunctionType(State *state);
static bool ParseBareFunctionType(State *state);
static bool ParseOverloadAttribute(State *state);
static bool ParseClassEnumType(State *state);
static bool ParseArrayType(State *state);
static bool ParsePointerToMemberType(State *state);
static bool ParseTemplateParam(State *state);
static bool ParseTemplateParamDecl(State *state);
static bool ParseTemplateTemplateParam(State *state);
static bool ParseTemplateArgs(State *state);
static bool ParseTemplateArg(State *state);
static bool ParseBaseUnresolvedName(State *state);
static bool ParseUnresolvedName(State *state);
static bool ParseUnresolvedQualifierLevel(State *state);
static bool ParseUnionSelector(State* state);
static bool ParseFunctionParam(State* state);
static bool ParseBracedExpression(State *state);
static bool ParseExpression(State *state);
static bool ParseInitializer(State *state);
static bool ParseExprPrimary(State *state);
static bool ParseExprCastValueAndTrailingE(State *state);
static bool ParseQRequiresClauseExpr(State *state);
static bool ParseRequirement(State *state);
static bool ParseTypeConstraint(State *state);
static bool ParseLocalName(State *state);
static bool ParseLocalNameSuffix(State *state);
static bool ParseDiscriminator(State *state);
static bool ParseSubstitution(State *state, bool accept_std);
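// <mangled-name> ::= _Z <encoding>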
static bool ParseMangledName(State *state) {
ComplexityGuard guard(state);
if (guard.IsTooComplex()) return false;
return ParseTwoCharToken(state, "_Z") && ParseEncoding(state);
}
static bool ParseEncoding(State *state) {
ComplexityGuard guard(state);
if (guard.IsTooComplex()) return false;
if (ParseName(state)) {
if (!ParseBareFunctionType(state)) {
return true;
}
ParseQRequiresClauseExpr(state);
return true;
}
if (ParseSpecialName(state)) {
return true;
}
return false;
}
static bool ParseName(State *state) {
ComplexityGuard guard(state);
if (guard.IsTooComplex()) return false;
if (ParseNestedName(state) || ParseLocalName(state)) {
return true;
}
ParseState copy = state->parse_state;
if (ParseSubstitution(state, false) &&
ParseTemplateArgs(state)) {
return true;
}
state->parse_state = copy;
return ParseUnscopedName(state) && Optional(ParseTemplateArgs(state));
}
static bool ParseUnscopedName(State *state) {
ComplexityGuard guard(state);
if (guard.IsTooComplex()) return false;
if (ParseUnqualifiedName(state)) {
return true;
}
ParseState copy = state->parse_state;
if (ParseTwoCharToken(state, "St") && MaybeAppend(state, "std::") &&
ParseUnqualifiedName(state)) {
return true;
}
state->parse_state = copy;
return false;
}
static inline bool ParseRefQualifier(State *state) {
return ParseCharClass(state, "OR");
}
static bool ParseNestedName(State *state) {
ComplexityGuard guard(state);
if (guard.IsTooComplex()) return false;
ParseState copy = state->parse_state;
if (ParseOneCharToken(state, 'N') && EnterNestedName(state) &&
Optional(ParseCVQualifiers(state)) &&
Optional(ParseRefQualifier(state)) && ParsePrefix(state) &&
LeaveNestedName(state, copy.nest_level) &&
ParseOneCharToken(state, 'E')) {
return true;
}
state->parse_state = copy;
return false;
}
static bool ParsePrefix(State *state) {
ComplexityGuard guard(state);
if (guard.IsTooComplex()) return false;
bool has_something = false;
while (true) {
MaybeAppendSeparator(state);
if (ParseTemplateParam(state) || ParseDecltype(state) ||
ParseSubstitution(state, true) ||
ParseVendorExtendedType(state) ||
ParseUnscopedName(state) ||
(ParseOneCharToken(state, 'M') && ParseUnnamedTypeName(state))) {
has_something = true;
MaybeIncreaseNestLevel(state);
continue;
}
MaybeCancelLastSeparator(state);
if (has_something && ParseTemplateArgs(state)) {
return ParsePrefix(state);
} else {
break;
}
}
return true;
}
static bool ParseUnqualifiedName(State *state) {
ComplexityGuard guard(state);
if (guard.IsTooComplex()) return false;
if (ParseOperatorName(state, nullptr) || ParseCtorDtorName(state) ||
ParseSourceName(state) || ParseLocalSourceName(state) ||
ParseUnnamedTypeName(state)) {
return ParseAbiTags(state);
}
ParseState copy = state->parse_state;
if (ParseTwoCharToken(state, "DC") && OneOrMore(ParseSourceName, state) &&
ParseOneCharToken(state, 'E')) {
return true;
}
state->parse_state = copy;
if (ParseOneCharToken(state, 'F') && MaybeAppend(state, "friend ") &&
(ParseSourceName(state) || ParseOperatorName(state, nullptr))) {
return true;
}
state->parse_state = copy;
return false;
}
static bool ParseAbiTags(State *state) {
ComplexityGuard guard(state);
if (guard.IsTooComplex()) return false;
while (ParseOneCharToken(state, 'B')) {
ParseState copy = state->parse_state;
MaybeAppend(state, "[abi:");
if (!ParseSourceName(state)) {
state->parse_state = copy;
return false;
}
MaybeAppend(state, "]");
}
return true;
}
static bool ParseSourceName(State *state) {
ComplexityGuard guard(state);
if (guard.IsTooComplex()) return false;
ParseState copy = state->parse_state;
int length = -1;
if (ParseNumber(state, &length) &&
ParseIdentifier(state, static_cast<size_t>(length))) {
return true;
}
state->parse_state = copy;
return false;
}
static bool ParseLocalSourceName(State *state) {
ComplexityGuard guard(state);
if (guard.IsTooComplex()) return false;
ParseState copy = state->parse_state;
if (ParseOneCharToken(state, 'L') && ParseSourceName(state) &&
Optional(ParseDiscriminator(state))) {
return true;
}
state->parse_state = copy;
return false;
}
static bool ParseUnnamedTypeName(State *state) {
ComplexityGuard guard(state);
if (guard.IsTooComplex()) return false;
ParseState copy = state->parse_state;
int which = -1;
if (ParseTwoCharToken(state, "Ut") && Optional(ParseNumber(state, &which)) &&
which <= std::numeric_limits<int>::max() - 2 &&
ParseOneCharToken(state, '_')) {
MaybeAppend(state, "{unnamed type#");
MaybeAppendDecimal(state, 2 + which);
MaybeAppend(state, "}");
return true;
}
state->parse_state = copy;
which = -1;
if (ParseTwoCharToken(state, "Ul") && DisableAppend(state) &&
ZeroOrMore(ParseTemplateParamDecl, state) &&
OneOrMore(ParseType, state) && RestoreAppend(state, copy.append) &&
ParseOneCharToken(state, 'E') && Optional(ParseNumber(state, &which)) &&
which <= std::numeric_limits<int>::max() - 2 &&
ParseOneCharToken(state, '_')) {
MaybeAppend(state, "{lambda()#");
MaybeAppendDecimal(state, 2 + which);
MaybeAppend(state, "}");
return true;
}
state->parse_state = copy;
return false;
}
static bool ParseNumber(State *state, int *number_out) {
ComplexityGuard guard(state);
if (guard.IsTooComplex()) return false;
bool negative = false;
if (ParseOneCharToken(state, 'n')) {
negative = true;
}
const char *p = RemainingInput(state);
uint64_t number = 0;
for (; *p != '\0'; ++p) {
if (IsDigit(*p)) {
number = number * 10 + static_cast<uint64_t>(*p - '0');
} else {
break;
}
}
if (negative) {
number = ~number + 1;
}
if (p != RemainingInput(state)) {
state->parse_state.mangled_idx += p - RemainingInput(state);
UpdateHighWaterMark(state);
if (number_out != nullptr) {
*number_out = static_cast<int>(number);
}
return true;
}
return false;
}
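// Floating-point literals are encoded as a string of lowercase hex digits.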
static bool ParseFloatNumber(State *state) {
ComplexityGuard guard(state);
if (guard.IsTooComplex()) return false;
const char *p = RemainingInput(state);
for (; *p != '\0'; ++p) {
if (!IsDigit(*p) && !(*p >= 'a' && *p <= 'f')) {
break;
}
}
if (p != RemainingInput(state)) {
state->parse_state.mangled_idx += p - RemainingInput(state);
UpdateHighWaterMark(state);
return true;
}
return false;
}
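// <seq-id> ::= <base-36 number using digits and upper-case letters>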
static bool ParseSeqId(State *state) {
ComplexityGuard guard(state);
if (guard.IsTooComplex()) return false;
const char *p = RemainingInput(state);
for (; *p != '\0'; ++p) {
if (!IsDigit(*p) && !(*p >= 'A' && *p <= 'Z')) {
break;
}
}
if (p != RemainingInput(state)) {
state->parse_state.mangled_idx += p - RemainingInput(state);
UpdateHighWaterMark(state);
return true;
}
return false;
}
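// <identifier> ::= <unqualified source code identifier> (of the given length)
// Anonymous-namespace mangles are printed as "(anonymous namespace)".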
static bool ParseIdentifier(State *state, size_t length) {
ComplexityGuard guard(state);
if (guard.IsTooComplex()) return false;
if (!AtLeastNumCharsRemaining(RemainingInput(state), length)) {
return false;
}
if (IdentifierIsAnonymousNamespace(state, length)) {
MaybeAppend(state, "(anonymous namespace)");
} else {
MaybeAppendWithLength(state, RemainingInput(state), length);
}
state->parse_state.mangled_idx += length;
UpdateHighWaterMark(state);
return true;
}
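// <operator-name> ::= cv <type>                # (cast)
//                 ::= li <source-name>         # operator"" <source-name>
//                 ::= v <digit> <source-name>  # vendor extended operator
//                 ::= <two-character name from kOperatorList>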
static bool ParseOperatorName(State *state, int *arity) {
ComplexityGuard guard(state);
if (guard.IsTooComplex()) return false;
if (!AtLeastNumCharsRemaining(RemainingInput(state), 2)) {
return false;
}
ParseState copy = state->parse_state;
if (ParseTwoCharToken(state, "cv") && MaybeAppend(state, "operator ") &&
EnterNestedName(state) && ParseConversionOperatorType(state) &&
LeaveNestedName(state, copy.nest_level)) {
if (arity != nullptr) {
*arity = 1;
}
return true;
}
state->parse_state = copy;
if (ParseTwoCharToken(state, "li") && MaybeAppend(state, "operator\"\" ") &&
ParseSourceName(state)) {
return true;
}
state->parse_state = copy;
if (ParseOneCharToken(state, 'v') && ParseDigit(state, arity) &&
ParseSourceName(state)) {
return true;
}
state->parse_state = copy;
if (!(IsLower(RemainingInput(state)[0]) &&
IsAlpha(RemainingInput(state)[1]))) {
return false;
}
const AbbrevPair *p;
for (p = kOperatorList; p->abbrev != nullptr; ++p) {
if (RemainingInput(state)[0] == p->abbrev[0] &&
RemainingInput(state)[1] == p->abbrev[1]) {
if (arity != nullptr) {
*arity = p->arity;
}
MaybeAppend(state, "operator");
if (IsLower(*p->real_name)) {
MaybeAppend(state, " ");
}
MaybeAppend(state, p->real_name);
state->parse_state.mangled_idx += 2;
UpdateHighWaterMark(state);
return true;
}
}
return false;
}
static bool ParseConversionOperatorType(State *state) {
ComplexityGuard guard(state);
if (guard.IsTooComplex()) return false;
ParseState copy = state->parse_state;
const char* begin_simple_prefixes = RemainingInput(state);
while (ParseCharClass(state, "OPRCGrVK")) {}
const char* end_simple_prefixes = RemainingInput(state);
if (!ParseType(state)) {
state->parse_state = copy;
return false;
}
while (begin_simple_prefixes != end_simple_prefixes) {
switch (*--end_simple_prefixes) {
case 'P':
MaybeAppend(state, "*");
break;
case 'R':
MaybeAppend(state, "&");
break;
case 'O':
MaybeAppend(state, "&&");
break;
case 'C':
MaybeAppend(state, " _Complex");
break;
case 'G':
MaybeAppend(state, " _Imaginary");
break;
case 'r':
MaybeAppend(state, " restrict");
break;
case 'V':
MaybeAppend(state, " volatile");
break;
case 'K':
MaybeAppend(state, " const");
break;
}
}
return true;
}
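// <special-name> ::= TW <name>            # thread-local wrapper routine
//                ::= TH <name>            # thread-local initialization routine
//                ::= T <V|T|I|S> <type>   # vtable, VTT, typeinfo, typeinfo name
//                ::= Tc <call-offset> <call-offset> <(base) encoding>
//                ::= GV <(object) name>   # guard variable
//                ::= T <call-offset> <(base) encoding>  # virtual thunk
//                ::= TC <type> <number> _ <type>        # construction vtable
//                ::= T <F|J> <type>
//                ::= GR <name> [<seq-id>] _  # reference temporary
//                ::= GA <encoding>           # hidden alias (GCC extension)
//                ::= GTt <encoding>          # transaction clone
//                ::= TA <template-arg>       # template parameter object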
static bool ParseSpecialName(State *state) {
ComplexityGuard guard(state);
if (guard.IsTooComplex()) return false;
ParseState copy = state->parse_state;
if (ParseTwoCharToken(state, "TW")) {
MaybeAppend(state, "thread-local wrapper routine for ");
if (ParseName(state)) return true;
state->parse_state = copy;
return false;
}
if (ParseTwoCharToken(state, "TH")) {
MaybeAppend(state, "thread-local initialization routine for ");
if (ParseName(state)) return true;
state->parse_state = copy;
return false;
}
if (ParseOneCharToken(state, 'T') && ParseCharClass(state, "VTIS") &&
ParseType(state)) {
return true;
}
state->parse_state = copy;
if (ParseTwoCharToken(state, "Tc") && ParseCallOffset(state) &&
ParseCallOffset(state) && ParseEncoding(state)) {
return true;
}
state->parse_state = copy;
if (ParseTwoCharToken(state, "GV") && ParseName(state)) {
return true;
}
state->parse_state = copy;
if (ParseOneCharToken(state, 'T') && ParseCallOffset(state) &&
ParseEncoding(state)) {
return true;
}
state->parse_state = copy;
if (ParseTwoCharToken(state, "TC") && ParseType(state) &&
ParseNumber(state, nullptr) && ParseOneCharToken(state, '_') &&
DisableAppend(state) && ParseType(state)) {
RestoreAppend(state, copy.append);
return true;
}
state->parse_state = copy;
if (ParseOneCharToken(state, 'T') && ParseCharClass(state, "FJ") &&
ParseType(state)) {
return true;
}
state->parse_state = copy;
if (ParseTwoCharToken(state, "GR")) {
MaybeAppend(state, "reference temporary for ");
if (!ParseName(state)) {
state->parse_state = copy;
return false;
}
const bool has_seq_id = ParseSeqId(state);
const bool has_underscore = ParseOneCharToken(state, '_');
if (has_seq_id && !has_underscore) {
state->parse_state = copy;
return false;
}
return true;
}
if (ParseTwoCharToken(state, "GA") && ParseEncoding(state)) {
return true;
}
state->parse_state = copy;
if (ParseThreeCharToken(state, "GTt") &&
MaybeAppend(state, "transaction clone for ") && ParseEncoding(state)) {
return true;
}
state->parse_state = copy;
if (ParseOneCharToken(state, 'T') && ParseCharClass(state, "hv") &&
ParseCallOffset(state) && ParseEncoding(state)) {
return true;
}
state->parse_state = copy;
if (ParseTwoCharToken(state, "TA")) {
bool append = state->parse_state.append;
DisableAppend(state);
if (ParseTemplateArg(state)) {
RestoreAppend(state, append);
MaybeAppend(state, "template parameter object");
return true;
}
}
state->parse_state = copy;
return false;
}
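// <call-offset> ::= h <nv-offset> _  # non-virtual base override
//               ::= v <v-offset> _   # virtual base override
// <nv-offset> ::= <(offset) number>
// <v-offset>  ::= <(offset) number> _ <(virtual offset) number>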
static bool ParseCallOffset(State *state) {
ComplexityGuard guard(state);
if (guard.IsTooComplex()) return false;
ParseState copy = state->parse_state;
if (ParseOneCharToken(state, 'h') && ParseNVOffset(state) &&
ParseOneCharToken(state, '_')) {
return true;
}
state->parse_state = copy;
if (ParseOneCharToken(state, 'v') && ParseVOffset(state) &&
ParseOneCharToken(state, '_')) {
return true;
}
state->parse_state = copy;
return false;
}
static bool ParseNVOffset(State *state) {
ComplexityGuard guard(state);
if (guard.IsTooComplex()) return false;
return ParseNumber(state, nullptr);
}
static bool ParseVOffset(State *state) {
ComplexityGuard guard(state);
if (guard.IsTooComplex()) return false;
ParseState copy = state->parse_state;
if (ParseNumber(state, nullptr) && ParseOneCharToken(state, '_') &&
ParseNumber(state, nullptr)) {
return true;
}
state->parse_state = copy;
return false;
}
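// <ctor-dtor-name> ::= C1 | C2 | C3 | C4        # constructors
//                  ::= CI1 <type> | CI2 <type>  # inheriting constructors
//                  ::= D0 | D1 | D2 | D4        # destructors
// The constructor/destructor is printed using the previously parsed name.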
static bool ParseCtorDtorName(State *state) {
ComplexityGuard guard(state);
if (guard.IsTooComplex()) return false;
ParseState copy = state->parse_state;
if (ParseOneCharToken(state, 'C')) {
if (ParseCharClass(state, "1234")) {
const char *const prev_name =
state->out + state->parse_state.prev_name_idx;
MaybeAppendWithLength(state, prev_name,
state->parse_state.prev_name_length);
return true;
} else if (ParseOneCharToken(state, 'I') && ParseCharClass(state, "12") &&
ParseClassEnumType(state)) {
return true;
}
}
state->parse_state = copy;
if (ParseOneCharToken(state, 'D') && ParseCharClass(state, "0124")) {
const char *const prev_name = state->out + state->parse_state.prev_name_idx;
MaybeAppend(state, "~");
MaybeAppendWithLength(state, prev_name,
state->parse_state.prev_name_length);
return true;
}
state->parse_state = copy;
return false;
}
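// <decltype> ::= Dt <expression> E  # decltype of an id-expression
//            ::= DT <expression> E  # decltype of an expression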
static bool ParseDecltype(State *state) {
ComplexityGuard guard(state);
if (guard.IsTooComplex()) return false;
ParseState copy = state->parse_state;
if (ParseOneCharToken(state, 'D') && ParseCharClass(state, "tT") &&
ParseExpression(state) && ParseOneCharToken(state, 'E')) {
return true;
}
state->parse_state = copy;
return false;
}
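// <type> ::= <CV-qualifiers> <type>
//        ::= P <type>   # pointer
//        ::= R <type>   # lvalue reference
//        ::= O <type>   # rvalue reference
//        ::= C <type>   # complex pair
//        ::= G <type>   # imaginary
//        ::= Dp <type>  # pack expansion
//        ::= <builtin-type> | <function-type> | <class-enum-type>
//        ::= <array-type> | <pointer-to-member-type> | <decltype>
//        ::= <template-template-param> <template-args>
//        ::= <template-param>
//        ::= Dv (<number> | <expression>) _ <type>  # GNU vector
//        ::= Dk <type-constraint>  # constrained auto
//        ::= <substitution> | _SUBSTPACK_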
static bool ParseType(State *state) {
ComplexityGuard guard(state);
if (guard.IsTooComplex()) return false;
ParseState copy = state->parse_state;
if (ParseCVQualifiers(state)) {
const bool result = ParseType(state);
if (!result) state->parse_state = copy;
return result;
}
state->parse_state = copy;
if (ParseCharClass(state, "OPRCG")) {
const bool result = ParseType(state);
if (!result) state->parse_state = copy;
return result;
}
state->parse_state = copy;
if (ParseTwoCharToken(state, "Dp") && ParseType(state)) {
return true;
}
state->parse_state = copy;
if (ParseBuiltinType(state) || ParseFunctionType(state) ||
ParseClassEnumType(state) || ParseArrayType(state) ||
ParsePointerToMemberType(state) || ParseDecltype(state) ||
ParseSubstitution(state, false)) {
return true;
}
if (ParseTemplateTemplateParam(state) && ParseTemplateArgs(state)) {
return true;
}
state->parse_state = copy;
if (ParseTemplateParam(state)) {
return true;
}
if (ParseTwoCharToken(state, "Dv") && ParseNumber(state, nullptr) &&
ParseOneCharToken(state, '_') && ParseType(state)) {
return true;
}
state->parse_state = copy;
if (ParseTwoCharToken(state, "Dv") && ParseExpression(state) &&
ParseOneCharToken(state, '_') && ParseType(state)) {
return true;
}
state->parse_state = copy;
if (ParseTwoCharToken(state, "Dk") && ParseTypeConstraint(state)) {
return true;
}
state->parse_state = copy;
return ParseLongToken(state, "_SUBSTPACK_");
}
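// <CV-qualifiers> ::= <extended-qualifier>* [r] [V] [K]  # restrict, volatile, const
// <extended-qualifier> ::= U <source-name> [<template-args>]  # vendor extension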
static bool ParseCVQualifiers(State *state) {
ComplexityGuard guard(state);
if (guard.IsTooComplex()) return false;
int num_cv_qualifiers = 0;
while (ParseExtendedQualifier(state)) ++num_cv_qualifiers;
num_cv_qualifiers += ParseOneCharToken(state, 'r');
num_cv_qualifiers += ParseOneCharToken(state, 'V');
num_cv_qualifiers += ParseOneCharToken(state, 'K');
return num_cv_qualifiers > 0;
}
static bool ParseExtendedQualifier(State *state) {
ComplexityGuard guard(state);
if (guard.IsTooComplex()) return false;
ParseState copy = state->parse_state;
if (!ParseOneCharToken(state, 'U')) return false;
bool append = state->parse_state.append;
DisableAppend(state);
if (!ParseSourceName(state)) {
state->parse_state = copy;
return false;
}
Optional(ParseTemplateArgs(state));
RestoreAppend(state, append);
return true;
}
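// <builtin-type> ::= DB (<number> | <expression>) _  # C23 signed _BitInt
//                ::= DU (<number> | <expression>) _  # C23 unsigned _BitInt
//                ::= DF16b          # std::bfloat16_t
//                ::= DF <number> _  # _FloatN
//                ::= DF <number> x  # _FloatNx
//                ::= <one- or two-character code from kBuiltinTypeList>
//                ::= <vendor-extended-type>
// <vendor-extended-type> ::= u <source-name> [<template-args>]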
static bool ParseBuiltinType(State *state) {
ComplexityGuard guard(state);
if (guard.IsTooComplex()) return false;
ParseState copy = state->parse_state;
if (ParseTwoCharToken(state, "DB") ||
(ParseTwoCharToken(state, "DU") && MaybeAppend(state, "unsigned "))) {
bool append = state->parse_state.append;
DisableAppend(state);
int number = -1;
if (!ParseNumber(state, &number) && !ParseExpression(state)) {
state->parse_state = copy;
return false;
}
RestoreAppend(state, append);
if (!ParseOneCharToken(state, '_')) {
state->parse_state = copy;
return false;
}
MaybeAppend(state, "_BitInt(");
if (number >= 0) {
MaybeAppendDecimal(state, number);
} else {
MaybeAppend(state, "?");
}
MaybeAppend(state, ")");
return true;
}
if (ParseTwoCharToken(state, "DF")) {
if (ParseThreeCharToken(state, "16b")) {
MaybeAppend(state, "std::bfloat16_t");
return true;
}
int number = 0;
if (!ParseNumber(state, &number)) {
state->parse_state = copy;
return false;
}
MaybeAppend(state, "_Float");
MaybeAppendDecimal(state, number);
if (ParseOneCharToken(state, 'x')) {
MaybeAppend(state, "x");
return true;
}
if (ParseOneCharToken(state, '_')) return true;
state->parse_state = copy;
return false;
}
for (const AbbrevPair *p = kBuiltinTypeList; p->abbrev != nullptr; ++p) {
if (p->abbrev[1] == '\0') {
if (ParseOneCharToken(state, p->abbrev[0])) {
MaybeAppend(state, p->real_name);
return true;
}
} else if (p->abbrev[2] == '\0' && ParseTwoCharToken(state, p->abbrev)) {
MaybeAppend(state, p->real_name);
return true;
}
}
return ParseVendorExtendedType(state);
}
static bool ParseVendorExtendedType(State *state) {
ComplexityGuard guard(state);
if (guard.IsTooComplex()) return false;
ParseState copy = state->parse_state;
if (ParseOneCharToken(state, 'u') && ParseSourceName(state) &&
Optional(ParseTemplateArgs(state))) {
return true;
}
state->parse_state = copy;
return false;
}
static bool ParseExceptionSpec(State *state) {
ComplexityGuard guard(state);
if (guard.IsTooComplex()) return false;
if (ParseTwoCharToken(state, "Do")) return true;
ParseState copy = state->parse_state;
if (ParseTwoCharToken(state, "DO") && ParseExpression(state) &&
ParseOneCharToken(state, 'E')) {
return true;
}
state->parse_state = copy;
if (ParseTwoCharToken(state, "Dw") && OneOrMore(ParseType, state) &&
ParseOneCharToken(state, 'E')) {
return true;
}
state->parse_state = copy;
return false;
}
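// <function-type> ::= [<exception-spec>] [Dx] F [Y] <bare-function-type>
//                     [<ref-qualifier>] E
// Dx marks a transaction-safe function; Y marks extern "C" linkage.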
static bool ParseFunctionType(State *state) {
ComplexityGuard guard(state);
if (guard.IsTooComplex()) return false;
ParseState copy = state->parse_state;
Optional(ParseExceptionSpec(state));
Optional(ParseTwoCharToken(state, "Dx"));
if (!ParseOneCharToken(state, 'F')) {
state->parse_state = copy;
return false;
}
Optional(ParseOneCharToken(state, 'Y'));
if (!ParseBareFunctionType(state)) {
state->parse_state = copy;
return false;
}
Optional(ParseCharClass(state, "RO"));
if (!ParseOneCharToken(state, 'E')) {
state->parse_state = copy;
return false;
}
return true;
}
static bool ParseBareFunctionType(State *state) {
ComplexityGuard guard(state);
if (guard.IsTooComplex()) return false;
ParseState copy = state->parse_state;
DisableAppend(state);
if (ZeroOrMore(ParseOverloadAttribute, state) &&
OneOrMore(ParseType, state)) {
RestoreAppend(state, copy.append);
MaybeAppend(state, "()");
return true;
}
state->parse_state = copy;
return false;
}
static bool ParseOverloadAttribute(State *state) {
ComplexityGuard guard(state);
if (guard.IsTooComplex()) return false;
ParseState copy = state->parse_state;
if (ParseTwoCharToken(state, "Ua") && ParseName(state)) {
return true;
}
state->parse_state = copy;
return false;
}
static bool ParseClassEnumType(State *state) {
ComplexityGuard guard(state);
if (guard.IsTooComplex()) return false;
ParseState copy = state->parse_state;
if (Optional(ParseTwoCharToken(state, "Ts") ||
ParseTwoCharToken(state, "Tu") ||
ParseTwoCharToken(state, "Te")) &&
ParseName(state)) {
return true;
}
state->parse_state = copy;
return false;
}
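// <array-type> ::= A <(dimension) number> _ <(element) type>
//              ::= A [<(dimension) expression>] _ <(element) type>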
static bool ParseArrayType(State *state) {
ComplexityGuard guard(state);
if (guard.IsTooComplex()) return false;
ParseState copy = state->parse_state;
if (ParseOneCharToken(state, 'A') && ParseNumber(state, nullptr) &&
ParseOneCharToken(state, '_') && ParseType(state)) {
return true;
}
state->parse_state = copy;
if (ParseOneCharToken(state, 'A') && Optional(ParseExpression(state)) &&
ParseOneCharToken(state, '_') && ParseType(state)) {
return true;
}
state->parse_state = copy;
return false;
}
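// <pointer-to-member-type> ::= M <(class) type> <(member) type>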
static bool ParsePointerToMemberType(State *state) {
ComplexityGuard guard(state);
if (guard.IsTooComplex()) return false;
ParseState copy = state->parse_state;
if (ParseOneCharToken(state, 'M') && ParseType(state) && ParseType(state)) {
return true;
}
state->parse_state = copy;
return false;
}
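// <template-param> ::= T_
//                  ::= T <parameter-2 non-negative number> _
//                  ::= TL <level-1> __
//                  ::= TL <level-1> _ <parameter-2 non-negative number> _
// Rendered as "?" because parameter substitution is not tracked.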
static bool ParseTemplateParam(State *state) {
ComplexityGuard guard(state);
if (guard.IsTooComplex()) return false;
if (ParseTwoCharToken(state, "T_")) {
MaybeAppend(state, "?");
return true;
}
ParseState copy = state->parse_state;
if (ParseOneCharToken(state, 'T') && ParseNumber(state, nullptr) &&
ParseOneCharToken(state, '_')) {
MaybeAppend(state, "?");
return true;
}
state->parse_state = copy;
if (ParseTwoCharToken(state, "TL") && ParseNumber(state, nullptr)) {
if (ParseTwoCharToken(state, "__")) {
MaybeAppend(state, "?");
return true;
}
if (ParseOneCharToken(state, '_') && ParseNumber(state, nullptr) &&
ParseOneCharToken(state, '_')) {
MaybeAppend(state, "?");
return true;
}
}
state->parse_state = copy;
return false;
}
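// <template-param-decl> ::= Ty                           # type parameter
//                       ::= Tk <concept name> [<template-args>]  # constrained type
//                       ::= Tn <type>                    # non-type parameter
//                       ::= Tt <template-param-decl>* E  # template parameter
//                       ::= Tp <template-param-decl>     # parameter pack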
static bool ParseTemplateParamDecl(State *state) {
ComplexityGuard guard(state);
if (guard.IsTooComplex()) return false;
ParseState copy = state->parse_state;
if (ParseTwoCharToken(state, "Ty")) {
return true;
}
state->parse_state = copy;
if (ParseTwoCharToken(state, "Tk") && ParseName(state) &&
Optional(ParseTemplateArgs(state))) {
return true;
}
state->parse_state = copy;
if (ParseTwoCharToken(state, "Tn") && ParseType(state)) {
return true;
}
state->parse_state = copy;
if (ParseTwoCharToken(state, "Tt") &&
ZeroOrMore(ParseTemplateParamDecl, state) &&
ParseOneCharToken(state, 'E')) {
return true;
}
state->parse_state = copy;
if (ParseTwoCharToken(state, "Tp") && ParseTemplateParamDecl(state)) {
return true;
}
state->parse_state = copy;
return false;
}
static bool ParseTemplateTemplateParam(State *state) {
ComplexityGuard guard(state);
if (guard.IsTooComplex()) return false;
return (ParseTemplateParam(state) ||
ParseSubstitution(state, false));
}
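// <template-args> ::= I <template-arg>+ [Q <requires-clause expr>] E
// Rendered as "<>"; the argument list itself is not printed.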
static bool ParseTemplateArgs(State *state) {
ComplexityGuard guard(state);
if (guard.IsTooComplex()) return false;
ParseState copy = state->parse_state;
DisableAppend(state);
if (ParseOneCharToken(state, 'I') && OneOrMore(ParseTemplateArg, state) &&
Optional(ParseQRequiresClauseExpr(state)) &&
ParseOneCharToken(state, 'E')) {
RestoreAppend(state, copy.append);
MaybeAppend(state, "<>");
return true;
}
state->parse_state = copy;
return false;
}
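// <template-arg> ::= J <template-arg>* E  # argument pack
//                ::= <local-source-name> [<template-args>]  # GCC extension
//                ::= <type> | <expr-primary>
//                ::= X <expression> E
//                ::= <template-param-decl> <template-arg>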
static bool ParseTemplateArg(State *state) {
ComplexityGuard guard(state);
if (guard.IsTooComplex()) return false;
ParseState copy = state->parse_state;
if (ParseOneCharToken(state, 'J') && ZeroOrMore(ParseTemplateArg, state) &&
ParseOneCharToken(state, 'E')) {
return true;
}
state->parse_state = copy;
if (ParseLocalSourceName(state) && Optional(ParseTemplateArgs(state))) {
copy = state->parse_state;
if (ParseExprCastValueAndTrailingE(state)) {
return true;
}
state->parse_state = copy;
return true;
}
if (ParseType(state) || ParseExprPrimary(state)) {
return true;
}
state->parse_state = copy;
if (ParseOneCharToken(state, 'X') && ParseExpression(state) &&
ParseOneCharToken(state, 'E')) {
return true;
}
state->parse_state = copy;
if (ParseTemplateParamDecl(state) && ParseTemplateArg(state)) {
return true;
}
state->parse_state = copy;
return false;
}
static inline bool ParseUnresolvedType(State *state) {
return (ParseTemplateParam(state) && Optional(ParseTemplateArgs(state))) ||
ParseDecltype(state) || ParseSubstitution(state, false);
}
static inline bool ParseSimpleId(State *state) {
return ParseSourceName(state) && Optional(ParseTemplateArgs(state));
}
static bool ParseBaseUnresolvedName(State *state) {
ComplexityGuard guard(state);
if (guard.IsTooComplex()) return false;
if (ParseSimpleId(state)) {
return true;
}
ParseState copy = state->parse_state;
if (ParseTwoCharToken(state, "on") && ParseOperatorName(state, nullptr) &&
Optional(ParseTemplateArgs(state))) {
return true;
}
state->parse_state = copy;
if (ParseTwoCharToken(state, "dn") &&
(ParseUnresolvedType(state) || ParseSimpleId(state))) {
return true;
}
state->parse_state = copy;
return false;
}
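// <unresolved-name> ::= [gs] <base-unresolved-name>
//                   ::= sr <unresolved-type> <base-unresolved-name>
//                   ::= sr N <unresolved-type> <unresolved-qualifier-level>+ E
//                       <base-unresolved-name>
//                   ::= [gs] sr <unresolved-qualifier-level>+ E
//                       <base-unresolved-name>
// plus a dubious "sr St <simple-id> <simple-id>" form accepted for
// compatibility with symbols observed in the wild.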
static bool ParseUnresolvedName(State *state) {
ComplexityGuard guard(state);
if (guard.IsTooComplex()) return false;
ParseState copy = state->parse_state;
if (Optional(ParseTwoCharToken(state, "gs")) &&
ParseBaseUnresolvedName(state)) {
return true;
}
state->parse_state = copy;
if (ParseTwoCharToken(state, "sr") && ParseUnresolvedType(state) &&
ParseBaseUnresolvedName(state)) {
return true;
}
state->parse_state = copy;
if (ParseTwoCharToken(state, "sr") && ParseOneCharToken(state, 'N') &&
ParseUnresolvedType(state) &&
OneOrMore(ParseUnresolvedQualifierLevel, state) &&
ParseOneCharToken(state, 'E') && ParseBaseUnresolvedName(state)) {
return true;
}
state->parse_state = copy;
if (Optional(ParseTwoCharToken(state, "gs")) &&
ParseTwoCharToken(state, "sr") &&
OneOrMore(ParseUnresolvedQualifierLevel, state) &&
ParseOneCharToken(state, 'E') && ParseBaseUnresolvedName(state)) {
return true;
}
state->parse_state = copy;
if (ParseTwoCharToken(state, "sr") && ParseTwoCharToken(state, "St") &&
ParseSimpleId(state) && ParseSimpleId(state)) {
return true;
}
state->parse_state = copy;
return false;
}
static bool ParseUnresolvedQualifierLevel(State *state) {
ComplexityGuard guard(state);
if (guard.IsTooComplex()) return false;
if (ParseSimpleId(state)) return true;
ParseState copy = state->parse_state;
if (ParseSubstitution(state, false) &&
ParseTemplateArgs(state)) {
return true;
}
state->parse_state = copy;
return false;
}
static bool ParseUnionSelector(State *state) {
return ParseOneCharToken(state, '_') && Optional(ParseNumber(state, nullptr));
}
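// <function-param> ::= fp <(top-level) CV-qualifiers> [<number>] _
//                  ::= fL <number> p <(top-level) CV-qualifiers> [<number>] _
//                  ::= fpT  # "this"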
static bool ParseFunctionParam(State *state) {
ComplexityGuard guard(state);
if (guard.IsTooComplex()) return false;
ParseState copy = state->parse_state;
if (ParseTwoCharToken(state, "fp") && Optional(ParseCVQualifiers(state)) &&
Optional(ParseNumber(state, nullptr)) && ParseOneCharToken(state, '_')) {
return true;
}
state->parse_state = copy;
if (ParseTwoCharToken(state, "fL") && Optional(ParseNumber(state, nullptr)) &&
ParseOneCharToken(state, 'p') && Optional(ParseCVQualifiers(state)) &&
Optional(ParseNumber(state, nullptr)) && ParseOneCharToken(state, '_')) {
return true;
}
state->parse_state = copy;
return ParseThreeCharToken(state, "fpT");
}
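// <braced-expression> ::= <expression>
//                     ::= di <field source-name> <braced-expression>
//                     ::= dx <index expression> <braced-expression>
//                     ::= dX <range begin expression> <range end expression>
//                         <braced-expression>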
static bool ParseBracedExpression(State *state) {
ComplexityGuard guard(state);
if (guard.IsTooComplex()) return false;
ParseState copy = state->parse_state;
if (ParseTwoCharToken(state, "di") && ParseSourceName(state) &&
ParseBracedExpression(state)) {
return true;
}
state->parse_state = copy;
if (ParseTwoCharToken(state, "dx") && ParseExpression(state) &&
ParseBracedExpression(state)) {
return true;
}
state->parse_state = copy;
if (ParseTwoCharToken(state, "dX") &&
ParseExpression(state) && ParseExpression(state) &&
ParseBracedExpression(state)) {
return true;
}
state->parse_state = copy;
return ParseExpression(state);
}
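// <expression> covers the operator/expression grammar: calls (cl...E),
// pre/post increment and decrement (pp_/mm_), conversions (cv), new and
// delete (nw/na/dl/da), casts (dc/sc/cc/rc), folds (fl/fr/fL/fR),
// sizeof... (sZ/sP), throw (tw/tr), requires-expressions (rq/rQ), and
// vendor extensions (u).  Each alternative restores the saved ParseState
// and falls through to the next on failure.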
static bool ParseExpression(State *state) {
ComplexityGuard guard(state);
if (guard.IsTooComplex()) return false;
if (ParseTemplateParam(state) || ParseExprPrimary(state)) {
return true;
}
ParseState copy = state->parse_state;
if (ParseTwoCharToken(state, "cl") && OneOrMore(ParseExpression, state) &&
ParseOneCharToken(state, 'E')) {
return true;
}
state->parse_state = copy;
if ((ParseThreeCharToken(state, "pp_") ||
ParseThreeCharToken(state, "mm_")) &&
ParseExpression(state)) {
return true;
}
state->parse_state = copy;
if (ParseTwoCharToken(state, "cp") && ParseSimpleId(state) &&
ZeroOrMore(ParseExpression, state) && ParseOneCharToken(state, 'E')) {
return true;
}
state->parse_state = copy;
if (ParseTwoCharToken(state, "so") && ParseType(state) &&
ParseExpression(state) && Optional(ParseNumber(state, nullptr)) &&
ZeroOrMore(ParseUnionSelector, state) &&
Optional(ParseOneCharToken(state, 'p')) &&
ParseOneCharToken(state, 'E')) {
return true;
}
state->parse_state = copy;
if (ParseFunctionParam(state)) return true;
state->parse_state = copy;
if (ParseTwoCharToken(state, "tl") && ParseType(state) &&
ZeroOrMore(ParseBracedExpression, state) &&
ParseOneCharToken(state, 'E')) {
return true;
}
state->parse_state = copy;
if (ParseTwoCharToken(state, "il") &&
ZeroOrMore(ParseBracedExpression, state) &&
ParseOneCharToken(state, 'E')) {
return true;
}
state->parse_state = copy;
if (Optional(ParseTwoCharToken(state, "gs")) &&
(ParseTwoCharToken(state, "nw") || ParseTwoCharToken(state, "na")) &&
ZeroOrMore(ParseExpression, state) && ParseOneCharToken(state, '_') &&
ParseType(state) &&
(ParseOneCharToken(state, 'E') || ParseInitializer(state))) {
return true;
}
state->parse_state = copy;
if (Optional(ParseTwoCharToken(state, "gs")) &&
(ParseTwoCharToken(state, "dl") || ParseTwoCharToken(state, "da")) &&
ParseExpression(state)) {
return true;
}
state->parse_state = copy;
if (ParseCharClass(state, "dscr") && ParseOneCharToken(state, 'c') &&
ParseType(state) && ParseExpression(state)) {
return true;
}
state->parse_state = copy;
if (ParseTwoCharToken(state, "cv")) {
if (ParseType(state)) {
ParseState copy2 = state->parse_state;
if (ParseOneCharToken(state, '_') && ZeroOrMore(ParseExpression, state) &&
ParseOneCharToken(state, 'E')) {
return true;
}
state->parse_state = copy2;
if (ParseExpression(state)) {
return true;
}
}
} else {
int arity = -1;
if (ParseOperatorName(state, &arity) &&
arity > 0 &&
(arity < 3 || ParseExpression(state)) &&
(arity < 2 || ParseExpression(state)) &&
(arity < 1 || ParseExpression(state))) {
return true;
}
}
state->parse_state = copy;
if (ParseTwoCharToken(state, "ti") && ParseType(state)) {
return true;
}
state->parse_state = copy;
if (ParseTwoCharToken(state, "te") && ParseExpression(state)) {
return true;
}
state->parse_state = copy;
if (ParseTwoCharToken(state, "st") && ParseType(state)) {
return true;
}
state->parse_state = copy;
if (ParseTwoCharToken(state, "at") && ParseType(state)) {
return true;
}
state->parse_state = copy;
if (ParseTwoCharToken(state, "az") && ParseExpression(state)) {
return true;
}
state->parse_state = copy;
if (ParseTwoCharToken(state, "nx") && ParseExpression(state)) {
return true;
}
state->parse_state = copy;
if (ParseTwoCharToken(state, "sZ") &&
(ParseFunctionParam(state) || ParseTemplateParam(state))) {
return true;
}
state->parse_state = copy;
if (ParseTwoCharToken(state, "sP") && ZeroOrMore(ParseTemplateArg, state) &&
ParseOneCharToken(state, 'E')) {
return true;
}
state->parse_state = copy;
if ((ParseTwoCharToken(state, "fl") || ParseTwoCharToken(state, "fr")) &&
ParseOperatorName(state, nullptr) && ParseExpression(state)) {
return true;
}
state->parse_state = copy;
if ((ParseTwoCharToken(state, "fL") || ParseTwoCharToken(state, "fR")) &&
ParseOperatorName(state, nullptr) && ParseExpression(state) &&
ParseExpression(state)) {
return true;
}
state->parse_state = copy;
if (ParseTwoCharToken(state, "tw") && ParseExpression(state)) {
return true;
}
state->parse_state = copy;
if (ParseTwoCharToken(state, "tr")) return true;
if ((ParseTwoCharToken(state, "dt") || ParseTwoCharToken(state, "pt")) &&
ParseExpression(state) && ParseUnresolvedName(state)) {
return true;
}
state->parse_state = copy;
if (ParseTwoCharToken(state, "ds") && ParseExpression(state) &&
ParseExpression(state)) {
return true;
}
state->parse_state = copy;
if (ParseTwoCharToken(state, "sp") && ParseExpression(state)) {
return true;
}
state->parse_state = copy;
if (ParseOneCharToken(state, 'u') && ParseSourceName(state) &&
ZeroOrMore(ParseTemplateArg, state) && ParseOneCharToken(state, 'E')) {
return true;
}
state->parse_state = copy;
if (ParseTwoCharToken(state, "rq") && OneOrMore(ParseRequirement, state) &&
ParseOneCharToken(state, 'E')) {
return true;
}
state->parse_state = copy;
if (ParseTwoCharToken(state, "rQ") && ParseBareFunctionType(state) &&
ParseOneCharToken(state, '_') && OneOrMore(ParseRequirement, state) &&
ParseOneCharToken(state, 'E')) {
return true;
}
state->parse_state = copy;
return ParseUnresolvedName(state);
}
static bool ParseInitializer(State *state) {
ComplexityGuard guard(state);
if (guard.IsTooComplex()) return false;
ParseState copy = state->parse_state;
if (ParseTwoCharToken(state, "pi") && ZeroOrMore(ParseExpression, state) &&
ParseOneCharToken(state, 'E')) {
return true;
}
state->parse_state = copy;
if (ParseTwoCharToken(state, "il") &&
ZeroOrMore(ParseBracedExpression, state) &&
ParseOneCharToken(state, 'E')) {
return true;
}
state->parse_state = copy;
return false;
}
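// <expr-primary> ::= L <type> <(value) number> E
//                ::= L <type> <(value) float> E
//                ::= L <mangled-name> E
//                ::= LZ <encoding> E  # GCC extension
//                ::= LDnE             # nullptr literal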
static bool ParseExprPrimary(State *state) {
ComplexityGuard guard(state);
if (guard.IsTooComplex()) return false;
ParseState copy = state->parse_state;
if (ParseTwoCharToken(state, "LZ")) {
if (ParseEncoding(state) && ParseOneCharToken(state, 'E')) {
return true;
}
state->parse_state = copy;
return false;
}
if (ParseOneCharToken(state, 'L')) {
if (ParseThreeCharToken(state, "DnE")) return true;
// Literals of array type (e.g. string literals) have no value encoding.
if (RemainingInput(state)[0] == 'A') {
if (ParseType(state) && ParseOneCharToken(state, 'E')) return true;
state->parse_state = copy;
return false;
}
if (ParseType(state) && ParseExprCastValueAndTrailingE(state)) {
return true;
}
}
state->parse_state = copy;
if (ParseOneCharToken(state, 'L') && ParseMangledName(state) &&
ParseOneCharToken(state, 'E')) {
return true;
}
state->parse_state = copy;
return false;
}
static bool ParseExprCastValueAndTrailingE(State *state) {
ComplexityGuard guard(state);
if (guard.IsTooComplex()) return false;
ParseState copy = state->parse_state;
if (ParseNumber(state, nullptr) && ParseOneCharToken(state, 'E')) {
return true;
}
state->parse_state = copy;
if (ParseFloatNumber(state)) {
if (ParseOneCharToken(state, 'E')) return true;
if (ParseOneCharToken(state, '_') && ParseFloatNumber(state) &&
ParseOneCharToken(state, 'E')) {
return true;
}
}
state->parse_state = copy;
return false;
}
static bool ParseQRequiresClauseExpr(State *state) {
ComplexityGuard guard(state);
if (guard.IsTooComplex()) return false;
ParseState copy = state->parse_state;
DisableAppend(state);
if (ParseOneCharToken(state, 'Q') && ParseExpression(state)) {
RestoreAppend(state, copy.append);
return true;
}
state->parse_state = copy;
return false;
}
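// <requirement> ::= X <expression> [N] [R <type-constraint>]
//               ::= T <type>
//               ::= Q <constraint-expression>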
static bool ParseRequirement(State *state) {
ComplexityGuard guard(state);
if (guard.IsTooComplex()) return false;
ParseState copy = state->parse_state;
if (ParseOneCharToken(state, 'X') && ParseExpression(state) &&
Optional(ParseOneCharToken(state, 'N')) &&
(!ParseOneCharToken(state, 'R') || ParseTypeConstraint(state))) {
return true;
}
state->parse_state = copy;
if (ParseOneCharToken(state, 'T') && ParseType(state)) return true;
state->parse_state = copy;
if (ParseOneCharToken(state, 'Q') && ParseExpression(state)) return true;
state->parse_state = copy;
return false;
}
static bool ParseTypeConstraint(State *state) {
return ParseName(state);
}
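// Parses what follows "Z <(function) encoding> E" in a <local-name>:
//   d [<number>] _ <name>   # entity within a default argument,
//                           # rendered as ::{default arg#N}::
//   <name> [<discriminator>]
//   s [<discriminator>]     # string literal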
static bool ParseLocalNameSuffix(State *state) {
ComplexityGuard guard(state);
if (guard.IsTooComplex()) return false;
ParseState copy = state->parse_state;
if (ParseOneCharToken(state, 'd') &&
(IsDigit(RemainingInput(state)[0]) || RemainingInput(state)[0] == '_')) {
int number = -1;
Optional(ParseNumber(state, &number));
if (number < -1 || number > 2147483645) {
number = -1;
}
number += 2;
MaybeAppend(state, "::{default arg#");
MaybeAppendDecimal(state, number);
MaybeAppend(state, "}::");
if (ParseOneCharToken(state, '_') && ParseName(state)) return true;
state->parse_state = copy;
if (state->parse_state.append) {
state->out[state->parse_state.out_cur_idx] = '\0';
}
return false;
}
state->parse_state = copy;
if (MaybeAppend(state, "::") && ParseName(state) &&
Optional(ParseDiscriminator(state))) {
return true;
}
state->parse_state = copy;
if (state->parse_state.append) {
state->out[state->parse_state.out_cur_idx] = '\0';
}
return ParseOneCharToken(state, 's') && Optional(ParseDiscriminator(state));
}
static bool ParseLocalName(State *state) {
ComplexityGuard guard(state);
if (guard.IsTooComplex()) return false;
ParseState copy = state->parse_state;
if (ParseOneCharToken(state, 'Z') && ParseEncoding(state) &&
ParseOneCharToken(state, 'E') && ParseLocalNameSuffix(state)) {
return true;
}
state->parse_state = copy;
return false;
}
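// <discriminator> ::= _ <digit>
//                 ::= __ <number (>= 10)> _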
static bool ParseDiscriminator(State *state) {
ComplexityGuard guard(state);
if (guard.IsTooComplex()) return false;
ParseState copy = state->parse_state;
if (!ParseOneCharToken(state, '_')) return false;
if (ParseDigit(state, nullptr)) return true;
if (ParseOneCharToken(state, '_') && ParseNumber(state, nullptr) &&
ParseOneCharToken(state, '_')) {
return true;
}
state->parse_state = copy;
return false;
}
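// <substitution> ::= S_
//                ::= S <seq-id> _
//                ::= <one of St, Sa, Sb, Ss, Si, So, Sd>  # standard abbreviations
// Back-references are rendered as "?"; "St" is honored only when
// accept_std is true.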
static bool ParseSubstitution(State *state, bool accept_std) {
ComplexityGuard guard(state);
if (guard.IsTooComplex()) return false;
if (ParseTwoCharToken(state, "S_")) {
MaybeAppend(state, "?");
return true;
}
ParseState copy = state->parse_state;
if (ParseOneCharToken(state, 'S') && ParseSeqId(state) &&
ParseOneCharToken(state, '_')) {
MaybeAppend(state, "?");
return true;
}
state->parse_state = copy;
if (ParseOneCharToken(state, 'S')) {
const AbbrevPair *p;
for (p = kSubstitutionList; p->abbrev != nullptr; ++p) {
if (RemainingInput(state)[0] == p->abbrev[1] &&
(accept_std || p->abbrev[1] != 't')) {
MaybeAppend(state, "std");
if (p->real_name[0] != '\0') {
MaybeAppend(state, "::");
MaybeAppend(state, p->real_name);
}
++state->parse_state.mangled_idx;
UpdateHighWaterMark(state);
return true;
}
}
}
state->parse_state = copy;
return false;
}
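// Parses a complete mangled name, accepting trailing function-clone
// suffixes (".isra.2", ".constprop.18", ...) and symbol versions ("@...").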
static bool ParseTopLevelMangledName(State *state) {
ComplexityGuard guard(state);
if (guard.IsTooComplex()) return false;
if (ParseMangledName(state)) {
if (RemainingInput(state)[0] != '\0') {
if (IsFunctionCloneSuffix(RemainingInput(state))) {
return true;
}
if (RemainingInput(state)[0] == '@') {
MaybeAppend(state, RemainingInput(state));
return true;
}
ReportHighWaterMark(state);
return false;
}
return true;
}
ReportHighWaterMark(state);
return false;
}
static bool Overflowed(const State *state) {
return state->parse_state.out_cur_idx >= state->out_end_idx;
}
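// Entry point.  Demangles "mangled" into "out" (at most out_size bytes,
// NUL-terminated); returns false on malformed input or output overflow.
// Symbols starting with "_R" are delegated to the Rust demangler.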
bool Demangle(const char* mangled, char* out, size_t out_size) {
if (mangled[0] == '_' && mangled[1] == 'R') {
return DemangleRustSymbolEncoding(mangled, out, out_size);
}
State state;
InitState(&state, mangled, out, out_size);
return ParseTopLevelMangledName(&state) && !Overflowed(&state) &&
state.parse_state.out_cur_idx > 0;
}
std::string DemangleString(const char* mangled) {
std::string out;
int status = 0;
char* demangled = nullptr;
#if ABSL_INTERNAL_HAS_CXA_DEMANGLE
demangled = abi::__cxa_demangle(mangled, nullptr, nullptr, &status);
#endif
if (status == 0 && demangled != nullptr) {
out.append(demangled);
free(demangled);
} else {
out.append(mangled);
}
return out;
}
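// Minimal usage sketch (hypothetical caller; "_Z3foov" mangles foo()):
//   char buf[128];
//   if (absl::debugging_internal::Demangle("_Z3foov", buf, sizeof(buf))) {
//     // buf now contains "foo()".
//   }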
}
ABSL_NAMESPACE_END
}

#include "absl/debugging/internal/demangle.h"
#include <cstdlib>
#include <string>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/base/config.h"
#include "absl/debugging/internal/stack_consumption.h"
#include "absl/log/log.h"
#include "absl/memory/memory.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace debugging_internal {
namespace {
using ::testing::ContainsRegex;
TEST(Demangle, FunctionTemplate) {
char tmp[100];
ASSERT_TRUE(Demangle("_Z3fooIiEiT_", tmp, sizeof(tmp)));
EXPECT_STREQ(tmp, "foo<>()");
}
TEST(Demangle, FunctionTemplateWithNesting) {
char tmp[100];
ASSERT_TRUE(Demangle("_Z3fooI7WrapperIiEEiT_", tmp, sizeof(tmp)));
EXPECT_STREQ(tmp, "foo<>()");
}
TEST(Demangle, FunctionTemplateWithNonTypeParamConstraint) {
char tmp[100];
ASSERT_TRUE(Demangle("_Z3fooITkSt8integraliEiT_", tmp, sizeof(tmp)));
EXPECT_STREQ(tmp, "foo<>()");
}
TEST(Demangle, FunctionTemplateWithFunctionRequiresClause) {
char tmp[100];
ASSERT_TRUE(Demangle("_Z3fooIiEivQsr3stdE8integralIT_E", tmp, sizeof(tmp)));
EXPECT_STREQ(tmp, "foo<>()");
}
TEST(Demangle, FunctionWithTemplateParamRequiresClause) {
char tmp[100];
ASSERT_TRUE(Demangle("_Z3fooIiQsr3stdE8integralIT_EEiv", tmp, sizeof(tmp)));
EXPECT_STREQ(tmp, "foo<>()");
}
TEST(Demangle, FunctionWithTemplateParamAndFunctionRequiresClauses) {
char tmp[100];
ASSERT_TRUE(Demangle("_Z3fooIiQsr3stdE8integralIT_EEivQsr3stdE8integralIS0_E",
tmp, sizeof(tmp)));
EXPECT_STREQ(tmp, "foo<>()");
}
TEST(Demangle, FunctionTemplateBacktracksOnMalformedRequiresClause) {
char tmp[100];
ASSERT_FALSE(Demangle("_Z3fooIiQEiT_", tmp, sizeof(tmp)));
}
TEST(Demangle, FunctionTemplateWithAutoParam) {
char tmp[100];
ASSERT_TRUE(Demangle("_Z3fooITnDaLi1EEvv", tmp, sizeof(tmp)));
EXPECT_STREQ(tmp, "foo<>()");
}
TEST(Demangle, FunctionTemplateWithNonTypeParamPack) {
char tmp[100];
ASSERT_TRUE(Demangle("_Z3fooITpTnRiJEiEvT0_", tmp, sizeof(tmp)));
EXPECT_STREQ(tmp, "foo<>()");
}
TEST(Demangle, FunctionTemplateTemplateParamWithConstrainedArg) {
char tmp[100];
ASSERT_TRUE(Demangle("_Z3fooITtTyE5FooerEvv", tmp, sizeof(tmp)));
EXPECT_STREQ(tmp, "foo<>()");
}
TEST(Demangle, ConstrainedAutoInFunctionTemplate) {
char tmp[100];
ASSERT_TRUE(Demangle("_Z1fITnDk1CLi0EEvv", tmp, sizeof(tmp)));
EXPECT_STREQ(tmp, "f<>()");
}
TEST(Demangle, ConstrainedFriendFunctionTemplate) {
char tmp[100];
ASSERT_TRUE(Demangle("_ZN2ns1YIiEF1yES1_QLb1E", tmp, sizeof(tmp)));
EXPECT_STREQ(tmp, "ns::Y<>::friend y()");
}
TEST(Demangle, ConstrainedFriendOperatorTemplate) {
char tmp[100];
ASSERT_TRUE(Demangle("_ZN2ns1YIiEFdeES1_QLb1E", tmp, sizeof(tmp)));
EXPECT_STREQ(tmp, "ns::Y<>::friend operator*()");
}
TEST(Demangle, NonTemplateBuiltinType) {
char tmp[100];
ASSERT_TRUE(Demangle("_Z3foou17__my_builtin_type", tmp, sizeof(tmp)));
EXPECT_STREQ(tmp, "foo()");
}
TEST(Demangle, SingleArgTemplateBuiltinType) {
char tmp[100];
ASSERT_TRUE(Demangle("_Z3fooIiEu17__my_builtin_typeIT_Ev", tmp, sizeof(tmp)));
EXPECT_STREQ(tmp, "foo<>()");
}
TEST(Demangle, TwoArgTemplateBuiltinType) {
char tmp[100];
ASSERT_TRUE(
Demangle("_Z3fooIicEu17__my_builtin_typeIT_T0_Ev", tmp, sizeof(tmp)));
EXPECT_STREQ(tmp, "foo<>()");
}
TEST(Demangle, TypeNestedUnderTemplatedBuiltinType) {
char tmp[100];
ASSERT_TRUE(Demangle("_Z1fIRK1CENu20__remove_reference_tIT_E4typeES3_",
tmp, sizeof(tmp)));
EXPECT_STREQ("f<>()", tmp);
}
TEST(Demangle, TemplateTemplateParamSubstitution) {
char tmp[100];
ASSERT_TRUE(Demangle("_Z3fooITtTyTnTL0__E8FoolableEvv", tmp, sizeof(tmp)));
EXPECT_STREQ(tmp, "foo<>()");
}
TEST(Demangle, TemplateParamSubstitutionWithGenericLambda) {
char tmp[100];
ASSERT_TRUE(
Demangle("_ZN5FooerIiE3fooIiEEvNS0_UlTL0__TL0_0_E_E", tmp, sizeof(tmp)));
EXPECT_STREQ(tmp, "Fooer<>::foo<>()");
}
TEST(Demangle, LambdaRequiresTrue) {
char tmp[100];
ASSERT_TRUE(Demangle("_ZNK3$_0clIiEEDaT_QLb1E", tmp, sizeof(tmp)));
EXPECT_STREQ(tmp, "$_0::operator()<>()");
}
TEST(Demangle, LambdaRequiresSimpleExpression) {
char tmp[100];
ASSERT_TRUE(Demangle("_ZNK3$_0clIiEEDaT_QeqplLi2ELi2ELi4E",
tmp, sizeof(tmp)));
EXPECT_STREQ(tmp, "$_0::operator()<>()");
}
TEST(Demangle, LambdaRequiresRequiresExpressionContainingTrue) {
char tmp[100];
ASSERT_TRUE(Demangle("_ZNK3$_0clIiEEDaT_QrqXLb1EE", tmp, sizeof(tmp)));
EXPECT_STREQ(tmp, "$_0::operator()<>()");
}
TEST(Demangle, LambdaRequiresRequiresExpressionContainingConcept) {
char tmp[100];
ASSERT_TRUE(Demangle("_ZNK3$_0clIiEEDaT_QrqXsr3stdE7same_asIDtfp_EiEE",
tmp, sizeof(tmp)));
EXPECT_STREQ(tmp, "$_0::operator()<>()");
}
TEST(Demangle, LambdaRequiresRequiresExpressionContainingNoexceptExpression) {
char tmp[100];
ASSERT_TRUE(Demangle("_ZNK3$_0clIiEEDaT_QrqXplfp_fp_NE", tmp, sizeof(tmp)));
EXPECT_STREQ(tmp, "$_0::operator()<>()");
}
TEST(Demangle, LambdaRequiresRequiresExpressionContainingReturnTypeConstraint) {
char tmp[100];
ASSERT_TRUE(Demangle("_ZNK3$_0clIiEEDaT_QrqXplfp_fp_RNSt7same_asIDtfp_EEEE",
tmp, sizeof(tmp)));
EXPECT_STREQ(tmp, "$_0::operator()<>()");
}
TEST(Demangle, LambdaRequiresRequiresExpressionWithBothNoexceptAndReturnType) {
char tmp[100];
ASSERT_TRUE(Demangle("_ZNK3$_0clIiEEDaT_QrqXplfp_fp_NRNSt7same_asIDtfp_EEEE",
tmp, sizeof(tmp)));
EXPECT_STREQ(tmp, "$_0::operator()<>()");
}
TEST(Demangle, LambdaRequiresRequiresExpressionContainingType) {
char tmp[100];
ASSERT_TRUE(Demangle("_ZNK3$_0clI1SEEDaT_QrqTNS2_1TEE", tmp, sizeof(tmp)));
EXPECT_STREQ(tmp, "$_0::operator()<>()");
}
TEST(Demangle, LambdaRequiresRequiresExpressionNestingAnotherRequires) {
char tmp[100];
ASSERT_TRUE(Demangle("_ZNK3$_0clIiEEDaT_QrqQLb1EE", tmp, sizeof(tmp)));
EXPECT_STREQ(tmp, "$_0::operator()<>()");
}
TEST(Demangle, LambdaRequiresRequiresExpressionContainingTwoRequirements) {
char tmp[100];
ASSERT_TRUE(Demangle("_ZNK3$_0clIiEEDaT_QrqXLb1EXeqplLi2ELi2ELi4EE",
tmp, sizeof(tmp)));
EXPECT_STREQ(tmp, "$_0::operator()<>()");
}
TEST(Demangle, RequiresExpressionWithItsOwnParameter) {
char tmp[100];
ASSERT_TRUE(Demangle("_Z1fIiE1SIXrQT__XplfL0p_fp_EEES1_", tmp, sizeof(tmp)));
EXPECT_STREQ(tmp, "f<>()");
}
TEST(Demangle, LambdaWithExplicitTypeArgument) {
char tmp[100];
ASSERT_TRUE(Demangle("_ZZ1fIiET_S0_ENKUlTyS0_E_clIiEEDaS0_",
tmp, sizeof(tmp)));
EXPECT_STREQ(tmp, "f<>()::{lambda()#1}::operator()<>()");
}
TEST(Demangle, LambdaWithExplicitPackArgument) {
char tmp[100];
ASSERT_TRUE(Demangle("_ZZ1fIiET_S0_ENKUlTpTyDpT_E_clIJiEEEDaS2_",
tmp, sizeof(tmp)));
EXPECT_STREQ(tmp, "f<>()::{lambda()#1}::operator()<>()");
}
TEST(Demangle, LambdaInClassMemberDefaultArgument) {
char tmp[100];
ASSERT_TRUE(Demangle("_ZZN1S1fEPFvvEEd_NKUlvE_clEv", tmp, sizeof(tmp)));
EXPECT_STREQ(tmp, "S::f()::{default arg#1}::{lambda()#1}::operator()()");
ASSERT_TRUE(Demangle("_ZZN1S1fEPFvvEEd0_NKUlvE_clEv", tmp, sizeof(tmp)));
EXPECT_STREQ(tmp, "S::f()::{default arg#2}::{lambda()#1}::operator()()");
ASSERT_FALSE(Demangle("_ZZN1S1fEPFvvEEdn1_NKUlvE_clEv", tmp, sizeof(tmp)));
}
TEST(Demangle, AvoidSignedOverflowForUnfortunateParameterNumbers) {
char tmp[100];
ASSERT_TRUE(Demangle("_ZZN1S1fEPFvvEEd2147483645_NKUlvE_clEv",
tmp, sizeof(tmp)));
EXPECT_STREQ(tmp,
"S::f()::{default arg#2147483647}::{lambda()#1}::operator()()");
ASSERT_TRUE(Demangle("_ZZN1S1fEPFvvEEd2147483646_NKUlvE_clEv",
tmp, sizeof(tmp)));
EXPECT_STREQ(tmp, "S::f()::{default arg#1}::{lambda()#1}::operator()()");
ASSERT_TRUE(Demangle("_ZZN1S1fEPFvvEEd2147483647_NKUlvE_clEv",
tmp, sizeof(tmp)));
EXPECT_STREQ(tmp, "S::f()::{default arg#1}::{lambda()#1}::operator()()");
ASSERT_TRUE(Demangle("_ZZN1S1fEPFvvEEd2147483648_NKUlvE_clEv",
tmp, sizeof(tmp)));
EXPECT_STREQ(tmp, "S::f()::{default arg#1}::{lambda()#1}::operator()()");
}
TEST(Demangle, SubstpackNotationForTroublesomeTemplatePack) {
char tmp[100];
ASSERT_TRUE(Demangle("_ZN1AIJEE1fIJEEEvDpO1BI_SUBSTPACK_T_E",
tmp, sizeof(tmp)));
EXPECT_STREQ(tmp, "A<>::f<>()");
}
TEST(Demangle, TemplateTemplateParamAppearingAsBackrefFollowedByTemplateArgs) {
char tmp[100];
ASSERT_TRUE(Demangle("_ZN1WI1SE1fIiEEDTclsrS0_IT_EE1mEEv", tmp, sizeof(tmp)));
EXPECT_STREQ(tmp, "W<>::f<>()");
}
TEST(Demangle, CornerCases) {
char tmp[10];
EXPECT_TRUE(Demangle("_Z6foobarv", tmp, sizeof(tmp)));
EXPECT_STREQ("foobar()", tmp);
EXPECT_TRUE(Demangle("_Z6foobarv", tmp, 9));
EXPECT_STREQ("foobar()", tmp);
EXPECT_FALSE(Demangle("_Z6foobarv", tmp, 8));
EXPECT_FALSE(Demangle("_Z6foobarv", tmp, 1));
EXPECT_FALSE(Demangle("_Z6foobarv", tmp, 0));
EXPECT_FALSE(Demangle("_Z6foobarv", nullptr, 0));
EXPECT_FALSE(Demangle("_Z1000000", tmp, 9));
}
TEST(Demangle, Clones) {
char tmp[20];
EXPECT_TRUE(Demangle("_ZL3Foov", tmp, sizeof(tmp)));
EXPECT_STREQ("Foo()", tmp);
EXPECT_TRUE(Demangle("_ZL3Foov.clone.3", tmp, sizeof(tmp)));
EXPECT_STREQ("Foo()", tmp);
EXPECT_TRUE(Demangle("_ZL3Foov.constprop.80", tmp, sizeof(tmp)));
EXPECT_STREQ("Foo()", tmp);
EXPECT_TRUE(Demangle("_ZL3Foov.isra.18", tmp, sizeof(tmp)));
EXPECT_STREQ("Foo()", tmp);
EXPECT_TRUE(Demangle("_ZL3Foov.isra.2.constprop.18", tmp, sizeof(tmp)));
EXPECT_STREQ("Foo()", tmp);
EXPECT_TRUE(Demangle("_ZL3Foov.__uniq.12345", tmp, sizeof(tmp)));
EXPECT_STREQ("Foo()", tmp);
EXPECT_TRUE(Demangle("_ZL3Foov.__uniq.12345.isra.2.constprop.18", tmp,
sizeof(tmp)));
EXPECT_STREQ("Foo()", tmp);
EXPECT_TRUE(Demangle("_ZL3Foov.clo", tmp, sizeof(tmp)));
EXPECT_STREQ("Foo()", tmp);
EXPECT_TRUE(Demangle("_ZL3Foov.123", tmp, sizeof(tmp)));
EXPECT_STREQ("Foo()", tmp);
EXPECT_TRUE(Demangle("_ZL3Foov.clone.foo", tmp, sizeof(tmp)));
EXPECT_STREQ("Foo()", tmp);
EXPECT_TRUE(Demangle("_ZL3Foov.clone.123.456", tmp, sizeof(tmp)));
EXPECT_STREQ("Foo()", tmp);
EXPECT_TRUE(Demangle("_ZL3Foov.part.9.165493.constprop.775.31805", tmp,
sizeof(tmp)));
EXPECT_STREQ("Foo()", tmp);
EXPECT_FALSE(Demangle("_ZL3Foov.", tmp, sizeof(tmp)));
EXPECT_FALSE(Demangle("_ZL3Foov.abc123", tmp, sizeof(tmp)));
EXPECT_FALSE(Demangle("_ZL3Foov.clone.", tmp, sizeof(tmp)));
EXPECT_FALSE(Demangle("_ZL3Foov.isra.2.constprop.", tmp, sizeof(tmp)));
}
TEST(Demangle, Discriminators) {
char tmp[80];
EXPECT_TRUE(Demangle("_ZZ1fvEN1S1gEv", tmp, sizeof(tmp)));
EXPECT_STREQ("f()::S::g()", tmp);
EXPECT_TRUE(Demangle("_ZZ1fvEN1S1gE_0v", tmp, sizeof(tmp)));
EXPECT_STREQ("f()::S::g()", tmp);
EXPECT_TRUE(Demangle("_ZZ1fvEN1S1gE_9v", tmp, sizeof(tmp)));
EXPECT_STREQ("f()::S::g()", tmp);
EXPECT_TRUE(Demangle("_ZZ1fvEN1S1gE__10_v", tmp, sizeof(tmp)));
EXPECT_STREQ("f()::S::g()", tmp);
}
TEST(Demangle, SingleDigitDiscriminatorFollowedByADigit) {
char tmp[80];
EXPECT_TRUE(Demangle("_ZZ1fvEN1S1gE_911return_type", tmp, sizeof(tmp)));
EXPECT_STREQ("f()::S::g()", tmp);
}
TEST(Demangle, LiteralOfGlobalNamespaceEnumType) {
char tmp[80];
EXPECT_TRUE(Demangle("_Z1fIL1E42EEvv", tmp, sizeof(tmp)));
EXPECT_STREQ("f<>()", tmp);
}
TEST(Demangle, NullptrLiterals) {
char tmp[80];
EXPECT_TRUE(Demangle("_Z1fILDnEEvv", tmp, sizeof(tmp)));
EXPECT_STREQ("f<>()", tmp);
EXPECT_TRUE(Demangle("_Z1fILDn0EEvv", tmp, sizeof(tmp)));
EXPECT_STREQ("f<>()", tmp);
}
TEST(Demangle, StringLiterals) {
char tmp[80];
EXPECT_TRUE(Demangle("_Z1fILA42_KcEEvv", tmp, sizeof(tmp)));
EXPECT_STREQ("f<>()", tmp);
}
TEST(Demangle, ComplexFloatingPointLiterals) {
char tmp[80];
EXPECT_TRUE(Demangle(
"_Z1fIiEvRAszpltlCdstT_ELS0_0000000000000000_4010000000000000E_c",
tmp, sizeof(tmp)));
EXPECT_STREQ("f<>()", tmp);
}
TEST(Demangle, Float128) {
char tmp[80];
EXPECT_TRUE(Demangle("_ZNK1ScvDF128_Ev", tmp, sizeof(tmp)));
EXPECT_STREQ("S::operator _Float128()", tmp);
}
TEST(Demangle, Float128x) {
char tmp[80];
EXPECT_TRUE(Demangle("_ZNK1ScvDF128xEv", tmp, sizeof(tmp)));
EXPECT_STREQ("S::operator _Float128x()", tmp);
}
TEST(Demangle, Bfloat16) {
char tmp[80];
EXPECT_TRUE(Demangle("_ZNK1ScvDF16bEv", tmp, sizeof(tmp)));
EXPECT_STREQ("S::operator std::bfloat16_t()", tmp);
}
TEST(Demangle, SimpleSignedBitInt) {
char tmp[80];
EXPECT_TRUE(Demangle("_ZNK1ScvDB256_Ev", tmp, sizeof(tmp)));
EXPECT_STREQ("S::operator _BitInt(256)()", tmp);
}
TEST(Demangle, SimpleUnsignedBitInt) {
char tmp[80];
EXPECT_TRUE(Demangle("_ZNK1ScvDU256_Ev", tmp, sizeof(tmp)));
EXPECT_STREQ("S::operator unsigned _BitInt(256)()", tmp);
}
TEST(Demangle, DependentBitInt) {
char tmp[80];
EXPECT_TRUE(Demangle("_ZNK1ScvDBT__ILi256EEEv", tmp, sizeof(tmp)));
EXPECT_STREQ("S::operator _BitInt(?)<>()", tmp);
}
TEST(Demangle, ConversionToPointerType) {
char tmp[80];
EXPECT_TRUE(Demangle("_ZNK1ScvPiEv", tmp, sizeof(tmp)));
EXPECT_STREQ("S::operator int*()", tmp);
}
TEST(Demangle, ConversionToLvalueReferenceType) {
char tmp[80];
EXPECT_TRUE(Demangle("_ZNK1ScvRiEv", tmp, sizeof(tmp)));
EXPECT_STREQ("S::operator int&()", tmp);
}
TEST(Demangle, ConversionToRvalueReferenceType) {
char tmp[80];
EXPECT_TRUE(Demangle("_ZNK1ScvOiEv", tmp, sizeof(tmp)));
EXPECT_STREQ("S::operator int&&()", tmp);
}
TEST(Demangle, ConversionToComplexFloatingPointType) {
char tmp[80];
EXPECT_TRUE(Demangle("_ZNK1ScvCfEv", tmp, sizeof(tmp)));
EXPECT_STREQ("S::operator float _Complex()", tmp);
}
TEST(Demangle, ConversionToImaginaryFloatingPointType) {
char tmp[80];
EXPECT_TRUE(Demangle("_ZNK1ScvGfEv", tmp, sizeof(tmp)));
EXPECT_STREQ("S::operator float _Imaginary()", tmp);
}
TEST(Demangle, ConversionToPointerToCvQualifiedType) {
char tmp[80];
EXPECT_TRUE(Demangle("_ZNK1ScvPrVKiEv", tmp, sizeof(tmp)));
EXPECT_STREQ("S::operator int const volatile restrict*()", tmp);
}
TEST(Demangle, ConversionToLayeredPointerType) {
char tmp[80];
EXPECT_TRUE(Demangle("_ZNK1ScvPKPKiEv", tmp, sizeof(tmp)));
EXPECT_STREQ("S::operator int const* const*()", tmp);
}
TEST(Demangle, ConversionToTypeWithExtendedQualifier) {
char tmp[80];
EXPECT_TRUE(Demangle("_ZNK1ScvPU5AS128KiEv", tmp, sizeof(tmp)));
EXPECT_STREQ("S::operator int*()", tmp);
}
TEST(Demangle, GlobalInitializers) {
char tmp[80];
EXPECT_TRUE(Demangle("_ZGR1v", tmp, sizeof(tmp)));
EXPECT_STREQ("reference temporary for v", tmp);
EXPECT_TRUE(Demangle("_ZGR1v_", tmp, sizeof(tmp)));
EXPECT_STREQ("reference temporary for v", tmp);
EXPECT_TRUE(Demangle("_ZGR1v0_", tmp, sizeof(tmp)));
EXPECT_STREQ("reference temporary for v", tmp);
EXPECT_TRUE(Demangle("_ZGR1v1Z_", tmp, sizeof(tmp)));
EXPECT_STREQ("reference temporary for v", tmp);
}
TEST(Demangle, StructuredBindings) {
char tmp[80];
EXPECT_TRUE(Demangle("_ZDC1x1yE", tmp, sizeof(tmp)));
EXPECT_TRUE(Demangle("_ZGRDC1x1yE_", tmp, sizeof(tmp)));
}
TEST(Demangle, AbiTags) {
char tmp[80];
EXPECT_TRUE(Demangle("_Z1aB3abc", tmp, sizeof(tmp)));
EXPECT_STREQ("a[abi:abc]", tmp);
EXPECT_TRUE(Demangle("_ZN1BC2B3xyzEv", tmp, sizeof(tmp)));
EXPECT_STREQ("B::B[abi:xyz]()", tmp);
EXPECT_TRUE(Demangle("_Z1CB3barB3foov", tmp, sizeof(tmp)));
EXPECT_STREQ("C[abi:bar][abi:foo]()", tmp);
}
TEST(Demangle, SimpleGnuVectorSize) {
char tmp[80];
EXPECT_TRUE(Demangle("_Z1fDv8_i", tmp, sizeof(tmp)));
EXPECT_STREQ("f()", tmp);
}
TEST(Demangle, GnuVectorSizeIsATemplateParameter) {
char tmp[80];
EXPECT_TRUE(Demangle("_Z1fILi32EEvDvT__i", tmp, sizeof(tmp)));
EXPECT_STREQ("f<>()", tmp);
}
TEST(Demangle, GnuVectorSizeIsADependentOperatorExpression) {
char tmp[80];
EXPECT_TRUE(Demangle("_Z1fILi32EEvDvmlLi2ET__i", tmp, sizeof(tmp)));
EXPECT_STREQ("f<>()", tmp);
}
TEST(Demangle, SimpleAddressSpace) {
char tmp[80];
EXPECT_TRUE(Demangle("_Z1fPU5AS128Ki", tmp, sizeof(tmp)));
EXPECT_STREQ("f()", tmp);
}
TEST(Demangle, DependentAddressSpace) {
char tmp[80];
EXPECT_TRUE(Demangle("_Z1fILi128EEvPU2ASIT_Ei", tmp, sizeof(tmp)));
EXPECT_STREQ("f<>()", tmp);
}
TEST(Demangle, TransactionSafeEntryPoint) {
char tmp[80];
EXPECT_TRUE(Demangle("_ZGTt1fv", tmp, sizeof(tmp)));
EXPECT_STREQ("transaction clone for f()", tmp);
}
TEST(Demangle, TransactionSafeFunctionType) {
char tmp[80];
EXPECT_TRUE(Demangle("_Z1fPDxFvvE", tmp, sizeof(tmp)));
EXPECT_STREQ("f()", tmp);
}
TEST(Demangle, TemplateParameterObject) {
char tmp[80];
EXPECT_TRUE(Demangle("_Z1fIXtl1SLi1ELi2EEEXadL_ZTAXtlS0_Li1ELi2EEEEEEvv",
tmp, sizeof(tmp)));
EXPECT_STREQ("f<>()", tmp);
EXPECT_TRUE(Demangle("_ZTAXtl1SLi1ELi2EEE", tmp, sizeof(tmp)));
EXPECT_STREQ("template parameter object", tmp);
}
TEST(Demangle, EnableIfAttributeOnGlobalFunction) {
char tmp[80];
EXPECT_TRUE(Demangle("_Z1fUa9enable_ifIXgefL0p_Li0EEEl", tmp, sizeof(tmp)));
EXPECT_STREQ("f()", tmp);
}
TEST(Demangle, EnableIfAttributeOnNamespaceScopeFunction) {
char tmp[80];
EXPECT_TRUE(Demangle("_ZN2ns1fEUa9enable_ifIXgefL0p_Li0EEEl",
tmp, sizeof(tmp)));
EXPECT_STREQ("ns::f()", tmp);
}
TEST(Demangle, EnableIfAttributeOnFunctionTemplate) {
char tmp[80];
EXPECT_TRUE(Demangle("_Z1fIiEUa9enable_ifIXgefL0p_tliEEET_S0_",
tmp, sizeof(tmp)));
EXPECT_STREQ("f<>()", tmp);
}
TEST(Demangle, ThisPointerInDependentSignature) {
char tmp[80];
EXPECT_TRUE(Demangle("_ZN1S1fIiEEDTcl1gIT_EfpTEEv", tmp, sizeof(tmp)));
EXPECT_STREQ("S::f<>()", tmp);
}
TEST(Demangle, DependentMemberOperatorCall) {
char tmp[80];
EXPECT_TRUE(Demangle("_Z1fI1CEDTcldtfp_onclEET_", tmp, sizeof(tmp)));
EXPECT_STREQ("f<>()", tmp);
}
TEST(Demangle, TypeNestedUnderDecltype) {
char tmp[80];
EXPECT_TRUE(Demangle("_Z1fIiENDTtl1SIT_EEE1tEv", tmp, sizeof(tmp)));
EXPECT_STREQ("f<>()", tmp);
}
TEST(Demangle, ElaboratedTypes) {
char tmp[80];
EXPECT_TRUE(Demangle("_Z1fIiEvTsN1SIT_E1CE", tmp, sizeof(tmp)));
EXPECT_STREQ("f<>()", tmp);
EXPECT_TRUE(Demangle("_Z1fIiEvTuN1SIT_E1CE", tmp, sizeof(tmp)));
EXPECT_STREQ("f<>()", tmp);
EXPECT_TRUE(Demangle("_Z1fIiEvTeN1SIT_E1CE", tmp, sizeof(tmp)));
EXPECT_STREQ("f<>()", tmp);
}
TEST(Demangle, SubobjectAddresses) {
char tmp[80];
EXPECT_TRUE(Demangle("_Z1fIXsoKcL_Z1aE123EEEvv", tmp, sizeof(tmp)));
EXPECT_STREQ("f<>()", tmp);
EXPECT_TRUE(Demangle("_Z1fIXadsoKcL_Z1aEEEEvv", tmp, sizeof(tmp)));
EXPECT_STREQ("f<>()", tmp);
EXPECT_TRUE(Demangle("_Z1fIXadsoKcL_Z1aE123EEEvv", tmp, sizeof(tmp)));
EXPECT_STREQ("f<>()", tmp);
EXPECT_TRUE(Demangle("_Z1fIXadsoKcL_Z1aE123pEEEvv", tmp, sizeof(tmp)));
EXPECT_STREQ("f<>()", tmp);
EXPECT_TRUE(Demangle("_Z1fIXadsoKcL_Z1aE__1_234EEEvv", tmp, sizeof(tmp)));
EXPECT_STREQ("f<>()", tmp);
EXPECT_TRUE(Demangle("_Z1fIXadsoKcL_Z1aE123_456pEEEvv", tmp, sizeof(tmp)));
EXPECT_STREQ("f<>()", tmp);
}
TEST(Demangle, Preincrement) {
char tmp[80];
EXPECT_TRUE(Demangle("_Z1fIiEDTtlT_pp_fp_EES0_", tmp, sizeof(tmp)));
EXPECT_STREQ("f<>()", tmp);
}
TEST(Demangle, Postincrement) {
char tmp[80];
EXPECT_TRUE(Demangle("_Z1fIiEDTtlT_ppfp_EES0_", tmp, sizeof(tmp)));
EXPECT_STREQ("f<>()", tmp);
}
TEST(Demangle, Predecrement) {
char tmp[80];
EXPECT_TRUE(Demangle("_Z1fIiEDTtlT_mm_fp_EES0_", tmp, sizeof(tmp)));
EXPECT_STREQ("f<>()", tmp);
}
TEST(Demangle, Postdecrement) {
char tmp[80];
EXPECT_TRUE(Demangle("_Z1fIiEDTtlT_mmfp_EES0_", tmp, sizeof(tmp)));
EXPECT_STREQ("f<>()", tmp);
}
TEST(Demangle, UnaryFoldExpressions) {
char tmp[80];
EXPECT_TRUE(Demangle("_Z1fIJilEE1SIXfrooeqstT_Li4EEEDpS1_",
tmp, sizeof(tmp)));
EXPECT_STREQ("f<>()", tmp);
EXPECT_TRUE(Demangle("_Z1fIJilEE1SIXflooeqstT_Li4EEEDpS1_",
tmp, sizeof(tmp)));
EXPECT_STREQ("f<>()", tmp);
}
TEST(Demangle, BinaryFoldExpressions) {
char tmp[80];
EXPECT_TRUE(Demangle("_Z1fIJilEE1SIXfRooeqstT_Li4ELb0EEEDpS1_",
tmp, sizeof(tmp)));
EXPECT_STREQ("f<>()", tmp);
EXPECT_TRUE(Demangle("_Z1fIJilEE1SIXfLooLb0EeqstT_Li4EEEDpS1_",
tmp, sizeof(tmp)));
EXPECT_STREQ("f<>()", tmp);
}
TEST(Demangle, SizeofPacks) {
char tmp[80];
EXPECT_TRUE(Demangle("_Z1fIJilEE1SIXsZT_EEDpT_", tmp, sizeof(tmp)));
EXPECT_STREQ("f<>()", tmp);
EXPECT_TRUE(Demangle("_Z1gIJilEE1SIXsZfp_EEDpT_", tmp, sizeof(tmp)));
EXPECT_STREQ("g<>()", tmp);
}
TEST(Demangle, SizeofPackInvolvingAnAliasTemplate) {
char tmp[80];
EXPECT_TRUE(Demangle("_Z1fIJiEEvRAsPDpT_iE_Kc", tmp, sizeof(tmp)));
EXPECT_STREQ("f<>()", tmp);
}
TEST(Demangle, UserDefinedLiteral) {
char tmp[80];
EXPECT_TRUE(Demangle("_Zli4_lity", tmp, sizeof(tmp)));
EXPECT_STREQ("operator\"\" _lit()", tmp);
}
TEST(Demangle, Spaceship) {
char tmp[80];
EXPECT_TRUE(Demangle("_ZNK1SssERKS_", tmp, sizeof(tmp)));
EXPECT_STREQ("S::operator<=>()", tmp);
EXPECT_TRUE(Demangle("_Z1gI1SEDTssfp_fp0_ET_S2_", tmp, sizeof(tmp)));
EXPECT_STREQ("g<>()", tmp);
}
TEST(Demangle, CoAwait) {
char tmp[80];
EXPECT_TRUE(Demangle("_ZNK2ns9AwaitableawEv", tmp, sizeof(tmp)));
EXPECT_STREQ("ns::Awaitable::operator co_await()", tmp);
}
TEST(Demangle, VendorExtendedExpressions) {
char tmp[80];
EXPECT_TRUE(Demangle("_Z1fIXu3__eEEEvv", tmp, sizeof(tmp)));
EXPECT_STREQ("f<>()", tmp);
EXPECT_TRUE(Demangle("_Z1fIXu3__eilEEEvv", tmp, sizeof(tmp)));
EXPECT_STREQ("f<>()", tmp);
}
TEST(Demangle, DirectListInitialization) {
char tmp[80];
EXPECT_TRUE(Demangle("_Z1fIiEDTtlT_EEv", tmp, sizeof(tmp)));
EXPECT_STREQ("f<>()", tmp);
EXPECT_TRUE(Demangle("_Z1gI3XYZEDTtlT_Li1ELi2ELi3EEEv", tmp, sizeof(tmp)));
EXPECT_STREQ("g<>()", tmp);
EXPECT_TRUE(Demangle("_Z1hI3XYZEDTtlT_di1xLi1Edi1yLi2Edi1zLi3EEEv",
tmp, sizeof(tmp)));
EXPECT_STREQ("h<>()", tmp);
EXPECT_TRUE(Demangle("_Z1iI1AEDTtlT_di1adxLi2ELi42EEEv", tmp, sizeof(tmp)));
EXPECT_STREQ("i<>()", tmp);
EXPECT_TRUE(Demangle("_Z1jI1AEDTtlT_di1adXLi1ELi3ELi42EEEv",
tmp, sizeof(tmp)));
EXPECT_STREQ("j<>()", tmp);
}
TEST(Demangle, SimpleInitializerLists) {
char tmp[80];
EXPECT_TRUE(Demangle("_Z1fIiEDTcl1gIT_EilEEEv", tmp, sizeof(tmp)));
EXPECT_STREQ("f<>()", tmp);
EXPECT_TRUE(Demangle("_Z1fIiEDTcl1gilfp_EEET_", tmp, sizeof(tmp)));
EXPECT_STREQ("f<>()", tmp);
EXPECT_TRUE(Demangle("_Z1fIiEDTcl1gilfp_fp0_EEET_S1_", tmp, sizeof(tmp)));
EXPECT_STREQ("f<>()", tmp);
}
TEST(Demangle, BracedListImplicitlyConstructingAClassObject) {
char tmp[80];
EXPECT_TRUE(Demangle("_Z1fIiEDTcl1gildi1vfp_EEET_", tmp, sizeof(tmp)));
EXPECT_STREQ("f<>()", tmp);
}
TEST(Demangle, SimpleNewExpression) {
char tmp[80];
EXPECT_TRUE(Demangle("_Z1fIiEDTtlT_denw_S0_EEEv", tmp, sizeof(tmp)));
EXPECT_STREQ("f<>()", tmp);
}
TEST(Demangle, NewExpressionWithEmptyParentheses) {
char tmp[80];
EXPECT_TRUE(Demangle("_Z1fIiEDTtlT_denw_S0_piEEEv", tmp, sizeof(tmp)));
EXPECT_STREQ("f<>()", tmp);
}
TEST(Demangle, NewExpressionWithNonemptyParentheses) {
char tmp[80];
EXPECT_TRUE(Demangle("_Z1fIiEDTtlT_denw_S0_piLi42EEEEv", tmp, sizeof(tmp)));
EXPECT_STREQ("f<>()", tmp);
}
TEST(Demangle, PlacementNewExpression) {
char tmp[80];
EXPECT_TRUE(Demangle("_Z1fIiEDTtlT_denwadfp__S0_piLi42EEEES0_",
tmp, sizeof(tmp)));
EXPECT_STREQ("f<>()", tmp);
}
TEST(Demangle, GlobalScopeNewExpression) {
char tmp[80];
EXPECT_TRUE(Demangle("_Z1fIiEDTtlT_degsnw_S0_EEEv", tmp, sizeof(tmp)));
EXPECT_STREQ("f<>()", tmp);
}
TEST(Demangle, NewExpressionWithEmptyBraces) {
char tmp[80];
EXPECT_TRUE(Demangle("_Z1fIiEDTtlT_denw_S0_ilEEEv", tmp, sizeof(tmp)));
EXPECT_STREQ("f<>()", tmp);
}
TEST(Demangle, NewExpressionWithNonemptyBraces) {
char tmp[80];
EXPECT_TRUE(Demangle("_Z1fIiEDTtlT_denw_S0_ilLi42EEEEv", tmp, sizeof(tmp)));
EXPECT_STREQ("f<>()", tmp);
}
TEST(Demangle, SimpleArrayNewExpression) {
char tmp[80];
EXPECT_TRUE(Demangle("_Z1fIiEDTtlT_dena_S0_EEEv", tmp, sizeof(tmp)));
EXPECT_STREQ("f<>()", tmp);
}
TEST(Demangle, ArrayNewExpressionWithEmptyParentheses) {
char tmp[80];
EXPECT_TRUE(Demangle("_Z1fIiEDTtlT_dena_S0_piEEEv", tmp, sizeof(tmp)));
EXPECT_STREQ("f<>()", tmp);
}
TEST(Demangle, ArrayPlacementNewExpression) {
char tmp[80];
EXPECT_TRUE(Demangle("_Z1fIiEDTtlT_denaadfp__S0_EEES0_", tmp, sizeof(tmp)));
EXPECT_STREQ("f<>()", tmp);
}
TEST(Demangle, GlobalScopeArrayNewExpression) {
char tmp[80];
EXPECT_TRUE(Demangle("_Z1fIiEDTtlT_degsna_S0_EEEv", tmp, sizeof(tmp)));
EXPECT_STREQ("f<>()", tmp);
}
TEST(Demangle, ArrayNewExpressionWithTwoElementsInBraces) {
char tmp[80];
EXPECT_TRUE(Demangle("_Z1fIiEDTtlT_dena_S0_ilLi1ELi2EEEEv",
tmp, sizeof(tmp)));
EXPECT_STREQ("f<>()", tmp);
}
TEST(Demangle, SimpleDeleteExpression) {
char tmp[80];
EXPECT_TRUE(Demangle("_Z1fIiEDTdlfp_EPT_", tmp, sizeof(tmp)));
EXPECT_STREQ("f<>()", tmp);
}
TEST(Demangle, GlobalScopeDeleteExpression) {
char tmp[80];
EXPECT_TRUE(Demangle("_Z1fIiEDTgsdlfp_EPT_", tmp, sizeof(tmp)));
EXPECT_STREQ("f<>()", tmp);
}
TEST(Demangle, SimpleArrayDeleteExpression) {
char tmp[80];
EXPECT_TRUE(Demangle("_Z1fIiEDTdafp_EPT_", tmp, sizeof(tmp)));
EXPECT_STREQ("f<>()", tmp);
}
TEST(Demangle, GlobalScopeArrayDeleteExpression) {
char tmp[80];
EXPECT_TRUE(Demangle("_Z1fIiEDTgsdafp_EPT_", tmp, sizeof(tmp)));
EXPECT_STREQ("f<>()", tmp);
}
TEST(Demangle, ReferenceQualifiedFunctionTypes) {
char tmp[80];
EXPECT_TRUE(Demangle("_Z1fPKFvvREi", tmp, sizeof(tmp)));
EXPECT_STREQ("f()", tmp);
EXPECT_TRUE(Demangle("_Z1fPFvvOEi", tmp, sizeof(tmp)));
EXPECT_STREQ("f()", tmp);
EXPECT_TRUE(Demangle("_Z1fPFvRiREi", tmp, sizeof(tmp)));
EXPECT_STREQ("f()", tmp);
EXPECT_TRUE(Demangle("_Z1fPFvO1SOEi", tmp, sizeof(tmp)));
EXPECT_STREQ("f()", tmp);
}
TEST(Demangle, DynamicCast) {
char tmp[80];
EXPECT_TRUE(Demangle("_Z1fI1SEDTdcPKT_fp_EPS1_", tmp, sizeof(tmp)));
EXPECT_STREQ("f<>()", tmp);
}
TEST(Demangle, StaticCast) {
char tmp[80];
EXPECT_TRUE(Demangle("_Z1fIiEDTscPKT_fp_EPS0_", tmp, sizeof(tmp)));
EXPECT_STREQ("f<>()", tmp);
}
TEST(Demangle, ConstCast) {
char tmp[80];
EXPECT_TRUE(Demangle("_Z1fIiEDTccPKT_fp_EPS0_", tmp, sizeof(tmp)));
EXPECT_STREQ("f<>()", tmp);
}
TEST(Demangle, ReinterpretCast) {
char tmp[80];
EXPECT_TRUE(Demangle("_Z1fIiEDTrcPKT_fp_EPS0_", tmp, sizeof(tmp)));
EXPECT_STREQ("f<>()", tmp);
}
TEST(Demangle, TypeidType) {
char tmp[80];
EXPECT_TRUE(Demangle("_Z1fIiEDTcldttiT_4nameEES0_", tmp, sizeof(tmp)));
EXPECT_STREQ("f<>()", tmp);
}
TEST(Demangle, TypeidExpression) {
char tmp[80];
EXPECT_TRUE(Demangle("_Z1fIiEDTcldttetlT_E4nameEES0_", tmp, sizeof(tmp)));
EXPECT_STREQ("f<>()", tmp);
}
TEST(Demangle, AlignofType) {
char tmp[80];
EXPECT_TRUE(Demangle("_Z1fIiET_RAatS0__S0_", tmp, sizeof(tmp)));
EXPECT_STREQ("f<>()", tmp);
}
TEST(Demangle, AlignofExpression) {
char tmp[80];
EXPECT_TRUE(Demangle("_Z1fIiET_RAaztlS0_E_S0_", tmp, sizeof(tmp)));
EXPECT_STREQ("f<>()", tmp);
}
TEST(Demangle, NoexceptExpression) {
char tmp[80];
EXPECT_TRUE(Demangle("_Z1fIiEvRAnxtlT_E_S0_", tmp, sizeof(tmp)));
EXPECT_STREQ("f<>()", tmp);
}
TEST(Demangle, UnaryThrow) {
char tmp[80];
EXPECT_TRUE(Demangle("_Z1fILb0EEDTquT_twT_Li0EEv", tmp, sizeof(tmp)));
EXPECT_STREQ("f<>()", tmp);
}
TEST(Demangle, NullaryThrow) {
char tmp[80];
EXPECT_TRUE(Demangle("_Z1fILb0EEDTquT_trLi0EEv", tmp, sizeof(tmp)));
EXPECT_STREQ("f<>()", tmp);
}
TEST(Demangle, ThreadLocalWrappers) {
char tmp[80];
EXPECT_TRUE(Demangle("_ZTWN2ns3varE", tmp, sizeof(tmp)));
EXPECT_STREQ("thread-local wrapper routine for ns::var", tmp);
EXPECT_TRUE(Demangle("_ZTHN2ns3varE", tmp, sizeof(tmp)));
EXPECT_STREQ("thread-local initialization routine for ns::var", tmp);
}
TEST(Demangle, DubiousSrStSymbols) {
char tmp[80];
EXPECT_TRUE(Demangle("_Z1fIcE1SIXsrSt1uIT_E1vEEv", tmp, sizeof(tmp)));
EXPECT_STREQ("f<>()", tmp);
EXPECT_TRUE(Demangle(
"_ZSteqIcEN9__gnu_cxx11__enable_if"
"IXsrSt9__is_charIT_E7__valueEbE"
"6__typeE"
"RKNSt7__cxx1112basic_stringIS3_St11char_traitsIS3_ESaIS3_EEESE_",
tmp, sizeof(tmp)));
EXPECT_STREQ("std::operator==<>()", tmp);
}
TEST(Demangle, DelegatesToDemangleRustSymbolEncoding) {
char tmp[80];
EXPECT_TRUE(Demangle("_RNvC8my_crate7my_func", tmp, sizeof(tmp)));
EXPECT_STREQ("my_crate::my_func", tmp);
}
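// The block below measures Demangle's stack consumption by invoking it from a
// signal handler on an alternate stack; it is compiled out under
// ASan/MSan/TSan, whose instrumentation would distort the measurement.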
#if defined(ABSL_INTERNAL_HAVE_DEBUGGING_STACK_CONSUMPTION) && \
!defined(ABSL_HAVE_ADDRESS_SANITIZER) && \
!defined(ABSL_HAVE_MEMORY_SANITIZER) && \
!defined(ABSL_HAVE_THREAD_SANITIZER)
static const char *g_mangled;
static char g_demangle_buffer[4096];
static char *g_demangle_result;
static void DemangleSignalHandler(int signo) {
if (Demangle(g_mangled, g_demangle_buffer, sizeof(g_demangle_buffer))) {
g_demangle_result = g_demangle_buffer;
} else {
g_demangle_result = nullptr;
}
}
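// Demangles `mangled` from inside a signal handler, reporting the handler's
// stack usage through *stack_consumed; returns the demangled string, or
// nullptr if demangling failed.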
static const char *DemangleStackConsumption(const char *mangled,
int *stack_consumed) {
g_mangled = mangled;
*stack_consumed = GetSignalHandlerStackConsumption(DemangleSignalHandler);
LOG(INFO) << "Stack consumption of Demangle: " << *stack_consumed;
return g_demangle_result;
}
const int kStackConsumptionUpperLimit = 8192;
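// Builds a mangled name with `depth` levels of template nesting (depth 1
// yields "_Z1aIXL_Z1aEEE"); used to verify that stack usage stays bounded as
// the input nesting grows.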
static std::string NestedMangledName(int depth) {
std::string mangled_name = "_Z1a";
if (depth > 0) {
mangled_name += "IXL";
mangled_name += NestedMangledName(depth - 1);
mangled_name += "EEE";
}
return mangled_name;
}
TEST(Demangle, DemangleStackConsumption) {
int stack_consumed = 0;
const char *demangled =
DemangleStackConsumption("_Z6foobarv", &stack_consumed);
EXPECT_STREQ("foobar()", demangled);
EXPECT_GT(stack_consumed, 0);
EXPECT_LT(stack_consumed, kStackConsumptionUpperLimit);
const std::string nested_mangled_name0 = NestedMangledName(0);
demangled = DemangleStackConsumption(nested_mangled_name0.c_str(),
&stack_consumed);
EXPECT_STREQ("a", demangled);
EXPECT_GT(stack_consumed, 0);
EXPECT_LT(stack_consumed, kStackConsumptionUpperLimit);
const std::string nested_mangled_name1 = NestedMangledName(1);
demangled = DemangleStackConsumption(nested_mangled_name1.c_str(),
&stack_consumed);
EXPECT_STREQ("a<>", demangled);
EXPECT_GT(stack_consumed, 0);
EXPECT_LT(stack_consumed, kStackConsumptionUpperLimit);
const std::string nested_mangled_name2 = NestedMangledName(2);
demangled = DemangleStackConsumption(nested_mangled_name2.c_str(),
&stack_consumed);
EXPECT_STREQ("a<>", demangled);
EXPECT_GT(stack_consumed, 0);
EXPECT_LT(stack_consumed, kStackConsumptionUpperLimit);
const std::string nested_mangled_name3 = NestedMangledName(3);
demangled = DemangleStackConsumption(nested_mangled_name3.c_str(),
&stack_consumed);
EXPECT_STREQ("a<>", demangled);
EXPECT_GT(stack_consumed, 0);
EXPECT_LT(stack_consumed, kStackConsumptionUpperLimit);
}
#endif
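// Feeds `input` to Demangle with a 1 MiB output buffer and discards the
// result; the regression tests below only assert that this terminates without
// crashing.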
static void TestOnInput(const char* input) {
static const int kOutSize = 1048576;
auto out = absl::make_unique<char[]>(kOutSize);
Demangle(input, out.get(), kOutSize);
}
TEST(DemangleRegression, NegativeLength) {
TestOnInput("_ZZn4");
}
TEST(DemangleRegression, DeeplyNestedArrayType) {
const int depth = 100000;
std::string data = "_ZStI";
data.reserve(data.size() + 3 * depth + 1);
for (int i = 0; i < depth; i++) {
data += "A1_";
}
TestOnInput(data.c_str());
}
struct Base {
virtual ~Base() = default;
};
struct Derived : public Base {};
TEST(DemangleStringTest, SupportsSymbolNameReturnedByTypeId) {
EXPECT_EQ(DemangleString(typeid(int).name()), "int");
EXPECT_THAT(
DemangleString(typeid(Base).name()),
ContainsRegex("absl.*debugging_internal.*anonymous namespace.*::Base"));
EXPECT_THAT(DemangleString(typeid(Derived).name()),
ContainsRegex(
"absl.*debugging_internal.*anonymous namespace.*::Derived"));
}
}  // namespace
}  // namespace debugging_internal
ABSL_NAMESPACE_END
} | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/debugging/internal/demangle.cc | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/debugging/internal/demangle_test.cc | 03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4 |
1352048f-deb8-481b-a19a-318989c453e4 | cpp | google/tensorstore | bzip2_compressor | tensorstore/internal/compression/bzip2_compressor.cc | tensorstore/driver/n5/bzip2_compressor_test.cc | #include "tensorstore/internal/compression/bzip2_compressor.h"
#include <cstddef>
#include <memory>
#include <utility>
#include "riegeli/bytes/reader.h"
#include "riegeli/bytes/writer.h"
#include "riegeli/bzip2/bzip2_reader.h"
#include "riegeli/bzip2/bzip2_writer.h"
namespace tensorstore {
namespace internal {
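// Bzip2Compressor adapts riegeli's bzip2 stream classes: `level` is forwarded
// as the bzip2 compression level (block size), and `element_bytes` is unused
// by this codec.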
std::unique_ptr<riegeli::Writer> Bzip2Compressor::GetWriter(
std::unique_ptr<riegeli::Writer> base_writer, size_t element_bytes) const {
using Writer = riegeli::Bzip2Writer<std::unique_ptr<riegeli::Writer>>;
Writer::Options options;
options.set_compression_level(level);
return std::make_unique<Writer>(std::move(base_writer), options);
}
std::unique_ptr<riegeli::Reader> Bzip2Compressor::GetReader(
std::unique_ptr<riegeli::Reader> base_reader, size_t element_bytes) const {
using Reader = riegeli::Bzip2Reader<std::unique_ptr<riegeli::Reader>>;
return std::make_unique<Reader>(std::move(base_reader));
}
}  // namespace internal
} | #include <cstdint>
#include <string_view>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorstore/array.h"
#include "tensorstore/driver/n5/compressor.h"
#include "tensorstore/driver/n5/metadata.h"
#include "tensorstore/internal/json_binding/gtest.h"
#include "tensorstore/internal/json_gtest.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/status_testutil.h"
namespace {
using ::tensorstore::Index;
using ::tensorstore::MakeArray;
using ::tensorstore::MatchesStatus;
using ::tensorstore::span;
using ::tensorstore::internal_n5::Compressor;
using ::tensorstore::internal_n5::DecodeChunk;
using ::tensorstore::internal_n5::N5Metadata;
TEST(Bzip2CompressionTest, Parse) {
tensorstore::TestJsonBinderRoundTripJsonOnlyInexact<Compressor>({
{{{"type", "bzip2"}}, {{"type", "bzip2"}, {"blockSize", 9}}},
{{{"type", "bzip2"}, {"blockSize", 3}},
{{"type", "bzip2"}, {"blockSize", 3}}},
});
EXPECT_THAT(Compressor::FromJson({{"type", "bzip2"}, {"blockSize", "x"}}),
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(Compressor::FromJson({{"type", "bzip2"}, {"blockSize", 0}}),
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(Compressor::FromJson({{"type", "bzip2"}, {"blockSize", 10}}),
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(Compressor::FromJson({{"type", "bzip2"}, {"extra", "x"}}),
MatchesStatus(absl::StatusCode::kInvalidArgument));
}
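// Golden-data test: decodes a pre-encoded N5 chunk into the expected array,
// then checks that a fresh encoding round-trips through DecodeChunk.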
TEST(Bzip2CompressionTest, Golden) {
const unsigned char kData[] = {
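      // N5 chunk header (big-endian): mode (0 = default), rank (3), then the
      // per-dimension block shape 1, 2, 3.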
0x00, 0x00,
0x00, 0x03,
0x00, 0x00, 0x00, 0x01,
0x00, 0x00, 0x00, 0x02,
0x00, 0x00, 0x00, 0x03,
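      // bzip2-compressed payload; 0x42 0x5a 0x68 0x39 is the "BZh9" stream
      // header.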
0x42, 0x5a, 0x68, 0x39,
0x31, 0x41, 0x59, 0x26,
0x53, 0x59, 0x02, 0x3e,
0x0d, 0xd2, 0x00, 0x00,
0x00, 0x40, 0x00, 0x7f,
0x00, 0x20, 0x00, 0x31,
0x0c, 0x01, 0x0d, 0x31,
0xa8, 0x73, 0x94, 0x33,
0x7c, 0x5d, 0xc9, 0x14,
0xe1, 0x42, 0x40, 0x08,
0xf8, 0x37, 0x48,
};
std::string encoded_data(std::begin(kData), std::end(kData));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto metadata,
N5Metadata::FromJson({{"dimensions", {10, 11, 12}},
{"blockSize", {1, 2, 3}},
{"dataType", "uint16"},
{"compression", {{"type", "bzip2"}}}}));
auto array = MakeArray<uint16_t>({{{1, 3, 5}, {2, 4, 6}}});
EXPECT_EQ(array, DecodeChunk(metadata, absl::Cord(encoded_data)));
{
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto buffer, EncodeChunk(metadata, array));
EXPECT_EQ(array, DecodeChunk(metadata, buffer));
}
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/compression/bzip2_compressor.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/driver/n5/bzip2_compressor_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
416c9ceb-73a8-4e36-b70f-7ac1a8c89534 | cpp | google/tensorstore | cast | tensorstore/driver/cast/cast.cc | tensorstore/driver/cast/cast_test.cc | #include "tensorstore/driver/cast/cast.h"
#include <cassert>
#include <utility>
#include "absl/status/status.h"
#include "tensorstore/array.h"
#include "tensorstore/array_storage_statistics.h"
#include "tensorstore/chunk_layout.h"
#include "tensorstore/codec_spec.h"
#include "tensorstore/context.h"
#include "tensorstore/data_type.h"
#include "tensorstore/data_type_conversion.h"
#include "tensorstore/driver/chunk.h"
#include "tensorstore/driver/driver.h"
#include "tensorstore/driver/driver_handle.h"
#include "tensorstore/driver/driver_spec.h"
#include "tensorstore/driver/registry.h"
#include "tensorstore/index.h"
#include "tensorstore/index_space/dimension_units.h"
#include "tensorstore/index_space/index_domain.h"
#include "tensorstore/index_space/index_transform.h"
#include "tensorstore/internal/arena.h"
#include "tensorstore/internal/intrusive_ptr.h"
#include "tensorstore/internal/json_binding/json_binding.h"
#include "tensorstore/internal/lock_collection.h"
#include "tensorstore/internal/nditerable_data_type_conversion.h"
#include "tensorstore/internal/type_traits.h"
#include "tensorstore/json_serialization_options.h"
#include "tensorstore/kvstore/kvstore.h"
#include "tensorstore/kvstore/spec.h"
#include "tensorstore/open_mode.h"
#include "tensorstore/open_options.h"
#include "tensorstore/rank.h"
#include "tensorstore/schema.h"
#include "tensorstore/spec.h"
#include "tensorstore/transaction.h"
#include "tensorstore/util/execution/any_receiver.h"
#include "tensorstore/util/execution/execution.h"
#include "tensorstore/util/executor.h"
#include "tensorstore/util/future.h"
#include "tensorstore/util/iterate.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
namespace internal_cast_driver {
namespace {
using ::tensorstore::internal::Arena;
using ::tensorstore::internal::DataTypeConversionLookupResult;
using ::tensorstore::internal::IntrusivePtr;
using ::tensorstore::internal::NDIterable;
using ::tensorstore::internal::OpenTransactionPtr;
using ::tensorstore::internal::ReadChunk;
using ::tensorstore::internal::TransformedDriverSpec;
using ::tensorstore::internal::WriteChunk;
namespace jb = tensorstore::internal_json_binding;
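// Spec for the "cast" driver: wraps a base TransformedDriverSpec and records
// the target dtype in `schema`; domain, chunk layout, codec, fill value, and
// dimension units are derived from the base (converting the fill value to the
// target dtype when a supported conversion exists).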
class CastDriverSpec
: public internal::RegisteredDriverSpec<CastDriverSpec,
internal::DriverSpec> {
public:
constexpr static const char id[] = "cast";
TransformedDriverSpec base;
constexpr static auto ApplyMembers = [](auto&& x, auto f) {
return f(internal::BaseCast<internal::DriverSpec>(x), x.base);
};
OpenMode open_mode() const override { return base.driver_spec->open_mode(); }
absl::Status ApplyOptions(SpecOptions&& options) override {
TENSORSTORE_RETURN_IF_ERROR(schema.Set(options.dtype()));
options.Override(DataType()).IgnoreError();
return internal::TransformAndApplyOptions(base, std::move(options));
}
constexpr static auto default_json_binder = jb::Object(
jb::Member("base",
[](auto is_loading, const auto& options, auto* obj, auto* j) {
return jb::Projection<&CastDriverSpec::base>()(
is_loading,
JsonSerializationOptions(options, DataType(),
obj->schema.rank()),
obj, j);
}),
jb::Initialize([](auto* obj) -> absl::Status {
if (obj->base.transform.valid()) {
TENSORSTORE_RETURN_IF_ERROR(obj->schema.Set(
RankConstraint{obj->base.transform.input_rank()}));
}
DataType dtype = obj->schema.dtype();
DimensionIndex rank = obj->schema.rank();
SpecOptions base_options;
static_cast<Schema&>(base_options) = std::exchange(obj->schema, {});
obj->schema.Set(dtype).IgnoreError();
obj->schema.Set(RankConstraint{rank}).IgnoreError();
return obj->ApplyOptions(std::move(base_options));
}));
Result<IndexDomain<>> GetDomain() const override {
return internal::GetEffectiveDomain(base);
}
Result<ChunkLayout> GetChunkLayout() const override {
return internal::GetEffectiveChunkLayout(base);
}
Result<CodecSpec> GetCodec() const override {
return internal::GetEffectiveCodec(base);
}
Result<SharedArray<const void>> GetFillValue(
IndexTransformView<> transform) const override {
TENSORSTORE_ASSIGN_OR_RETURN(
auto adjusted_transform,
tensorstore::ComposeOptionalTransforms(base.transform, transform));
TENSORSTORE_ASSIGN_OR_RETURN(
auto fill_value, base.driver_spec->GetFillValue(adjusted_transform));
if (!fill_value.valid()) return {std::in_place};
auto dtype = schema.dtype();
if (dtype == fill_value.dtype()) return fill_value;
auto converter = internal::GetDataTypeConverter(fill_value.dtype(), dtype);
if (!(converter.flags & DataTypeConversionFlags::kSupported)) {
return {std::in_place};
}
return MakeCopy(fill_value, skip_repeated_elements, dtype);
}
Result<DimensionUnitsVector> GetDimensionUnits() const override {
return internal::GetEffectiveDimensionUnits(base);
}
kvstore::Spec GetKvstore() const override {
return base.driver_spec->GetKvstore();
}
Result<TransformedDriverSpec> GetBase(
IndexTransformView<> transform) const override {
TransformedDriverSpec new_base;
TENSORSTORE_ASSIGN_OR_RETURN(
new_base.transform,
ComposeOptionalTransforms(base.transform, transform));
new_base.driver_spec = base.driver_spec;
return new_base;
}
Future<internal::Driver::Handle> Open(
internal::DriverOpenRequest request) const override {
DataType target_dtype = schema.dtype();
if (!target_dtype.valid()) {
return absl::InvalidArgumentError("dtype must be specified");
}
auto read_write_mode = request.read_write_mode;
return MapFutureValue(
InlineExecutor{},
[target_dtype, read_write_mode](internal::Driver::Handle handle)
-> Result<internal::Driver::Handle> {
return MakeCastDriver(std::move(handle), target_dtype,
read_write_mode);
},
internal::OpenDriver(base, std::move(request)));
}
};
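// Driver adapter that presents the base driver's contents as `target_dtype_`,
// applying `input_conversion_` to reads and `output_conversion_` to writes.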
class CastDriver
: public internal::RegisteredDriver<CastDriver,
internal::Driver> {
public:
Result<TransformedDriverSpec> GetBoundSpec(
internal::OpenTransactionPtr transaction,
IndexTransformView<> transform) override {
auto driver_spec = internal::DriverSpec::Make<CastDriverSpec>();
driver_spec->context_binding_state_ = ContextBindingState::bound;
TENSORSTORE_ASSIGN_OR_RETURN(
driver_spec->base,
base_driver_->GetBoundSpec(std::move(transaction), transform));
driver_spec->schema.Set(target_dtype_).IgnoreError();
const DimensionIndex base_rank = base_driver_->rank();
driver_spec->schema.Set(RankConstraint{base_rank}).IgnoreError();
TransformedDriverSpec spec;
spec.transform = std::exchange(driver_spec->base.transform, {});
spec.driver_spec = std::move(driver_spec);
return spec;
}
Result<ChunkLayout> GetChunkLayout(IndexTransformView<> transform) override {
return base_driver_->GetChunkLayout(transform);
}
Result<CodecSpec> GetCodec() override { return base_driver_->GetCodec(); }
Result<SharedArray<const void>> GetFillValue(
IndexTransformView<> transform) override {
if (!(input_conversion_.flags & DataTypeConversionFlags::kSupported)) {
return {std::in_place};
}
TENSORSTORE_ASSIGN_OR_RETURN(auto base_fill_value,
base_driver_->GetFillValue(transform));
if (!base_fill_value.valid()) return {std::in_place};
if (base_fill_value.dtype() == target_dtype_) {
return base_fill_value;
}
return tensorstore::MakeCopy(base_fill_value, skip_repeated_elements,
target_dtype_);
}
Result<DimensionUnitsVector> GetDimensionUnits() override {
return base_driver_->GetDimensionUnits();
}
KvStore GetKvstore(const Transaction& transaction) override {
return base_driver_->GetKvstore(transaction);
}
Result<internal::DriverHandle> GetBase(
ReadWriteMode read_write_mode, IndexTransformView<> transform,
const Transaction& transaction) override {
internal::DriverHandle base_handle;
base_handle.driver = base_driver_;
base_handle.driver.set_read_write_mode(read_write_mode);
base_handle.transform = transform;
base_handle.transaction = transaction;
return base_handle;
}
Future<ArrayStorageStatistics> GetStorageStatistics(
GetStorageStatisticsRequest request) override {
return base_driver_->GetStorageStatistics(std::move(request));
}
explicit CastDriver(internal::DriverPtr base, DataType target_dtype,
DataTypeConversionLookupResult input_conversion,
DataTypeConversionLookupResult output_conversion)
: base_driver_(std::move(base)),
target_dtype_(target_dtype),
input_conversion_(input_conversion),
output_conversion_(output_conversion) {}
DataType dtype() override { return target_dtype_; }
DimensionIndex rank() override { return base_driver_->rank(); }
Executor data_copy_executor() override {
return base_driver_->data_copy_executor();
}
void Read(ReadRequest request, ReadChunkReceiver receiver) override;
void Write(WriteRequest request, WriteChunkReceiver receiver) override;
Future<IndexTransform<>> ResolveBounds(
ResolveBoundsRequest request) override {
return base_driver_->ResolveBounds(std::move(request));
}
Future<IndexTransform<>> Resize(ResizeRequest request) override {
return base_driver_->Resize(std::move(request));
}
constexpr static auto ApplyMembers = [](auto&& x, auto f) {
return f(x.base_driver_, x.target_dtype_, x.input_conversion_,
x.output_conversion_);
};
internal::DriverPtr base_driver_;
DataType target_dtype_;
DataTypeConversionLookupResult input_conversion_;
DataTypeConversionLookupResult output_conversion_;
};
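// Chunk wrappers: ReadChunkImpl converts the base NDIterable to the target
// dtype on BeginRead; WriteChunkImpl converts written values back to the base
// dtype, and forwards WriteArray only when the cast is a bitwise
// reinterpretation.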
struct ReadChunkImpl {
IntrusivePtr<CastDriver> self;
ReadChunk::Impl base;
absl::Status operator()(internal::LockCollection& lock_collection) {
return base(lock_collection);
}
Result<NDIterable::Ptr> operator()(ReadChunk::BeginRead,
IndexTransform<> chunk_transform,
Arena* arena) {
TENSORSTORE_ASSIGN_OR_RETURN(
auto iterable,
base(ReadChunk::BeginRead{}, std::move(chunk_transform), arena));
return GetConvertedInputNDIterable(std::move(iterable), self->target_dtype_,
self->input_conversion_);
}
};
struct WriteChunkImpl {
IntrusivePtr<CastDriver> self;
WriteChunk::Impl base;
absl::Status operator()(internal::LockCollection& lock_collection) {
return base(lock_collection);
}
Result<NDIterable::Ptr> operator()(WriteChunk::BeginWrite,
IndexTransform<> chunk_transform,
Arena* arena) {
TENSORSTORE_ASSIGN_OR_RETURN(
auto iterable,
base(WriteChunk::BeginWrite{}, std::move(chunk_transform), arena));
return GetConvertedOutputNDIterable(
std::move(iterable), self->target_dtype_, self->output_conversion_);
}
WriteChunk::EndWriteResult operator()(WriteChunk::EndWrite,
IndexTransformView<> chunk_transform,
bool success, Arena* arena) {
return base(WriteChunk::EndWrite{}, chunk_transform, success, arena);
}
bool operator()(WriteChunk::WriteArray, IndexTransformView<> chunk_transform,
WriteChunk::GetWriteSourceArrayFunction get_source_array,
Arena* arena, WriteChunk::EndWriteResult& end_write_result) {
if (!(self->output_conversion_.flags &
DataTypeConversionFlags::kCanReinterpretCast)) {
return false;
}
return base(WriteChunk::WriteArray{}, chunk_transform, get_source_array,
arena, end_write_result);
}
};
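// Receiver adapter that rewraps each incoming chunk's impl in ChunkImpl
// (ReadChunkImpl or WriteChunkImpl) before handing it to the downstream
// receiver.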
template <typename Chunk, typename ChunkImpl>
struct ChunkReceiverAdapter {
IntrusivePtr<CastDriver> self;
AnyFlowReceiver<absl::Status, Chunk, IndexTransform<>> base;
template <typename CancelReceiver>
void set_starting(CancelReceiver receiver) {
tensorstore::execution::set_starting(base, std::move(receiver));
}
void set_value(Chunk chunk, IndexTransform<> transform) {
tensorstore::execution::set_value(
base,
Chunk{ChunkImpl{self, std::move(chunk.impl)},
std::move(chunk.transform)},
std::move(transform));
}
void set_done() { tensorstore::execution::set_done(base); }
void set_error(absl::Status status) {
tensorstore::execution::set_error(base, std::move(status));
}
void set_stopping() { tensorstore::execution::set_stopping(base); }
};
void CastDriver::Read(ReadRequest request, ReadChunkReceiver receiver) {
base_driver_->Read(std::move(request),
ChunkReceiverAdapter<ReadChunk, ReadChunkImpl>{
IntrusivePtr<CastDriver>(this), std::move(receiver)});
}
void CastDriver::Write(WriteRequest request, WriteChunkReceiver receiver) {
base_driver_->Write(std::move(request),
ChunkReceiverAdapter<WriteChunk, WriteChunkImpl>{
IntrusivePtr<CastDriver>(this), std::move(receiver)});
}
const internal::DriverRegistration<CastDriverSpec> driver_registration;
}  // namespace
}  // namespace internal_cast_driver
namespace internal {
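// Looks up the source->target (read) and target->source (write) converters.
// An unsupported direction is dropped when it was only implied by
// `existing_mode`, but is an error when `required_mode` demands it; losing
// both directions is always an error.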
Result<CastDataTypeConversions> GetCastDataTypeConversions(
DataType source_dtype, DataType target_dtype, ReadWriteMode existing_mode,
ReadWriteMode required_mode) {
assert((existing_mode & required_mode) == required_mode);
CastDataTypeConversions result = {};
if (required_mode == ReadWriteMode::dynamic &&
existing_mode != ReadWriteMode::read_write) {
required_mode = existing_mode;
}
const ReadWriteMode requested_mode =
required_mode == ReadWriteMode::dynamic ? existing_mode : required_mode;
result.mode = requested_mode;
if ((requested_mode & ReadWriteMode::read) == ReadWriteMode::read) {
result.input = GetDataTypeConverter(source_dtype, target_dtype);
if (!(result.input.flags & DataTypeConversionFlags::kSupported)) {
if ((required_mode & ReadWriteMode::read) == ReadWriteMode::read) {
return absl::InvalidArgumentError(tensorstore::StrCat(
"Read access requires unsupported ", source_dtype, " -> ",
target_dtype, " conversion"));
}
result.mode &= ~ReadWriteMode::read;
}
}
if ((requested_mode & ReadWriteMode::write) == ReadWriteMode::write) {
result.output = GetDataTypeConverter(target_dtype, source_dtype);
if (!(result.output.flags & DataTypeConversionFlags::kSupported)) {
if ((required_mode & ReadWriteMode::write) == ReadWriteMode::write) {
return absl::InvalidArgumentError(tensorstore::StrCat(
"Write access requires unsupported ", target_dtype, " -> ",
source_dtype, " conversion"));
}
result.mode &= ~ReadWriteMode::write;
}
}
if (result.mode == ReadWriteMode{}) {
return absl::InvalidArgumentError(tensorstore::StrCat(
"Cannot convert ", source_dtype, " <-> ", target_dtype));
}
return result;
}
Result<Driver::Handle> MakeCastDriver(Driver::Handle base,
DataType target_dtype,
ReadWriteMode read_write_mode) {
TENSORSTORE_ASSIGN_OR_RETURN(
auto conversions, GetCastDataTypeConversions(
base.driver->dtype(), target_dtype,
base.driver.read_write_mode(), read_write_mode));
base.driver = internal::MakeReadWritePtr<internal_cast_driver::CastDriver>(
conversions.mode, std::move(base.driver), target_dtype, conversions.input,
conversions.output);
return base;
}
Result<TransformedDriverSpec> MakeCastDriverSpec(TransformedDriverSpec base,
DataType target_dtype) {
if (!base.driver_spec) return {std::in_place};
DataType source_dtype = base.driver_spec->schema.dtype();
if (source_dtype.valid()) {
TENSORSTORE_RETURN_IF_ERROR(GetCastDataTypeConversions(
source_dtype, target_dtype, ReadWriteMode::read_write,
ReadWriteMode::dynamic));
}
auto driver_spec =
internal::DriverSpec::Make<internal_cast_driver::CastDriverSpec>();
driver_spec->schema
.Set(base.transform.valid() ? RankConstraint{base.transform.output_rank()}
: base.driver_spec->schema.rank())
.IgnoreError();
driver_spec->schema.Set(target_dtype).IgnoreError();
driver_spec->context_binding_state_ = base.context_binding_state();
driver_spec->base.driver_spec = std::move(base.driver_spec);
base.driver_spec = std::move(driver_spec);
return base;
}
}  // namespace internal
Result<Spec> Cast(const Spec& base_spec, DataType target_dtype) {
Spec spec;
auto& base_impl = internal_spec::SpecAccess::impl(base_spec);
auto& impl = internal_spec::SpecAccess::impl(spec);
TENSORSTORE_ASSIGN_OR_RETURN(
impl, internal::MakeCastDriverSpec(base_impl, target_dtype));
return spec;
}
} | #include "tensorstore/cast.h"
#include <cstddef>
#include <cstdint>
#include <string>
#include <utility>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "tensorstore/array.h"
#include "tensorstore/box.h"
#include "tensorstore/chunk_layout.h"
#include "tensorstore/codec_spec.h"
#include "tensorstore/context.h"
#include "tensorstore/data_type.h"
#include "tensorstore/driver/cast/cast.h"
#include "tensorstore/driver/driver_testutil.h"
#include "tensorstore/index.h"
#include "tensorstore/index_space/index_transform.h"
#include "tensorstore/index_space/index_transform_builder.h"
#include "tensorstore/index_space/json.h"
#include "tensorstore/internal/global_initializer.h"
#include "tensorstore/internal/json_gtest.h"
#include "tensorstore/json_serialization_options_base.h"
#include "tensorstore/open.h"
#include "tensorstore/open_mode.h"
#include "tensorstore/schema.h"
#include "tensorstore/spec.h"
#include "tensorstore/strided_layout.h"
#include "tensorstore/tensorstore.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/status_testutil.h"
namespace {
using ::tensorstore::Cast;
using ::tensorstore::ChunkLayout;
using ::tensorstore::DataTypeConversionFlags;
using ::tensorstore::DimensionIndex;
using ::tensorstore::dtype_v;
using ::tensorstore::Index;
using ::tensorstore::MakeArray;
using ::tensorstore::MatchesStatus;
using ::tensorstore::ReadWriteMode;
using ::tensorstore::Result;
using ::tensorstore::zero_origin;
using ::tensorstore::dtypes::string_t;
using ::tensorstore::internal::CastDataTypeConversions;
using ::tensorstore::internal::GetCastDataTypeConversions;
using ::tensorstore::internal::GetCastMode;
using ::tensorstore::internal::TestSpecSchema;
using ::tensorstore::internal::TestTensorStoreCreateCheckSchema;
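// Compile-time checks (skipped under MSVC): GetCastMode must not be a
// constant expression for unconditionally unsupported casts such as
// byte <-> string, and must collapse to the single supported direction for
// one-way casts such as int32 <-> string.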
#ifndef _MSC_VER
template <class T>
constexpr void test_helper(T&& t) {}
#define TENSORSTORE_IS_CONSTEXPR(...) noexcept(test_helper(__VA_ARGS__))
static_assert(!TENSORSTORE_IS_CONSTEXPR(
GetCastMode<std::byte, std::string>(ReadWriteMode::dynamic)));
static_assert(!TENSORSTORE_IS_CONSTEXPR(
GetCastMode<std::byte, std::string>(ReadWriteMode::read)));
static_assert(!TENSORSTORE_IS_CONSTEXPR(
GetCastMode<std::byte, std::string>(ReadWriteMode::write)));
static_assert(!TENSORSTORE_IS_CONSTEXPR(
GetCastMode<std::byte, std::string>(ReadWriteMode::read_write)));
#endif
static_assert(GetCastMode<std::int32_t, float>(ReadWriteMode::dynamic) ==
ReadWriteMode::dynamic);
static_assert(GetCastMode<std::int32_t, float>(ReadWriteMode::read) ==
ReadWriteMode::read);
static_assert(GetCastMode<std::int32_t, float>(ReadWriteMode::write) ==
ReadWriteMode::write);
static_assert(GetCastMode<std::int32_t, float>(ReadWriteMode::read_write) ==
ReadWriteMode::read_write);
static_assert(GetCastMode<std::int32_t, std::string>(ReadWriteMode::dynamic) ==
ReadWriteMode::read);
static_assert(GetCastMode<std::int32_t, std::string>(ReadWriteMode::read) ==
ReadWriteMode::read);
#ifndef _MSC_VER
static_assert(!TENSORSTORE_IS_CONSTEXPR(
GetCastMode<std::int32_t, std::string>(ReadWriteMode::write)));
#endif
static_assert(GetCastMode<std::int32_t, std::string>(
ReadWriteMode::read_write) == ReadWriteMode::read);
static_assert(GetCastMode<std::string, std::int32_t>(ReadWriteMode::dynamic) ==
ReadWriteMode::write);
static_assert(GetCastMode<std::string, std::int32_t>(ReadWriteMode::write) ==
ReadWriteMode::write);
#ifndef _MSC_VER
static_assert(!TENSORSTORE_IS_CONSTEXPR(
GetCastMode<std::string, std::int32_t>(ReadWriteMode::read)));
#endif
static_assert(GetCastMode<std::string, std::int32_t>(
ReadWriteMode::read_write) == ReadWriteMode::write);
static_assert(GetCastMode<std::int32_t, std::int32_t>(
ReadWriteMode::read_write) == ReadWriteMode::read_write);
static_assert(GetCastMode<std::int32_t, std::int32_t>(ReadWriteMode::dynamic) ==
ReadWriteMode::dynamic);
static_assert(GetCastMode<std::int32_t, std::int32_t>(ReadWriteMode::read) ==
ReadWriteMode::read);
static_assert(GetCastMode<std::int32_t, std::int32_t>(ReadWriteMode::write) ==
ReadWriteMode::write);
static_assert(GetCastMode<std::int32_t, void>(ReadWriteMode::write) ==
ReadWriteMode::write);
static_assert(GetCastMode<std::int32_t, void>(ReadWriteMode::read) ==
ReadWriteMode::read);
static_assert(GetCastMode<std::int32_t, void>(ReadWriteMode::dynamic) ==
ReadWriteMode::dynamic);
static_assert(GetCastMode<std::int32_t, void>(ReadWriteMode::read_write) ==
ReadWriteMode::dynamic);
static_assert(GetCastMode<void, std::int32_t>(ReadWriteMode::write) ==
ReadWriteMode::write);
static_assert(GetCastMode<void, std::int32_t>(ReadWriteMode::read) ==
ReadWriteMode::read);
static_assert(GetCastMode<void, std::int32_t>(ReadWriteMode::dynamic) ==
ReadWriteMode::dynamic);
static_assert(GetCastMode<void, std::int32_t>(ReadWriteMode::read_write) ==
ReadWriteMode::dynamic);
static_assert(GetCastMode<void, void>(ReadWriteMode::write) ==
ReadWriteMode::write);
static_assert(GetCastMode<void, void>(ReadWriteMode::read) ==
ReadWriteMode::read);
static_assert(GetCastMode<void, void>(ReadWriteMode::dynamic) ==
ReadWriteMode::dynamic);
static_assert(GetCastMode<void, void>(ReadWriteMode::read_write) ==
ReadWriteMode::dynamic);
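// Matches a CastDataTypeConversions result against the expected input
// converter flags, output converter flags, and resulting read/write mode.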
::testing::Matcher<Result<CastDataTypeConversions>>
MatchesCastDataTypeConversions(DataTypeConversionFlags input_flags,
DataTypeConversionFlags output_flags,
ReadWriteMode mode) {
return ::testing::Optional(::testing::AllOf(
::testing::ResultOf([](const auto& x) { return x.input.flags; },
input_flags),
::testing::ResultOf([](const auto& x) { return x.output.flags; },
output_flags),
::testing::Field(&CastDataTypeConversions::mode, mode)));
}
TEST(GetCastDataTypeConversions, Basic) {
constexpr static DataTypeConversionFlags kSupported =
DataTypeConversionFlags::kSupported;
constexpr static DataTypeConversionFlags kIdentity =
DataTypeConversionFlags::kIdentity;
constexpr static DataTypeConversionFlags kSafeAndImplicit =
DataTypeConversionFlags::kSafeAndImplicit;
constexpr static DataTypeConversionFlags kCanReinterpretCast =
DataTypeConversionFlags::kCanReinterpretCast;
constexpr static DataTypeConversionFlags kNone = {};
constexpr static DataTypeConversionFlags kAll =
kSupported | kIdentity | kCanReinterpretCast | kSafeAndImplicit;
constexpr static ReadWriteMode read = ReadWriteMode::read;
constexpr static ReadWriteMode write = ReadWriteMode::write;
constexpr static ReadWriteMode read_write = ReadWriteMode::read_write;
constexpr static ReadWriteMode dynamic = ReadWriteMode::dynamic;
constexpr auto IfMode = [](ReadWriteMode mode, ReadWriteMode condition,
DataTypeConversionFlags true_value) {
return ((mode & condition) == condition) ? true_value : kNone;
};
for (const auto existing_mode : {read, write, read_write}) {
for (const auto required_mode : {existing_mode, dynamic}) {
EXPECT_THAT(GetCastDataTypeConversions(dtype_v<std::int32_t>,
dtype_v<std::int32_t>,
existing_mode, required_mode),
MatchesCastDataTypeConversions(
                      /*input_flags=*/IfMode(existing_mode, read, kAll),
                      /*output_flags=*/IfMode(existing_mode, write, kAll),
                      /*mode=*/existing_mode));
}
}
for (const auto existing_mode : {read, write, read_write}) {
for (const auto required_mode : {existing_mode, dynamic}) {
EXPECT_THAT(
GetCastDataTypeConversions(dtype_v<std::int32_t>, dtype_v<float>,
existing_mode, required_mode),
MatchesCastDataTypeConversions(
              /*input_flags=*/IfMode(existing_mode, read, kSupported),
              /*output_flags=*/IfMode(existing_mode, write, kSupported),
              /*mode=*/existing_mode));
}
}
for (const auto existing_mode : {read, write, read_write}) {
for (const auto required_mode : {existing_mode, dynamic}) {
EXPECT_THAT(
GetCastDataTypeConversions(dtype_v<std::int16_t>, dtype_v<float>,
existing_mode, required_mode),
MatchesCastDataTypeConversions(
              /*input_flags=*/IfMode(existing_mode, read,
                                     kSupported | kSafeAndImplicit),
              /*output_flags=*/IfMode(existing_mode, write, kSupported),
              /*mode=*/existing_mode));
}
}
for (const auto existing_mode : {read, read_write}) {
for (const auto required_mode : {read, dynamic}) {
EXPECT_THAT(GetCastDataTypeConversions(dtype_v<std::int32_t>,
dtype_v<std::string>,
existing_mode, required_mode),
MatchesCastDataTypeConversions(
                    /*input_flags=*/kSupported,
                    /*output_flags=*/kNone,
                    /*mode=*/read));
}
}
for (const auto required_mode : {write, read_write}) {
EXPECT_THAT(
GetCastDataTypeConversions(dtype_v<std::int32_t>, dtype_v<std::string>,
read_write, required_mode),
MatchesStatus(absl::StatusCode::kInvalidArgument));
}
for (const auto required_mode : {write, dynamic}) {
EXPECT_THAT(
GetCastDataTypeConversions(dtype_v<std::int32_t>, dtype_v<std::string>,
write, required_mode),
MatchesStatus(absl::StatusCode::kInvalidArgument));
}
for (const auto existing_mode : {write, read_write}) {
for (const auto required_mode : {write, dynamic}) {
EXPECT_THAT(GetCastDataTypeConversions(dtype_v<std::string>,
dtype_v<std::int32_t>,
existing_mode, required_mode),
MatchesCastDataTypeConversions(
                      /*input_flags=*/kNone,
                      /*output_flags=*/kSupported,
                      /*mode=*/write));
}
}
for (const auto required_mode : {read, dynamic}) {
EXPECT_THAT(
GetCastDataTypeConversions(dtype_v<std::string>, dtype_v<std::int32_t>,
read, required_mode),
MatchesStatus(absl::StatusCode::kInvalidArgument));
}
for (const auto required_mode : {read, read_write}) {
EXPECT_THAT(
GetCastDataTypeConversions(dtype_v<std::string>, dtype_v<std::int32_t>,
read_write, required_mode),
MatchesStatus(absl::StatusCode::kInvalidArgument));
}
for (const auto existing_mode : {read, write, read_write}) {
for (const auto required_mode : {read, write, read_write, dynamic}) {
if ((existing_mode & required_mode) != required_mode) continue;
EXPECT_THAT(GetCastDataTypeConversions(
dtype_v<std::byte>, dtype_v<std::string>, existing_mode,
required_mode & existing_mode),
MatchesStatus(absl::StatusCode::kInvalidArgument));
}
}
}
TEST(CastTest, Int32ToStringDynamic) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto store,
tensorstore::Open(
{{"driver", "array"}, {"array", {1, 2, 3}}, {"dtype", "int32"}})
.result());
EXPECT_EQ(store.read_write_mode(), ReadWriteMode::read_write);
ASSERT_EQ(tensorstore::Box<1>({3}), store.domain().box());
auto cast_store = Cast(store, dtype_v<std::string>).value();
EXPECT_EQ(cast_store.read_write_mode(), ReadWriteMode::read);
EXPECT_EQ(tensorstore::Read<zero_origin>(cast_store).result(),
MakeArray<std::string>({"1", "2", "3"}));
EXPECT_THAT(
cast_store.spec().value().ToJson({tensorstore::IncludeDefaults{false}}),
::testing::Optional(tensorstore::MatchesJson(
{{"driver", "cast"},
{"dtype", "string"},
{"transform",
::nlohmann::json(tensorstore::IdentityTransform<1>({3}))},
{"base",
{
{"driver", "array"},
{"array", {1, 2, 3}},
{"dtype", "int32"},
}}})));
}
TEST(CastTest, StringToInt32Dynamic) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto store, tensorstore::Open({{"driver", "array"},
{"array", {"a", "b", "c"}},
{"dtype", "string"}})
.result());
EXPECT_EQ(store.read_write_mode(), ReadWriteMode::read_write);
auto cast_store = Cast(store, dtype_v<std::int32_t>).value();
EXPECT_EQ(cast_store.read_write_mode(), ReadWriteMode::write);
TENSORSTORE_EXPECT_OK(
tensorstore::Write(MakeArray<std::int32_t>({1, 2, 3}), cast_store));
EXPECT_EQ(tensorstore::Read<zero_origin>(store).result(),
MakeArray<std::string>({"1", "2", "3"}));
}
TEST(CastTest, OpenInt32ToInt64) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto store,
tensorstore::Open(
{{"driver", "cast"},
{"dtype", "int64"},
{"base",
{{"driver", "array"}, {"array", {1, 2, 3}}, {"dtype", "int32"}}}})
.result());
EXPECT_EQ(store.read_write_mode(), ReadWriteMode::read_write);
EXPECT_EQ(tensorstore::Read<zero_origin>(store).result(),
MakeArray<std::int64_t>({1, 2, 3}));
TENSORSTORE_EXPECT_OK(tensorstore::Write(
tensorstore::MakeScalarArray<std::int64_t>(10), store));
EXPECT_EQ(tensorstore::Read<zero_origin>(store).result(),
MakeArray<std::int64_t>({10, 10, 10}));
}
TEST(CastTest, OpenInputConversionError) {
EXPECT_THAT(
tensorstore::Open(
{{"driver", "cast"},
{"dtype", "byte"},
{"base",
{{"driver", "array"}, {"array", {1, 2, 3}}, {"dtype", "int32"}}}},
tensorstore::ReadWriteMode::read)
.result(),
MatchesStatus(
absl::StatusCode::kInvalidArgument,
"Error opening \"cast\" driver: "
"Read access requires unsupported int32 -> byte conversion"));
}
TEST(CastTest, OpenOutputConversionError) {
EXPECT_THAT(
tensorstore::Open(
{{"driver", "cast"},
{"dtype", "byte"},
{"base",
{{"driver", "array"}, {"array", {1, 2, 3}}, {"dtype", "int32"}}}},
tensorstore::ReadWriteMode::write)
.result(),
MatchesStatus(
absl::StatusCode::kInvalidArgument,
"Error opening \"cast\" driver: "
"Write access requires unsupported byte -> int32 conversion"));
}
TEST(CastTest, OpenAnyConversionError) {
EXPECT_THAT(
tensorstore::Open(
{{"driver", "cast"},
{"dtype", "byte"},
{"base",
{{"driver", "array"}, {"array", {1, 2, 3}}, {"dtype", "int32"}}}})
.result(),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Error opening \"cast\" driver: "
"Cannot convert int32 <-> byte"));
}
TEST(CastTest, OpenMissingDataType) {
EXPECT_THAT(
tensorstore::Open(
{{"driver", "cast"},
{"base",
{{"driver", "array"}, {"array", {1, 2, 3}}, {"dtype", "int32"}}}})
.result(),
MatchesStatus(absl::StatusCode::kInvalidArgument,
".*: dtype must be specified"));
}
TEST(CastTest, ComposeTransforms) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto store,
tensorstore::Open(
{{"driver", "cast"},
{"transform",
{{"input_inclusive_min", {10}},
{"input_shape", {3}},
{"output", {{{"input_dimension", 0}, {"offset", -8}}}}}},
{"dtype", "int64"},
{"base",
{{"driver", "array"},
{"array", {1, 2, 3}},
{"transform",
{{"input_inclusive_min", {2}},
{"input_shape", {3}},
{"output", {{{"input_dimension", 0}, {"offset", -2}}}}}},
{"dtype", "int32"}}}})
.result());
EXPECT_THAT(
store.spec().value().ToJson({tensorstore::IncludeDefaults{false}}),
::testing::Optional(tensorstore::MatchesJson(
{{"driver", "cast"},
{"base",
{
{"driver", "array"},
{"array", {1, 2, 3}},
{"dtype", "int32"},
}},
{"dtype", "int64"},
{"transform",
::nlohmann::json(tensorstore::IndexTransformBuilder<>(1, 1)
.input_origin({10})
.input_shape({3})
.output_single_input_dimension(0, -10, 1, 0)
.Finalize()
.value())}})));
}
TEST(CastTest, ComposeTransformsError) {
EXPECT_THAT(tensorstore::Open({{"driver", "cast"},
{"rank", 2},
{"dtype", "int64"},
{"base",
{{"driver", "array"},
{"array", {1, 2, 3}},
{"rank", 1},
{"dtype", "int32"}}}})
.result(),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Error parsing object member \"base\": "
"Error parsing object member \"rank\": "
"Expected 2, but received: 1"));
}
TEST(CastTest, SpecRankPropagation) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto spec, tensorstore::Spec::FromJson({
{"driver", "cast"},
{"base",
{
{"driver", "array"},
{"array", {1, 2, 3}},
{"dtype", "int32"},
}},
{"dtype", "int64"},
}));
EXPECT_EQ(1, spec.rank());
}
TEST(CastTest, ChunkLayout) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto store, tensorstore::Open({
{"driver", "cast"},
{"dtype", "int32"},
{"base",
{{"driver", "array"},
{"dtype", "int64"},
{"array", {{1, 2, 3}, {4, 5, 6}}}}},
})
.result());
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto expected_layout,
ChunkLayout::FromJson({
{"grid_origin", {0, 0}},
{"inner_order", {0, 1}},
}));
EXPECT_THAT(store.chunk_layout(), ::testing::Optional(expected_layout));
}
TEST(SpecSchemaTest, CastArray) {
TestSpecSchema(
{
{"driver", "cast"},
{"base",
{
{"driver", "array"},
{"array", {{1, 2, 3}, {4, 5, 6}}},
{"dtype", "float32"},
{"schema", {{"dimension_units", {"4nm", "5nm"}}}},
}},
{"dtype", "int32"},
},
{
{"rank", 2},
{"dtype", "int32"},
{"domain", {{"shape", {2, 3}}}},
{"chunk_layout", {{"grid_origin", {0, 0}}, {"inner_order", {0, 1}}}},
{"dimension_units", {"4nm", "5nm"}},
});
}
TEST(DriverCreateCheckSchemaTest, CastArray) {
TestTensorStoreCreateCheckSchema(
{
{"driver", "cast"},
{"base",
{
{"driver", "array"},
{"array", {{1, 2, 3}, {4, 5, 6}}},
{"dtype", "float32"},
{"schema", {{"dimension_units", {"4nm", "5nm"}}}},
}},
{"dtype", "int32"},
},
{
{"rank", 2},
{"dtype", "int32"},
{"domain", {{"shape", {2, 3}}}},
{"chunk_layout", {{"grid_origin", {0, 0}}, {"inner_order", {0, 1}}}},
{"dimension_units", {"4nm", "5nm"}},
});
}
TEST(CastTest, FillValueNotSpecified) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto base_store,
tensorstore::Open(
{
{"driver", "zarr"},
{"kvstore", {{"driver", "memory"}}},
},
tensorstore::OpenMode::create, tensorstore::dtype_v<uint16_t>,
tensorstore::Schema::Shape({100, 4, 3}))
.result());
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto store, tensorstore::Cast(base_store, tensorstore::dtype_v<int32_t>));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto fill_value, store.fill_value());
EXPECT_FALSE(fill_value.valid());
}
TEST(CastTest, FillValueSpecified) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto base_store,
tensorstore::Open(
{
{"driver", "zarr"},
{"kvstore", {{"driver", "memory"}}},
},
tensorstore::OpenMode::create, tensorstore::dtype_v<uint16_t>,
tensorstore::Schema::Shape({100, 4, 3}),
tensorstore::Schema::FillValue(
tensorstore::MakeScalarArray<uint16_t>(42)))
.result());
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto store, tensorstore::Cast(base_store, tensorstore::dtype_v<int32_t>));
EXPECT_THAT(store.fill_value(),
::testing::Optional(tensorstore::MakeScalarArray<int32_t>(42)));
}
TEST(CastTest, Codec) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto base_store,
tensorstore::Open(
{
{"driver", "zarr"},
{"kvstore", {{"driver", "memory"}}},
{"metadata", {{"compressor", nullptr}}},
},
tensorstore::OpenMode::create, tensorstore::dtype_v<uint16_t>,
tensorstore::Schema::Shape({100, 4, 3}))
.result());
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto store, tensorstore::Cast(base_store, tensorstore::dtype_v<int32_t>));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto expected_codec,
tensorstore::CodecSpec::FromJson({
{"driver", "zarr"},
{"compressor", nullptr},
{"filters", nullptr},
}));
EXPECT_THAT(store.codec(), ::testing::Optional(expected_codec));
}
TEST(SpecSchemaTest, ChunkLayout) {
TestSpecSchema(
{
{"driver", "cast"},
{"dtype", "uint32"},
{"base",
{
{"driver", "zarr"},
{"kvstore", {{"driver", "memory"}}},
{"metadata", {{"dtype", "<u2"}, {"chunks", {3, 4, 5}}}},
}},
},
{
{"dtype", "uint32"},
{"chunk_layout",
{
{"grid_origin", {0, 0, 0}},
{"chunk", {{"shape", {3, 4, 5}}}},
}},
{"codec", {{"driver", "zarr"}}},
});
}
TEST(SpecSchemaTest, Codec) {
TestSpecSchema(
{
{"driver", "cast"},
{"dtype", "uint32"},
{"base",
{
{"driver", "zarr"},
{"kvstore", {{"driver", "memory"}}},
{"metadata", {{"dtype", "<u2"}, {"compressor", nullptr}}},
}},
},
{
{"dtype", "uint32"},
{"codec", {{"driver", "zarr"}, {"compressor", nullptr}}},
});
}
TEST(SpecSchemaTest, FillValue) {
TestSpecSchema(
{
{"driver", "cast"},
{"dtype", "uint32"},
{"base",
{
{"driver", "zarr"},
{"kvstore", {{"driver", "memory"}}},
{"metadata", {{"dtype", "<f4"}, {"fill_value", 3.5}}},
}},
},
{
{"dtype", "uint32"},
{"fill_value", 3},
{"codec", {{"driver", "zarr"}}},
});
}
TEST(SpecSchemaTest, FillValueSameDtype) {
TestSpecSchema(
{
{"driver", "cast"},
{"dtype", "uint32"},
{"base",
{
{"driver", "zarr"},
{"kvstore", {{"driver", "memory"}}},
{"metadata", {{"dtype", "<u4"}, {"fill_value", 3}}},
}},
},
{
{"dtype", "uint32"},
{"fill_value", 3},
{"codec", {{"driver", "zarr"}}},
});
}
TENSORSTORE_GLOBAL_INITIALIZER {
tensorstore::internal::TestTensorStoreDriverSpecRoundtripOptions options;
options.test_name = "cast";
options.create_spec = {
{"driver", "cast"},
{"base",
{
{"driver", "array"},
{"dtype", "float32"},
{"array", {{1, 2, 3}, {4, 5, 6}}},
}},
{"dtype", "uint32"},
};
options.full_spec = {
{"driver", "cast"},
{"base",
{
{"driver", "array"},
{"array", {{1, 2, 3}, {4, 5, 6}}},
{"dtype", "float32"},
}},
{"dtype", "uint32"},
{"transform",
{{"input_inclusive_min", {0, 0}}, {"input_exclusive_max", {2, 3}}}},
};
options.full_base_spec = {
{"driver", "array"},
{"array", {{1, 2, 3}, {4, 5, 6}}},
{"dtype", "float32"},
{"transform",
{{"input_inclusive_min", {0, 0}}, {"input_exclusive_max", {2, 3}}}},
};
options.minimal_spec = options.full_spec;
options.check_not_found_before_create = false;
options.check_not_found_before_commit = false;
options.supported_transaction_modes = {};
tensorstore::internal::RegisterTensorStoreDriverSpecRoundtripTest(
std::move(options));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/driver/cast/cast.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/driver/cast/cast_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
c5486942-8a3d-4c55-9215-7af08ca87df2 | cpp | google/arolla | std_function_operator | arolla/expr/operators/std_function_operator.cc | arolla/expr/operators/std_function_operator_test.cc | #include "arolla/expr/operators/std_function_operator.h"
#include <utility>
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "arolla/expr/basic_expr_operator.h"
#include "arolla/expr/expr_operator.h"
#include "arolla/expr/expr_operator_signature.h"
#include "arolla/qtype/qtype.h"
#include "arolla/util/fingerprint.h"
namespace arolla::expr_operators {
using ::arolla::expr::ExprOperatorPtr;
using ::arolla::expr::ExprOperatorSignature;
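// RandomFingerprint() gives every constructed operator a distinct identity,
// so two instances built from identical arguments never compare equal.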
StdFunctionOperator::StdFunctionOperator(absl::string_view name,
ExprOperatorSignature signature,
absl::string_view doc,
OutputQTypeFn output_qtype_fn,
EvalFn eval_fn)
: BasicExprOperator(name, signature, doc, RandomFingerprint()),
output_qtype_fn_(std::move(output_qtype_fn)),
eval_fn_(std::move(eval_fn)) {}
absl::StatusOr<QTypePtr> StdFunctionOperator::GetOutputQType(
absl::Span<const QTypePtr> input_qtypes) const {
return output_qtype_fn_(input_qtypes);
}
const StdFunctionOperator::OutputQTypeFn&
StdFunctionOperator::GetOutputQTypeFn() const {
return output_qtype_fn_;
}
const StdFunctionOperator::EvalFn& StdFunctionOperator::GetEvalFn() const {
return eval_fn_;
}
} | #include "arolla/expr/operators/std_function_operator.h"
#include <cstdint>
#include <functional>
#include <memory>
#include <utility>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/status/status.h"
#include "absl/status/status_matchers.h"
#include "absl/status/statusor.h"
#include "absl/types/span.h"
#include "arolla/array/qtype/types.h"
#include "arolla/expr/eval/invoke.h"
#include "arolla/expr/expr.h"
#include "arolla/expr/expr_operator_signature.h"
#include "arolla/qtype/qtype.h"
#include "arolla/qtype/qtype_traits.h"
#include "arolla/qtype/typed_ref.h"
#include "arolla/qtype/typed_value.h"
#include "arolla/util/unit.h"
#include "arolla/util/status_macros_backport.h"
namespace arolla::expr_operators {
namespace {
using ::absl_testing::IsOkAndHolds;
using ::absl_testing::StatusIs;
using ::arolla::expr::ExprOperatorSignature;
using ::arolla::expr::Leaf;
using ::arolla::expr::Literal;
using ::testing::HasSubstr;
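// Operator payloads used throughout these tests: GetFirst echoes its first
// input, FirstQType propagates the first input's QType, and Add sums two
// int32 arguments.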
absl::StatusOr<TypedValue> GetFirst(absl::Span<const TypedRef> inputs) {
return TypedValue(inputs[0]);
}
absl::StatusOr<QTypePtr> FirstQType(absl::Span<const QTypePtr> input_qtypes) {
return input_qtypes[0];
}
absl::StatusOr<TypedValue> Add(absl::Span<const TypedRef> inputs) {
ASSIGN_OR_RETURN(int32_t x, inputs[0].As<int32_t>());
ASSIGN_OR_RETURN(int32_t y, inputs[1].As<int32_t>());
return TypedValue::FromValue(x + y);
}
TEST(StdFunctionOperatorTest, GetName) {
StdFunctionOperator op("get_first_fn", ExprOperatorSignature{{"x"}, {"y"}},
"dummy op docstring", FirstQType, GetFirst);
ASSERT_THAT(op.display_name(), "get_first_fn");
}
TEST(StdFunctionOperatorTest, GetDoc) {
StdFunctionOperator op("get_first_fn", ExprOperatorSignature{{"x"}, {"y"}},
"dummy op docstring", FirstQType, GetFirst);
ASSERT_THAT(op.GetDoc(), IsOkAndHolds("dummy op docstring"));
}
TEST(StdFunctionOperatorTest, GetEvalFn) {
StdFunctionOperator op("add_fn", ExprOperatorSignature{{"x"}, {"y"}},
"dummy op docstring", FirstQType, Add);
int32_t x = 1;
int32_t y = 2;
auto res = op.GetEvalFn()({TypedRef::FromValue(x), TypedRef::FromValue(y)});
EXPECT_OK(res);
EXPECT_THAT(res.value().As<int32_t>(), IsOkAndHolds(x + y));
}
TEST(StdFunctionOperatorTest, GetOutputQTypeFn) {
StdFunctionOperator op("add_fn", ExprOperatorSignature{{"x"}, {"y"}},
"dummy op docstring", FirstQType, Add);
auto output_qtype_fn = op.GetOutputQTypeFn();
auto res = output_qtype_fn({GetArrayQType<int32_t>(), GetQType<int32_t>()});
EXPECT_THAT(res, IsOkAndHolds(GetArrayQType<int32_t>()));
}
TEST(StdFunctionOperatorTest, GetOutputQType) {
{
StdFunctionOperator op("get_first_fn", ExprOperatorSignature{{"x"}, {"y"}},
"dummy op docstring", FirstQType, GetFirst);
EXPECT_THAT(
op.GetOutputQType({GetArrayQType<int32_t>(), GetQType<int32_t>()}),
IsOkAndHolds(GetArrayQType<int32_t>()));
}
{
auto get_snd =
[](absl::Span<const QTypePtr> inputs) -> absl::StatusOr<QTypePtr> {
return inputs[1];
};
StdFunctionOperator op("add_fn", ExprOperatorSignature{{"x"}, {"y"}},
"dummy op docstring", std::move(get_snd), Add);
EXPECT_THAT(
op.GetOutputQType({GetArrayQType<int32_t>(), GetQType<float>()}),
IsOkAndHolds(GetQType<float>()));
}
{
auto status_fn =
[](absl::Span<const QTypePtr> inputs) -> absl::StatusOr<QTypePtr> {
return absl::InvalidArgumentError("foo bar");
};
StdFunctionOperator op("add_fn", ExprOperatorSignature{{"x"}, {"y"}},
"dummy op docstring", std::move(status_fn), Add);
EXPECT_THAT(
op.GetOutputQType({GetArrayQType<int32_t>(), GetQType<float>()}),
StatusIs(absl::StatusCode::kInvalidArgument, HasSubstr("foo bar")));
}
}
TEST(StdFunctionOperatorTest, QTypeInference) {
{
auto op = std::make_shared<StdFunctionOperator>(
"my_dummy_op", ExprOperatorSignature{{"x"}, {"y"}},
"dummy op docstring", FirstQType, GetFirst);
ASSERT_OK_AND_ASSIGN(auto expr,
CallOp(op, {Literal(1.5f), Literal(kUnit)}));
EXPECT_EQ(expr->qtype(), GetQType<float>());
}
{
auto get_snd =
[](absl::Span<const QTypePtr> inputs) -> absl::StatusOr<QTypePtr> {
return inputs[1];
};
auto op = std::make_shared<StdFunctionOperator>(
"my_dummy_op", ExprOperatorSignature{{"x"}, {"y"}},
"dummy op docstring", std::move(get_snd), GetFirst);
ASSERT_OK_AND_ASSIGN(auto expr,
CallOp(op, {Literal(1.5f), Literal(kUnit)}));
EXPECT_EQ(expr->qtype(), GetQType<Unit>());
}
{
auto op = std::make_shared<StdFunctionOperator>(
"my_dummy_op", ExprOperatorSignature{{"x"}, {"y"}},
"dummy op docstring", FirstQType, GetFirst);
ASSERT_OK_AND_ASSIGN(auto expr, CallOp(op, {Leaf("x"), Leaf("y")}));
EXPECT_EQ(expr->qtype(), nullptr);
}
}
TEST(StdFunctionOperatorTest, Eval) {
{
auto op = std::make_shared<StdFunctionOperator>(
"get_first", ExprOperatorSignature{{"x"}, {"y"}}, "dummy op docstring",
FirstQType, GetFirst);
ASSERT_OK_AND_ASSIGN(auto expr, CallOp(op, {Literal(1), Literal(2)}));
auto res = Invoke(expr, {});
EXPECT_OK(res.status());
EXPECT_THAT(res.value().As<int32_t>(), IsOkAndHolds(1));
}
{
auto op = std::make_shared<StdFunctionOperator>(
"add", ExprOperatorSignature{{"x"}, {"y"}}, "dummy op docstring",
FirstQType, Add);
ASSERT_OK_AND_ASSIGN(auto expr, CallOp(op, {Literal(1), Literal(2)}));
auto res = Invoke(expr, {});
EXPECT_OK(res.status());
EXPECT_THAT(res.value().As<int32_t>(), IsOkAndHolds(3));
}
{
auto op = std::make_shared<StdFunctionOperator>(
"add", ExprOperatorSignature{{"x"}, {"y"}}, "dummy op docstring",
FirstQType, Add);
ASSERT_OK_AND_ASSIGN(auto expr, CallOp(op, {Leaf("x"), Leaf("y")}));
auto res = Invoke(expr, {{"x", TypedValue::FromValue(1)},
{"y", TypedValue::FromValue(2)}});
EXPECT_OK(res.status());
EXPECT_THAT(res.value().As<int32_t>(), IsOkAndHolds(3));
}
}
TEST(StdFunctionOperatorTest, VariadicInput) {
ASSERT_OK_AND_ASSIGN(auto signature, ExprOperatorSignature::Make("*args"));
auto op = std::make_shared<StdFunctionOperator>(
"add", signature, "dummy op docstring", FirstQType, Add);
ASSERT_OK_AND_ASSIGN(auto expr, CallOp(op, {Literal(1), Literal(2)}));
auto res = Invoke(expr, {});
EXPECT_OK(res.status());
EXPECT_THAT(res.value().As<int32_t>(), IsOkAndHolds(3));
}
TEST(StdFunctionOperatorTest, IncorrectFnOutput) {
auto op = std::make_shared<StdFunctionOperator>(
"get_first", ExprOperatorSignature{{"x"}}, "dummy op docstring",
[](absl::Span<const QTypePtr> input_qtypes) {
return GetQType<int32_t>();
},
GetFirst);
ASSERT_OK_AND_ASSIGN(auto expr, CallOp(op, {Literal(1.0)}));
EXPECT_THAT(
Invoke(expr, {}),
StatusIs(
absl::StatusCode::kInvalidArgument,
HasSubstr("expected the result to have qtype INT32, got FLOAT64")));
}
TEST(StdFunctionOperatorTest, FnRaises) {
auto op = std::make_shared<StdFunctionOperator>(
"get_first", ExprOperatorSignature{{"x"}}, "dummy op docstring",
FirstQType, [](absl::Span<const TypedRef> inputs) {
return absl::InvalidArgumentError("foo bar");
});
ASSERT_OK_AND_ASSIGN(auto expr, CallOp(op, {Literal(1)}));
EXPECT_THAT(Invoke(expr, {}), StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("foo bar")));
}
TEST(StdFunctionOperatorTest, Fingerprint) {
StdFunctionOperator op1("my_dummy_op", ExprOperatorSignature{{"x"}, {"y"}},
"dummy op docstring", FirstQType, GetFirst);
{
StdFunctionOperator op2("my_dummy_op", ExprOperatorSignature{{"x"}, {"y"}},
"dummy op docstring", FirstQType, GetFirst);
EXPECT_NE(op1.fingerprint(), op2.fingerprint());
}
{
StdFunctionOperator op2("another_name", ExprOperatorSignature{{"x"}, {"y"}},
"dummy op docstring", FirstQType, GetFirst);
EXPECT_NE(op1.fingerprint(), op2.fingerprint());
}
{
StdFunctionOperator op2("my_dummy_op", ExprOperatorSignature{{"x"}},
"dummy op docstring", FirstQType, GetFirst);
EXPECT_NE(op1.fingerprint(), op2.fingerprint());
}
{
StdFunctionOperator op2("my_dummy_op", ExprOperatorSignature{{"x"}, {"y"}},
"another docstring", FirstQType, GetFirst);
EXPECT_NE(op1.fingerprint(), op2.fingerprint());
}
{
StdFunctionOperator op2(
"my_dummy_op", ExprOperatorSignature{{"x"}, {"y"}},
"dummy op docstring",
[](absl::Span<const QTypePtr> input_qtypes) {
return GetQType<float>();
},
GetFirst);
EXPECT_NE(op1.fingerprint(), op2.fingerprint());
}
{
StdFunctionOperator op2(
"my_dummy_op", ExprOperatorSignature{{"x"}, {"y"}},
"dummy op docstring", FirstQType,
[](absl::Span<const TypedRef> inputs) -> absl::StatusOr<TypedValue> {
return TypedValue(inputs[1]);
});
EXPECT_NE(op1.fingerprint(), op2.fingerprint());
}
}
}  // namespace
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/expr/operators/std_function_operator.cc | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/expr/operators/std_function_operator_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
92843351-d9ed-433d-8b94-3590ca2b505e | cpp | tensorflow/tensorflow | ops | tensorflow/compiler/mlir/python/mlir_wrapper/ops.cc | tensorflow/c/ops_test.cc | #include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/Operation.h"
#include "tensorflow/compiler/mlir/python/mlir_wrapper/mlir_wrapper.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"
void init_ops(py::module& m) {
py::class_<mlir::Operation, std::unique_ptr<mlir::Operation, py::nodelete>>(
m, "Operation")
.def("getRegion", &mlir::Operation::getRegion,
py::return_value_policy::reference)
.def("getResult", &mlir::Operation::getResult)
.def("dump", &mlir::Operation::dump)
.def("getNumResults", &mlir::Operation::getNumResults);
py::class_<mlir::OperationState>(m, "OperationState")
.def(py::init([](mlir::Location loc, std::string name) {
return mlir::OperationState(loc, llvm::StringRef(name));
}))
.def("addTypes",
[](mlir::OperationState& state, std::vector<mlir::Type> tys) {
state.addTypes(mlir::ArrayRef<mlir::Type>(tys));
})
.def("addOperands",
[](mlir::OperationState& os, std::vector<mlir::Value> ops) {
os.addOperands(mlir::ArrayRef<mlir::Value>(ops));
})
.def("addRegion", py::overload_cast<>(&mlir::OperationState::addRegion),
py::return_value_policy::reference);
py::class_<mlir::ModuleOp>(m, "ModuleOp")
.def("create",
[](mlir::Location loc) { return mlir::ModuleOp::create(loc); })
.def("push_back",
[](mlir::ModuleOp& m, mlir::func::FuncOp f) { m.push_back(f); })
.def("dump", &mlir::ModuleOp::dump)
.def("getAsStr", [](mlir::ModuleOp& m) {
std::string str;
llvm::raw_string_ostream os(str);
m.print(os);
return os.str();
});
py::class_<mlir::func::FuncOp>(m, "FuncOp")
.def("create",
[](mlir::Location location, std::string name,
mlir::FunctionType type) {
auto func = mlir::func::FuncOp::create(location, name, type);
func.addEntryBlock();
return func;
})
.def(
"getBody",
[](mlir::func::FuncOp& f) -> mlir::Region& { return f.getBody(); },
py::return_value_policy::reference)
.def("getArguments",
[](mlir::func::FuncOp& f) { return f.getArguments().vec(); })
.def("getName", [](mlir::func::FuncOp& f) { return f.getName().str(); })
.def("getType", &mlir::func::FuncOp::getFunctionType);
py::class_<mlir::func::ReturnOp>(m, "ReturnOp")
.def("create",
[](mlir::OpBuilder& opb, mlir::Location loc,
std::vector<mlir::Value> values) -> mlir::Operation* {
return opb
.create<mlir::func::ReturnOp>(
loc, mlir::ArrayRef<mlir::Value>(values))
.getOperation();
});
py::class_<mlir::TF::AddV2Op>(m, "Tf_AddV2Op")
.def("create",
[](mlir::OpBuilder& opb, mlir::Location loc, mlir::Value x,
mlir::Value y) -> mlir::Operation* {
return opb.create<mlir::TF::AddV2Op>(loc, x, y).getOperation();
});
py::class_<mlir::TF::AnyOp>(m, "Tf_AnyOp")
.def("create",
[](mlir::OpBuilder& opb, mlir::Location loc, mlir::Value input,
mlir::Value reduction_indices,
bool keep_dims = false) -> mlir::Operation* {
return opb
.create<mlir::TF::AnyOp>(loc, opb.getI1Type(), input,
reduction_indices, keep_dims)
.getOperation();
});
py::class_<mlir::TF::ConstOp>(m, "Tf_ConstOp")
.def("create",
[](mlir::OpBuilder& opb, mlir::Location loc,
mlir::Attribute value) -> mlir::Operation* {
return opb.create<mlir::TF::ConstOp>(loc, value).getOperation();
});
py::class_<mlir::TF::EqualOp>(m, "Tf_EqualOp")
.def("create",
[](mlir::OpBuilder& opb, mlir::Location loc, mlir::Value x,
mlir::Value y) -> mlir::Operation* {
return opb
.create<mlir::TF::EqualOp>(loc, x, y, opb.getBoolAttr(true))
.getOperation();
});
py::class_<mlir::TF::GreaterEqualOp>(m, "Tf_GreaterEqualOp")
.def("create",
[](mlir::OpBuilder& opb, mlir::Location loc, mlir::Value x,
mlir::Value y) -> mlir::Operation* {
return opb.create<mlir::TF::GreaterEqualOp>(loc, x, y)
.getOperation();
});
py::class_<mlir::TF::GreaterOp>(m, "Tf_GreaterOp")
.def("create",
[](mlir::OpBuilder& opb, mlir::Location loc, mlir::Value x,
mlir::Value y) -> mlir::Operation* {
return opb.create<mlir::TF::GreaterOp>(loc, x, y).getOperation();
});
py::class_<mlir::TF::LegacyCallOp>(m, "Tf_LegacyCallOp")
.def("create",
[](mlir::OpBuilder& opb, mlir::Location loc,
std::vector<mlir::Type> output, std::vector<mlir::Value> args,
std::string f) -> mlir::Operation* {
return opb
.create<mlir::TF::LegacyCallOp>(
loc, mlir::ArrayRef<mlir::Type>(output),
mlir::ArrayRef<mlir::Value>(args), mlir::StringRef(f))
.getOperation();
});
py::class_<mlir::TF::LessEqualOp>(m, "Tf_LessEqualOp")
.def("create",
[](mlir::OpBuilder& opb, mlir::Location loc, mlir::Value x,
mlir::Value y) -> mlir::Operation* {
return opb.create<mlir::TF::LessEqualOp>(loc, x, y).getOperation();
});
py::class_<mlir::TF::LessOp>(m, "Tf_LessOp")
.def("create",
[](mlir::OpBuilder& opb, mlir::Location loc, mlir::Value x,
mlir::Value y) -> mlir::Operation* {
return opb.create<mlir::TF::LessOp>(loc, x, y).getOperation();
});
py::class_<mlir::TF::NegOp>(m, "Tf_NegOp")
.def("create",
[](mlir::OpBuilder& opb, mlir::Location loc,
mlir::Value x) -> mlir::Operation* {
return opb.create<mlir::TF::NegOp>(loc, x).getOperation();
});
py::class_<mlir::TF::NotEqualOp>(m, "Tf_NotEqualOp")
.def("create", [](mlir::OpBuilder& opb, mlir::Location loc, mlir::Value x,
mlir::Value y) {
return opb
.create<mlir::TF::NotEqualOp>(
loc, x, y, mlir::BoolAttr::get(opb.getContext(), true))
.getOperation();
});
py::class_<mlir::TF::SubOp>(m, "Tf_SubOp")
.def("create",
[](mlir::OpBuilder& opb, mlir::Location loc, mlir::Value x,
mlir::Value y) -> mlir::Operation* {
return opb.create<mlir::TF::SubOp>(loc, x, y).getOperation();
});
} | #include "tensorflow/c/ops.h"
#include "absl/strings/str_cat.h"
#include "tensorflow/c/c_api.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/fake_input.h"
#include "tensorflow/core/framework/op_def.pb.h"
#include "tensorflow/core/framework/op_def_builder.h"
#include "tensorflow/core/framework/shape_inference_testutil.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
TEST(OpsTest, TestBasicOpRegistration) {
TF_OpDefinitionBuilder* builder = TF_NewOpDefinitionBuilder("SomeOp");
TF_OpDefinitionBuilderAddAttr(builder, "attr1: string");
TF_OpDefinitionBuilderAddInput(builder, "input1: uint8");
TF_OpDefinitionBuilderAddInput(builder, "input2: uint16");
TF_OpDefinitionBuilderAddOutput(builder, "output1: uint32");
TF_Status* status = TF_NewStatus();
TF_RegisterOpDefinition(builder, status);
ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TF_Buffer* op_list_buffer = TF_GetAllOpList();
::tensorflow::OpList op_list;
op_list.ParseFromArray(op_list_buffer->data, op_list_buffer->length);
bool found = false;
for (const auto& op : op_list.op()) {
if (op.name() == "SomeOp") {
ASSERT_EQ(2, op.input_arg_size());
ASSERT_EQ("input1", op.input_arg(0).name());
ASSERT_EQ(::tensorflow::DT_UINT8, op.input_arg(0).type());
ASSERT_EQ(1, op.attr_size());
ASSERT_EQ("string", op.attr(0).type());
found = true;
}
}
EXPECT_TRUE(found);
TF_DeleteStatus(status);
TF_DeleteBuffer(op_list_buffer);
}
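// Shape-inference function that forwards the shape of input 0 to output 0.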
void identity_shape_fn(TF_ShapeInferenceContext* ctx, TF_Status* status) {
TF_ShapeHandle* handle = TF_NewShapeHandle();
TF_ShapeInferenceContextGetInput(ctx, 0, handle, status);
ASSERT_EQ(TF_OK, TF_GetCode(status));
TF_ShapeInferenceContextSetOutput(ctx, 0, handle, status);
TF_DeleteShapeHandle(handle);
}
TEST(OpsTest, TestShapeInference_IdentityFunction) {
ShapeInferenceTestOp op("SomeTestOp");
TF_OpDefinitionBuilder* builder = TF_NewOpDefinitionBuilder("SomeTestOp");
TF_OpDefinitionBuilderAddInput(builder, "input1: uint8");
TF_OpDefinitionBuilderAddOutput(builder, "output1: uint8");
TF_OpDefinitionBuilderSetShapeInferenceFunction(builder, &identity_shape_fn);
TF_Status* status = TF_NewStatus();
TF_RegisterOpDefinition(builder, status);
ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TF_ASSERT_OK(
shape_inference::ShapeInferenceTestutil::InferShapes(op, "[1,2]", "in0"));
TF_DeleteStatus(status);
}
TEST(OpsTest, TestShapeInference_UnknownShape) {
ShapeInferenceTestOp op("UnknownShapeOp");
TF_OpDefinitionBuilder* builder = TF_NewOpDefinitionBuilder("UnknownShapeOp");
TF_OpDefinitionBuilderAddInput(builder, "input1: uint8");
TF_OpDefinitionBuilderAddInput(builder, "input2: uint32");
TF_OpDefinitionBuilderAddOutput(builder, "output1: uint8");
TF_OpDefinitionBuilderAddOutput(builder, "output2: uint8");
TF_OpDefinitionBuilderSetShapeInferenceFunction(
builder, &TF_ShapeInferenceContextSetUnknownShape);
TF_Status* status = TF_NewStatus();
TF_RegisterOpDefinition(builder, status);
ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TF_ASSERT_OK(shape_inference::ShapeInferenceTestutil::InferShapes(
op, "[1,2];[3,4]", "?;?"));
TF_DeleteStatus(status);
}
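// Shape-inference function whose output is a rank-1 shape of length equal to
// the input's rank (e.g. [4,5,9] infers to [3]).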
void vectorize_shape_fn(TF_ShapeInferenceContext* ctx, TF_Status* status) {
TF_ShapeHandle* handle = TF_NewShapeHandle();
TF_ShapeInferenceContextGetInput(ctx, 0, handle, status);
ASSERT_EQ(TF_OK, TF_GetCode(status));
TF_ShapeHandle* new_shape = TF_ShapeInferenceContextVectorFromSize(
ctx, TF_ShapeInferenceContextRank(ctx, handle));
TF_ShapeInferenceContextSetOutput(ctx, 0, new_shape, status);
TF_DeleteShapeHandle(handle);
TF_DeleteShapeHandle(new_shape);
}
TEST(OpsTest, TestShapeInference_VectorizeFunction) {
ShapeInferenceTestOp op("VectorizeTestOp");
TF_OpDefinitionBuilder* builder =
TF_NewOpDefinitionBuilder("VectorizeTestOp");
TF_OpDefinitionBuilderAddInput(builder, "input1: uint8");
TF_OpDefinitionBuilderAddOutput(builder, "output1: uint8");
TF_OpDefinitionBuilderSetShapeInferenceFunction(builder, &vectorize_shape_fn);
TF_Status* status = TF_NewStatus();
TF_RegisterOpDefinition(builder, status);
ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TF_ASSERT_OK(shape_inference::ShapeInferenceTestutil::InferShapes(
op, "[4,5,9]", "[3]"));
TF_DeleteStatus(status);
}
TEST(OpsTest, AttributeAccessors) {
TF_OpDefinitionBuilder* builder =
TF_NewOpDefinitionBuilder("AttributeAccessorsOp");
TF_OpDefinitionBuilderAddAttr(builder, "foo1: int >= 2");
TF_OpDefinitionBuilderAddAttr(builder, "foo2: string=\"my string\"");
TF_OpDefinitionBuilderSetIsCommutative(builder, true);
TF_OpDefinitionBuilderSetIsAggregate(builder, true);
TF_OpDefinitionBuilderSetAllowsUninitializedInput(builder, true);
std::string deprecation_msg = "use something else instead";
TF_OpDefinitionBuilderDeprecated(builder, 4, deprecation_msg.c_str());
TF_Status* status = TF_NewStatus();
TF_RegisterOpDefinition(builder, status);
ASSERT_EQ(TF_OK, TF_GetCode(status));
TF_Buffer* op_list_buffer = TF_GetAllOpList();
::tensorflow::OpList op_list;
op_list.ParseFromArray(op_list_buffer->data, op_list_buffer->length);
bool found = false;
for (const auto& op : op_list.op()) {
if (op.name() == "AttributeAccessorsOp") {
ASSERT_TRUE(op.is_commutative());
ASSERT_TRUE(op.is_aggregate());
ASSERT_TRUE(op.allows_uninitialized_input());
ASSERT_EQ(4, op.deprecation().version());
ASSERT_EQ(deprecation_msg, op.deprecation().explanation());
ASSERT_EQ(2, op.attr_size());
ASSERT_EQ("int", op.attr(0).type());
ASSERT_EQ(2, op.attr(0).minimum());
ASSERT_EQ("string", op.attr(1).type());
ASSERT_EQ("my string", op.attr(1).default_value().s());
found = true;
}
}
ASSERT_TRUE(found);
TF_DeleteStatus(status);
TF_DeleteBuffer(op_list_buffer);
}
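// These tests reinterpret_cast between the C++ shape-inference types and the
// C API's opaque handle types, which wrap them directly.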
#define C_CTX(x) reinterpret_cast<TF_ShapeInferenceContext*>(x)
#define C_SHP(x) reinterpret_cast<TF_ShapeHandle*>(x)
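// Builds a dummy OpDef with num_inputs float inputs, num_outputs float
// outputs, and a single string attr.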
static OpDef MakeOpDef(int num_inputs, int num_outputs) {
OpRegistrationData op_reg_data;
OpDefBuilder b("dummy");
for (int i = 0; i < num_inputs; ++i) {
b.Input(strings::StrCat("i", i, ": float"));
}
for (int i = 0; i < num_outputs; ++i) {
b.Output(strings::StrCat("o", i, ": float"));
}
CHECK(b.Attr("foo:string").Finalize(&op_reg_data).ok());
return op_reg_data.op_def;
}
PartialTensorShape S(std::initializer_list<int64_t> dims) {
return PartialTensorShape(dims);
}
PartialTensorShape Unknown() { return PartialTensorShape(); }
TEST(OpsTest, ShapeInferenceWithRank) {
NodeDef def;
shape_inference::InferenceContext c(0, def, MakeOpDef(1, 0),
{S({10, 20, 30})}, {}, {}, {});
shape_inference::ShapeHandle in0 = c.input(0);
shape_inference::ShapeHandle s1;
TF_Status* status = TF_NewStatus();
TF_ShapeInferenceContextWithRankAtMost(C_CTX(&c), C_SHP(&in0), 3, C_SHP(&s1),
status);
EXPECT_EQ("[10,20,30]", c.DebugString(s1));
EXPECT_EQ(TF_OK, TF_GetCode(status));
TF_ShapeInferenceContextWithRankAtLeast(C_CTX(&c), C_SHP(&in0), 3, C_SHP(&s1),
status);
EXPECT_EQ("[10,20,30]", c.DebugString(s1));
EXPECT_EQ(TF_OK, TF_GetCode(status));
TF_ShapeInferenceContextWithRankAtLeast(C_CTX(&c), C_SHP(&in0), 6, C_SHP(&s1),
status);
ASSERT_NE(TF_OK, TF_GetCode(status));
TF_SetStatus(status, TF_OK, "");
TF_ShapeInferenceContextWithRankAtMost(C_CTX(&c), C_SHP(&in0), 1, C_SHP(&s1),
status);
ASSERT_NE(TF_OK, TF_GetCode(status));
TF_SetStatus(status, TF_OK, "");
TF_ShapeInferenceContextWithRank(C_CTX(&c), C_SHP(&in0), 3, C_SHP(&s1),
status);
ASSERT_EQ(TF_OK, TF_GetCode(status));
TF_ShapeInferenceContextWithRank(C_CTX(&c), C_SHP(&in0), 4, C_SHP(&s1),
status);
ASSERT_NE(TF_OK, TF_GetCode(status));
TF_DeleteStatus(status);
}
TEST(OpsTest, ShapeInferenceWithRank_UnknownRank) {
NodeDef def;
shape_inference::InferenceContext c(0, def, MakeOpDef(2, 2),
{Unknown(), S({1, -1, 3})}, {}, {}, {});
shape_inference::ShapeHandle in0 = c.input(0);
shape_inference::ShapeHandle s1;
TF_Status* status = TF_NewStatus();
TF_ShapeInferenceContextWithRankAtMost(C_CTX(&c), C_SHP(&in0), 1, C_SHP(&s1),
status);
EXPECT_EQ("?", c.DebugString(s1));
EXPECT_EQ(TF_OK, TF_GetCode(status));
TF_ShapeInferenceContextWithRankAtLeast(C_CTX(&c), C_SHP(&in0), 1, C_SHP(&s1),
status);
EXPECT_EQ("?", c.DebugString(s1));
EXPECT_EQ(TF_OK, TF_GetCode(status));
TF_DeleteStatus(status);
}
TEST(OpsTest, ShapeInferenceConcatenateShapes) {
NodeDef def;
shape_inference::InferenceContext c(0, def, MakeOpDef(2, 0),
{S({1, 2}), S({3, 4})}, {}, {}, {});
ASSERT_EQ(2, TF_ShapeInferenceContextNumInputs(C_CTX(&c)));
shape_inference::ShapeHandle a = c.input(0);
shape_inference::ShapeHandle b = c.input(1);
TF_ShapeHandle* result = TF_NewShapeHandle();
TF_Status* status = TF_NewStatus();
TF_ShapeInferenceContextConcatenateShapes(C_CTX(&c), C_SHP(&a), C_SHP(&b),
result, status);
EXPECT_EQ(
"[1,2,3,4]",
c.DebugString(*reinterpret_cast<shape_inference::ShapeHandle*>(result)));
EXPECT_EQ(TF_OK, TF_GetCode(status));
TF_DeleteShapeHandle(result);
TF_DeleteStatus(status);
}
TEST(OpsTest, DimensionHandleValueKnown) {
NodeDef def;
shape_inference::InferenceContext c(0, def, MakeOpDef(2, 0),
{S({1, 2}), S({3, 4})}, {}, {}, {});
TF_ShapeHandle* handle =
TF_ShapeInferenceContextVectorFromSize(C_CTX(&c), 43);
ASSERT_EQ(
"[43]",
c.DebugString(*reinterpret_cast<shape_inference::ShapeHandle*>(handle)));
ASSERT_EQ(1, TF_ShapeInferenceContextRankKnown(C_CTX(&c), handle));
ASSERT_EQ(1, TF_ShapeInferenceContextRank(C_CTX(&c), handle));
TF_DimensionHandle* dim_handle = TF_NewDimensionHandle();
TF_ShapeInferenceContextDim(C_CTX(&c), handle, 0, dim_handle);
ASSERT_EQ(1, TF_DimensionHandleValueKnown(dim_handle));
ASSERT_EQ(43, TF_DimensionHandleValue(dim_handle));
TF_DeleteShapeHandle(handle);
TF_DeleteDimensionHandle(dim_handle);
}
TEST(OpsTest, ShapeInferenceSubshape) {
NodeDef def;
shape_inference::InferenceContext c(0, def, MakeOpDef(1, 0),
{S({10, 20, 30, 40, 50})}, {}, {}, {});
ASSERT_EQ("[10,20,30,40,50]", c.DebugString(c.input(0)));
TF_ShapeHandle* handle = TF_NewShapeHandle();
TF_Status* status = TF_NewStatus();
TF_ShapeInferenceContextGetInput(C_CTX(&c), 0, handle, status);
ASSERT_EQ(TF_OK, TF_GetCode(status));
TF_ShapeInferenceContextSubshape(C_CTX(&c), handle, 1, -1, handle, status);
ASSERT_EQ(TF_OK, TF_GetCode(status));
ASSERT_EQ(
"[20,30,40]",
c.DebugString(*reinterpret_cast<shape_inference::ShapeHandle*>(handle)));
TF_DeleteStatus(status);
TF_DeleteShapeHandle(handle);
}
TEST(OpsTest, ShapeInferenceScalarShape) {
NodeDef def;
shape_inference::InferenceContext c(0, def, MakeOpDef(0, 0), {S({})}, {}, {},
{});
TF_ShapeHandle* TF_scalar_shape = TF_ShapeInferenceContextScalar(C_CTX(&c));
shape_inference::ShapeHandle* scalar_shape =
reinterpret_cast<shape_inference::ShapeHandle*>(TF_scalar_shape);
ASSERT_EQ("[]", c.DebugString(*scalar_shape));
TF_DeleteShapeHandle(TF_scalar_shape);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/python/mlir_wrapper/ops.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/c/ops_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
5edad00f-282c-410e-9002-a59cd79e26b9 | cpp | tensorflow/tensorflow | host_offloader | third_party/xla/xla/service/host_offloader.cc | third_party/xla/xla/service/host_offloader_test.cc | #include "xla/service/host_offloader.h"
#include <array>
#include <cstddef>
#include <cstdint>
#include <iomanip>
#include <memory>
#include <optional>
#include <queue>
#include <string>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_join.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/literal_util.h"
#include "xla/service/call_graph.h"
#include "xla/service/hlo_alias_analysis.h"
#include "xla/service/hlo_buffer.h"
#include "xla/service/hlo_cse.h"
#include "xla/service/hlo_value.h"
#include "xla/service/host_memory_offload_annotations.h"
#include "xla/service/host_offload_utils.h"
#include "xla/service/pattern_matcher.h"
#include "xla/shape.h"
#include "xla/shape_tree.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/status.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
using ::xla::host_offload_utils::InstructionAndShapeIndex;
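// Overwrites the memory space color in the shape's layout; the shape must
// already have a layout.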
void SetMemorySpace(Shape* shape, int64_t memory_space_color) {
CHECK(shape->has_layout());
shape->mutable_layout()->set_memory_space(memory_space_color);
}
bool SetBuffersToMemorySpaceColor(
const std::vector<InstructionAndShapeIndex>& buffers_to_set_to_host_memory,
int64_t memory_space_color) {
bool changed = false;
for (const auto& instr_and_shape : buffers_to_set_to_host_memory) {
VLOG(2) << absl::StreamFormat("Setting %s to memory space %d",
instr_and_shape.ToString(),
memory_space_color);
Shape* shape = ShapeUtil::GetMutableSubshape(
instr_and_shape.instruction->mutable_shape(),
instr_and_shape.shape_index);
CHECK(shape->has_layout()) << "Shape must have a layout";
SetMemorySpace(ShapeUtil::GetMutableSubshape(
instr_and_shape.instruction->mutable_shape(),
instr_and_shape.shape_index),
memory_space_color);
changed = true;
}
return changed;
}
}
bool HostOffloader::InstructionIsAllowedBetweenMoveToHostAndDus(
const HloInstruction* instruction) const {
if (instruction->opcode() == HloOpcode::kReshape) {
return ShapeUtil::ReshapeIsBitcast(instruction->operand(0)->shape(),
instruction->shape());
}
return (instruction->opcode() == HloOpcode::kBitcast ||
instruction->opcode() == HloOpcode::kCopy);
}
bool HostOffloader::InstructionIsAllowedBetweenDsAndMoveToDevice(
const HloInstruction* instruction) const {
if (instruction->opcode() == HloOpcode::kReduce) {
return ShapeUtil::TrueRank(instruction->operand(0)->shape()) ==
ShapeUtil::TrueRank(instruction->shape());
}
if (instruction->opcode() == HloOpcode::kReshape) {
return ShapeUtil::ReshapeIsBitcast(instruction->operand(0)->shape(),
instruction->shape());
}
return instruction->opcode() == HloOpcode::kBitcast ||
instruction->opcode() == HloOpcode::kCopy;
}
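// Breadth-first walk over users from the given starting point: collects every
// buffer that must be colored host memory, and rewrites the graph along the
// way (AllocateBuffers for dynamic-update-slices, copies before MoveToDevice
// calls, static slices rewritten as dynamic slices). Fails if the offloaded
// tensor reaches an instruction that is invalid during pure memory offload.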
absl::StatusOr<bool> HostOffloader::WalkDownHostMemoryOffloadPaths(
const InstructionAndShapeIndex& starting_instruction_and_index,
bool insert_copy_before) {
bool changed = false;
absl::flat_hash_set<HloInstruction*> mth_custom_calls_to_remove;
absl::flat_hash_set<HloInstruction*> slices_to_dynamify;
absl::flat_hash_set<HloInstruction*> custom_calls_to_insert_copies_before;
std::vector<InstructionAndShapeIndex> buffers_to_set_to_host_memory;
std::vector<HloInstruction*> dynamic_update_slices;
HloInstruction* starting_instruction =
starting_instruction_and_index.instruction;
std::queue<InstructionAndShapeIndex> queue;
queue.push(starting_instruction_and_index);
while (!queue.empty()) {
InstructionAndShapeIndex instruction_and_shape_index = queue.front();
queue.pop();
HloInstruction* instruction = instruction_and_shape_index.instruction;
VLOG(4) << absl::StreamFormat("Visiting instruction: %s",
instruction_and_shape_index.ToString());
bool already_saved_buffer = false;
if (instruction->opcode() == HloOpcode::kCustomCall &&
instruction->custom_call_target() ==
host_memory_offload_annotations::kMoveToHostCustomCallTarget) {
already_visited_move_to_host_custom_calls_.insert(instruction);
mth_custom_calls_to_remove.insert(instruction);
} else if (instruction->opcode() == HloOpcode::kCustomCall &&
instruction->custom_call_target() ==
host_memory_offload_annotations::
kMoveToDeviceCustomCallTarget) {
custom_calls_to_insert_copies_before.insert(instruction);
continue;
} else if (instruction->opcode() == HloOpcode::kDynamicUpdateSlice) {
if (instruction == starting_instruction) {
dynamic_update_slices.push_back(instruction);
} else {
dynamic_update_slices_already_allocated_.insert(instruction);
}
} else if (host_offload_utils::IsValidDuringPureMemoryOffload(
instruction)) {
if (instruction->opcode() == HloOpcode::kAsyncStart) {
already_saved_buffer = true;
} else if (instruction->opcode() == HloOpcode::kAsyncDone) {
HloInstruction* async_start = instruction->mutable_operand(0);
buffers_to_set_to_host_memory.emplace_back(async_start, ShapeIndex{1});
} else if (instruction->opcode() == HloOpcode::kParameter) {
std::unique_ptr<CallGraph> call_graph =
CallGraph::Build(instruction->GetModule());
std::vector<HloInstruction*> callers =
call_graph->GetComputationCallers(instruction->parent());
for (HloInstruction* caller : callers) {
if (caller->opcode() == HloOpcode::kAsyncStart) {
ShapeIndex tmp_index = instruction_and_shape_index.shape_index;
tmp_index.push_front(instruction->parameter_number());
            tmp_index.push_front(0);  // Tuple index 0: the async-start's wrapped operands.
buffers_to_set_to_host_memory.emplace_back(caller, tmp_index);
}
}
}
} else if (instruction->opcode() == HloOpcode::kDynamicSlice) {
TF_RETURN_IF_ERROR(
ValidateSliceLeadsToMoveToDeviceCustomCall(instruction));
continue;
} else if (instruction->opcode() == HloOpcode::kSlice) {
TF_RETURN_IF_ERROR(
ValidateSliceLeadsToMoveToDeviceCustomCall(instruction));
slices_to_dynamify.insert(instruction);
continue;
} else {
return absl::InvalidArgumentError(
absl::StrFormat("Tensor which is moved to host (starting from "
"\"%s\") is used by an instruction (\"%s\") which is "
"not acceptable during pure memory offload.",
starting_instruction->name(), instruction->name()));
}
if (!already_saved_buffer) {
VLOG(5) << "Saving " << instruction_and_shape_index.ToString()
<< " to be set to host memory.";
buffers_to_set_to_host_memory.push_back(instruction_and_shape_index);
}
if (instruction->IsRoot() && instruction->parent()->IsEntryComputation()) {
const Shape& output_shape = ShapeUtil::GetSubshape(
instruction->GetModule()->entry_computation_layout().result_shape(),
instruction_and_shape_index.shape_index);
CHECK(output_shape.has_layout())
<< "Expecting output shape of entry computation to have a layout.";
if (output_shape.layout().memory_space() == kHostMemorySpaceColor) {
VLOG(2) << absl::StreamFormat(
"Memory offloaded starting from %s is output streamed",
starting_instruction_and_index.ToString());
continue;
} else {
return absl::InvalidArgumentError(
absl::StrFormat("Tensor which is moved to host (starting from %s) "
"is returned from the entry computation but the "
"layout for this output is not set to host memory.",
starting_instruction->name()));
}
}
TF_ASSIGN_OR_RETURN(
const std::vector<InstructionAndShapeIndex> successors,
host_offload_utils::GetSuccessors(instruction_and_shape_index));
for (const InstructionAndShapeIndex& successor : successors) {
queue.push(successor);
}
}
const bool set_buffers_changed = SetBuffersToMemorySpaceColor(
buffers_to_set_to_host_memory, kHostMemorySpaceColor);
changed = changed || set_buffers_changed;
for (HloInstruction* dus : dynamic_update_slices) {
TF_RETURN_IF_ERROR(CreateAllocateBufferForDynamicUpdateSlice(dus));
changed = true;
}
if (insert_copy_before) {
const auto predecessors =
host_offload_utils::GetPredecessors(starting_instruction_and_index);
CHECK_EQ(predecessors.size(), 1);
TF_ASSIGN_OR_RETURN(bool inserted_copy,
InsertCopyBetween(predecessors.front(),
starting_instruction_and_index));
changed = changed || inserted_copy;
}
for (HloInstruction* custom_call : custom_calls_to_insert_copies_before) {
HloInstruction* data_to_copy = custom_call->mutable_operand(0);
HloInstruction* copy_to_device =
data_to_copy->parent()->AddInstruction(HloInstruction::CreateUnary(
data_to_copy->shape(), HloOpcode::kCopy, data_to_copy));
SetMemorySpace(copy_to_device->mutable_shape(),
Layout::kDefaultMemorySpace);
VLOG(1) << absl::StreamFormat(
"Inserted copy \"%s\" before custom call \"%s\"",
copy_to_device->name(), custom_call->name());
TF_RETURN_IF_ERROR(custom_call->ReplaceAllUsesWith(copy_to_device));
changed = true;
}
for (HloInstruction* custom_call : mth_custom_calls_to_remove) {
VLOG(1) << absl::StreamFormat("Removing MoveToHost custom call \"%s\"",
custom_call->name());
TF_RETURN_IF_ERROR(
custom_call->ReplaceAllUsesWith(custom_call->mutable_operand(0)));
TF_RETURN_IF_ERROR(custom_call->parent()->RemoveInstruction(custom_call));
changed = true;
}
for (HloInstruction* slice : slices_to_dynamify) {
TF_ASSIGN_OR_RETURN(HloInstruction * dynamic_slice, DynamifySlice(slice));
validated_slices_.insert(dynamic_slice);
changed = true;
}
return changed;
}
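// Starts a host-memory walk from every entry parameter subshape whose layout
// is already pinned to host memory (input streaming).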
absl::StatusOr<bool> HostOffloader::HandleInputStreaming(
HloComputation* entry_computation) {
bool changed = false;
const ComputationLayout& entry_computation_layout =
entry_computation->parent()->entry_computation_layout();
for (int i = 0; i < entry_computation_layout.parameter_count(); ++i) {
if (entry_computation_layout.parameter_shape(i).IsToken()) {
LOG(WARNING) << "Token parameters are not supported for streaming.";
continue;
}
TF_RETURN_IF_ERROR(ShapeUtil::ForEachSubshapeWithStatus(
entry_computation_layout.parameter_shape(i),
[&](const Shape& subshape, const ShapeIndex& index) {
if (subshape.has_layout() &&
subshape.layout().memory_space() == kHostMemorySpaceColor) {
HloInstruction* parameter_instruction =
entry_computation->parameter_instruction(i);
VLOG(1) << "Host parameter streamed into program with shape: "
<< subshape.ToString(true) << " at index "
<< index.ToString();
TF_ASSIGN_OR_RETURN(
bool result,
WalkDownHostMemoryOffloadPaths(
InstructionAndShapeIndex(parameter_instruction, index),
                  /*insert_copy_before=*/false));
changed = changed || result;
}
return absl::OkStatus();
}));
}
return changed;
}
absl::StatusOr<bool> HostOffloader::HandleMoveToHostCustomCall(
HloInstruction* custom_call_instruction) {
if (already_visited_move_to_host_custom_calls_.contains(
custom_call_instruction)) {
return false;
}
VLOG(1) << "Offloading " << custom_call_instruction->operand(0)->name()
<< " to host.";
TF_ASSIGN_OR_RETURN(
std::vector<InstructionAndShapeIndex> starting_instruction_and_shapes,
GetStartingInstructions(custom_call_instruction));
if (starting_instruction_and_shapes.empty()) {
if (custom_call_instruction == custom_call_instruction->GetModule()
->entry_computation()
->root_instruction()) {
HloInstruction* data_to_copy =
custom_call_instruction->mutable_operand(0);
HloInstruction* copy_to_host =
data_to_copy->parent()->AddInstruction(HloInstruction::CreateUnary(
data_to_copy->shape(), HloOpcode::kCopy, data_to_copy));
SetMemorySpace(copy_to_host->mutable_shape(), kHostMemorySpaceColor);
TF_RETURN_IF_ERROR(
custom_call_instruction->ReplaceAllUsesWith(copy_to_host));
VLOG(2) << absl::StreamFormat(
"Custom call \"%s\" is entry computation root. Inserted copy \"%s\" "
"and replaced root instruction.",
custom_call_instruction->name(), copy_to_host->name());
}
}
for (const InstructionAndShapeIndex& starting_instruction_and_shape :
starting_instruction_and_shapes) {
const bool should_insert_copy_before_instruction =
starting_instruction_and_shape.instruction->opcode() !=
HloOpcode::kDynamicUpdateSlice;
TF_ASSIGN_OR_RETURN(
bool result,
WalkDownHostMemoryOffloadPaths(starting_instruction_and_shape,
should_insert_copy_before_instruction));
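    // The walk's change-status is deliberately ignored; this handler always
    // reports a change after removing the custom call below.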
(void)result;
}
already_visited_move_to_host_custom_calls_.insert(custom_call_instruction);
VLOG(2) << absl::StreamFormat("Removing MoveToHost custom call \"%s\"",
custom_call_instruction->name());
TF_RETURN_IF_ERROR(custom_call_instruction->ReplaceAllUsesWith(
custom_call_instruction->mutable_operand(0)));
TF_RETURN_IF_ERROR(custom_call_instruction->parent()->RemoveInstruction(
custom_call_instruction));
return true;
}
absl::StatusOr<bool> HostOffloader::HandleMoveToDeviceCustomCall(
HloInstruction* custom_call_instruction) {
VLOG(2) << absl::StreamFormat("Removing MoveToDevice custom call \"%s\"",
custom_call_instruction->name());
TF_RETURN_IF_ERROR(custom_call_instruction->ReplaceAllUsesWith(
custom_call_instruction->mutable_operand(0)));
TF_RETURN_IF_ERROR(custom_call_instruction->parent()->RemoveInstruction(
custom_call_instruction));
move_to_device_custom_calls_to_remove_.insert(custom_call_instruction);
return true;
}
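// Inserts (or reuses a previously created) host-memory copy of `before` and
// rewires `after` to consume it. When `after` is a parameter, the
// corresponding operand of each caller is rewritten instead.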
absl::StatusOr<bool> HostOffloader::InsertCopyBetween(
const InstructionAndShapeIndex& before_instruction_and_index,
const InstructionAndShapeIndex& after_instruction_and_index) {
bool changed = false;
HloInstruction* after_instruction = after_instruction_and_index.instruction;
std::vector<InstructionAndShapeIndex> instructions_to_insert_copies_before;
if (after_instruction->opcode() == HloOpcode::kParameter) {
std::unique_ptr<CallGraph> call_graph =
CallGraph::Build(after_instruction->GetModule());
auto callers =
call_graph->GetComputationCallers(after_instruction->parent());
for (HloInstruction* caller : callers) {
const auto indices =
caller->OperandIndices(before_instruction_and_index.instruction);
for (int64_t index : indices) {
instructions_to_insert_copies_before.push_back(
InstructionAndShapeIndex{caller, {index}});
}
}
} else {
instructions_to_insert_copies_before.push_back(after_instruction_and_index);
}
for (const InstructionAndShapeIndex& instruction_and_index :
instructions_to_insert_copies_before) {
if (already_inserted_copy_before_.find(instruction_and_index) ==
already_inserted_copy_before_.end()) {
HloInstruction* data_to_copy = before_instruction_and_index.instruction;
HloInstruction* copy_to_host;
auto it = copies_created_after_.find(data_to_copy);
if (it == copies_created_after_.end()) {
copy_to_host =
data_to_copy->parent()->AddInstruction(HloInstruction::CreateUnary(
data_to_copy->shape(), HloOpcode::kCopy, data_to_copy));
SetMemorySpace(copy_to_host->mutable_shape(), kHostMemorySpaceColor);
copies_created_after_[data_to_copy] = copy_to_host;
} else {
copy_to_host = it->second;
}
const int64_t operand_index =
after_instruction_and_index.shape_index.empty()
? 0
: after_instruction_and_index.shape_index.front();
TF_RETURN_IF_ERROR(instruction_and_index.instruction->ReplaceOperandWith(
operand_index, copy_to_host));
VLOG(2) << absl::StreamFormat(
"Inserted copy \"%s\" between \"%s\" and \"%s\"",
copy_to_host->name(), before_instruction_and_index.ToString(),
after_instruction_and_index.ToString());
already_inserted_copy_before_.insert(instruction_and_index);
changed = true;
}
}
return changed;
}
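// BFS from the MoveToHost custom call's users to find where offloading work
// begins: dynamic-update-slices, plus the first instruction on each path that
// is not an allowed pass-through between MoveToHost and a DUS.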
absl::StatusOr<std::vector<InstructionAndShapeIndex>>
HostOffloader::GetStartingInstructions(
HloInstruction* custom_call_instruction) {
std::vector<InstructionAndShapeIndex> result;
std::queue<InstructionAndShapeIndex> queue;
TF_ASSIGN_OR_RETURN(
const std::vector<InstructionAndShapeIndex> successors_of_custom_call,
host_offload_utils::GetSuccessors(
InstructionAndShapeIndex(custom_call_instruction)));
for (const InstructionAndShapeIndex& successor : successors_of_custom_call) {
queue.push(successor);
}
while (!queue.empty()) {
InstructionAndShapeIndex instruction_and_shape = queue.front();
queue.pop();
HloInstruction* current_instruction = instruction_and_shape.instruction;
if (current_instruction->opcode() == HloOpcode::kDynamicUpdateSlice) {
result.push_back(instruction_and_shape);
continue;
} else if (!InstructionIsAllowedBetweenMoveToHostAndDus(
current_instruction)) {
result.push_back(instruction_and_shape);
continue;
    }  // Otherwise: an allowed pass-through (bitcast/reshape/copy); keep walking.
TF_ASSIGN_OR_RETURN(
const std::vector<InstructionAndShapeIndex> successors,
host_offload_utils::GetSuccessors(instruction_and_shape));
for (const InstructionAndShapeIndex& successor : successors) {
queue.push(successor);
}
}
return result;
}
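// Verifies that every use chain leaving the (dynamic-)slice reaches a
// MoveToDevice custom call through allowed instructions only.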
absl::Status HostOffloader::ValidateSliceLeadsToMoveToDeviceCustomCall(
HloInstruction* slice) {
if (validated_slices_.find(slice) != validated_slices_.end()) {
return absl::OkStatus();
}
CHECK(slice->opcode() == HloOpcode::kDynamicSlice ||
slice->opcode() == HloOpcode::kSlice)
<< "This function must only be called with a slice or dynamic slice.";
std::queue<InstructionAndShapeIndex> queue;
TF_ASSIGN_OR_RETURN(
const std::vector<InstructionAndShapeIndex> successors_of_slice,
host_offload_utils::GetSuccessors(InstructionAndShapeIndex(slice)));
for (const InstructionAndShapeIndex& successor : successors_of_slice) {
queue.push(successor);
}
while (!queue.empty()) {
InstructionAndShapeIndex instruction_and_shape = queue.front();
queue.pop();
HloInstruction* current_instruction = instruction_and_shape.instruction;
if (current_instruction->opcode() == HloOpcode::kCustomCall &&
current_instruction->custom_call_target() ==
host_memory_offload_annotations::kMoveToDeviceCustomCallTarget) {
continue;
}
if (!InstructionIsAllowedBetweenDsAndMoveToDevice(current_instruction)) {
return absl::InvalidArgumentError(absl::StrFormat(
"Tensor which is moved to host and back to device (ending at \"%s\") "
"has an invalid instruction (\"%s\") between DynamicSlice/Slice and "
"the MoveToDevice custom call.",
slice->name(), current_instruction->name()));
}
TF_ASSIGN_OR_RETURN(
const std::vector<InstructionAndShapeIndex> successors,
host_offload_utils::GetSuccessors(instruction_and_shape));
for (const InstructionAndShapeIndex& successor : successors) {
queue.push(successor);
}
}
validated_slices_.insert(slice);
return absl::OkStatus();
}
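// Colors the DUS's destination buffer host-side by walking predecessors
// (including through while loops and their condition parameters) and replaces
// the feeding broadcast with a host-memory AllocateBuffer custom call.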
absl::Status HostOffloader::CreateAllocateBufferForDynamicUpdateSlice(
HloInstruction* dynamic_update_slice) {
if (dynamic_update_slices_already_allocated_.find(dynamic_update_slice) !=
dynamic_update_slices_already_allocated_.end()) {
return absl::OkStatus();
}
VLOG(2) << absl::StreamFormat(
"Creating a AllocateBuffer in host memory space for \"%s\"",
dynamic_update_slice->name());
std::queue<InstructionAndShapeIndex> queue;
queue.push(InstructionAndShapeIndex(dynamic_update_slice));
bool found_broadcast = false;
while (!queue.empty()) {
InstructionAndShapeIndex instruction_and_shape = queue.front();
queue.pop();
VLOG(2) << absl::StreamFormat("Setting %s to have host memory space",
instruction_and_shape.ToString());
SetMemorySpace(ShapeUtil::GetMutableSubshape(
instruction_and_shape.instruction->mutable_shape(),
instruction_and_shape.shape_index),
kHostMemorySpaceColor);
HloInstruction* instruction = instruction_and_shape.instruction;
if (instruction->opcode() == HloOpcode::kParameter) {
std::unique_ptr<CallGraph> call_graph =
CallGraph::Build(instruction->GetModule());
const std::vector<HloInstruction*> callers =
call_graph->GetComputationCallers(instruction->parent());
for (HloInstruction* caller : callers) {
if (caller->opcode() == HloOpcode::kWhile) {
CHECK(caller->while_body() == instruction->parent())
<< "We assume that we're starting from the while body";
HloComputation* while_condition_computation =
caller->while_condition();
CHECK(while_condition_computation->num_parameters() == 1)
<< "Expecting While to have just 1 parameter";
HloInstruction* while_condition_parameter =
while_condition_computation->parameter_instruction(0);
VLOG(2) << absl::StreamFormat("Setting %s to have host memory space",
while_condition_parameter->name());
SetMemorySpace(ShapeUtil::GetMutableSubshape(
while_condition_parameter->mutable_shape(),
instruction_and_shape.shape_index),
kHostMemorySpaceColor);
std::queue<InstructionAndShapeIndex> nested_queue;
nested_queue.push(InstructionAndShapeIndex(
while_condition_parameter, instruction_and_shape.shape_index));
while (!nested_queue.empty()) {
InstructionAndShapeIndex nested_instruction_and_shape =
nested_queue.front();
nested_queue.pop();
if (!host_offload_utils::IsValidDuringPureMemoryOffload(
nested_instruction_and_shape.instruction)) {
return absl::InvalidArgumentError(absl::StrFormat(
"Tensor which is moved to host is used by an invalid "
"instruction (\"%s\") during while condition body.",
nested_instruction_and_shape.instruction->name()));
}
SetMemorySpace(
ShapeUtil::GetMutableSubshape(
nested_instruction_and_shape.instruction->mutable_shape(),
nested_instruction_and_shape.shape_index),
kHostMemorySpaceColor);
TF_ASSIGN_OR_RETURN(
const std::vector<InstructionAndShapeIndex> successors,
host_offload_utils::GetSuccessors(
nested_instruction_and_shape));
for (const InstructionAndShapeIndex& successor : successors) {
nested_queue.push(successor);
}
}
}
}
} else if (instruction->opcode() == HloOpcode::kDynamicUpdateSlice) {
dynamic_update_slices_already_allocated_.insert(instruction);
} else if (instruction->IsCustomCall("AllocateBuffer")) {
VLOG(2) << absl::StreamFormat(
"DynamicUpdateSlice \"%s\" already writes into an AllocateBuffer "
"\"%s\"",
dynamic_update_slice->name(), instruction->name());
return absl::OkStatus();
}
const std::vector<InstructionAndShapeIndex> predecessors =
host_offload_utils::GetPredecessors(instruction_and_shape);
for (const InstructionAndShapeIndex& predecessor : predecessors) {
HloInstruction* predecessor_instruction = predecessor.instruction;
if (predecessor_instruction->opcode() == HloOpcode::kBroadcast) {
found_broadcast = true;
HloInstruction* broadcast_user = instruction_and_shape.instruction;
const auto operand_indices =
broadcast_user->OperandIndices(predecessor_instruction);
CHECK(!operand_indices.empty())
<< "We could only have the broadcast as a predecessor if it is an "
"operand of this instruction; something is wrong.";
HloInstruction* allocate_buffer =
predecessor_instruction->parent()->AddInstruction(
HloInstruction::CreateCustomCall(
predecessor_instruction->shape(), {}, "AllocateBuffer"));
VLOG(1) << absl::StreamFormat(
"Created new AllocateBuffer instruction \"%s\"",
allocate_buffer->ToString());
SetMemorySpace(allocate_buffer->mutable_shape(), kHostMemorySpaceColor);
for (int64_t index : operand_indices) {
TF_RETURN_IF_ERROR(
broadcast_user->ReplaceOperandWith(index, allocate_buffer));
}
if (predecessor_instruction->user_count() == 0) {
VLOG(3) << absl::StreamFormat(
"Broadcast \"%s\" has no remaining users; removing.",
predecessor_instruction->name());
TF_RETURN_IF_ERROR(
predecessor_instruction->parent()->RemoveInstruction(
predecessor_instruction));
}
} else {
queue.push(predecessor);
}
}
}
if (!found_broadcast) {
return absl::InvalidArgumentError(
absl::StrFormat("DynamicUpdateSlice \"%s\"'s first operand is not the "
"result of a broadcast.",
dynamic_update_slice->name()));
}
return absl::OkStatus();
}
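// Rewrites a static slice into an equivalent dynamic-slice with constant
// start indices.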
absl::StatusOr<HloInstruction*> HostOffloader::DynamifySlice(
HloInstruction* slice) {
std::vector<HloInstruction*> start_constants;
for (int64_t start : slice->slice_starts()) {
HloInstruction* constant = slice->parent()->AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32_t>(start)));
start_constants.push_back(constant);
}
std::vector<int64_t> slice_sizes;
slice_sizes.reserve(slice->slice_limits().size());
for (int i = 0; i < slice->slice_limits().size(); ++i) {
slice_sizes.push_back(slice->slice_limits()[i] - slice->slice_starts()[i]);
}
HloInstruction* new_ds =
slice->parent()->AddInstruction(HloInstruction::CreateDynamicSlice(
slice->shape(), slice->mutable_operand(0), start_constants,
slice_sizes));
TF_RETURN_IF_ERROR(slice->ReplaceAllUsesWith(new_ds));
VLOG(2) << absl::StreamFormat(
"Changed slice \"%s\" into dynamic slice \"%s\"", slice->name(),
new_ds->name());
TF_RETURN_IF_ERROR(slice->parent()->RemoveInstruction(slice));
return new_ds;
}
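// If a host-memory DUS in a non-entry computation updates from a value
// aliased with a parameter buffer, copy that operand first; presumably this
// keeps the scheduler from aliasing the parameter with the host write.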
absl::StatusOr<bool> HostOffloader::ApplySchedulingFix(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
bool changed = false;
TF_ASSIGN_OR_RETURN(std::unique_ptr<HloAliasAnalysis> alias_analysis,
HloAliasAnalysis::Run(module));
auto uses_parameter_buffer = [&](HloInstruction* hlo) {
for (const HloBuffer* buffer : alias_analysis->ComputeBuffersAt(hlo)) {
for (const HloValue* value : buffer->values()) {
for (const HloPosition& pos : value->positions()) {
if (absl::c_linear_search(hlo->parent()->parameter_instructions(),
pos.instruction)) {
return true;
}
}
}
}
return false;
};
for (HloComputation* computation :
module->MakeComputationPostOrder(execution_threads)) {
if (computation == computation->parent()->entry_computation()) {
continue;
}
for (HloInstruction* instruction :
computation->MakeInstructionPostOrder()) {
if (instruction->opcode() != HloOpcode::kDynamicUpdateSlice) {
continue;
}
if (instruction->shape().layout().memory_space() !=
kHostMemorySpaceColor) {
continue;
}
HloInstruction* operand = instruction->mutable_operand(1);
if (uses_parameter_buffer(operand)) {
HloInstruction* copy =
instruction->parent()->AddInstruction(HloInstruction::CreateUnary(
operand->shape(), HloOpcode::kCopy, operand));
VLOG(5) << "Added copy " << std::quoted(copy->name())
<< " for DynamicUpdateSlice " << instruction->name()
<< "'s 1st operand " << operand->name();
TF_RETURN_IF_ERROR(instruction->ReplaceOperandWith(1, copy));
changed = true;
}
}
}
return changed;
}
namespace {
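// An async host computation is expected to contain only parameters and its
// root instruction.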
absl::Status ValidateAsyncComputationStructure(HloComputation* computation) {
for (HloInstruction* instr : computation->instructions()) {
if (instr->opcode() == HloOpcode::kParameter || instr->IsRoot()) {
continue;
}
return absl::InternalError(
absl::StrCat("Unexpected instruction found in async computation: ",
instr->ToString()));
}
return absl::OkStatus();
}
absl::StatusOr<bool> UpdateMemorySpaceForHostOffloadedOutputs(
HloInstruction* call_start,
ShapeTree<std::vector<InstructionAndShapeIndex>> host_instrs_tree) {
std::vector<InstructionAndShapeIndex> to_replace;
HloComputation* called_computation = call_start->async_wrapped_computation();
TF_RETURN_IF_ERROR(ValidateAsyncComputationStructure(called_computation));
HloInstruction* root = called_computation->root_instruction();
Shape* root_shape = root->mutable_shape();
host_instrs_tree.ForEachMutableElement([&](ShapeIndex output_index,
std::vector<
InstructionAndShapeIndex>*
instruction_and_shape_indexes)
-> void {
for (InstructionAndShapeIndex& instr_and_shape :
*instruction_and_shape_indexes) {
if (instr_and_shape.instruction->IsCustomCall(
host_memory_offload_annotations::kMoveToHostCustomCallTarget)) {
to_replace.emplace_back(instr_and_shape);
continue;
}
SetMemorySpace(ShapeUtil::GetMutableSubshape(
instr_and_shape.instruction->mutable_shape(),
instr_and_shape.shape_index),
Layout::kHostMemorySpace);
}
if (!instruction_and_shape_indexes->empty()) {
SetMemorySpace(ShapeUtil::GetMutableSubshape(root_shape, output_index),
Layout::kHostMemorySpace);
}
});
bool modified = false;
for (InstructionAndShapeIndex& instr_and_shape : to_replace) {
modified = true;
HloInstruction* pred = instr_and_shape.instruction->mutable_operand(0);
TF_RETURN_IF_ERROR(instr_and_shape.instruction->ReplaceAllUsesWith(pred));
}
return modified;
}
bool ExtraCheckForValidUsageOnHostForHostOffloadedOutputs(
const Shape& entry_computation_shape,
InstructionAndShapeIndex& instruction_and_shape_index) {
HloInstruction* instruction = instruction_and_shape_index.instruction;
ShapeIndex& shape_index = instruction_and_shape_index.shape_index;
if (instruction->IsRoot() && instruction->parent()->IsEntryComputation()) {
if (ShapeUtil::GetSubshape(entry_computation_shape, shape_index)
.layout()
.memory_space() != Layout::kHostMemorySpace) {
return false;
}
}
if (instruction->opcode() == HloOpcode::kCustomCall &&
instruction->custom_call_target() !=
host_memory_offload_annotations::kMoveToHostCustomCallTarget) {
return false;
}
if (instruction->opcode() == HloOpcode::kAsyncStart ||
instruction->opcode() == HloOpcode::kAsyncDone) {
return false;
}
return true;
}
}
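// If every consumer chain of a host-async call's output stays host-valid all
// the way to the entry root, color those buffers as host memory and drop the
// now-redundant MoveToHost custom calls.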
absl::StatusOr<bool> HostOffloader::HandleRedundantCopiesBackToHost(
const HloModule* module, HloInstruction* instruction) {
HloAsyncInstruction* call_start = Cast<HloAsyncInstruction>(instruction);
CHECK_EQ(call_start->users().size(), 1);
HloInstruction* call_done = call_start->users()[0];
const Shape& entry_computation_shape =
module->entry_computation_layout().result_layout().shape();
Shape* done_shape = call_done->mutable_shape();
ShapeTree<std::vector<InstructionAndShapeIndex>> host_instrs_tree(done_shape);
TF_RETURN_IF_ERROR(ShapeUtil::ForEachMutableLeafShapeWithStatus(
done_shape, [&](Shape* subshape, const ShapeIndex& output_shape_index) {
std::queue<InstructionAndShapeIndex> queue;
queue.push(InstructionAndShapeIndex(call_done, output_shape_index));
constexpr int64_t kShapeTupleOutputIndexInAsyncStart = 1;
std::vector<int32_t> start_shape_index_vec;
start_shape_index_vec.push_back(kShapeTupleOutputIndexInAsyncStart);
start_shape_index_vec.insert(start_shape_index_vec.end(),
output_shape_index.begin(),
output_shape_index.end());
ShapeIndex start_shape_index = {start_shape_index_vec.begin(),
start_shape_index_vec.end()};
host_instrs_tree.mutable_element(output_shape_index)
->push_back(
InstructionAndShapeIndex(call_start, start_shape_index));
host_instrs_tree.mutable_element(output_shape_index)
->push_back(
InstructionAndShapeIndex(call_done, output_shape_index));
bool host_only = true;
bool entry_compute_output = false;
while (!queue.empty() && host_only) {
InstructionAndShapeIndex instruction_and_shape_index = queue.front();
queue.pop();
for (HloInstruction* user :
instruction_and_shape_index.instruction->users()) {
if (user->opcode() == HloOpcode::kAsyncStart) {
host_only = false;
break;
}
}
TF_ASSIGN_OR_RETURN(
std::vector<InstructionAndShapeIndex> successors,
host_offload_utils::GetSuccessors(InstructionAndShapeIndex(
instruction_and_shape_index.instruction,
instruction_and_shape_index.shape_index)));
for (InstructionAndShapeIndex& successor : successors) {
if (!host_offload_utils::IsValidDuringPureMemoryOffload(
successor.instruction) ||
!ExtraCheckForValidUsageOnHostForHostOffloadedOutputs(
entry_computation_shape, successor)) {
host_only = false;
break;
}
if (successor.instruction->IsRoot() &&
successor.instruction->parent()->IsEntryComputation()) {
entry_compute_output = true;
}
queue.push(successor);
host_instrs_tree.mutable_element(output_shape_index)
->emplace_back(successor);
}
}
if (!host_only || !entry_compute_output) {
host_instrs_tree.mutable_element(output_shape_index)->clear();
}
return absl::OkStatus();
}));
return UpdateMemorySpaceForHostOffloadedOutputs(call_start, host_instrs_tree);
}
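// Pass order: host-async output handling, input streaming, MoveToHost
// handling to a fixpoint, MoveToDevice handling, the DUS scheduling fix, and
// finally layout-sensitive CSE.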
absl::StatusOr<bool> HostOffloader::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
bool changed = false;
bool changed_in_loop;
for (HloComputation* computation :
module->MakeNonfusionComputations(execution_threads)) {
for (HloInstruction* instruction : computation->instructions()) {
if (host_offload_utils::IsHostAsyncStart(instruction)) {
TF_ASSIGN_OR_RETURN(changed_in_loop, HandleRedundantCopiesBackToHost(
module, instruction));
changed = changed || changed_in_loop;
}
}
}
TF_ASSIGN_OR_RETURN(const bool input_streaming_changed_module,
HandleInputStreaming(module->entry_computation()));
changed = changed || input_streaming_changed_module;
do {
changed_in_loop = false;
std::vector<HloComputation*> post_order_computations =
module->MakeComputationPostOrder(execution_threads);
for (auto it = post_order_computations.rbegin();
it != post_order_computations.rend(); ++it) {
HloComputation* computation = *it;
for (HloInstruction* instruction :
computation->MakeInstructionPostOrder()) {
if (instruction->IsCustomCall(
host_memory_offload_annotations::kMoveToHostCustomCallTarget)) {
TF_ASSIGN_OR_RETURN(changed_in_loop,
HandleMoveToHostCustomCall(instruction));
if (changed_in_loop) {
changed = true;
break;
}
}
}
if (changed_in_loop) {
break;
}
}
} while (changed_in_loop);
for (HloComputation* computation :
module->MakeComputationPostOrder(execution_threads)) {
for (HloInstruction* instruction :
computation->MakeInstructionPostOrder()) {
if (instruction->IsCustomCall(
host_memory_offload_annotations::kMoveToDeviceCustomCallTarget)) {
TF_ASSIGN_OR_RETURN(bool result,
HandleMoveToDeviceCustomCall(instruction));
changed = changed || result;
}
}
}
TF_ASSIGN_OR_RETURN(bool applied_scheduling_fix,
ApplySchedulingFix(module, execution_threads));
changed = changed || applied_scheduling_fix;
  HloCSE cse(/*is_layout_sensitive=*/true);
TF_ASSIGN_OR_RETURN(bool cse_changed, cse.Run(module, execution_threads));
changed = changed || cse_changed;
return changed;
}
} | #include "xla/service/host_offloader.h"
#include <cstdint>
#include <memory>
#include <stack>
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/container/flat_hash_set.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/layout.h"
#include "xla/service/hlo_verifier.h"
#include "xla/service/host_memory_offload_annotations.h"
#include "xla/service/host_offload_legalize.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tests/verified_hlo_module.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/util.h"
#include "tsl/platform/statusor.h"
namespace m = ::xla::match;
namespace xla {
namespace {
class HostOffloaderTest : public HloTestBase {
protected:
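  // Runs HostOffloadLegalize followed by HostOffloader on an unscheduled
  // module; reports whether either pass changed it.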
absl::StatusOr<bool> RunHostOffloader(HloModule* module,
bool after_layout = false) {
TF_EXPECT_OK(verifier().Run(module).status());
if (module->has_schedule()) {
return absl::InternalError("Expected a non-scheduled module");
}
bool changed = false;
HostOffloadLegalize host_offload_legalize(Layout::kHostMemorySpace,
after_layout);
TF_ASSIGN_OR_RETURN(bool legal_changed, host_offload_legalize.Run(module));
changed |= legal_changed;
HostOffloader host_offloader(Layout::kHostMemorySpace);
TF_ASSIGN_OR_RETURN(bool offload_changed, host_offloader.Run(module));
changed |= offload_changed;
return changed;
}
void TestShapeHasMemorySpace(const Shape& shape, int64_t memory_space) {
ASSERT_TRUE(shape.has_layout());
EXPECT_EQ(shape.layout().memory_space(), memory_space);
}
bool HaveRemainingOffloadAnnotations(const HloModule* module) {
for (const HloComputation* computation : module->computations()) {
for (const HloInstruction* instruction : computation->instructions()) {
if (instruction->IsCustomCall(
{host_memory_offload_annotations::kMoveToHostCustomCallTarget,
host_memory_offload_annotations::
kMoveToDeviceCustomCallTarget})) {
return true;
}
}
}
return false;
}
};
absl::flat_hash_set<const HloInstruction*>
getInstructionsWithOpcodeFromComputation(const HloComputation* computation,
HloOpcode target_opcode) {
absl::flat_hash_set<const HloInstruction*> instructions;
for (const HloInstruction* instruction : computation->instructions()) {
if (instruction->opcode() == target_opcode) {
instructions.emplace(instruction);
}
}
return instructions;
}
TEST_F(HostOffloaderTest, BasicDusDs) {
const std::string& hlo_string = R"(
HloModule my_module
ENTRY main {
data_param = f32[1,2048,2048] parameter(0)
index_param = s32[] parameter(1)
constant_f32_0 = f32[] constant(0)
constant_s32_0 = s32[] constant(0)
broadcast = f32[2,2048,2048] broadcast(constant_f32_0), dimensions={}
offload_custom_call = f32[1,2048,2048] custom-call(data_param), custom_call_target="MoveToHost"
dynamic_update_slice = f32[2,2048,2048] dynamic-update-slice(broadcast, offload_custom_call, index_param, constant_s32_0, constant_s32_0)
dynamic_slice = f32[1,2048,2048] dynamic-slice(dynamic_update_slice, index_param, constant_s32_0, constant_s32_0), dynamic_slice_sizes={1,2048,2048}
ROOT load_custom_call = f32[1,2048,2048] custom-call(dynamic_slice), custom_call_target="MoveToDevice"
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHostOffloader(module.get()));
EXPECT_TRUE(changed);
HloInstruction* param;
HloInstruction* allocate_buffer;
HloInstruction* dynamic_update_slice;
HloInstruction* dynamic_slice;
ASSERT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::DynamicSlice(
&dynamic_slice,
m::DynamicUpdateSlice(
&dynamic_update_slice,
m::CustomCall(&allocate_buffer, {"AllocateBuffer"}),
m::Parameter(¶m, 0), m::Op(), m::Op(), m::Op()),
m::Op(), m::Op(), m::Op())));
TestShapeHasMemorySpace(param->shape(), Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(allocate_buffer->shape(), Layout::kHostMemorySpace);
TestShapeHasMemorySpace(dynamic_update_slice->shape(),
Layout::kHostMemorySpace);
TestShapeHasMemorySpace(dynamic_slice->shape(), Layout::kDefaultMemorySpace);
EXPECT_FALSE(HaveRemainingOffloadAnnotations(module.get()));
}
TEST_F(HostOffloaderTest, DusFirstOperandIsNotFromABroadcast) {
const std::string& hlo_string = R"(
HloModule my_module
ENTRY main {
data_param = f32[1,2048,2048] parameter(0)
index_param = s32[] parameter(1)
param_2 = f32[2,2048,2048] parameter(2)
constant_s32_0 = s32[] constant(0)
offload_custom_call = f32[1,2048,2048] custom-call(data_param), custom_call_target="MoveToHost"
dynamic_update_slice = f32[2,2048,2048] dynamic-update-slice(param_2, offload_custom_call, index_param, constant_s32_0, constant_s32_0)
dynamic_slice = f32[1,2048,2048] dynamic-slice(dynamic_update_slice, index_param, constant_s32_0, constant_s32_0), dynamic_slice_sizes={1,2048,2048}
ROOT load_custom_call = f32[1,2048,2048] custom-call(dynamic_slice), custom_call_target="MoveToDevice"
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
const absl::StatusOr<bool> result = RunHostOffloader(module.get());
EXPECT_FALSE(result.ok());
}
TEST_F(HostOffloaderTest, DusDsWithTupleAfterBroadcast) {
const std::string& hlo_string = R"(
HloModule my_module
ENTRY main {
data_param = f32[1,2048,2048] parameter(0)
index_param = s32[] parameter(1)
constant_f32_0 = f32[] constant(0)
constant_s32_0 = s32[] constant(0)
broadcast = f32[2,2048,2048] broadcast(constant_f32_0), dimensions={}
tuple = (f32[2,2048,2048]) tuple(broadcast)
gte = f32[2,2048,2048] get-tuple-element(tuple), index=0
offload_custom_call = f32[1,2048,2048] custom-call(data_param), custom_call_target="MoveToHost"
dynamic_update_slice = f32[2,2048,2048] dynamic-update-slice(gte, offload_custom_call, index_param, constant_s32_0, constant_s32_0)
dynamic_slice = f32[1,2048,2048] dynamic-slice(dynamic_update_slice, index_param, constant_s32_0, constant_s32_0), dynamic_slice_sizes={1,2048,2048}
ROOT load_custom_call = f32[1,2048,2048] custom-call(dynamic_slice), custom_call_target="MoveToDevice"
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHostOffloader(module.get()));
EXPECT_TRUE(changed);
HloInstruction* param;
HloInstruction* allocate_buffer;
HloInstruction* tuple;
HloInstruction* gte;
HloInstruction* dynamic_update_slice;
HloInstruction* dynamic_slice;
ASSERT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::DynamicSlice(
&dynamic_slice,
m::DynamicUpdateSlice(
&dynamic_update_slice,
m::GetTupleElement(
>e,
m::Tuple(&tuple, m::CustomCall(&allocate_buffer,
{"AllocateBuffer"})),
0),
m::Parameter(¶m, 0), m::Op(), m::Op(), m::Op()),
m::Op(), m::Op(), m::Op())));
TestShapeHasMemorySpace(param->shape(), Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(allocate_buffer->shape(), Layout::kHostMemorySpace);
TestShapeHasMemorySpace(ShapeUtil::GetSubshape(tuple->shape(), {0}),
Layout::kHostMemorySpace);
TestShapeHasMemorySpace(gte->shape(), Layout::kHostMemorySpace);
TestShapeHasMemorySpace(dynamic_update_slice->shape(),
Layout::kHostMemorySpace);
TestShapeHasMemorySpace(dynamic_slice->shape(), Layout::kDefaultMemorySpace);
EXPECT_FALSE(HaveRemainingOffloadAnnotations(module.get()));
}
TEST_F(HostOffloaderTest, DusWithoutDs) {
const std::string& hlo_string = R"(
HloModule my_module
ENTRY main {
data_param = f32[1,2048,2048] parameter(0)
index_param = s32[] parameter(1)
constant_f32_0 = f32[] constant(0)
constant_s32_0 = s32[] constant(0)
broadcast = f32[2,2048,2048] broadcast(constant_f32_0), dimensions={}
offload_custom_call = f32[1,2048,2048] custom-call(data_param), custom_call_target="MoveToHost"
dynamic_update_slice = f32[2,2048,2048] dynamic-update-slice(broadcast, offload_custom_call, index_param, constant_s32_0, constant_s32_0)
ROOT load_custom_call = f32[2,2048,2048] custom-call(dynamic_update_slice), custom_call_target="MoveToDevice"
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHostOffloader(module.get()));
EXPECT_TRUE(changed);
HloInstruction* param;
HloInstruction* allocate_buffer;
HloInstruction* dynamic_update_slice;
HloInstruction* copy;
ASSERT_THAT(
module->entry_computation()->root_instruction(),
GmockMatch(m::Copy(
&copy, m::DynamicUpdateSlice(
&dynamic_update_slice,
m::CustomCall(&allocate_buffer, {"AllocateBuffer"}),
m::Parameter(&param, 0), m::Op(), m::Op(), m::Op()))));
TestShapeHasMemorySpace(param->shape(), Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(allocate_buffer->shape(), Layout::kHostMemorySpace);
TestShapeHasMemorySpace(dynamic_update_slice->shape(),
Layout::kHostMemorySpace);
TestShapeHasMemorySpace(copy->shape(), Layout::kDefaultMemorySpace);
EXPECT_FALSE(HaveRemainingOffloadAnnotations(module.get()));
}
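// A single MoveToHost feeds both a DUS offload and a plain no-copy offload
// (via a tuple). Both paths must be rewritten, and both must trace back to
// the same parameter.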
TEST_F(HostOffloaderTest, DusAndNoCopyFromSameCustomCall) {
const std::string& hlo_string = R"(
HloModule my_module
ENTRY main {
data_param = f32[1,2048,2048] parameter(0)
index_param = s32[] parameter(1)
constant_f32_0 = f32[] constant(0)
constant_s32_0 = s32[] constant(0)
broadcast = f32[2,2048,2048] broadcast(constant_f32_0), dimensions={}
offload_custom_call = f32[1,2048,2048] custom-call(data_param), custom_call_target="MoveToHost"
dynamic_update_slice = f32[2,2048,2048] dynamic-update-slice(broadcast, offload_custom_call, index_param, constant_s32_0, constant_s32_0)
dynamic_slice = f32[1,2048,2048] dynamic-slice(dynamic_update_slice, index_param, constant_s32_0, constant_s32_0), dynamic_slice_sizes={1,2048,2048}
tuple = (f32[1,2048,2048]) tuple(offload_custom_call)
gte = f32[1,2048,2048] get-tuple-element(tuple), index=0
load_custom_call_0 = f32[1,2048,2048] custom-call(dynamic_slice), custom_call_target="MoveToDevice"
load_custom_call_1 = f32[1,2048,2048] custom-call(gte), custom_call_target="MoveToDevice"
ROOT tuple_1 = (f32[1,2048,2048], f32[1,2048,2048]) tuple(load_custom_call_0, load_custom_call_1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHostOffloader(module.get()));
EXPECT_TRUE(changed);
HloInstruction* param_match_1;
HloInstruction* param_match_2;
HloInstruction* allocate_buffer;
HloInstruction* dynamic_update_slice;
HloInstruction* dynamic_slice;
HloInstruction* copy_to_host;
HloInstruction* tuple_0;
HloInstruction* gte;
HloInstruction* copy_to_device;
HloInstruction* tuple_1;
const auto dynamic_slice_pattern = m::DynamicSlice(
&dynamic_slice,
m::DynamicUpdateSlice(&dynamic_update_slice,
m::CustomCall(&allocate_buffer, {"AllocateBuffer"}),
m::Parameter(&param_match_1, 0), m::Op(), m::Op(),
m::Op()),
m::Op(), m::Op(), m::Op());
const auto copy_pattern = m::Copy(
&copy_to_device,
m::GetTupleElement(
&gte,
m::Tuple(&tuple_0,
m::Copy(&copy_to_host, m::Parameter(&param_match_2, 0))),
0));
ASSERT_THAT(
module->entry_computation()->root_instruction(),
GmockMatch(m::Tuple(&tuple_1, dynamic_slice_pattern, copy_pattern)));
EXPECT_EQ(param_match_1, param_match_2);
TestShapeHasMemorySpace(param_match_1->shape(), Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(allocate_buffer->shape(), Layout::kHostMemorySpace);
TestShapeHasMemorySpace(dynamic_update_slice->shape(),
Layout::kHostMemorySpace);
TestShapeHasMemorySpace(dynamic_slice->shape(), Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(copy_to_host->shape(), Layout::kHostMemorySpace);
TestShapeHasMemorySpace(ShapeUtil::GetSubshape(tuple_0->shape(), {0}),
Layout::kHostMemorySpace);
TestShapeHasMemorySpace(gte->shape(), Layout::kHostMemorySpace);
TestShapeHasMemorySpace(copy_to_device->shape(), Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(ShapeUtil::GetSubshape(tuple_1->shape(), {0}),
Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(ShapeUtil::GetSubshape(tuple_1->shape(), {1}),
Layout::kDefaultMemorySpace);
EXPECT_FALSE(HaveRemainingOffloadAnnotations(module.get()));
}
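// An async custom call whose output must alias its host-resident operand:
// the async-done result should end up in host memory space as well.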
TEST_F(HostOffloaderTest, BasicAsyncCustomCallWithAliasing) {
const std::string& hlo_string = R"(
HloModule m, input_output_alias={{}: (0, {}, must-alias)},
entry_computation_layout={(f32[4096]{0:T(128)S(5)})->f32[4096]{0:T(128)S(5)}}
ENTRY %main (a: f32[4096]) -> f32[4096] {
%a = f32[4096]{0} parameter(0)
%async-start = ((f32[4096]{0}), f32[4096]{0}, u32[]) custom-call-start(%a),
custom_call_target="Foo",
output_to_operand_aliasing={{}: (0, {})}
ROOT %async-done = f32[4096]{0} custom-call-done(%async-start)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHostOffloader(module.get()));
EXPECT_TRUE(changed);
HloInstruction* async_done = FindInstruction(module.get(), "async-done");
TestShapeHasMemorySpace(async_done->shape(), Layout::kHostMemorySpace);
}
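// A host-resident entry parameter (S(5)) is transposed by a copy and then
// streamed into a while loop whose body moves it to device. The rewritten
// module must pass a layout-sensitive verifier.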
TEST_F(HostOffloaderTest, ParameterStreamingWithXposeCopyFeedingIntoWhile) {
const std::string& hlo_string = R"(
HloModule jit__prefill_impl, entry_computation_layout={(bf16[2,16,16]{2,1,0:T(8,128)(2,1)S(5)})->bf16[2,16,16]{1,2,0:T(8,128)(2,1)}}
while_condition {
condition_param = (s32[], bf16[2,16,16]{1,2,0:T(8,128)(2,1)}, bf16[2,16,16]{1,2,0:T(8,128)(2,1)}) parameter(0)
condition_current_iteration_index = s32[] get-tuple-element(condition_param), index=0
condition_iteration_count = s32[] constant(16)
ROOT condition_result = pred[] compare(condition_current_iteration_index, condition_iteration_count), direction=LT
}
while_body {
input_tuple.0 = (s32[], bf16[2,16,16]{1,2,0:T(8,128)(2,1)}, bf16[2,16,16]{1,2,0:T(8,128)(2,1)}) parameter(0)
current_iteration_index.0 = s32[] get-tuple-element(input_tuple.0), index=0
orig_data = bf16[2,16,16]{1,2,0:T(8,128)(2,1)} get-tuple-element(input_tuple.0), index=1
custom-call.0 = bf16[2,16,16]{1,2,0:T(8,128)(2,1)} custom-call(orig_data), custom_call_target="MoveToDevice"
sum = bf16[2,16,16]{1,2,0:T(8,128)(2,1)} get-tuple-element(input_tuple.0), index=2
sum.1 = bf16[2,16,16]{1,2,0:T(8,128)(2,1)} add(custom-call.0, sum)
constant_1 = s32[] constant(1)
incremented_index.0 = s32[] add(current_iteration_index.0, constant_1)
ROOT tuple_result.0 = (s32[], bf16[2,16,16]{1,2,0:T(8,128)(2,1)}, bf16[2,16,16]{1,2,0:T(8,128)(2,1)}) tuple(incremented_index.0, custom-call.0, sum.1)
}
ENTRY main {
param.0 = bf16[2,16,16]{2,1,0:T(8,128)(2,1)} parameter(0)
copy = bf16[2,16,16]{1,2,0:T(8,128)(2,1)} copy(param.0)
constant_0 = s32[] constant(0)
constant_0.0 = bf16[] constant(0.0)
broadcast = bf16[2,16,16]{1,2,0:T(8,128)(2,1)} broadcast(constant_0.0), dimensions={}
tuple_for_while = (s32[], bf16[2,16,16]{1,2,0:T(8,128)(2,1)}, bf16[2,16,16]{1,2,0:T(8,128)(2,1)}) tuple(constant_0, copy, broadcast)
while = (s32[], bf16[2,16,16]{1,2,0:T(8,128)(2,1)}, bf16[2,16,16]{1,2,0:T(8,128)(2,1)}) while(tuple_for_while), condition=while_condition, body=while_body
ROOT gte = bf16[2,16,16]{1,2,0:T(8,128)(2,1)} get-tuple-element(while), index=2
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
bool changed, RunHostOffloader(module.get(), /*after_layout=*/true));
EXPECT_TRUE(changed);
EXPECT_FALSE(HaveRemainingOffloadAnnotations(module.get()));
HloVerifier verifier(/*layout_sensitive=*/true,
/*allow_mixed_precision=*/true);
TF_EXPECT_OK(verifier.Run(module.get()).status());
VLOG(1) << "module after: " << module->ToString();
}
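// Same parameter-streaming-into-while pattern as above, but without the
// transposing copy: the host parameter feeds the while loop directly.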
TEST_F(HostOffloaderTest, ParameterStreamingFeedingIntoWhile) {
const std::string& hlo_string = R"(
HloModule jit__prefill_impl, entry_computation_layout={(bf16[2,16,16]{2,1,0:T(8,128)(2,1)S(5)})->bf16[2,16,16]{2,1,0:T(8,128)(2,1)}}
while_condition {
condition_param = (s32[], bf16[2,16,16]{2,1,0:T(8,128)(2,1)}, bf16[2,16,16]{2,1,0:T(8,128)(2,1)}) parameter(0)
condition_current_iteration_index = s32[] get-tuple-element(condition_param), index=0
condition_iteration_count = s32[] constant(16)
ROOT condition_result = pred[] compare(condition_current_iteration_index, condition_iteration_count), direction=LT
}
while_body {
input_tuple.0 = (s32[], bf16[2,16,16]{2,1,0:T(8,128)(2,1)}, bf16[2,16,16]{2,1,0:T(8,128)(2,1)}) parameter(0)
current_iteration_index.0 = s32[] get-tuple-element(input_tuple.0), index=0
orig_data = bf16[2,16,16]{2,1,0:T(8,128)(2,1)} get-tuple-element(input_tuple.0), index=1
custom-call.0 = bf16[2,16,16]{2,1,0:T(8,128)(2,1)} custom-call(orig_data), custom_call_target="MoveToDevice"
sum = bf16[2,16,16]{2,1,0:T(8,128)(2,1)} get-tuple-element(input_tuple.0), index=2
sum.1 = bf16[2,16,16]{2,1,0:T(8,128)(2,1)} add(custom-call.0, sum)
constant_1 = s32[] constant(1)
incremented_index.0 = s32[] add(current_iteration_index.0, constant_1)
ROOT tuple_result.0 = (s32[], bf16[2,16,16]{2,1,0:T(8,128)(2,1)}, bf16[2,16,16]{2,1,0:T(8,128)(2,1)}) tuple(incremented_index.0, custom-call.0, sum.1)
}
ENTRY main {
param.0 = bf16[2,16,16]{2,1,0:T(8,128)(2,1)} parameter(0)
constant_0 = s32[] constant(0)
constant_0.0 = bf16[] constant(0.0)
broadcast = bf16[2,16,16]{2,1,0:T(8,128)(2,1)} broadcast(constant_0.0), dimensions={}
tuple_for_while = (s32[], bf16[2,16,16]{2,1,0:T(8,128)(2,1)}, bf16[2,16,16]{2,1,0:T(8,128)(2,1)}) tuple(constant_0, param.0, broadcast)
while = (s32[], bf16[2,16,16]{2,1,0:T(8,128)(2,1)}, bf16[2,16,16]{2,1,0:T(8,128)(2,1)}) while(tuple_for_while), condition=while_condition, body=while_body
ROOT gte = bf16[2,16,16]{2,1,0:T(8,128)(2,1)} get-tuple-element(while), index=2
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
bool changed, RunHostOffloader(module.get(), /*after_layout=*/true));
EXPECT_TRUE(changed);
EXPECT_FALSE(HaveRemainingOffloadAnnotations(module.get()));
HloVerifier verifier(/*layout_sensitive=*/true,
/*allow_mixed_precision=*/true);
TF_EXPECT_OK(verifier.Run(module.get()).status());
VLOG(1) << "module after: " << module->ToString();
}
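// Parameter streaming in a scan-style loop: each iteration dynamic-slices the
// host-resident operand and moves the slice to device for computation.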
TEST_F(HostOffloaderTest, ParameterStreamingInScanLoop) {
const std::string& hlo_string = R"(
HloModule m,
entry_computation_layout={(f32[8,2]{0,1:T(2,128)S(5)})->(f32[]{:T(256)}, f32[8,2]{0,1:T(2,128)})},
allow_spmd_sharding_propagation_to_output={true,true}
add {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT add = f32[] add(lhs, rhs)
}
while_body {
arg_tuple.8 = (s32[]{:T(256)}, f32[8,2]{0,1:T(2,128)}, f32[8,2]{0,1:T(2,128)}) parameter(0)
get-tuple-element.9 = s32[]{:T(256)} get-tuple-element(arg_tuple.8), index=0
constant.12 = s32[]{:T(256)} constant(1)
add.29 = s32[]{:T(256)} add(get-tuple-element.9, constant.12)
get-tuple-element.10 = f32[8,2]{0,1:T(2,128)} get-tuple-element(arg_tuple.8), index=1
get-tuple-element.11 = f32[8,2]{0,1:T(2,128)} get-tuple-element(arg_tuple.8), index=2
constant.16 = s32[]{:T(256)} constant(0)
dynamic-slice.20 = f32[1,2]{0,1:T(2,128)} dynamic-slice(get-tuple-element.11, get-tuple-element.9, constant.16), dynamic_slice_sizes={1,2}
constant.1 = f32[] constant(-0)
reduce = f32[2]{0:T(256)} reduce(dynamic-slice.20, constant.1), dimensions={0}, to_apply=add
custom-call = f32[2]{0:T(256)} custom-call(reduce), custom_call_target="MoveToDevice"
constant.13 = f32[]{:T(256)} constant(1)
broadcast.14 = f32[2]{0:T(256)} broadcast(constant.13), dimensions={}
add.23 = f32[2]{0:T(256)} add(custom-call, broadcast.14)
reshape.24 = f32[1,2]{0,1:T(2,128)} reshape(add.23)
dynamic-update-slice.28 = f32[8,2]{0,1:T(2,128)} dynamic-update-slice(get-tuple-element.10, reshape.24, get-tuple-element.9, constant.16)
ROOT tuple.30 = (s32[]{:T(256)}, f32[8,2]{0,1:T(2,128)}, f32[8,2]{0,1:T(2,128)}) tuple(add.29, dynamic-update-slice.28, get-tuple-element.11)
}
condition {
arg_tuple.32 = (s32[]{:T(256)}, f32[8,2]{0,1:T(2,128)}, f32[8,2]{0,1:T(2,128)}) parameter(0)
get-tuple-element.33 = s32[]{:T(256)} get-tuple-element(arg_tuple.32), index=0
constant.36 = s32[]{:T(256)} constant(8)
ROOT compare.37 = pred[]{:T(1024)} compare(get-tuple-element.33, constant.36), direction=LT
}
ENTRY e {
constant.3 = f32[]{:T(256)} constant(1)
constant.2 = s32[]{:T(256)} constant(0)
constant.4 = f32[]{:T(256)} constant(0)
broadcast.5 = f32[8,2]{0,1:T(2,128)} broadcast(constant.4), dimensions={}
Arg_0.1 = f32[8,2]{0,1:T(2,128)} parameter(0), sharding={replicated}
tuple.6 = (s32[]{:T(256)}, f32[8,2]{0,1:T(2,128)}, f32[8,2]{0,1:T(2,128)}) tuple(constant.2, broadcast.5, Arg_0.1)
while.38 = (s32[]{:T(256)}, f32[8,2]{0,1:T(2,128)}, f32[8,2]{0,1:T(2,128)}) while(tuple.6), condition=condition, body=while_body
get-tuple-element.40 = f32[8,2]{0,1:T(2,128)} get-tuple-element(while.38), index=1
ROOT tuple.42 = (f32[]{:T(256)}, f32[8,2]{0,1:T(2,128)}) tuple(constant.3, get-tuple-element.40)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
bool changed, RunHostOffloader(module.get(), /*after_layout=*/true));
EXPECT_TRUE(changed);
EXPECT_FALSE(HaveRemainingOffloadAnnotations(module.get()));
HloVerifier verifier(/*layout_sensitive=*/true,
/*allow_mixed_precision=*/true);
TF_EXPECT_OK(verifier.Run(module.get()).status());
}
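// Output streaming in a scan-style loop: each iteration moves a computed
// slice to host and dynamic-update-slices it into the host-resident (S(5))
// output buffer.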
TEST_F(HostOffloaderTest, OutputStreamingInScanLoop) {
const std::string& hlo_string = R"(
HloModule m,
entry_computation_layout={(f32[4,1]{0,1:T(2,128)})->(f32[]{:T(256)}, f32[8,2]{0,1:T(2,128)S(5)})},
allow_spmd_sharding_propagation_to_output={true,true}
add {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT add = f32[] add(lhs, rhs)
}
while_body {
param.1 = (s32[]{:T(256)}, f32[8,2]{0,1:T(2,128)}, f32[4,1]{0,1:T(2,128)}) parameter(0)
get-tuple-element.1 = s32[]{:T(256)} get-tuple-element(param.1), index=0
constant.9 = s32[]{:T(256)} constant(1)
add.1 = s32[]{:T(256)} add(get-tuple-element.1, constant.9)
get-tuple-element.2 = f32[8,2]{0,1:T(2,128)} get-tuple-element(param.1), index=1
get-tuple-element.3 = f32[4,1]{0,1:T(2,128)} get-tuple-element(param.1), index=2
bitcast = f32[1,4,1]{1,2,0:T(2,128)} bitcast(get-tuple-element.3)
all-gather.2 = f32[4,4,1]{1,2,0:T(2,128)} all-gather(bitcast), channel_id=2, replica_groups={{0,1,2,3}}, dimensions={0}, use_global_device_ids=true
constant.20 = f32[] constant(-0)
reduce = f32[4,4]{1,0:T(4,128)} reduce(all-gather.2, constant.20), dimensions={2}, to_apply=add
bitcast.1 = f32[2,4,2,1]{1,2,0,3:T(2,128)} bitcast(reduce)
copy.1 = f32[2,4,2,1]{1,0,2,3:T(2,128)} copy(bitcast.1)
reshape.6 = f32[8,2]{0,1:T(2,128)} reshape(copy.1)
constant.10 = s32[]{:T(256)} constant(0)
dynamic-slice.0 = f32[1,2]{0,1:T(2,128)} dynamic-slice(reshape.6, get-tuple-element.1, constant.10), dynamic_slice_sizes={1,2}
constant.11 = f32[]{:T(256)} constant(1)
broadcast.4 = f32[1,2]{0,1:T(2,128)} broadcast(constant.11), dimensions={}
add.2 = f32[1,2]{0,1:T(2,128)} add(dynamic-slice.0, broadcast.4)
reduce.1 = f32[2]{0:T(256)} reduce(add.2, constant.20), dimensions={0}, to_apply=add
custom-call.1 = f32[2]{0:T(256)} custom-call(reduce.1), custom_call_target="MoveToHost"
reshape.8 = f32[1,2]{0,1:T(2,128)} reshape(custom-call.1)
dynamic-update-slice.0 = f32[8,2]{0,1:T(2,128)} dynamic-update-slice(get-tuple-element.2, reshape.8, get-tuple-element.1, constant.10)
ROOT tuple = (s32[]{:T(256)}, f32[8,2]{0,1:T(2,128)}, f32[4,1]{0,1:T(2,128)}) tuple(add.1, dynamic-update-slice.0, get-tuple-element.3)
}
condition {
param = (s32[]{:T(256)}, f32[8,2]{0,1:T(2,128)}, f32[4,1]{0,1:T(2,128)}) parameter(0)
get-tuple-element = s32[]{:T(256)} get-tuple-element(param), index=0
constant.8 = s32[]{:T(256)} constant(8)
ROOT compare.0 = pred[]{:T(1024)} compare(get-tuple-element, constant.8), direction=LT
}
ENTRY e {
constant.17 = f32[]{:T(256)} constant(1)
constant.18 = s32[]{:T(256)} constant(0)
constant.19 = f32[]{:T(256)} constant(0)
broadcast.6 = f32[8,2]{0,1:T(2,128)} broadcast(constant.19), dimensions={}
param.2 = f32[4,1]{0,1:T(2,128)} parameter(0), sharding={devices=[2,2]<=[4]}
tuple.1 = (s32[]{:T(256)}, f32[8,2]{0,1:T(2,128)}, f32[4,1]{0,1:T(2,128)}) tuple(constant.18, broadcast.6, param.2)
while = (s32[]{:T(256)}, f32[8,2]{0,1:T(2,128)}, f32[4,1]{0,1:T(2,128)}) while(tuple.1), condition=condition, body=while_body
get-tuple-element.4 = f32[8,2]{0,1:T(2,128)} get-tuple-element(while), index=1
ROOT tuple.2 = (f32[]{:T(256)}, f32[8,2]{0,1:T(2,128)}) tuple(constant.17, get-tuple-element.4)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
bool changed, RunHostOffloader(module.get(), /*after_layout=*/true));
EXPECT_TRUE(changed);
EXPECT_FALSE(HaveRemainingOffloadAnnotations(module.get()));
HloVerifier verifier(/*layout_sensitive=*/true,
/*allow_mixed_precision=*/true);
TF_EXPECT_OK(verifier.Run(module.get()).status());
}
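// The simplest offload: MoveToHost immediately followed by MoveToDevice.
// This should lower to a copy into host memory and a copy back to device.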
TEST_F(HostOffloaderTest, BasicNoCopy) {
const std::string& hlo_string = R"(
HloModule my_module
ENTRY main {
data_param = f32[2048] parameter(0)
offload_custom_call = f32[2048] custom-call(data_param), custom_call_target="MoveToHost"
ROOT load_custom_call = f32[2048] custom-call(offload_custom_call), custom_call_target="MoveToDevice"
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHostOffloader(module.get()));
EXPECT_TRUE(changed);
HloInstruction* param;
HloInstruction* copy_to_host;
HloInstruction* copy_to_device;
ASSERT_THAT(
module->entry_computation()->root_instruction(),
GmockMatch(m::Copy(&copy_to_device,
m::Copy(&copy_to_host, m::Parameter(&param, 0)))));
TestShapeHasMemorySpace(param->shape(), Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(copy_to_host->shape(), Layout::kHostMemorySpace);
TestShapeHasMemorySpace(copy_to_device->shape(), Layout::kDefaultMemorySpace);
EXPECT_FALSE(HaveRemainingOffloadAnnotations(module.get()));
}
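// The offloaded value passes through a tuple/get-tuple-element pair before
// MoveToDevice. Only the offloaded tuple element should be in host memory;
// the other element stays on device.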
TEST_F(HostOffloaderTest, NoCopyThroughTuple) {
const std::string& hlo_string = R"(
HloModule my_module
ENTRY main {
data_param = f32[2048] parameter(0)
other_param = f32[2048] parameter(1)
offload_custom_call = f32[2048] custom-call(data_param), custom_call_target="MoveToHost"
tuple = (f32[2048], f32[2048]) tuple(offload_custom_call, other_param)
gte_0 = f32[2048] get-tuple-element(tuple), index=0
gte_1 = f32[2048] get-tuple-element(tuple), index=1
ROOT load_custom_call = f32[2048] custom-call(gte_0), custom_call_target="MoveToDevice"
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHostOffloader(module.get()));
EXPECT_TRUE(changed);
HloInstruction* param;
HloInstruction* copy_to_host;
HloInstruction* tuple;
HloInstruction* gte;
HloInstruction* copy_to_device;
ASSERT_THAT(
module->entry_computation()->root_instruction(),
GmockMatch(m::Copy(
&copy_to_device,
m::GetTupleElement(
&gte,
m::Tuple(&tuple, m::Copy(&copy_to_host, m::Parameter(&param, 0)),
m::Op()),
0))));
TestShapeHasMemorySpace(param->shape(), Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(copy_to_host->shape(), Layout::kHostMemorySpace);
TestShapeHasMemorySpace(ShapeUtil::GetSubshape(tuple->shape(), {0}),
Layout::kHostMemorySpace);
TestShapeHasMemorySpace(ShapeUtil::GetSubshape(tuple->shape(), {1}),
Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(gte->shape(), Layout::kHostMemorySpace);
TestShapeHasMemorySpace(copy_to_device->shape(), Layout::kDefaultMemorySpace);
EXPECT_FALSE(HaveRemainingOffloadAnnotations(module.get()));
}
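// As above, but with nested tuples: the host memory space must propagate
// through both tuple levels and the chained get-tuple-elements.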
TEST_F(HostOffloaderTest, NoCopyThroughNestedTuple) {
const std::string& hlo_string = R"(
HloModule my_module
ENTRY main {
data_param = f32[2048] parameter(0)
other_param_0 = f32[2048] parameter(1)
other_param_1 = f32[2048] parameter(2)
offload_custom_call = f32[2048] custom-call(data_param), custom_call_target="MoveToHost"
tuple_0 = (f32[2048], f32[2048]) tuple(offload_custom_call, other_param_0)
tuple_1 = ((f32[2048], f32[2048]), f32[2048]) tuple(tuple_0, other_param_1)
gte_0 = (f32[2048], f32[2048]) get-tuple-element(tuple_1), index=0
gte_1 = f32[2048] get-tuple-element(tuple_1), index=1
gte_2 = f32[2048] get-tuple-element(gte_0), index=0
gte_3 = f32[2048] get-tuple-element(gte_0), index=1
ROOT load_custom_call = f32[2048] custom-call(gte_2), custom_call_target="MoveToDevice"
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHostOffloader(module.get()));
EXPECT_TRUE(changed);
HloInstruction* param;
HloInstruction* copy_to_host;
HloInstruction* tuple_0;
HloInstruction* gte_0;
HloInstruction* tuple_1;
HloInstruction* gte_1;
HloInstruction* copy_to_device;
const auto copy_param_pattern =
m::Copy(&copy_to_host, m::Parameter(&param, 0));
const auto tuple_of_tuple_pattern = m::Tuple(
&tuple_1, m::Tuple(&tuple_0, copy_param_pattern, m::Op()), m::Op());
const auto gte_of_gte_pattern = m::GetTupleElement(
&gte_1, m::GetTupleElement(&gte_0, tuple_of_tuple_pattern, 0), 0);
ASSERT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Copy(&copy_to_device, gte_of_gte_pattern)));
TestShapeHasMemorySpace(param->shape(), Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(copy_to_host->shape(), Layout::kHostMemorySpace);
TestShapeHasMemorySpace(ShapeUtil::GetSubshape(tuple_0->shape(), {0}),
Layout::kHostMemorySpace);
TestShapeHasMemorySpace(ShapeUtil::GetSubshape(tuple_0->shape(), {1}),
Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(ShapeUtil::GetSubshape(gte_0->shape(), {0}),
Layout::kHostMemorySpace);
TestShapeHasMemorySpace(ShapeUtil::GetSubshape(gte_0->shape(), {1}),
Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(ShapeUtil::GetSubshape(tuple_1->shape(), {0, 0}),
Layout::kHostMemorySpace);
TestShapeHasMemorySpace(ShapeUtil::GetSubshape(tuple_1->shape(), {0, 1}),
Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(ShapeUtil::GetSubshape(tuple_1->shape(), {1}),
Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(gte_1->shape(), Layout::kHostMemorySpace);
TestShapeHasMemorySpace(copy_to_device->shape(), Layout::kDefaultMemorySpace);
EXPECT_FALSE(HaveRemainingOffloadAnnotations(module.get()));
}
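// The offloaded value flows through a call. The callee's parameter and root,
// as well as the call itself, must be rewritten to host memory space.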
TEST_F(HostOffloaderTest, NoCopyThroughComputation) {
const std::string& hlo_string = R"(
HloModule my_module
other_computation {
ROOT param = f32[2048] parameter(0)
}
ENTRY main {
data_param = f32[2048] parameter(0)
offload_custom_call = f32[2048] custom-call(data_param), custom_call_target="MoveToHost"
call = f32[2048] call(offload_custom_call), to_apply=other_computation
ROOT load_custom_call = f32[2048] custom-call(call), custom_call_target="MoveToDevice"
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHostOffloader(module.get()));
EXPECT_TRUE(changed);
HloInstruction* param;
HloInstruction* copy_to_host;
HloInstruction* call;
HloInstruction* copy_to_device;
ASSERT_THAT(
module->entry_computation()->root_instruction(),
GmockMatch(m::Copy(
&copy_to_device,
m::Call(&call, m::Copy(&copy_to_host, m::Parameter(&param, 0))))));
TestShapeHasMemorySpace(param->shape(), Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(copy_to_host->shape(), Layout::kHostMemorySpace);
TestShapeHasMemorySpace(call->shape(), Layout::kHostMemorySpace);
TestShapeHasMemorySpace(copy_to_device->shape(), Layout::kDefaultMemorySpace);
ASSERT_THAT(call->called_computations(), ::testing::SizeIs(1));
HloComputation* called_computation = call->called_computations()[0];
HloInstruction* called_computation_param;
ASSERT_THAT(called_computation->root_instruction(),
GmockMatch(m::Parameter(&called_computation_param, 0)));
TestShapeHasMemorySpace(called_computation_param->shape(),
Layout::kHostMemorySpace);
EXPECT_FALSE(HaveRemainingOffloadAnnotations(module.get()));
}
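// The offloaded value passes through a called computation that returns it
// inside a tuple. Only the offloaded element of the callee's result tuple
// should live in host memory.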
TEST_F(HostOffloaderTest, NoCopyThroughComputationAndTuple) {
const std::string& hlo_string = R"(
HloModule my_module
other_computation {
param_0 = f32[2048] parameter(0)
param_1 = f32[2048] parameter(1)
ROOT tuple = (f32[2048], f32[2048]) tuple(param_0, param_1)
}
ENTRY main {
data_param = f32[2048] parameter(0)
other_param = f32[2048] parameter(1)
offload_custom_call = f32[2048] custom-call(data_param), custom_call_target="MoveToHost"
call = (f32[2048], f32[2048]) call(offload_custom_call, other_param), to_apply=other_computation
gte_0 = f32[2048] get-tuple-element(call), index=0
gte_1 = f32[2048] get-tuple-element(call), index=1
ROOT load_custom_call = f32[2048] custom-call(gte_0), custom_call_target="MoveToDevice"
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHostOffloader(module.get()));
EXPECT_TRUE(changed);
HloInstruction* param;
HloInstruction* copy_to_host;
HloInstruction* call;
HloInstruction* gte;
HloInstruction* copy_to_device;
ASSERT_THAT(
module->entry_computation()->root_instruction(),
GmockMatch(m::Copy(
&copy_to_device,
m::GetTupleElement(
&gte,
m::Call(&call, m::Copy(&copy_to_host, m::Parameter(&param, 0)),
m::Op())))));
TestShapeHasMemorySpace(param->shape(), Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(copy_to_host->shape(), Layout::kHostMemorySpace);
TestShapeHasMemorySpace(ShapeUtil::GetSubshape(call->shape(), {0}),
Layout::kHostMemorySpace);
TestShapeHasMemorySpace(ShapeUtil::GetSubshape(call->shape(), {1}),
Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(gte->shape(), Layout::kHostMemorySpace);
TestShapeHasMemorySpace(copy_to_device->shape(), Layout::kDefaultMemorySpace);
EXPECT_THAT(call->called_computations(), ::testing::SizeIs(1));
HloComputation* called_computation = call->called_computations()[0];
HloInstruction* called_computation_param_0;
HloInstruction* called_computation_param_1;
HloInstruction* tuple;
ASSERT_THAT(
called_computation->root_instruction(),
GmockMatch(m::Tuple(&tuple, m::Parameter(&called_computation_param_0, 0),
m::Parameter(&called_computation_param_1, 1))));
TestShapeHasMemorySpace(called_computation_param_0->shape(),
Layout::kHostMemorySpace);
TestShapeHasMemorySpace(called_computation_param_1->shape(),
Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(ShapeUtil::GetSubshape(tuple->shape(), {0}),
Layout::kHostMemorySpace);
TestShapeHasMemorySpace(ShapeUtil::GetSubshape(tuple->shape(), {1}),
Layout::kDefaultMemorySpace);
EXPECT_FALSE(HaveRemainingOffloadAnnotations(module.get()));
}
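// The offloaded value is the loop-carried state of a while. The while result
// plus the body's parameter/root and the condition's parameter must all be
// placed in host memory.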
TEST_F(HostOffloaderTest, NoCopyThroughWhile) {
const std::string& hlo_string = R"(
HloModule my_module
while_body {
ROOT param = f32[2048] parameter(0)
}
while_condition {
param = f32[2048] parameter(0)
constant_0 = s32[] constant(0)
constant_1 = s32[] constant(1)
ROOT pred_result = pred[] compare(constant_1, constant_0), direction=LT
}
ENTRY main {
data_param = f32[2048] parameter(0)
offload_custom_call = f32[2048] custom-call(data_param), custom_call_target="MoveToHost"
while = f32[2048] while(offload_custom_call), condition=while_condition, body=while_body
ROOT load_custom_call = f32[2048] custom-call(while), custom_call_target="MoveToDevice"
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHostOffloader(module.get()));
EXPECT_TRUE(changed);
HloInstruction* param;
HloInstruction* copy_to_host;
HloInstruction* while_instr;
HloInstruction* copy_to_device;
ASSERT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Copy(
&copy_to_device,
m::While(&while_instr,
m::Copy(&copy_to_host, m::Parameter(&param, 0))))));
TestShapeHasMemorySpace(param->shape(), Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(copy_to_host->shape(), Layout::kHostMemorySpace);
TestShapeHasMemorySpace(while_instr->shape(), Layout::kHostMemorySpace);
TestShapeHasMemorySpace(copy_to_device->shape(), Layout::kDefaultMemorySpace);
HloComputation* while_condition = while_instr->while_condition();
ASSERT_THAT(while_condition->parameter_instructions(), ::testing::SizeIs(1));
TestShapeHasMemorySpace(while_condition->parameter_instruction(0)->shape(),
Layout::kHostMemorySpace);
HloInstruction* while_body_param;
HloComputation* while_body = while_instr->while_body();
ASSERT_THAT(while_body->root_instruction(),
GmockMatch(m::Parameter(&while_body_param, 0)));
TestShapeHasMemorySpace(while_body_param->shape(), Layout::kHostMemorySpace);
EXPECT_FALSE(HaveRemainingOffloadAnnotations(module.get()));
}
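// The offloaded value crosses an optimization barrier inside a tuple. Host
// memory space must propagate through the tuple, the opt-barrier, and the
// get-tuple-element.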
TEST_F(HostOffloaderTest, NoCopyWithOptBarrier) {
const std::string& hlo_string = R"(
HloModule my_module
ENTRY main {
data_param = f32[2048] parameter(0)
offload_custom_call = f32[2048] custom-call(data_param), custom_call_target="MoveToHost"
tuple = (f32[2048]) tuple(offload_custom_call)
opt_barrier = (f32[2048]) opt-barrier(tuple)
get_tuple_element = f32[2048] get-tuple-element(opt_barrier), index=0
ROOT load_custom_call = f32[2048] custom-call(get_tuple_element), custom_call_target="MoveToDevice"
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHostOffloader(module.get()));
EXPECT_TRUE(changed);
HloInstruction* param;
HloInstruction* copy_to_host;
HloInstruction* tuple;
HloInstruction* opt_barrier;
HloInstruction* gte;
HloInstruction* copy_to_device;
ASSERT_THAT(
module->entry_computation()->root_instruction(),
GmockMatch(m::Copy(
&copy_to_device,
m::GetTupleElement(
&gte, m::OptimizationBarrier(
&opt_barrier,
m::Tuple(&tuple, m::Copy(&copy_to_host,
m::Parameter(&param, 0))))))));
TestShapeHasMemorySpace(param->shape(), Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(copy_to_host->shape(), Layout::kHostMemorySpace);
TestShapeHasMemorySpace(ShapeUtil::GetSubshape(tuple->shape(), {0}),
Layout::kHostMemorySpace);
TestShapeHasMemorySpace(ShapeUtil::GetSubshape(opt_barrier->shape(), {0}),
Layout::kHostMemorySpace);
TestShapeHasMemorySpace(gte->shape(), Layout::kHostMemorySpace);
TestShapeHasMemorySpace(copy_to_device->shape(), Layout::kDefaultMemorySpace);
EXPECT_FALSE(HaveRemainingOffloadAnnotations(module.get()));
}
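// One MoveToHost value is consumed by two MoveToDevice users across an
// optimization barrier. Each tuple element gets its own host copy, and each
// consumer gets its own copy back to device.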
TEST_F(HostOffloaderTest, NoCopyMultipleToDevice) {
const std::string& hlo_string = R"(
HloModule my_module
ENTRY main {
constant = f32[] constant(0)
custom_call_0 = f32[] custom-call(constant), custom_call_target="MoveToHost"
tuple_0 = (f32[], f32[]) tuple(custom_call_0, custom_call_0)
opt_barrier = (f32[], f32[]) opt-barrier(tuple_0)
gte_0 = f32[] get-tuple-element(opt_barrier), index=0
custom_call_1 = f32[] custom-call(gte_0), custom_call_target="MoveToDevice"
gte_1 = f32[] get-tuple-element(opt_barrier), index=1
custom_call_2 = f32[] custom-call(gte_1), custom_call_target="MoveToDevice"
ROOT tuple_1 = (f32[], f32[]) tuple(custom_call_1, custom_call_2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHostOffloader(module.get()));
EXPECT_TRUE(changed);
HloInstruction* constant;
HloInstruction* copy_to_host_1;
HloInstruction* copy_to_host_2;
HloInstruction* tuple_1;
HloInstruction* opt_barrier;
HloInstruction* gte_1;
HloInstruction* copy_to_device_1;
HloInstruction* gte_2;
HloInstruction* copy_to_device_2;
HloInstruction* tuple_2;
const auto constant_pattern = m::ConstantScalar(&constant, 0);
const auto opt_barrier_pattern = m::OptimizationBarrier(
&opt_barrier,
m::Tuple(&tuple_1, m::Copy(&copy_to_host_1, constant_pattern),
m::Copy(&copy_to_host_2, constant_pattern)));
ASSERT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Tuple(
&tuple_2,
m::Copy(&copy_to_device_1,
m::GetTupleElement(&gte_1, opt_barrier_pattern)),
m::Copy(&copy_to_device_2,
m::GetTupleElement(&gte_2, opt_barrier_pattern)))));
TestShapeHasMemorySpace(constant->shape(), Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(copy_to_host_1->shape(), Layout::kHostMemorySpace);
TestShapeHasMemorySpace(copy_to_host_2->shape(), Layout::kHostMemorySpace);
TestShapeHasMemorySpace(ShapeUtil::GetSubshape(tuple_1->shape(), {0}),
Layout::kHostMemorySpace);
TestShapeHasMemorySpace(ShapeUtil::GetSubshape(tuple_1->shape(), {1}),
Layout::kHostMemorySpace);
TestShapeHasMemorySpace(ShapeUtil::GetSubshape(opt_barrier->shape(), {0}),
Layout::kHostMemorySpace);
TestShapeHasMemorySpace(ShapeUtil::GetSubshape(opt_barrier->shape(), {1}),
Layout::kHostMemorySpace);
TestShapeHasMemorySpace(gte_1->shape(), Layout::kHostMemorySpace);
TestShapeHasMemorySpace(copy_to_device_1->shape(),
Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(gte_2->shape(), Layout::kHostMemorySpace);
TestShapeHasMemorySpace(copy_to_device_2->shape(),
Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(ShapeUtil::GetSubshape(tuple_2->shape(), {0}),
Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(ShapeUtil::GetSubshape(tuple_2->shape(), {1}),
Layout::kDefaultMemorySpace);
EXPECT_FALSE(HaveRemainingOffloadAnnotations(module.get()));
}
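// A larger opt-barrier case resembling activation offloading: three
// intermediate values are moved to host, carried across the barrier together
// with a device-resident scalar, and moved back to device where they are
// multiplied together.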
TEST_F(HostOffloaderTest, NoCopyWithOptBarrierMoreElaborate) {
const std::string& hlo_string = R"(
HloModule jit_f, entry_computation_layout={(f32[16]{0})->f32[16]{0}}
ENTRY main.24 {
Arg_0.1 = f32[16]{0} parameter(0), sharding={devices=[2]<=[2]}
cosine.4 = f32[16]{0} cosine(Arg_0.1)
custom-call.5 = f32[16]{0} custom-call(cosine.4), custom_call_target="MoveToHost"
sine.3 = f32[16]{0} sine(Arg_0.1)
cosine.7 = f32[16]{0} cosine(sine.3)
custom-call.8 = f32[16]{0} custom-call(cosine.7), custom_call_target="MoveToHost"
sine.6 = f32[16]{0} sine(sine.3)
cosine.9 = f32[16]{0} cosine(sine.6)
custom-call.10 = f32[16]{0} custom-call(cosine.9), custom_call_target="MoveToHost"
constant.2 = f32[] constant(1)
tuple.11 = (f32[16]{0}, f32[16]{0}, f32[16]{0}, f32[]) tuple(custom-call.5, custom-call.8, custom-call.10, constant.2)
opt-barrier.12 = (f32[16]{0}, f32[16]{0}, f32[16]{0}, f32[]) opt-barrier(tuple.11)
get-tuple-element.16 = f32[] get-tuple-element(opt-barrier.12), index=3
broadcast.20 = f32[16]{0} broadcast(get-tuple-element.16), dimensions={}
get-tuple-element.15 = f32[16]{0} get-tuple-element(opt-barrier.12), index=2
custom-call.19 = f32[16]{0} custom-call(get-tuple-element.15), custom_call_target="MoveToDevice"
multiply.21 = f32[16]{0} multiply(broadcast.20, custom-call.19)
get-tuple-element.14 = f32[16]{0} get-tuple-element(opt-barrier.12), index=1
custom-call.18 = f32[16]{0} custom-call(get-tuple-element.14), custom_call_target="MoveToDevice"
multiply.22 = f32[16]{0} multiply(multiply.21, custom-call.18)
get-tuple-element.13 = f32[16]{0} get-tuple-element(opt-barrier.12), index=0
custom-call.17 = f32[16]{0} custom-call(get-tuple-element.13), custom_call_target="MoveToDevice"
ROOT multiply.23 = f32[16]{0} multiply(multiply.22, custom-call.17)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHostOffloader(module.get()));
EXPECT_TRUE(changed);
HloInstruction* param;
HloInstruction* constant;
HloInstruction* sine_0;
HloInstruction* sine_1;
HloInstruction* cosine_0;
HloInstruction* cosine_1;
HloInstruction* cosine_2;
HloInstruction* copy_to_host_0;
HloInstruction* copy_to_host_1;
HloInstruction* copy_to_host_2;
HloInstruction* tuple;
HloInstruction* opt_barrier;
HloInstruction* gte_0;
HloInstruction* gte_1;
HloInstruction* gte_2;
HloInstruction* gte_3;
HloInstruction* broadcast;
HloInstruction* copy_to_device_0;
HloInstruction* copy_to_device_1;
HloInstruction* copy_to_device_2;
HloInstruction* multiply_0;
HloInstruction* multiply_1;
HloInstruction* multiply_2;
auto parameter_matcher = m::Parameter(&param, 0);
auto first_sine_matcher = m::Op(&sine_0)
.WithOpcode(xla::HloOpcode::kSin)
.WithOperand(0, parameter_matcher);
auto opt_barrier_matcher = m::OptimizationBarrier(
&opt_barrier,
m::Tuple(
&tuple,
m::Copy(&copy_to_host_0, m::Op(&cosine_0)
.WithOpcode(xla::HloOpcode::kCos)
.WithOperand(0, parameter_matcher)),
m::Copy(&copy_to_host_1, m::Op(&cosine_1)
.WithOpcode(xla::HloOpcode::kCos)
.WithOperand(0, first_sine_matcher)),
m::Copy(&copy_to_host_2,
m::Op(&cosine_2)
.WithOpcode(xla::HloOpcode::kCos)
.WithOperand(0, m::Op(&sine_1)
.WithOpcode(xla::HloOpcode::kSin)
.WithOperand(0, first_sine_matcher))),
m::Constant(&constant)));
ASSERT_THAT(
module->entry_computation()->root_instruction(),
GmockMatch(m::Multiply(
&multiply_0,
m::Multiply(
&multiply_1,
m::Multiply(
&multiply_2,
m::Broadcast(&broadcast, m::GetTupleElement(
&gte_3, opt_barrier_matcher, 3)),
m::Copy(&copy_to_device_2,
m::GetTupleElement(&gte_2, opt_barrier_matcher, 2))),
m::Copy(&copy_to_device_1,
m::GetTupleElement(&gte_1, opt_barrier_matcher, 1))),
m::Copy(&copy_to_device_0,
m::GetTupleElement(&gte_0, opt_barrier_matcher, 0)))));
TestShapeHasMemorySpace(param->shape(), Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(constant->shape(), Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(sine_0->shape(), Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(sine_1->shape(), Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(cosine_0->shape(), Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(cosine_1->shape(), Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(cosine_2->shape(), Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(copy_to_host_0->shape(), Layout::kHostMemorySpace);
TestShapeHasMemorySpace(copy_to_host_1->shape(), Layout::kHostMemorySpace);
TestShapeHasMemorySpace(copy_to_host_2->shape(), Layout::kHostMemorySpace);
TestShapeHasMemorySpace(ShapeUtil::GetSubshape(tuple->shape(), {0}),
Layout::kHostMemorySpace);
TestShapeHasMemorySpace(ShapeUtil::GetSubshape(tuple->shape(), {1}),
Layout::kHostMemorySpace);
TestShapeHasMemorySpace(ShapeUtil::GetSubshape(tuple->shape(), {2}),
Layout::kHostMemorySpace);
TestShapeHasMemorySpace(ShapeUtil::GetSubshape(tuple->shape(), {3}),
Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(ShapeUtil::GetSubshape(opt_barrier->shape(), {0}),
Layout::kHostMemorySpace);
TestShapeHasMemorySpace(ShapeUtil::GetSubshape(opt_barrier->shape(), {1}),
Layout::kHostMemorySpace);
TestShapeHasMemorySpace(ShapeUtil::GetSubshape(opt_barrier->shape(), {2}),
Layout::kHostMemorySpace);
TestShapeHasMemorySpace(ShapeUtil::GetSubshape(opt_barrier->shape(), {3}),
Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(gte_0->shape(), Layout::kHostMemorySpace);
TestShapeHasMemorySpace(gte_1->shape(), Layout::kHostMemorySpace);
TestShapeHasMemorySpace(gte_2->shape(), Layout::kHostMemorySpace);
TestShapeHasMemorySpace(gte_3->shape(), Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(broadcast->shape(), Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(copy_to_device_0->shape(),
Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(copy_to_device_1->shape(),
Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(copy_to_device_2->shape(),
Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(multiply_0->shape(), Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(multiply_1->shape(), Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(multiply_2->shape(), Layout::kDefaultMemorySpace);
EXPECT_FALSE(HaveRemainingOffloadAnnotations(module.get()));
}
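// The parameter feeding MoveToHost also has an ordinary on-device user
// (sine). The device use keeps reading the parameter directly, while the
// offload path becomes a host copy followed by a device copy.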
TEST_F(HostOffloaderTest, NoCopyMultipleUsers) {
const std::string& hlo_string = R"(
HloModule my_module
ENTRY main {
data_param = f32[2048] parameter(0)
offload_custom_call = f32[2048] custom-call(data_param), custom_call_target="MoveToHost"
sine = f32[2048] sine(data_param)
load_custom_call = f32[2048] custom-call(offload_custom_call), custom_call_target="MoveToDevice"
ROOT add = f32[2048] add(sine, load_custom_call)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHostOffloader(module.get()));
EXPECT_TRUE(changed);
HloInstruction* param;
HloInstruction* sine;
HloInstruction* copy_to_host;
HloInstruction* copy_to_device;
HloInstruction* add;
const auto param_pattern = m::Parameter(&param, 0);
ASSERT_THAT(
module->entry_computation()->root_instruction(),
GmockMatch(m::Add(
&add, m::Sin(&sine, param_pattern),
m::Copy(&copy_to_device, m::Copy(&copy_to_host, param_pattern)))));
TestShapeHasMemorySpace(param->shape(), Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(sine->shape(), Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(copy_to_host->shape(), Layout::kHostMemorySpace);
TestShapeHasMemorySpace(copy_to_device->shape(), Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(add->shape(), Layout::kDefaultMemorySpace);
EXPECT_FALSE(HaveRemainingOffloadAnnotations(module.get()));
}
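// The broadcast anchoring the DUS offload also has an unrelated device user
// (tanh). The DUS path gets a host AllocateBuffer, while tanh keeps
// consuming a device-resident broadcast.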
TEST_F(HostOffloaderTest, BasicDusDsWithMultipleBroadcastUsers) {
const std::string& hlo_string = R"(
HloModule my_module
ENTRY main {
data_param = f32[1,2048,2048] parameter(0)
index_param = s32[] parameter(1)
constant_f32_0 = f32[] constant(0)
constant_s32_0 = s32[] constant(0)
broadcast = f32[2,2048,2048] broadcast(constant_f32_0), dimensions={}
tanh = f32[2,2048,2048] tanh(broadcast)
offload_custom_call = f32[1,2048,2048] custom-call(data_param), custom_call_target="MoveToHost"
dynamic_update_slice = f32[2,2048,2048] dynamic-update-slice(broadcast, offload_custom_call, index_param, constant_s32_0, constant_s32_0)
dynamic_slice = f32[1,2048,2048] dynamic-slice(dynamic_update_slice, index_param, constant_s32_0, constant_s32_0), dynamic_slice_sizes={1,2048,2048}
ROOT load_custom_call = f32[1,2048,2048] custom-call(dynamic_slice), custom_call_target="MoveToDevice"
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHostOffloader(module.get()));
EXPECT_TRUE(changed);
HloInstruction* param;
HloInstruction* allocate_buffer;
HloInstruction* dynamic_update_slice;
HloInstruction* dynamic_slice;
ASSERT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::DynamicSlice(
&dynamic_slice,
m::DynamicUpdateSlice(
&dynamic_update_slice,
m::CustomCall(&allocate_buffer, {"AllocateBuffer"}),
m::Parameter(&param, 0), m::Op(), m::Op(), m::Op()),
m::Op(), m::Op(), m::Op())));
TestShapeHasMemorySpace(param->shape(), Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(allocate_buffer->shape(), Layout::kHostMemorySpace);
TestShapeHasMemorySpace(dynamic_update_slice->shape(),
Layout::kHostMemorySpace);
TestShapeHasMemorySpace(dynamic_slice->shape(), Layout::kDefaultMemorySpace);
EXPECT_FALSE(HaveRemainingOffloadAnnotations(module.get()));
HloInstruction* tanh = nullptr;
for (HloInstruction* instruction :
module->entry_computation()->instructions()) {
if (instruction->opcode() == HloOpcode::kTanh) {
tanh = instruction;
break;
}
}
ASSERT_NE(tanh, nullptr);
HloInstruction* broadcast;
EXPECT_THAT(tanh, GmockMatch(m::Tanh(m::Broadcast(&broadcast))));
TestShapeHasMemorySpace(broadcast->shape(), Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(tanh->shape(), Layout::kDefaultMemorySpace);
}
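// A bitcast sits between MoveToHost and the dynamic-update-slice. The
// bitcast stays on device; the DUS and its AllocateBuffer destination are in
// host memory.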
TEST_F(HostOffloaderTest, BasicDusDsBitcastBeforeDus) {
const std::string& hlo_string = R"(
HloModule my_module
ENTRY main {
data_param = f32[2048,2048] parameter(0)
index_param = s32[] parameter(1)
constant_f32_0 = f32[] constant(0)
constant_s32_0 = s32[] constant(0)
broadcast = f32[2,2048,2048] broadcast(constant_f32_0), dimensions={}
offload_custom_call = f32[2048,2048] custom-call(data_param), custom_call_target="MoveToHost"
bitcast = f32[1,2048,2048] bitcast(offload_custom_call)
dynamic_update_slice = f32[2,2048,2048] dynamic-update-slice(broadcast, bitcast, index_param, constant_s32_0, constant_s32_0)
dynamic_slice = f32[1,2048,2048] dynamic-slice(dynamic_update_slice, index_param, constant_s32_0, constant_s32_0), dynamic_slice_sizes={1,2048,2048}
ROOT load_custom_call = f32[1,2048,2048] custom-call(dynamic_slice), custom_call_target="MoveToDevice"
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHostOffloader(module.get()));
EXPECT_TRUE(changed);
HloInstruction* param;
HloInstruction* bitcast;
HloInstruction* allocate_buffer;
HloInstruction* dynamic_update_slice;
HloInstruction* dynamic_slice;
ASSERT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::DynamicSlice(
&dynamic_slice,
m::DynamicUpdateSlice(
&dynamic_update_slice,
m::CustomCall(&allocate_buffer, {"AllocateBuffer"}),
m::Bitcast(&bitcast, m::Parameter(&param, 0)), m::Op(),
m::Op(), m::Op()),
m::Op(), m::Op(), m::Op())));
TestShapeHasMemorySpace(param->shape(), Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(bitcast->shape(), Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(allocate_buffer->shape(), Layout::kHostMemorySpace);
TestShapeHasMemorySpace(dynamic_update_slice->shape(),
Layout::kHostMemorySpace);
TestShapeHasMemorySpace(dynamic_slice->shape(), Layout::kDefaultMemorySpace);
EXPECT_FALSE(HaveRemainingOffloadAnnotations(module.get()));
}
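// The MoveToHost annotation is placed after the dynamic-update-slice rather
// than on its update operand. This ordering is unsupported, so the pass must
// return an error.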
TEST_F(HostOffloaderTest, BasicDusDsDusAnnotationOnWrongSide) {
const std::string& hlo_string = R"(
HloModule my_module
ENTRY main {
data_param = f32[1,2048,2048] parameter(0)
index_param = s32[] parameter(1)
constant_f32_0 = f32[] constant(0)
constant_s32_0 = s32[] constant(0)
broadcast = f32[2,2048,2048] broadcast(constant_f32_0), dimensions={}
dynamic_update_slice = f32[2,2048,2048] dynamic-update-slice(broadcast, data_param, index_param, constant_s32_0, constant_s32_0)
offload_custom_call = f32[1,2048,2048] custom-call(dynamic_update_slice), custom_call_target="MoveToHost"
dynamic_slice = f32[1,2048,2048] dynamic-slice(offload_custom_call, index_param, constant_s32_0, constant_s32_0), dynamic_slice_sizes={1,2048,2048}
ROOT load_custom_call = f32[1,2048,2048] custom-call(dynamic_slice), custom_call_target="MoveToDevice"
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
const absl::StatusOr<bool> status_or_changed =
RunHostOffloader(module.get());
ASSERT_FALSE(status_or_changed.ok());
}
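// The MoveToDevice annotation is placed before the dynamic-slice rather than
// after it. The pass copes by copying the whole host buffer back to device
// before the slice.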
TEST_F(HostOffloaderTest, BasicDusDsDsAnnotationOnWrongSide) {
const std::string& hlo_string = R"(
HloModule my_module
ENTRY main {
data_param = f32[1,2048,2048] parameter(0)
index_param = s32[] parameter(1)
constant_f32_0 = f32[] constant(0)
constant_s32_0 = s32[] constant(0)
broadcast = f32[2,2048,2048] broadcast(constant_f32_0), dimensions={}
offload_custom_call = f32[1,2048,2048] custom-call(data_param), custom_call_target="MoveToHost"
dynamic_update_slice = f32[2,2048,2048] dynamic-update-slice(broadcast, offload_custom_call, index_param, constant_s32_0, constant_s32_0)
load_custom_call = f32[2,2048,2048] custom-call(dynamic_update_slice), custom_call_target="MoveToDevice"
ROOT dynamic_slice = f32[1,2048,2048] dynamic-slice(load_custom_call, index_param, constant_s32_0, constant_s32_0), dynamic_slice_sizes={1,2048,2048}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHostOffloader(module.get()));
EXPECT_TRUE(changed);
HloInstruction* param;
HloInstruction* allocate_buffer;
HloInstruction* dynamic_update_slice;
HloInstruction* copy;
HloInstruction* dynamic_slice;
ASSERT_THAT(
module->entry_computation()->root_instruction(),
GmockMatch(m::DynamicSlice(
&dynamic_slice,
m::Copy(&copy,
m::DynamicUpdateSlice(
&dynamic_update_slice,
m::CustomCall(&allocate_buffer, {"AllocateBuffer"}),
m::Parameter(&param, 0), m::Op(), m::Op(), m::Op())),
m::Op(), m::Op(), m::Op())));
TestShapeHasMemorySpace(param->shape(), Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(allocate_buffer->shape(), Layout::kHostMemorySpace);
TestShapeHasMemorySpace(dynamic_update_slice->shape(),
Layout::kHostMemorySpace);
TestShapeHasMemorySpace(copy->shape(), Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(dynamic_slice->shape(), Layout::kDefaultMemorySpace);
EXPECT_FALSE(HaveRemainingOffloadAnnotations(module.get()));
}
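// End-to-end LLM-activation offload: a producing while loop writes per-step
// activations to host via dynamic-update-slice, and a consuming while loop
// reads them back via dynamic-slice + MoveToDevice. The loop-carried buffers
// should become host AllocateBuffers, with host memory space propagated
// through both loops' tuples, parameters, and conditions.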
TEST_F(HostOffloaderTest, LlmActivation) {
const std::string& hlo_string = R"(
HloModule llm_while
producing_while_condition {
producing_condition_param = (s32[], f32[96,8,6,2048,2048], f32[96,8,6,2048,1]) parameter(0)
producing_condition_current_iteration_index = s32[] get-tuple-element(producing_condition_param), index=0
producing_condition_iteration_count = s32[] constant(96)
ROOT producing_condition_result = pred[] compare(producing_condition_current_iteration_index, producing_condition_iteration_count), direction=LT
}
consuming_while_condition {
consuming_condition_param = (s32[], f32[96,8,6,2048,2048], f32[96,8,6,2048,1]) parameter(0)
consuming_condition_current_iteration_index = s32[] get-tuple-element(consuming_condition_param), index=0
consuming_condition_iteration_count = s32[] constant(96)
ROOT consuming_condition_result = pred[] compare(consuming_condition_current_iteration_index, consuming_condition_iteration_count), direction=LT
}
producing_while_body {
input_tuple.0 = (s32[], f32[96,8,6,2048,2048], f32[96,8,6,2048,1]) parameter(0)
current_iteration_index.0 = s32[] get-tuple-element(input_tuple.0), index=0
data_0.0 = f32[96,8,6,2048,2048] get-tuple-element(input_tuple.0), index=1
data_1.0 = f32[96,8,6,2048,1] get-tuple-element(input_tuple.0), index=2
constant_0.0 = s32[] constant(0)
constant_1.0 = s32[] constant(1)
constant_96 = s32[] constant(96)
slice_data_0 = f32[1,8,6,2048,2048] constant({...})
slice_data_1 = f32[1,8,6,2048,1] constant({...})
compare_result.0 = pred[] compare(current_iteration_index.0, constant_0.0), direction=LT
add_result = s32[] add(current_iteration_index.0, constant_96)
select_result.0 = s32[] select(compare_result.0, add_result, current_iteration_index.0)
custom_call_0.0 = f32[1,8,6,2048,2048] custom-call(slice_data_0), custom_call_target="MoveToHost"
custom_call_1.0 = f32[1,8,6,2048,1] custom-call(slice_data_1), custom_call_target="MoveToHost"
dynamic_update_slice_0 = f32[96,8,6,2048,2048] dynamic-update-slice(data_0.0, custom_call_0.0, select_result.0, constant_0.0, constant_0.0, constant_0.0, constant_0.0)
dynamic_update_slice_1 = f32[96,8,6,2048,1] dynamic-update-slice(data_1.0, custom_call_1.0, select_result.0, constant_0.0, constant_0.0, constant_0.0, constant_0.0)
incremented_index.0 = s32[] add(current_iteration_index.0, constant_1.0)
ROOT tuple_result.0 = (s32[], f32[96,8,6,2048,2048], f32[96,8,6,2048,1]) tuple(incremented_index.0, dynamic_update_slice_0, dynamic_update_slice_1)
}
consuming_while_body {
input_tuple.1 = (s32[], f32[96,8,6,2048,2048], f32[96,8,6,2048,1]) parameter(0)
current_iteration_index.1 = s32[] get-tuple-element(input_tuple.1), index=0
data_0.1 = f32[96,8,6,2048,2048] get-tuple-element(input_tuple.1), index=1
data_1.1 = f32[96,8,6,2048,1] get-tuple-element(input_tuple.1), index=2
constant_0.1 = s32[] constant(0)
constant_1.1 = s32[] constant(1)
constant_95 = s32[] constant(95)
constant_191 = s32[] constant(191)
subtract_0 = s32[] subtract(constant_95, current_iteration_index.1)
compare_result.1 = pred[] compare(subtract_0, constant_0.1), direction=LT
subtract_1 = s32[] subtract(constant_191, current_iteration_index.1)
select_result.1 = s32[] select(compare_result.1, subtract_1, subtract_0)
dynamic_slice_0 = f32[1,8,6,2048,2048] dynamic-slice(data_0.1, select_result.1, constant_0.1, constant_0.1, constant_0.1, constant_0.1), dynamic_slice_sizes={1,8,6,2048,2048}
dynamic_slice_1 = f32[1,8,6,2048,1] dynamic-slice(data_1.1, select_result.1, constant_0.1, constant_0.1, constant_0.1, constant_0.1), dynamic_slice_sizes={1,8,6,2048,1}
custom_call_0.1 = f32[1,8,6,2048,2048] custom-call(dynamic_slice_0), custom_call_target="MoveToDevice"
custom_call_1.1 = f32[1,8,6,2048,1] custom-call(dynamic_slice_1), custom_call_target="MoveToDevice"
tanh_0 = f32[1,8,6,2048,2048] tanh(custom_call_0.1)
tanh_1 = f32[1,8,6,2048,1] tanh(custom_call_1.1)
incremented_index.1 = s32[] add(current_iteration_index.1, constant_1.1)
ROOT tuple_result.1 = (s32[], f32[96,8,6,2048,2048], f32[96,8,6,2048,1]) tuple(incremented_index.1, data_0.1, data_1.1)
}
ENTRY main {
entry_param_0 = f32[] parameter(0)
broadcast_0 = f32[96,8,6,2048,2048] broadcast(entry_param_0), dimensions={}
broadcast_1 = f32[96,8,6,2048,1] broadcast(entry_param_0), dimensions={}
constant_s32_0 = s32[] constant(0)
tuple_for_producing_while = (s32[], f32[96,8,6,2048,2048], f32[96,8,6,2048,1]) tuple(constant_s32_0, broadcast_0, broadcast_1)
producing_while = (s32[], f32[96,8,6,2048,2048], f32[96,8,6,2048,1]) while(tuple_for_producing_while), condition=producing_while_condition, body=producing_while_body
while_output_1 = f32[96,8,6,2048,2048] get-tuple-element(producing_while), index=1
while_output_2 = f32[96,8,6,2048,1] get-tuple-element(producing_while), index=2
tuple_for_consuming_while = (s32[], f32[96,8,6,2048,2048], f32[96,8,6,2048,1]) tuple(constant_s32_0, while_output_1, while_output_2)
consuming_while = (s32[], f32[96,8,6,2048,2048], f32[96,8,6,2048,1]) while(tuple_for_consuming_while), condition=consuming_while_condition, body=consuming_while_body
ROOT result = s32[] get-tuple-element(consuming_while), index=0
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHostOffloader(module.get()));
EXPECT_TRUE(changed);
HloInstruction* consuming_while;
HloInstruction* producing_while_0;
HloInstruction* producing_while_1;
{
HloInstruction* tuple;
HloInstruction* gte_0;
HloInstruction* gte_1;
HloInstruction* gte_2;
ASSERT_THAT(
module->entry_computation()->root_instruction(),
GmockMatch(m::GetTupleElement(
&gte_2,
m::While(
&consuming_while,
m::Tuple(
&tuple, m::Constant(),
m::GetTupleElement(&gte_0, m::While(&producing_while_0)),
m::GetTupleElement(&gte_1, m::While(&producing_while_1)))),
0)));
ASSERT_EQ(producing_while_0, producing_while_1);
TestShapeHasMemorySpace(gte_0->shape(), Layout::kHostMemorySpace);
TestShapeHasMemorySpace(gte_1->shape(), Layout::kHostMemorySpace);
TestShapeHasMemorySpace(
ShapeUtil::GetSubshape(consuming_while->shape(), {1}),
Layout::kHostMemorySpace);
TestShapeHasMemorySpace(
ShapeUtil::GetSubshape(consuming_while->shape(), {2}),
Layout::kHostMemorySpace);
TestShapeHasMemorySpace(
ShapeUtil::GetSubshape(producing_while_0->shape(), {1}),
Layout::kHostMemorySpace);
TestShapeHasMemorySpace(
ShapeUtil::GetSubshape(producing_while_0->shape(), {2}),
Layout::kHostMemorySpace);
TestShapeHasMemorySpace(ShapeUtil::GetSubshape(tuple->shape(), {1}),
Layout::kHostMemorySpace);
TestShapeHasMemorySpace(ShapeUtil::GetSubshape(tuple->shape(), {2}),
Layout::kHostMemorySpace);
}
{
HloInstruction* allocate_buffer_0;
HloInstruction* allocate_buffer_1;
ASSERT_THAT(producing_while_0,
GmockMatch(m::While(m::Tuple(
m::Constant(),
m::CustomCall(&allocate_buffer_0, {"AllocateBuffer"}),
m::CustomCall(&allocate_buffer_1, {"AllocateBuffer"})))));
ASSERT_TRUE(allocate_buffer_0->shape().has_layout());
EXPECT_EQ(allocate_buffer_0->shape().layout().memory_space(),
Layout::kHostMemorySpace);
ASSERT_TRUE(allocate_buffer_1->shape().has_layout());
EXPECT_EQ(allocate_buffer_1->shape().layout().memory_space(),
Layout::kHostMemorySpace);
}
TestShapeHasMemorySpace(
ShapeUtil::GetSubshape(
consuming_while->while_condition()->parameter_instruction(0)->shape(),
{1}),
Layout::kHostMemorySpace);
TestShapeHasMemorySpace(
ShapeUtil::GetSubshape(
consuming_while->while_condition()->parameter_instruction(0)->shape(),
{2}),
Layout::kHostMemorySpace);
{
HloInstruction* tuple;
HloInstruction* dynamic_update_slice_0;
HloInstruction* dynamic_update_slice_1;
HloInstruction* dynamic_update_slice_second_param_0;
HloInstruction* dynamic_update_slice_second_param_1;
HloInstruction* gte_0;
HloInstruction* gte_1;
HloInstruction* param_0;
HloInstruction* param_1;
ASSERT_THAT(producing_while_0->while_body()->root_instruction(),
GmockMatch(m::Tuple(
&tuple, m::Op(),
m::DynamicUpdateSlice(
&dynamic_update_slice_0,
m::GetTupleElement(&gte_0, m::Parameter(&param_0)),
m::Op(&dynamic_update_slice_second_param_0), m::Op(),
m::Op(), m::Op(), m::Op(), m::Op()),
m::DynamicUpdateSlice(
&dynamic_update_slice_1,
m::GetTupleElement(&gte_1, m::Parameter(&param_1)),
m::Op(&dynamic_update_slice_second_param_1), m::Op(),
m::Op(), m::Op(), m::Op(), m::Op()))));
EXPECT_EQ(param_0, param_1);
TestShapeHasMemorySpace(ShapeUtil::GetSubshape(tuple->shape(), {1}),
Layout::kHostMemorySpace);
TestShapeHasMemorySpace(ShapeUtil::GetSubshape(tuple->shape(), {2}),
Layout::kHostMemorySpace);
TestShapeHasMemorySpace(dynamic_update_slice_0->shape(),
Layout::kHostMemorySpace);
TestShapeHasMemorySpace(dynamic_update_slice_1->shape(),
Layout::kHostMemorySpace);
TestShapeHasMemorySpace(gte_0->shape(), Layout::kHostMemorySpace);
TestShapeHasMemorySpace(gte_1->shape(), Layout::kHostMemorySpace);
TestShapeHasMemorySpace(ShapeUtil::GetSubshape(param_0->shape(), {1}),
Layout::kHostMemorySpace);
TestShapeHasMemorySpace(ShapeUtil::GetSubshape(param_0->shape(), {2}),
Layout::kHostMemorySpace);
TestShapeHasMemorySpace(dynamic_update_slice_second_param_0->shape(),
Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(dynamic_update_slice_second_param_1->shape(),
Layout::kDefaultMemorySpace);
}
{
const absl::flat_hash_set<const HloInstruction*> dynamic_slices =
getInstructionsWithOpcodeFromComputation(consuming_while->while_body(),
HloOpcode::kDynamicSlice);
ASSERT_EQ(dynamic_slices.size(), 2);
for (const HloInstruction* dynamic_slice : dynamic_slices) {
const HloInstruction* get_tuple_element;
const HloInstruction* parameter;
ASSERT_THAT(
dynamic_slice,
GmockMatch(m::DynamicSlice(
m::GetTupleElement(&get_tuple_element, m::Parameter(&parameter)),
m::Op(), m::Op(), m::Op(), m::Op(), m::Op())));
TestShapeHasMemorySpace(ShapeUtil::GetSubshape(parameter->shape(), {1}),
Layout::kHostMemorySpace);
TestShapeHasMemorySpace(ShapeUtil::GetSubshape(parameter->shape(), {2}),
Layout::kHostMemorySpace);
TestShapeHasMemorySpace(get_tuple_element->shape(),
Layout::kHostMemorySpace);
TestShapeHasMemorySpace(dynamic_slice->shape(),
Layout::kDefaultMemorySpace);
}
}
EXPECT_FALSE(HaveRemainingOffloadAnnotations(module.get()));
}
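// Same activation-offloading pipeline as above, except that the loop-carried
// buffers are produced by "AllocateBuffer" custom calls in the entry
// computation rather than broadcasts; the buffers and the whole loop-carried
// chain should still be assigned to host memory.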
TEST_F(HostOffloaderTest, LlmActivationSourceIsAllocateBuffer) {
const std::string& hlo_string = R"(
HloModule llm_while
producing_while_condition {
producing_condition_param = (s32[], f32[96,8,6,2048,2048], f32[96,8,6,2048,1]) parameter(0)
producing_condition_current_iteration_index = s32[] get-tuple-element(producing_condition_param), index=0
producing_condition_iteration_count = s32[] constant(96)
ROOT producing_condition_result = pred[] compare(producing_condition_current_iteration_index, producing_condition_iteration_count), direction=LT
}
consuming_while_condition {
consuming_condition_param = (s32[], f32[96,8,6,2048,2048], f32[96,8,6,2048,1]) parameter(0)
consuming_condition_current_iteration_index = s32[] get-tuple-element(consuming_condition_param), index=0
consuming_condition_iteration_count = s32[] constant(96)
ROOT consuming_condition_result = pred[] compare(consuming_condition_current_iteration_index, consuming_condition_iteration_count), direction=LT
}
producing_while_body {
input_tuple.0 = (s32[], f32[96,8,6,2048,2048], f32[96,8,6,2048,1]) parameter(0)
current_iteration_index.0 = s32[] get-tuple-element(input_tuple.0), index=0
data_0.0 = f32[96,8,6,2048,2048] get-tuple-element(input_tuple.0), index=1
data_1.0 = f32[96,8,6,2048,1] get-tuple-element(input_tuple.0), index=2
constant_0.0 = s32[] constant(0)
constant_1.0 = s32[] constant(1)
constant_96 = s32[] constant(96)
slice_data_0 = f32[1,8,6,2048,2048] constant({...})
slice_data_1 = f32[1,8,6,2048,1] constant({...})
compare_result.0 = pred[] compare(current_iteration_index.0, constant_0.0), direction=LT
add_result = s32[] add(current_iteration_index.0, constant_96)
select_result.0 = s32[] select(compare_result.0, add_result, current_iteration_index.0)
custom_call_0.0 = f32[1,8,6,2048,2048] custom-call(slice_data_0), custom_call_target="MoveToHost"
custom_call_1.0 = f32[1,8,6,2048,1] custom-call(slice_data_1), custom_call_target="MoveToHost"
dynamic_update_slice_0 = f32[96,8,6,2048,2048] dynamic-update-slice(data_0.0, custom_call_0.0, select_result.0, constant_0.0, constant_0.0, constant_0.0, constant_0.0)
dynamic_update_slice_1 = f32[96,8,6,2048,1] dynamic-update-slice(data_1.0, custom_call_1.0, select_result.0, constant_0.0, constant_0.0, constant_0.0, constant_0.0)
incremented_index.0 = s32[] add(current_iteration_index.0, constant_1.0)
ROOT tuple_result.0 = (s32[], f32[96,8,6,2048,2048], f32[96,8,6,2048,1]) tuple(incremented_index.0, dynamic_update_slice_0, dynamic_update_slice_1)
}
consuming_while_body {
input_tuple.1 = (s32[], f32[96,8,6,2048,2048], f32[96,8,6,2048,1]) parameter(0)
current_iteration_index.1 = s32[] get-tuple-element(input_tuple.1), index=0
data_0.1 = f32[96,8,6,2048,2048] get-tuple-element(input_tuple.1), index=1
data_1.1 = f32[96,8,6,2048,1] get-tuple-element(input_tuple.1), index=2
constant_0.1 = s32[] constant(0)
constant_1.1 = s32[] constant(1)
constant_95 = s32[] constant(95)
constant_191 = s32[] constant(191)
subtract_0 = s32[] subtract(constant_95, current_iteration_index.1)
compare_result.1 = pred[] compare(subtract_0, constant_0.1), direction=LT
subtract_1 = s32[] subtract(constant_191, current_iteration_index.1)
select_result.1 = s32[] select(compare_result.1, subtract_1, subtract_0)
dynamic_slice_0 = f32[1,8,6,2048,2048] dynamic-slice(data_0.1, select_result.1, constant_0.1, constant_0.1, constant_0.1, constant_0.1), dynamic_slice_sizes={1,8,6,2048,2048}
dynamic_slice_1 = f32[1,8,6,2048,1] dynamic-slice(data_1.1, select_result.1, constant_0.1, constant_0.1, constant_0.1, constant_0.1), dynamic_slice_sizes={1,8,6,2048,1}
custom_call_0.1 = f32[1,8,6,2048,2048] custom-call(dynamic_slice_0), custom_call_target="MoveToDevice"
custom_call_1.1 = f32[1,8,6,2048,1] custom-call(dynamic_slice_1), custom_call_target="MoveToDevice"
tanh_0 = f32[1,8,6,2048,2048] tanh(custom_call_0.1)
tanh_1 = f32[1,8,6,2048,1] tanh(custom_call_1.1)
incremented_index.1 = s32[] add(current_iteration_index.1, constant_1.1)
ROOT tuple_result.1 = (s32[], f32[96,8,6,2048,2048], f32[96,8,6,2048,1]) tuple(incremented_index.1, data_0.1, data_1.1)
}
ENTRY main {
entry_param_0 = f32[] parameter(0)
allocate_buffer_0 = f32[96,8,6,2048,2048] custom-call(), custom_call_target="AllocateBuffer"
allocate_buffer_1 = f32[96,8,6,2048,1] custom-call(), custom_call_target="AllocateBuffer"
constant_s32_0 = s32[] constant(0)
tuple_for_producing_while = (s32[], f32[96,8,6,2048,2048], f32[96,8,6,2048,1]) tuple(constant_s32_0, allocate_buffer_0, allocate_buffer_1)
producing_while = (s32[], f32[96,8,6,2048,2048], f32[96,8,6,2048,1]) while(tuple_for_producing_while), condition=producing_while_condition, body=producing_while_body
while_output_1 = f32[96,8,6,2048,2048] get-tuple-element(producing_while), index=1
while_output_2 = f32[96,8,6,2048,1] get-tuple-element(producing_while), index=2
tuple_for_consuming_while = (s32[], f32[96,8,6,2048,2048], f32[96,8,6,2048,1]) tuple(constant_s32_0, while_output_1, while_output_2)
consuming_while = (s32[], f32[96,8,6,2048,2048], f32[96,8,6,2048,1]) while(tuple_for_consuming_while), condition=consuming_while_condition, body=consuming_while_body
ROOT result = s32[] get-tuple-element(consuming_while), index=0
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHostOffloader(module.get()));
EXPECT_TRUE(changed);
HloInstruction* consuming_while;
HloInstruction* producing_while_0;
HloInstruction* producing_while_1;
{
HloInstruction* tuple;
HloInstruction* gte_0;
HloInstruction* gte_1;
HloInstruction* gte_2;
ASSERT_THAT(
module->entry_computation()->root_instruction(),
GmockMatch(m::GetTupleElement(
            &gte_2,
m::While(
&consuming_while,
m::Tuple(
&tuple, m::Constant(),
                    m::GetTupleElement(&gte_0, m::While(&producing_while_0)),
                    m::GetTupleElement(&gte_1, m::While(&producing_while_1)))),
0)));
ASSERT_EQ(producing_while_0, producing_while_1);
TestShapeHasMemorySpace(gte_0->shape(), Layout::kHostMemorySpace);
TestShapeHasMemorySpace(gte_1->shape(), Layout::kHostMemorySpace);
TestShapeHasMemorySpace(
ShapeUtil::GetSubshape(consuming_while->shape(), {1}),
Layout::kHostMemorySpace);
TestShapeHasMemorySpace(
ShapeUtil::GetSubshape(consuming_while->shape(), {2}),
Layout::kHostMemorySpace);
TestShapeHasMemorySpace(
ShapeUtil::GetSubshape(producing_while_0->shape(), {1}),
Layout::kHostMemorySpace);
TestShapeHasMemorySpace(
ShapeUtil::GetSubshape(producing_while_0->shape(), {2}),
Layout::kHostMemorySpace);
TestShapeHasMemorySpace(ShapeUtil::GetSubshape(tuple->shape(), {1}),
Layout::kHostMemorySpace);
TestShapeHasMemorySpace(ShapeUtil::GetSubshape(tuple->shape(), {2}),
Layout::kHostMemorySpace);
}
{
HloInstruction* allocate_buffer_0;
HloInstruction* allocate_buffer_1;
ASSERT_THAT(producing_while_0,
GmockMatch(m::While(m::Tuple(
m::Constant(),
m::CustomCall(&allocate_buffer_0, {"AllocateBuffer"}),
m::CustomCall(&allocate_buffer_1, {"AllocateBuffer"})))));
ASSERT_TRUE(allocate_buffer_0->shape().has_layout());
EXPECT_EQ(allocate_buffer_0->shape().layout().memory_space(),
Layout::kHostMemorySpace);
ASSERT_TRUE(allocate_buffer_1->shape().has_layout());
EXPECT_EQ(allocate_buffer_1->shape().layout().memory_space(),
Layout::kHostMemorySpace);
}
TestShapeHasMemorySpace(
ShapeUtil::GetSubshape(
consuming_while->while_condition()->parameter_instruction(0)->shape(),
{1}),
Layout::kHostMemorySpace);
TestShapeHasMemorySpace(
ShapeUtil::GetSubshape(
consuming_while->while_condition()->parameter_instruction(0)->shape(),
{2}),
Layout::kHostMemorySpace);
{
HloInstruction* tuple;
HloInstruction* dynamic_update_slice_0;
HloInstruction* dynamic_update_slice_1;
HloInstruction* dynamic_update_slice_second_param_0;
HloInstruction* dynamic_update_slice_second_param_1;
HloInstruction* gte_0;
HloInstruction* gte_1;
HloInstruction* param_0;
HloInstruction* param_1;
ASSERT_THAT(producing_while_0->while_body()->root_instruction(),
GmockMatch(m::Tuple(
&tuple, m::Op(),
m::DynamicUpdateSlice(
&dynamic_update_slice_0,
                        m::GetTupleElement(&gte_0, m::Parameter(&param_0)),
m::Op(&dynamic_update_slice_second_param_0), m::Op(),
m::Op(), m::Op(), m::Op(), m::Op()),
m::DynamicUpdateSlice(
&dynamic_update_slice_1,
                        m::GetTupleElement(&gte_1, m::Parameter(&param_1)),
m::Op(&dynamic_update_slice_second_param_1), m::Op(),
m::Op(), m::Op(), m::Op(), m::Op()))));
EXPECT_EQ(param_0, param_1);
TestShapeHasMemorySpace(ShapeUtil::GetSubshape(tuple->shape(), {1}),
Layout::kHostMemorySpace);
TestShapeHasMemorySpace(ShapeUtil::GetSubshape(tuple->shape(), {2}),
Layout::kHostMemorySpace);
TestShapeHasMemorySpace(dynamic_update_slice_0->shape(),
Layout::kHostMemorySpace);
TestShapeHasMemorySpace(dynamic_update_slice_1->shape(),
Layout::kHostMemorySpace);
TestShapeHasMemorySpace(gte_0->shape(), Layout::kHostMemorySpace);
TestShapeHasMemorySpace(gte_1->shape(), Layout::kHostMemorySpace);
TestShapeHasMemorySpace(ShapeUtil::GetSubshape(param_0->shape(), {1}),
Layout::kHostMemorySpace);
TestShapeHasMemorySpace(ShapeUtil::GetSubshape(param_0->shape(), {2}),
Layout::kHostMemorySpace);
TestShapeHasMemorySpace(dynamic_update_slice_second_param_0->shape(),
Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(dynamic_update_slice_second_param_1->shape(),
Layout::kDefaultMemorySpace);
}
{
const absl::flat_hash_set<const HloInstruction*> dynamic_slices =
getInstructionsWithOpcodeFromComputation(consuming_while->while_body(),
HloOpcode::kDynamicSlice);
ASSERT_EQ(dynamic_slices.size(), 2);
for (const HloInstruction* dynamic_slice : dynamic_slices) {
const HloInstruction* get_tuple_element;
const HloInstruction* parameter;
ASSERT_THAT(
dynamic_slice,
GmockMatch(m::DynamicSlice(
              m::GetTupleElement(&get_tuple_element, m::Parameter(&parameter)),
m::Op(), m::Op(), m::Op(), m::Op(), m::Op())));
TestShapeHasMemorySpace(ShapeUtil::GetSubshape(parameter->shape(), {1}),
Layout::kHostMemorySpace);
TestShapeHasMemorySpace(ShapeUtil::GetSubshape(parameter->shape(), {2}),
Layout::kHostMemorySpace);
TestShapeHasMemorySpace(get_tuple_element->shape(),
Layout::kHostMemorySpace);
TestShapeHasMemorySpace(dynamic_slice->shape(),
Layout::kDefaultMemorySpace);
}
}
EXPECT_FALSE(HaveRemainingOffloadAnnotations(module.get()));
}
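// Activation offloading where the consuming loop reshapes each dynamic-slice
// result before the "MoveToDevice" annotation; the pass must look through the
// reshape when deciding where the host/device boundary falls.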
TEST_F(HostOffloaderTest, LlmActivationDsWithReshape) {
const std::string& hlo_string = R"(
HloModule llm_while
producing_while_condition {
producing_condition_param = (s32[], f32[96,8,6,2048,2048], f32[96,8,6,2048,1]) parameter(0)
producing_condition_current_iteration_index = s32[] get-tuple-element(producing_condition_param), index=0
producing_condition_iteration_count = s32[] constant(96)
ROOT producing_condition_result = pred[] compare(producing_condition_current_iteration_index, producing_condition_iteration_count), direction=LT
}
consuming_while_condition {
consuming_condition_param = (s32[], f32[96,8,6,2048,2048], f32[96,8,6,2048,1]) parameter(0)
consuming_condition_current_iteration_index = s32[] get-tuple-element(consuming_condition_param), index=0
consuming_condition_iteration_count = s32[] constant(96)
ROOT consuming_condition_result = pred[] compare(consuming_condition_current_iteration_index, consuming_condition_iteration_count), direction=LT
}
producing_while_body {
input_tuple.0 = (s32[], f32[96,8,6,2048,2048], f32[96,8,6,2048,1]) parameter(0)
current_iteration_index.0 = s32[] get-tuple-element(input_tuple.0), index=0
data_0.0 = f32[96,8,6,2048,2048] get-tuple-element(input_tuple.0), index=1
data_1.0 = f32[96,8,6,2048,1] get-tuple-element(input_tuple.0), index=2
constant_0.0 = s32[] constant(0)
constant_1.0 = s32[] constant(1)
constant_96 = s32[] constant(96)
slice_data_0 = f32[1,8,6,2048,2048] constant({...})
slice_data_1 = f32[1,8,6,2048,1] constant({...})
compare_result.0 = pred[] compare(current_iteration_index.0, constant_0.0), direction=LT
add_result = s32[] add(current_iteration_index.0, constant_96)
select_result.0 = s32[] select(compare_result.0, add_result, current_iteration_index.0)
custom_call_0.0 = f32[1,8,6,2048,2048] custom-call(slice_data_0), custom_call_target="MoveToHost"
custom_call_1.0 = f32[1,8,6,2048,1] custom-call(slice_data_1), custom_call_target="MoveToHost"
dynamic_update_slice_0 = f32[96,8,6,2048,2048] dynamic-update-slice(data_0.0, custom_call_0.0, select_result.0, constant_0.0, constant_0.0, constant_0.0, constant_0.0)
dynamic_update_slice_1 = f32[96,8,6,2048,1] dynamic-update-slice(data_1.0, custom_call_1.0, select_result.0, constant_0.0, constant_0.0, constant_0.0, constant_0.0)
incremented_index.0 = s32[] add(current_iteration_index.0, constant_1.0)
ROOT tuple_result.0 = (s32[], f32[96,8,6,2048,2048], f32[96,8,6,2048,1]) tuple(incremented_index.0, dynamic_update_slice_0, dynamic_update_slice_1)
}
consuming_while_body {
input_tuple.1 = (s32[], f32[96,8,6,2048,2048], f32[96,8,6,2048,1]) parameter(0)
current_iteration_index.1 = s32[] get-tuple-element(input_tuple.1), index=0
data_0.1 = f32[96,8,6,2048,2048] get-tuple-element(input_tuple.1), index=1
data_1.1 = f32[96,8,6,2048,1] get-tuple-element(input_tuple.1), index=2
constant_0.1 = s32[] constant(0)
constant_1.1 = s32[] constant(1)
constant_95 = s32[] constant(95)
constant_191 = s32[] constant(191)
subtract_0 = s32[] subtract(constant_95, current_iteration_index.1)
compare_result.1 = pred[] compare(subtract_0, constant_0.1), direction=LT
subtract_1 = s32[] subtract(constant_191, current_iteration_index.1)
select_result.1 = s32[] select(compare_result.1, subtract_1, subtract_0)
dynamic_slice_0 = f32[1,8,6,2048,2048] dynamic-slice(data_0.1, select_result.1, constant_0.1, constant_0.1, constant_0.1, constant_0.1), dynamic_slice_sizes={1,8,6,2048,2048}
dynamic_slice_1 = f32[1,8,6,2048,1] dynamic-slice(data_1.1, select_result.1, constant_0.1, constant_0.1, constant_0.1, constant_0.1), dynamic_slice_sizes={1,8,6,2048,1}
rs = f32[1,8,6,2048,2048] reshape(dynamic_slice_0)
rs2 = f32[1,8,6,2048,1] reshape(dynamic_slice_1)
custom_call_0.1 = f32[1,8,6,2048,2048] custom-call(rs), custom_call_target="MoveToDevice"
custom_call_1.1 = f32[1,8,6,2048,1] custom-call(rs2), custom_call_target="MoveToDevice"
tanh_0 = f32[1,8,6,2048,2048] tanh(custom_call_0.1)
tanh_1 = f32[1,8,6,2048,1] tanh(custom_call_1.1)
incremented_index.1 = s32[] add(current_iteration_index.1, constant_1.1)
ROOT tuple_result.1 = (s32[], f32[96,8,6,2048,2048], f32[96,8,6,2048,1]) tuple(incremented_index.1, data_0.1, data_1.1)
}
ENTRY main {
entry_param_0 = f32[] parameter(0)
broadcast_0 = f32[96,8,6,2048,2048] broadcast(entry_param_0), dimensions={}
broadcast_1 = f32[96,8,6,2048,1] broadcast(entry_param_0), dimensions={}
constant_s32_0 = s32[] constant(0)
tuple_for_producing_while = (s32[], f32[96,8,6,2048,2048], f32[96,8,6,2048,1]) tuple(constant_s32_0, broadcast_0, broadcast_1)
producing_while = (s32[], f32[96,8,6,2048,2048], f32[96,8,6,2048,1]) while(tuple_for_producing_while), condition=producing_while_condition, body=producing_while_body
while_output_1 = f32[96,8,6,2048,2048] get-tuple-element(producing_while), index=1
while_output_2 = f32[96,8,6,2048,1] get-tuple-element(producing_while), index=2
tuple_for_consuming_while = (s32[], f32[96,8,6,2048,2048], f32[96,8,6,2048,1]) tuple(constant_s32_0, while_output_1, while_output_2)
consuming_while = (s32[], f32[96,8,6,2048,2048], f32[96,8,6,2048,1]) while(tuple_for_consuming_while), condition=consuming_while_condition, body=consuming_while_body
ROOT result = s32[] get-tuple-element(consuming_while), index=0
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHostOffloader(module.get()));
EXPECT_TRUE(changed);
HloInstruction* consuming_while;
HloInstruction* producing_while_0;
HloInstruction* producing_while_1;
{
HloInstruction* tuple;
HloInstruction* gte_0;
HloInstruction* gte_1;
HloInstruction* gte_2;
ASSERT_THAT(
module->entry_computation()->root_instruction(),
GmockMatch(m::GetTupleElement(
            &gte_2,
m::While(
&consuming_while,
m::Tuple(
&tuple, m::Constant(),
                    m::GetTupleElement(&gte_0, m::While(&producing_while_0)),
                    m::GetTupleElement(&gte_1, m::While(&producing_while_1)))),
0)));
ASSERT_EQ(producing_while_0, producing_while_1);
TestShapeHasMemorySpace(gte_0->shape(), Layout::kHostMemorySpace);
TestShapeHasMemorySpace(gte_1->shape(), Layout::kHostMemorySpace);
TestShapeHasMemorySpace(
ShapeUtil::GetSubshape(consuming_while->shape(), {1}),
Layout::kHostMemorySpace);
TestShapeHasMemorySpace(
ShapeUtil::GetSubshape(consuming_while->shape(), {2}),
Layout::kHostMemorySpace);
TestShapeHasMemorySpace(
ShapeUtil::GetSubshape(producing_while_0->shape(), {1}),
Layout::kHostMemorySpace);
TestShapeHasMemorySpace(
ShapeUtil::GetSubshape(producing_while_0->shape(), {2}),
Layout::kHostMemorySpace);
TestShapeHasMemorySpace(ShapeUtil::GetSubshape(tuple->shape(), {1}),
Layout::kHostMemorySpace);
TestShapeHasMemorySpace(ShapeUtil::GetSubshape(tuple->shape(), {2}),
Layout::kHostMemorySpace);
}
{
HloInstruction* allocate_buffer_0;
HloInstruction* allocate_buffer_1;
ASSERT_THAT(producing_while_0,
GmockMatch(m::While(m::Tuple(
m::Constant(),
m::CustomCall(&allocate_buffer_0, {"AllocateBuffer"}),
m::CustomCall(&allocate_buffer_1, {"AllocateBuffer"})))));
ASSERT_TRUE(allocate_buffer_0->shape().has_layout());
EXPECT_EQ(allocate_buffer_0->shape().layout().memory_space(),
Layout::kHostMemorySpace);
ASSERT_TRUE(allocate_buffer_1->shape().has_layout());
EXPECT_EQ(allocate_buffer_1->shape().layout().memory_space(),
Layout::kHostMemorySpace);
}
TestShapeHasMemorySpace(
ShapeUtil::GetSubshape(
consuming_while->while_condition()->parameter_instruction(0)->shape(),
{1}),
Layout::kHostMemorySpace);
TestShapeHasMemorySpace(
ShapeUtil::GetSubshape(
consuming_while->while_condition()->parameter_instruction(0)->shape(),
{2}),
Layout::kHostMemorySpace);
{
HloInstruction* tuple;
HloInstruction* dynamic_update_slice_0;
HloInstruction* dynamic_update_slice_1;
HloInstruction* dynamic_update_slice_second_param_0;
HloInstruction* dynamic_update_slice_second_param_1;
HloInstruction* gte_0;
HloInstruction* gte_1;
HloInstruction* param_0;
HloInstruction* param_1;
ASSERT_THAT(producing_while_0->while_body()->root_instruction(),
GmockMatch(m::Tuple(
&tuple, m::Op(),
m::DynamicUpdateSlice(
&dynamic_update_slice_0,
                        m::GetTupleElement(&gte_0, m::Parameter(&param_0)),
m::Op(&dynamic_update_slice_second_param_0), m::Op(),
m::Op(), m::Op(), m::Op(), m::Op()),
m::DynamicUpdateSlice(
&dynamic_update_slice_1,
                        m::GetTupleElement(&gte_1, m::Parameter(&param_1)),
m::Op(&dynamic_update_slice_second_param_1), m::Op(),
m::Op(), m::Op(), m::Op(), m::Op()))));
EXPECT_EQ(param_0, param_1);
TestShapeHasMemorySpace(ShapeUtil::GetSubshape(tuple->shape(), {1}),
Layout::kHostMemorySpace);
TestShapeHasMemorySpace(ShapeUtil::GetSubshape(tuple->shape(), {2}),
Layout::kHostMemorySpace);
TestShapeHasMemorySpace(dynamic_update_slice_0->shape(),
Layout::kHostMemorySpace);
TestShapeHasMemorySpace(dynamic_update_slice_1->shape(),
Layout::kHostMemorySpace);
TestShapeHasMemorySpace(gte_0->shape(), Layout::kHostMemorySpace);
TestShapeHasMemorySpace(gte_1->shape(), Layout::kHostMemorySpace);
TestShapeHasMemorySpace(ShapeUtil::GetSubshape(param_0->shape(), {1}),
Layout::kHostMemorySpace);
TestShapeHasMemorySpace(ShapeUtil::GetSubshape(param_0->shape(), {2}),
Layout::kHostMemorySpace);
TestShapeHasMemorySpace(dynamic_update_slice_second_param_0->shape(),
Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(dynamic_update_slice_second_param_1->shape(),
Layout::kDefaultMemorySpace);
}
{
const absl::flat_hash_set<const HloInstruction*> dynamic_slices =
getInstructionsWithOpcodeFromComputation(consuming_while->while_body(),
HloOpcode::kDynamicSlice);
ASSERT_EQ(dynamic_slices.size(), 2);
for (const HloInstruction* dynamic_slice : dynamic_slices) {
const HloInstruction* get_tuple_element;
const HloInstruction* parameter;
ASSERT_THAT(
dynamic_slice,
GmockMatch(m::DynamicSlice(
              m::GetTupleElement(&get_tuple_element, m::Parameter(&parameter)),
m::Op(), m::Op(), m::Op(), m::Op(), m::Op())));
TestShapeHasMemorySpace(ShapeUtil::GetSubshape(parameter->shape(), {1}),
Layout::kHostMemorySpace);
TestShapeHasMemorySpace(ShapeUtil::GetSubshape(parameter->shape(), {2}),
Layout::kHostMemorySpace);
TestShapeHasMemorySpace(get_tuple_element->shape(),
Layout::kHostMemorySpace);
TestShapeHasMemorySpace(dynamic_slice->shape(),
Layout::kDefaultMemorySpace);
}
}
EXPECT_FALSE(HaveRemainingOffloadAnnotations(module.get()));
}
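// The offloaded buffer produced by the loops is consumed twice in the entry
// computation: once via dynamic-slice and once via a static slice, each
// followed by "MoveToDevice". Both consumers should yield device-memory
// results while the buffer itself stays in host memory.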
TEST_F(HostOffloaderTest, LlmActivationHostMemoryMultipleConsumers) {
const std::string& hlo_string = R"(
HloModule llm_while
producing_while_condition {
producing_condition_param = (s32[], f32[96,8,6,2048,2048]) parameter(0)
producing_condition_current_iteration_index = s32[] get-tuple-element(producing_condition_param), index=0
producing_condition_iteration_count = s32[] constant(96)
ROOT producing_condition_result = pred[] compare(producing_condition_current_iteration_index, producing_condition_iteration_count), direction=LT
}
consuming_while_condition {
consuming_condition_param = (s32[], f32[96,8,6,2048,2048]) parameter(0)
consuming_condition_current_iteration_index = s32[] get-tuple-element(consuming_condition_param), index=0
consuming_condition_iteration_count = s32[] constant(96)
ROOT consuming_condition_result = pred[] compare(consuming_condition_current_iteration_index, consuming_condition_iteration_count), direction=LT
}
producing_while_body {
input_tuple.0 = (s32[], f32[96,8,6,2048,2048]) parameter(0)
current_iteration_index.0 = s32[] get-tuple-element(input_tuple.0), index=0
data_0.0 = f32[96,8,6,2048,2048] get-tuple-element(input_tuple.0), index=1
constant_0.0 = s32[] constant(0)
constant_1.0 = s32[] constant(1)
constant_96 = s32[] constant(96)
slice_data_0 = f32[1,8,6,2048,2048] constant({...})
compare_result.0 = pred[] compare(current_iteration_index.0, constant_0.0), direction=LT
add_result = s32[] add(current_iteration_index.0, constant_96)
select_result.0 = s32[] select(compare_result.0, add_result, current_iteration_index.0)
custom_call_0.0 = f32[1,8,6,2048,2048] custom-call(slice_data_0), custom_call_target="MoveToHost"
dynamic_update_slice_0 = f32[96,8,6,2048,2048] dynamic-update-slice(data_0.0, custom_call_0.0, select_result.0, constant_0.0, constant_0.0, constant_0.0, constant_0.0)
incremented_index.0 = s32[] add(current_iteration_index.0, constant_1.0)
ROOT tuple_result.0 = (s32[], f32[96,8,6,2048,2048]) tuple(incremented_index.0, dynamic_update_slice_0)
}
consuming_while_body {
input_tuple.1 = (s32[], f32[96,8,6,2048,2048]) parameter(0)
current_iteration_index.1 = s32[] get-tuple-element(input_tuple.1), index=0
data_0.1 = f32[96,8,6,2048,2048] get-tuple-element(input_tuple.1), index=1
constant_0.1 = s32[] constant(0)
constant_1.1 = s32[] constant(1)
constant_95 = s32[] constant(95)
constant_191 = s32[] constant(191)
subtract_0 = s32[] subtract(constant_95, current_iteration_index.1)
compare_result.1 = pred[] compare(subtract_0, constant_0.1), direction=LT
subtract_1 = s32[] subtract(constant_191, current_iteration_index.1)
select_result.1 = s32[] select(compare_result.1, subtract_1, subtract_0)
dynamic_slice_0 = f32[1,8,6,2048,2048] dynamic-slice(data_0.1, select_result.1, constant_0.1, constant_0.1, constant_0.1, constant_0.1), dynamic_slice_sizes={1,8,6,2048,2048}
custom_call_0.1 = f32[1,8,6,2048,2048] custom-call(dynamic_slice_0), custom_call_target="MoveToDevice"
tanh_0 = f32[1,8,6,2048,2048] tanh(custom_call_0.1)
incremented_index.1 = s32[] add(current_iteration_index.1, constant_1.1)
ROOT tuple_result.1 = (s32[], f32[96,8,6,2048,2048]) tuple(incremented_index.1, data_0.1)
}
ENTRY main {
entry_param_0 = f32[] parameter(0)
entry_param_1 = s32[] parameter(1)
entry_param_2 = s32[] parameter(2)
broadcast_0 = f32[96,8,6,2048,2048] broadcast(entry_param_0), dimensions={}
constant_s32_0 = s32[] constant(0)
tuple_for_producing_while = (s32[], f32[96,8,6,2048,2048]) tuple(constant_s32_0, broadcast_0)
producing_while = (s32[], f32[96,8,6,2048,2048]) while(tuple_for_producing_while), condition=producing_while_condition, body=producing_while_body
while_output_1 = f32[96,8,6,2048,2048] get-tuple-element(producing_while), index=1
tuple_for_consuming_while = (s32[], f32[96,8,6,2048,2048]) tuple(constant_s32_0, while_output_1)
consuming_while = (s32[], f32[96,8,6,2048,2048]) while(tuple_for_consuming_while), condition=consuming_while_condition, body=consuming_while_body
second_while_output = f32[96,8,6,2048,2048] get-tuple-element(consuming_while), index=1
final_dynamic_slice_0 = f32[1,8,6,2048,2048] dynamic-slice(second_while_output, entry_param_1, constant_s32_0, constant_s32_0, constant_s32_0, constant_s32_0), dynamic_slice_sizes={1,8,6,2048,2048}
final_host_to_device_custom_call_0 = f32[1,8,6,2048,2048] custom-call(final_dynamic_slice_0), custom_call_target="MoveToDevice"
final_slice_0 = f32[1,8,6,2048,2048] slice(second_while_output), slice={[41:42], [0:8], [0:6], [0:2048], [0:2048]}
final_host_to_device_custom_call_1 = f32[1,8,6,2048,2048] custom-call(final_slice_0), custom_call_target="MoveToDevice"
ROOT add = f32[1,8,6,2048,2048] add(final_host_to_device_custom_call_0, final_host_to_device_custom_call_1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHostOffloader(module.get()));
EXPECT_TRUE(changed);
HloInstruction* consuming_while;
HloInstruction* producing_while;
{
HloInstruction* tuple;
HloInstruction* gte_between_whiles;
HloInstruction* final_gte;
HloInstruction* dynamic_slice_0;
    HloInstruction* dynamic_slice_1;
HloInstruction* add;
auto pattern_ending_in_gte = m::GetTupleElement(
&final_gte,
m::While(&consuming_while,
m::Tuple(&tuple, m::Constant(),
                          m::GetTupleElement(&gte_between_whiles,
m::While(&producing_while)))));
ASSERT_THAT(
module->entry_computation()->root_instruction(),
GmockMatch(
m::Add(&add,
m::DynamicSlice(&dynamic_slice_0, pattern_ending_in_gte,
m::Op(), m::Op(), m::Op(), m::Op(), m::Op()),
                   m::DynamicSlice(&dynamic_slice_1, pattern_ending_in_gte,
m::ConstantScalar(41), m::Op(), m::Op(),
m::Op(), m::Op()))));
TestShapeHasMemorySpace(gte_between_whiles->shape(),
Layout::kHostMemorySpace);
TestShapeHasMemorySpace(
ShapeUtil::GetSubshape(consuming_while->shape(), {1}),
Layout::kHostMemorySpace);
TestShapeHasMemorySpace(
ShapeUtil::GetSubshape(producing_while->shape(), {1}),
Layout::kHostMemorySpace);
TestShapeHasMemorySpace(ShapeUtil::GetSubshape(tuple->shape(), {1}),
Layout::kHostMemorySpace);
TestShapeHasMemorySpace(final_gte->shape(), Layout::kHostMemorySpace);
TestShapeHasMemorySpace(dynamic_slice_0->shape(),
Layout::kDefaultMemorySpace);
    TestShapeHasMemorySpace(dynamic_slice_1->shape(),
Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(add->shape(), Layout::kDefaultMemorySpace);
}
{
HloInstruction* allocate_buffer;
ASSERT_THAT(producing_while,
GmockMatch(m::While(m::Tuple(
m::Constant(),
m::CustomCall(&allocate_buffer, {"AllocateBuffer"})))));
ASSERT_TRUE(allocate_buffer->shape().has_layout());
EXPECT_EQ(allocate_buffer->shape().layout().memory_space(),
Layout::kHostMemorySpace);
}
TestShapeHasMemorySpace(
ShapeUtil::GetSubshape(
consuming_while->while_condition()->parameter_instruction(0)->shape(),
{1}),
Layout::kHostMemorySpace);
{
HloInstruction* tuple;
HloInstruction* dynamic_update_slice;
HloInstruction* dynamic_update_slice_second_param;
HloInstruction* gte;
HloInstruction* param;
ASSERT_THAT(
producing_while->while_body()->root_instruction(),
GmockMatch(m::Tuple(&tuple, m::Op(),
m::DynamicUpdateSlice(
&dynamic_update_slice,
                                m::GetTupleElement(&gte, m::Parameter(&param)),
m::Op(&dynamic_update_slice_second_param),
m::Op(), m::Op(), m::Op(), m::Op(), m::Op()))));
TestShapeHasMemorySpace(ShapeUtil::GetSubshape(tuple->shape(), {1}),
Layout::kHostMemorySpace);
TestShapeHasMemorySpace(dynamic_update_slice->shape(),
Layout::kHostMemorySpace);
TestShapeHasMemorySpace(gte->shape(), Layout::kHostMemorySpace);
TestShapeHasMemorySpace(ShapeUtil::GetSubshape(param->shape(), {1}),
Layout::kHostMemorySpace);
TestShapeHasMemorySpace(dynamic_update_slice_second_param->shape(),
Layout::kDefaultMemorySpace);
}
{
const absl::flat_hash_set<const HloInstruction*> dynamic_slices =
getInstructionsWithOpcodeFromComputation(consuming_while->while_body(),
HloOpcode::kDynamicSlice);
ASSERT_EQ(dynamic_slices.size(), 1);
const HloInstruction* dynamic_slice = *dynamic_slices.begin();
const HloInstruction* get_tuple_element;
const HloInstruction* parameter;
ASSERT_THAT(
dynamic_slice,
GmockMatch(m::DynamicSlice(
            m::GetTupleElement(&get_tuple_element, m::Parameter(&parameter)),
m::Op(), m::Op(), m::Op(), m::Op(), m::Op())));
TestShapeHasMemorySpace(ShapeUtil::GetSubshape(parameter->shape(), {1}),
Layout::kHostMemorySpace);
TestShapeHasMemorySpace(get_tuple_element->shape(),
Layout::kHostMemorySpace);
TestShapeHasMemorySpace(dynamic_slice->shape(),
Layout::kDefaultMemorySpace);
}
EXPECT_FALSE(HaveRemainingOffloadAnnotations(module.get()));
}
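// The "MoveToHost" operand data_2.1 is a loop-carried value that is also
// passed through unchanged to the loop's result tuple, so the pass is
// expected to insert an extra copy feeding the second dynamic-update-slice
// instead of writing the shared value in place.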
TEST_F(HostOffloaderTest, InsertExtraCopyForScheduling) {
const std::string& hlo_string = R"(
HloModule llm_while
producing_while_condition {
producing_condition_param = (s32[], f32[96,8,6,2048,2048], f32[96,8,6,2048,1], f32[1,8,6,2048,1]) parameter(0)
producing_condition_current_iteration_index = s32[] get-tuple-element(producing_condition_param), index=0
producing_condition_iteration_count = s32[] constant(96)
ROOT producing_condition_result = pred[] compare(producing_condition_current_iteration_index, producing_condition_iteration_count), direction=LT
}
consuming_while_condition {
consuming_condition_param = (s32[], f32[96,8,6,2048,2048], f32[96,8,6,2048,1]) parameter(0)
consuming_condition_current_iteration_index = s32[] get-tuple-element(consuming_condition_param), index=0
consuming_condition_iteration_count = s32[] constant(96)
ROOT consuming_condition_result = pred[] compare(consuming_condition_current_iteration_index, consuming_condition_iteration_count), direction=LT
}
producing_while_body {
input_tuple.0 = (s32[], f32[96,8,6,2048,2048], f32[96,8,6,2048,1], f32[1,8,6,2048,1]) parameter(0)
current_iteration_index.0 = s32[] get-tuple-element(input_tuple.0), index=0
data_0.0 = f32[96,8,6,2048,2048] get-tuple-element(input_tuple.0), index=1
data_1.0 = f32[96,8,6,2048,1] get-tuple-element(input_tuple.0), index=2
data_2.1 = f32[1,8,6,2048,1] get-tuple-element(input_tuple.0), index=3
constant_0.0 = s32[] constant(0)
constant_1.0 = s32[] constant(1)
constant_96 = s32[] constant(96)
slice_data_0 = f32[1,8,6,2048,2048] constant({...})
slice_data_1 = f32[1,8,6,2048,1] constant({...})
compare_result.0 = pred[] compare(current_iteration_index.0, constant_0.0), direction=LT
add_result = s32[] add(current_iteration_index.0, constant_96)
select_result.0 = s32[] select(compare_result.0, add_result, current_iteration_index.0)
custom_call_0.0 = f32[1,8,6,2048,2048] custom-call(slice_data_0), custom_call_target="MoveToHost"
custom_call_1.0 = f32[1,8,6,2048,1] custom-call(data_2.1), custom_call_target="MoveToHost"
dynamic_update_slice_0 = f32[96,8,6,2048,2048] dynamic-update-slice(data_0.0, custom_call_0.0, select_result.0, constant_0.0, constant_0.0, constant_0.0, constant_0.0)
dynamic_update_slice_1 = f32[96,8,6,2048,1] dynamic-update-slice(data_1.0, custom_call_1.0, select_result.0, constant_0.0, constant_0.0, constant_0.0, constant_0.0)
incremented_index.0 = s32[] add(current_iteration_index.0, constant_1.0)
ROOT tuple_result.0 = (s32[], f32[96,8,6,2048,2048], f32[96,8,6,2048,1], f32[1,8,6,2048,1]) tuple(incremented_index.0, dynamic_update_slice_0, dynamic_update_slice_1, data_2.1)
}
consuming_while_body {
input_tuple.1 = (s32[], f32[96,8,6,2048,2048], f32[96,8,6,2048,1]) parameter(0)
current_iteration_index.1 = s32[] get-tuple-element(input_tuple.1), index=0
data_0.1 = f32[96,8,6,2048,2048] get-tuple-element(input_tuple.1), index=1
data_1.1 = f32[96,8,6,2048,1] get-tuple-element(input_tuple.1), index=2
constant_0.1 = s32[] constant(0)
constant_1.1 = s32[] constant(1)
constant_95 = s32[] constant(95)
constant_191 = s32[] constant(191)
subtract_0 = s32[] subtract(constant_95, current_iteration_index.1)
compare_result.1 = pred[] compare(subtract_0, constant_0.1), direction=LT
subtract_1 = s32[] subtract(constant_191, current_iteration_index.1)
select_result.1 = s32[] select(compare_result.1, subtract_1, subtract_0)
dynamic_slice_0 = f32[1,8,6,2048,2048] dynamic-slice(data_0.1, select_result.1, constant_0.1, constant_0.1, constant_0.1, constant_0.1), dynamic_slice_sizes={1,8,6,2048,2048}
dynamic_slice_1 = f32[1,8,6,2048,1] dynamic-slice(data_1.1, select_result.1, constant_0.1, constant_0.1, constant_0.1, constant_0.1), dynamic_slice_sizes={1,8,6,2048,1}
custom_call_0.1 = f32[1,8,6,2048,2048] custom-call(dynamic_slice_0), custom_call_target="MoveToDevice"
custom_call_1.1 = f32[1,8,6,2048,1] custom-call(dynamic_slice_1), custom_call_target="MoveToDevice"
tanh_0 = f32[1,8,6,2048,2048] tanh(custom_call_0.1)
tanh_1 = f32[1,8,6,2048,1] tanh(custom_call_1.1)
incremented_index.1 = s32[] add(current_iteration_index.1, constant_1.1)
ROOT tuple_result.1 = (s32[], f32[96,8,6,2048,2048], f32[96,8,6,2048,1]) tuple(incremented_index.1, data_0.1, data_1.1)
}
ENTRY main {
entry_param_0 = f32[] parameter(0)
broadcast_0 = f32[96,8,6,2048,2048] broadcast(entry_param_0), dimensions={}
broadcast_1 = f32[96,8,6,2048,1] broadcast(entry_param_0), dimensions={}
broadcast_2 = f32[1,8,6,2048,1] broadcast(entry_param_0), dimensions={}
constant_s32_0 = s32[] constant(0)
tuple_for_producing_while = (s32[], f32[96,8,6,2048,2048], f32[96,8,6,2048,1], f32[1,8,6,2048,1]) tuple(constant_s32_0, broadcast_0, broadcast_1, broadcast_2)
producing_while = (s32[], f32[96,8,6,2048,2048], f32[96,8,6,2048,1], f32[1,8,6,2048,1]) while(tuple_for_producing_while), condition=producing_while_condition, body=producing_while_body
while_output_1 = f32[96,8,6,2048,2048] get-tuple-element(producing_while), index=1
while_output_2 = f32[96,8,6,2048,1] get-tuple-element(producing_while), index=2
tuple_for_consuming_while = (s32[], f32[96,8,6,2048,2048], f32[96,8,6,2048,1]) tuple(constant_s32_0, while_output_1, while_output_2)
consuming_while = (s32[], f32[96,8,6,2048,2048], f32[96,8,6,2048,1]) while(tuple_for_consuming_while), condition=consuming_while_condition, body=consuming_while_body
ROOT result = s32[] get-tuple-element(consuming_while), index=0
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHostOffloader(module.get()));
EXPECT_TRUE(changed);
EXPECT_FALSE(HaveRemainingOffloadAnnotations(module.get()));
const HloInstruction* dus0 =
FindInstruction(module.get(), "dynamic_update_slice_0");
const HloInstruction* dus1 =
FindInstruction(module.get(), "dynamic_update_slice_1");
EXPECT_THAT(dus0, GmockMatch(m::DynamicUpdateSlice(m::Op(), m::Constant(),
m::Op(), m::Op(), m::Op(),
m::Op(), m::Op())));
EXPECT_THAT(dus1, GmockMatch(m::DynamicUpdateSlice(m::Op(), m::Copy(),
m::Op(), m::Op(), m::Op(),
m::Op(), m::Op())));
}
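// Parameter 0 is annotated with host memory space (S(5)) in the entry
// computation layout; the "MoveToDevice" custom call should turn into a
// host-to-device copy and all arithmetic should remain in device memory.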
TEST_F(HostOffloaderTest, ParameterStreaming) {
const std::string& hlo_string = R"(
HloModule ParameterStreaming, entry_computation_layout={(s32[2,1]{1,0:T(2,128)S(5)}, s32[2,1]{1,0:T(2,128)})->(s32[2,1]{1,0:T(2,128)}, s32[2,1]{1,0:T(2,128)})}
ENTRY main {
param_0 = s32[2,1]{1,0} parameter(0)
param_1 = s32[2,1]{1,0} parameter(1)
constant_2 = s32[] constant(2)
constant_4 = s32[] constant(4)
broadcast_0 = s32[2,1]{1,0} broadcast(constant_2), dimensions={}
multiply_0 = s32[2,1]{1,0} multiply(param_1, broadcast_0)
custom_call = s32[2,1]{1,0} custom-call(param_0), custom_call_target="MoveToDevice"
multiply_1 = s32[2,1]{1,0} multiply(multiply_0, custom_call)
broadcast_1 = s32[2,1]{1,0} broadcast(constant_4), dimensions={}
multiply_2 = s32[2,1]{1,0} multiply(multiply_1, broadcast_1)
ROOT tuple = (s32[2,1]{1,0}, s32[2,1]{1,0}) tuple(multiply_2, multiply_1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHostOffloader(module.get()));
EXPECT_TRUE(changed);
HloInstruction* param_1;
HloInstruction* broadcast_0;
HloInstruction* multiply_0;
HloInstruction* param_0;
HloInstruction* copy;
HloInstruction* multiply_1;
HloInstruction* broadcast_1;
HloInstruction* multiply_2;
HloInstruction* tuple;
auto multiplyPattern =
m::Multiply(&multiply_1,
                  m::Multiply(&multiply_0, m::Parameter(&param_1),
                              m::Broadcast(&broadcast_0, m::ConstantScalar(2))),
                  m::Copy(&copy, m::Parameter(&param_0)));
ASSERT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Tuple(
&tuple,
m::Multiply(&multiply_2, multiplyPattern,
m::Broadcast(&broadcast_1, m::ConstantScalar(4))),
multiplyPattern)));
TestShapeHasMemorySpace(param_1->shape(), Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(broadcast_0->shape(), Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(multiply_0->shape(), Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(param_0->shape(), Layout::kHostMemorySpace);
TestShapeHasMemorySpace(copy->shape(), Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(multiply_1->shape(), Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(broadcast_1->shape(), Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(multiply_2->shape(), Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(ShapeUtil::GetSubshape(tuple->shape(), {0}),
Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(ShapeUtil::GetSubshape(tuple->shape(), {1}),
Layout::kDefaultMemorySpace);
EXPECT_FALSE(HaveRemainingOffloadAnnotations(module.get()));
}
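// One leaf of a tuple parameter lives in host memory; the get-tuple-element
// of that leaf stays in host memory and is brought on-device with a copy.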
TEST_F(HostOffloaderTest, TupleParameterStreaming) {
const std::string& hlo_string = R"(
HloModule ParameterStreaming, entry_computation_layout={((s32[2,1]{1,0:T(2,128)}, s32[2,1]{1,0:T(2,128)S(5)}))->s32[2,1]{1,0:T(2,128)}}
ENTRY main {
param_tuple = (s32[2,1], s32[2,1]) parameter(0)
x = get-tuple-element(param_tuple), index=0
y_host = get-tuple-element(param_tuple), index=1
y = s32[2,1] custom-call(y_host), custom_call_target="MoveToDevice"
ROOT crs = s32[2,1] add(x, y)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHostOffloader(module.get()));
EXPECT_TRUE(changed);
HloInstruction* param;
HloInstruction* gte_x;
HloInstruction* gte_y;
HloInstruction* copy;
HloInstruction* add;
  auto parameter_pattern = m::Parameter(&param, 0);
ASSERT_THAT(
module->entry_computation()->root_instruction(),
GmockMatch(m::Add(
          &add, m::GetTupleElement(&gte_x, parameter_pattern),
          m::Copy(&copy, m::GetTupleElement(&gte_y, parameter_pattern)))));
TestShapeHasMemorySpace(param->shape().tuple_shapes(0),
Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(gte_x->shape(), Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(add->shape(), Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(copy->shape(), Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(param->shape().tuple_shapes(1),
Layout::kHostMemorySpace);
TestShapeHasMemorySpace(gte_y->shape(), Layout::kHostMemorySpace);
}
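// "MoveToHost" applied to a parameter that already lives in host memory is a
// no-op; only the final host-to-device copy should remain.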
TEST_F(HostOffloaderTest, ParameterStreamingNoOpToHost) {
const std::string& hlo_string = R"(
HloModule ParameterStreaming, entry_computation_layout={(s32[2,1]{1,0:T(2,128)S(5)})->s32[2,1]{1,0:T(2,128)}}
ENTRY main {
param = s32[2,1]{1,0} parameter(0)
to_host = s32[2,1]{1,0} custom-call(param), custom_call_target="MoveToHost"
ROOT to_device = s32[2,1]{1,0} custom-call(to_host), custom_call_target="MoveToDevice"
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHostOffloader(module.get()));
EXPECT_TRUE(changed);
LOG(INFO) << module->ToString();
HloInstruction* param;
HloInstruction* copy;
ASSERT_THAT(module->entry_computation()->root_instruction(),
              GmockMatch(m::Copy(&copy, m::Parameter(&param, 0))));
TestShapeHasMemorySpace(param->shape(), Layout::kHostMemorySpace);
TestShapeHasMemorySpace(copy->shape(), Layout::kDefaultMemorySpace);
EXPECT_FALSE(HaveRemainingOffloadAnnotations(module.get()));
}
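// Output tuple element 0 is annotated with host memory space (S(5)); the
// "MoveToHost" custom call should become a device-to-host copy feeding the
// output tuple.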
TEST_F(HostOffloaderTest, OutputStreaming) {
const std::string& hlo_string = R"(
HloModule OutputStreaming, entry_computation_layout={(s32[2,1]{1,0:T(2,128)}, s32[2,1]{1,0:T(2,128)})->(s32[2,1]{1,0:T(2,128)S(5)}, s32[2,1]{1,0:T(2,128)})}
ENTRY main {
param_0 = s32[2,1]{1,0} parameter(0)
param_1 = s32[2,1]{1,0} parameter(1)
constant_2 = s32[] constant(2)
constant_4 = s32[] constant(4)
broadcast_0 = s32[2,1]{1,0} broadcast(constant_2), dimensions={}
multiply_0 = s32[2,1]{1,0} multiply(param_1, broadcast_0)
multiply_1 = s32[2,1]{1,0} multiply(multiply_0, param_0)
broadcast_1 = s32[2,1]{1,0} broadcast(constant_4), dimensions={}
multiply_2 = s32[2,1]{1,0} multiply(multiply_1, broadcast_1)
custom_call = s32[2,1]{1,0} custom-call(multiply_2), custom_call_target="MoveToHost"
ROOT tuple = (s32[2,1]{1,0}, s32[2,1]{1,0}) tuple(custom_call, multiply_1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHostOffloader(module.get()));
EXPECT_TRUE(changed);
HloInstruction* param_1;
HloInstruction* broadcast_0;
HloInstruction* multiply_0;
HloInstruction* param_0;
HloInstruction* multiply_1;
HloInstruction* broadcast_1;
HloInstruction* multiply_2;
HloInstruction* copy;
HloInstruction* tuple;
auto multiplyPattern =
m::Multiply(&multiply_1,
                  m::Multiply(&multiply_0, m::Parameter(&param_1),
                              m::Broadcast(&broadcast_0, m::ConstantScalar(2))),
                  m::Parameter(&param_0));
ASSERT_THAT(
module->entry_computation()->root_instruction(),
GmockMatch(m::Tuple(
&tuple,
          m::Copy(&copy, m::Multiply(
&multiply_2, multiplyPattern,
m::Broadcast(&broadcast_1, m::ConstantScalar(4)))),
multiplyPattern)));
TestShapeHasMemorySpace(param_1->shape(), Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(broadcast_0->shape(), Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(multiply_0->shape(), Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(param_0->shape(), Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(multiply_1->shape(), Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(broadcast_1->shape(), Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(multiply_2->shape(), Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(copy->shape(), Layout::kHostMemorySpace);
TestShapeHasMemorySpace(ShapeUtil::GetSubshape(tuple->shape(), {0}),
                          Layout::kHostMemorySpace);
TestShapeHasMemorySpace(ShapeUtil::GetSubshape(tuple->shape(), {1}),
Layout::kDefaultMemorySpace);
EXPECT_FALSE(HaveRemainingOffloadAnnotations(module.get()));
}
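// Same computation as OutputStreaming, but the entry layout does not mark the
// output as host memory, so offloading the result is expected to fail.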
TEST_F(HostOffloaderTest, InvalidOutputStreaming) {
const std::string& hlo_string = R"(
HloModule InvalidOutputStreaming, entry_computation_layout={(s32[2,1]{1,0:T(2,128)}, s32[2,1]{1,0:T(2,128)})->(s32[2,1]{1,0:T(2,128)}, s32[2,1]{1,0:T(2,128)})}
ENTRY main {
param_0 = s32[2,1]{1,0} parameter(0)
param_1 = s32[2,1]{1,0} parameter(1)
constant_2 = s32[] constant(2)
constant_4 = s32[] constant(4)
broadcast_0 = s32[2,1]{1,0} broadcast(constant_2), dimensions={}
multiply_0 = s32[2,1]{1,0} multiply(param_1, broadcast_0)
multiply_1 = s32[2,1]{1,0} multiply(multiply_0, param_0)
broadcast_1 = s32[2,1]{1,0} broadcast(constant_4), dimensions={}
multiply_2 = s32[2,1]{1,0} multiply(multiply_1, broadcast_1)
custom_call = s32[2,1]{1,0} custom-call(multiply_2), custom_call_target="MoveToHost"
ROOT tuple = (s32[2,1]{1,0}, s32[2,1]{1,0}) tuple(custom_call, multiply_1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
absl::StatusOr<bool> result = RunHostOffloader(module.get());
EXPECT_FALSE(result.ok());
}
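// The streamed output is the root of the entry computation rather than a
// tuple element; the root "MoveToHost" should be rewritten into a copy whose
// result lives in host memory.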
TEST_F(HostOffloaderTest, OutputStreamingWithoutTuple) {
const std::string& hlo_string = R"(
HloModule OutputStreaming, entry_computation_layout={(s32[2,1]{1,0:T(2,128)}, s32[2,1]{1,0:T(2,128)})->s32[2,1]{1,0:T(2,128)S(5)}}
ENTRY main {
param_0 = s32[2,1]{1,0} parameter(0)
param_1 = s32[2,1]{1,0} parameter(1)
constant_2 = s32[] constant(2)
constant_4 = s32[] constant(4)
broadcast_0 = s32[2,1]{1,0} broadcast(constant_2), dimensions={}
multiply_0 = s32[2,1]{1,0} multiply(param_1, broadcast_0)
multiply_1 = s32[2,1]{1,0} multiply(multiply_0, param_0)
broadcast_1 = s32[2,1]{1,0} broadcast(constant_4), dimensions={}
multiply_2 = s32[2,1]{1,0} multiply(multiply_1, broadcast_1)
ROOT custom_call = s32[2,1]{1,0} custom-call(multiply_2), custom_call_target="MoveToHost"
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHostOffloader(module.get()));
EXPECT_TRUE(changed);
HloInstruction* param_1;
HloInstruction* broadcast_0;
HloInstruction* multiply_0;
HloInstruction* param_0;
HloInstruction* multiply_1;
HloInstruction* broadcast_1;
HloInstruction* multiply_2;
HloInstruction* copy;
auto multiplyPattern =
m::Multiply(&multiply_1,
                  m::Multiply(&multiply_0, m::Parameter(&param_1),
                              m::Broadcast(&broadcast_0, m::ConstantScalar(2))),
                  m::Parameter(&param_0));
ASSERT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Copy(
                  &copy, m::Multiply(&multiply_2, multiplyPattern,
m::Broadcast(&broadcast_1,
m::ConstantScalar(4))))));
TestShapeHasMemorySpace(param_1->shape(), Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(broadcast_0->shape(), Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(multiply_0->shape(), Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(param_0->shape(), Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(multiply_1->shape(), Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(broadcast_1->shape(), Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(multiply_2->shape(), Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(copy->shape(), Layout::kHostMemorySpace);
EXPECT_FALSE(HaveRemainingOffloadAnnotations(module.get()));
}
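// As in OutputStreamingWithoutTuple, the root of the entry computation is the
// "MoveToHost" custom call itself; it should become a device-to-host copy.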
TEST_F(HostOffloaderTest, OutputStreamingCustomCallRoot) {
const std::string& hlo_string = R"(
HloModule OutputStreaming, entry_computation_layout={(s32[2,1]{1,0:T(2,128)}, s32[2,1]{1,0:T(2,128)})->s32[2,1]{1,0:T(2,128)S(5)}}
ENTRY main {
param_0 = s32[2,1]{1,0} parameter(0)
param_1 = s32[2,1]{1,0} parameter(1)
constant_2 = s32[] constant(2)
constant_4 = s32[] constant(4)
broadcast_0 = s32[2,1]{1,0} broadcast(constant_2), dimensions={}
multiply_0 = s32[2,1]{1,0} multiply(param_1, broadcast_0)
multiply_1 = s32[2,1]{1,0} multiply(multiply_0, param_0)
broadcast_1 = s32[2,1]{1,0} broadcast(constant_4), dimensions={}
multiply_2 = s32[2,1]{1,0} multiply(multiply_1, broadcast_1)
ROOT custom_call = s32[2,1]{1,0} custom-call(multiply_2), custom_call_target="MoveToHost"
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHostOffloader(module.get()));
EXPECT_TRUE(changed);
HloInstruction* param_1;
HloInstruction* broadcast_0;
HloInstruction* multiply_0;
HloInstruction* param_0;
HloInstruction* multiply_1;
HloInstruction* broadcast_1;
HloInstruction* multiply_2;
HloInstruction* copy;
auto multiplyPattern =
m::Multiply(&multiply_1,
                  m::Multiply(&multiply_0, m::Parameter(&param_1),
                              m::Broadcast(&broadcast_0, m::ConstantScalar(2))),
                  m::Parameter(&param_0));
ASSERT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Copy(
                  &copy, m::Multiply(&multiply_2, multiplyPattern,
m::Broadcast(&broadcast_1,
m::ConstantScalar(4))))));
TestShapeHasMemorySpace(param_1->shape(), Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(broadcast_0->shape(), Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(multiply_0->shape(), Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(param_0->shape(), Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(multiply_1->shape(), Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(broadcast_1->shape(), Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(multiply_2->shape(), Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(copy->shape(), Layout::kHostMemorySpace);
EXPECT_FALSE(HaveRemainingOffloadAnnotations(module.get()));
}
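// Output streaming inside a peeled scan loop: "MoveToHost" appears both in
// the loop body and in the entry computation for the peeled iteration. The
// final dynamic-update-slice assembling the streamed output should be placed
// in host memory.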
TEST_F(HostOffloaderTest, OutputStreamingInUnrolledScanLoop) {
const std::string& hlo_string = R"(
HloModule m,
entry_computation_layout={(s32[16,16,8]{1,2,0:T(8,128)})->s32[16,16,8]{1,2,0:T(8,128)S(5)}},
allow_spmd_sharding_propagation_to_output={true}, num_partitions=2
body {
loop_peel_param = (s32[]{:T(256)}, s32[16,16,8]{1,2,0:T(8,128)}, s32[16,16,8]{1,2,0:T(8,128)}, s32[]{:T(256)}, s32[16,8]{0,1:T(8,128)}) parameter(0)
get-tuple-element.12 = s32[]{:T(256)} get-tuple-element(loop_peel_param), index=0
constant.29 = s32[]{:T(256)} constant(1)
add.5 = s32[]{:T(256)} add(get-tuple-element.12, constant.29)
get-tuple-element.13 = s32[16,16,8]{1,2,0:T(8,128)} get-tuple-element(loop_peel_param), index=1
get-tuple-element.18 = s32[16,8]{0,1:T(8,128)} get-tuple-element(loop_peel_param), index=4
custom-call.3 = s32[16,8]{0,1:T(8,128)} custom-call(get-tuple-element.18), custom_call_target="MoveToHost"
bitcast = s32[1,16,8]{1,2,0:T(8,128)} bitcast(custom-call.3)
get-tuple-element.15 = s32[]{:T(256)} get-tuple-element(loop_peel_param), index=3
constant.30 = s32[]{:T(256)} constant(0)
dynamic-update-slice.2 = s32[16,16,8]{1,2,0:T(8,128)} dynamic-update-slice(get-tuple-element.13, bitcast, get-tuple-element.15, constant.30, constant.30), backend_config={"flag_configs":[],"scoped_memory_configs":[],"indices_config":{"index_known_bits":[{"zeroes":"0","ones":"0","bitwidth":"32"},{"zeroes":"4294967295","ones":"0","bitwidth":"32"},{"zeroes":"4294967295","ones":"0","bitwidth":"32"}]},"compute_type":"COMPUTE_TYPE_DEFAULT","device_type":"DEVICE_TYPE_INVALID","used_scoped_memory_configs":[]}
get-tuple-element.14 = s32[16,16,8]{1,2,0:T(8,128)} get-tuple-element(loop_peel_param), index=2
dynamic-slice.2 = s32[1,16,8]{1,2,0:T(8,128)} dynamic-slice(get-tuple-element.14, get-tuple-element.12, constant.30, constant.30), dynamic_slice_sizes={1,16,8}
broadcast.8 = s32[1,16,8]{1,2,0:T(8,128)} broadcast(constant.29), dimensions={}
add.6 = s32[1,16,8]{1,2,0:T(8,128)} add(dynamic-slice.2, broadcast.8)
bitcast.1 = s32[16,8]{0,1:T(8,128)} bitcast(add.6)
ROOT tuple.3 = (s32[]{:T(256)}, s32[16,16,8]{1,2,0:T(8,128)}, s32[16,16,8]{1,2,0:T(8,128)}, s32[]{:T(256)}, s32[16,8]{0,1:T(8,128)}) tuple(add.5, dynamic-update-slice.2, get-tuple-element.14, get-tuple-element.12, bitcast.1)
}
condition {
loop_peel_cond_param = (s32[]{:T(256)}, s32[16,16,8]{1,2,0:T(8,128)}, s32[16,16,8]{1,2,0:T(8,128)}, s32[]{:T(256)}, s32[16,8]{0,1:T(8,128)}) parameter(0)
get-tuple-element.11 = s32[]{:T(256)} get-tuple-element(loop_peel_cond_param), index=0
constant.28 = s32[]{:T(256)} constant(16)
ROOT compare.1 = pred[]{:T(1024)} compare(get-tuple-element.11, constant.28), direction=LT
}
ENTRY entry {
constant.26 = s32[]{:T(256)} constant(1)
constant.24 = s32[]{:T(256)} constant(0)
broadcast.6 = s32[16,16,8]{1,2,0:T(8,128)} broadcast(constant.24), dimensions={}
param.2 = s32[16,16,8]{1,2,0:T(8,128)} parameter(0), sharding={devices=[1,1,2]<=[2]}
slice = s32[1,16,8]{1,2,0:T(8,128)} slice(param.2), slice={[0:1], [0:16], [0:8]}
broadcast.7 = s32[1,16,8]{1,2,0:T(8,128)} broadcast(constant.26), dimensions={}
add.4 = s32[1,16,8]{1,2,0:T(8,128)} add(slice, broadcast.7)
bitcast.2 = s32[16,8]{0,1:T(8,128)} bitcast(add.4)
tuple.4 = (s32[]{:T(256)}, s32[16,16,8]{1,2,0:T(8,128)}, s32[16,16,8]{1,2,0:T(8,128)}, s32[]{:T(256)}, s32[16,8]{0,1:T(8,128)}) tuple(constant.26, broadcast.6, param.2, constant.24, bitcast.2)
while.1 = (s32[]{:T(256)}, s32[16,16,8]{1,2,0:T(8,128)}, s32[16,16,8]{1,2,0:T(8,128)}, s32[]{:T(256)}, s32[16,8]{0,1:T(8,128)}) while(tuple.4), condition=condition, body=body
get-tuple-element.17 = s32[16,16,8]{1,2,0:T(8,128)} get-tuple-element(while.1), index=1
get-tuple-element.19 = s32[16,8]{0,1:T(8,128)} get-tuple-element(while.1), index=4
custom-call.4 = s32[16,8]{0,1:T(8,128)} custom-call(get-tuple-element.19), custom_call_target="MoveToHost"
bitcast.3 = s32[1,16,8]{1,2,0:T(8,128)} bitcast(custom-call.4)
get-tuple-element.16 = s32[]{:T(256)} get-tuple-element(while.1), index=3
ROOT dynamic-update-slice.3 = s32[16,16,8]{1,2,0:T(8,128)} dynamic-update-slice(get-tuple-element.17, bitcast.3, get-tuple-element.16, constant.24, constant.24), backend_config={"flag_configs":[],"scoped_memory_configs":[],"indices_config":{"index_known_bits":[{"zeroes":"0","ones":"0","bitwidth":"32"},{"zeroes":"4294967295","ones":"0","bitwidth":"32"},{"zeroes":"4294967295","ones":"0","bitwidth":"32"}]},"compute_type":"COMPUTE_TYPE_DEFAULT","device_type":"DEVICE_TYPE_INVALID","used_scoped_memory_configs":[]}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHostOffloader(module.get()));
EXPECT_TRUE(changed);
HloInstruction* bitcast;
HloInstruction* gte_0;
HloInstruction* gte_1;
HloInstruction* dus;
ASSERT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::DynamicUpdateSlice(
                  &dus, m::GetTupleElement(&gte_0), m::Bitcast(&bitcast),
                  m::GetTupleElement(&gte_1), m::ConstantScalar(0),
m::ConstantScalar(0))));
TestShapeHasMemorySpace(bitcast->shape(), Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(gte_0->shape(), Layout::kHostMemorySpace);
TestShapeHasMemorySpace(gte_1->shape(), Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(dus->shape(), Layout::kHostMemorySpace);
EXPECT_FALSE(HaveRemainingOffloadAnnotations(module.get()));
}
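// "MoveToDevice" applied to a parameter that already lives in device memory
// is a no-op; only the final device-to-host copy for the streamed output
// should remain.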
TEST_F(HostOffloaderTest, OutputStreamingNoOpToDevice) {
const std::string& hlo_string = R"(
HloModule OutputStreaming, entry_computation_layout={(s32[2,1]{1,0:T(2,128)})->s32[2,1]{1,0:T(2,128)S(5)}}
ENTRY main {
param = s32[2,1]{1,0} parameter(0)
to_device = s32[2,1]{1,0} custom-call(param), custom_call_target="MoveToDevice"
ROOT to_host = s32[2,1]{1,0} custom-call(to_device), custom_call_target="MoveToHost"
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHostOffloader(module.get()));
EXPECT_TRUE(changed);
LOG(INFO) << module->ToString();
HloInstruction* param;
HloInstruction* copy;
ASSERT_THAT(module->entry_computation()->root_instruction(),
              GmockMatch(m::Copy(&copy, m::Parameter(&param, 0))));
TestShapeHasMemorySpace(param->shape(), Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(copy->shape(), Layout::kHostMemorySpace);
EXPECT_FALSE(HaveRemainingOffloadAnnotations(module.get()));
}
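// A host-memory parameter is returned directly as the host-memory output; no
// copies are needed and the parameter keeps its host memory space.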
TEST_F(HostOffloaderTest, ParameterAndOutputStreamingPassThrough) {
const std::string& hlo_string = R"(
HloModule OutputStreaming, entry_computation_layout={(s32[2,1]{1,0:T(2,128)S(5)})->s32[2,1]{1,0:T(2,128)S(5)}}
ENTRY main {
ROOT param = s32[2,1]{1,0} parameter(0)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHostOffloader(module.get()));
EXPECT_TRUE(changed);
HloInstruction* param;
ASSERT_THAT(module->entry_computation()->root_instruction(),
              GmockMatch(m::Parameter(&param, 0)));
TestShapeHasMemorySpace(param->shape(), Layout::kHostMemorySpace);
}
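// Same pass-through as above, routed through a tuple/get-tuple-element pair;
// every shape along the path should be in host memory.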
TEST_F(HostOffloaderTest, ParameterAndOutputStreamingPassThroughTuple) {
const std::string& hlo_string = R"(
HloModule OutputStreaming, entry_computation_layout={(s32[2,1]{1,0:T(2,128)S(5)})->s32[2,1]{1,0:T(2,128)S(5)}}
ENTRY main {
param = s32[2,1]{1,0} parameter(0)
tuple = (s32[2,1]{1,0}) tuple(param)
ROOT gte = s32[2,1]{1,0} get-tuple-element(tuple), index=0
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHostOffloader(module.get()));
EXPECT_TRUE(changed);
HloInstruction* param;
HloInstruction* tuple;
HloInstruction* gte;
ASSERT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::GetTupleElement(
                  &gte, m::Tuple(&tuple, m::Parameter(&param, 0)))));
TestShapeHasMemorySpace(param->shape(), Layout::kHostMemorySpace);
TestShapeHasMemorySpace(ShapeUtil::GetSubshape(tuple->shape(), {0}),
Layout::kHostMemorySpace);
TestShapeHasMemorySpace(gte->shape(), Layout::kHostMemorySpace);
}
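// A "MoveToDevice" with no matching "MoveToHost" producer is simply removed;
// the parameter stays in device memory.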
TEST_F(HostOffloaderTest, LoneMoveToDevice) {
const std::string& hlo_string = R"(
HloModule jit_f, entry_computation_layout={(f32[16,256]{0,1})->f32[16,256]{1,0}}
ENTRY main {
param_0 = f32[16,256]{0,1} parameter(0)
ROOT custom_call_2 = f32[16,256]{0,1} custom-call(param_0), custom_call_target="MoveToDevice"
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHostOffloader(module.get()));
EXPECT_TRUE(changed);
HloInstruction* param;
ASSERT_THAT(module->entry_computation()->root_instruction(),
              GmockMatch(m::Parameter(&param, 0)));
TestShapeHasMemorySpace(param->shape(), Layout::kDefaultMemorySpace);
EXPECT_FALSE(HaveRemainingOffloadAnnotations(module.get()));
}
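// Two chained "MoveToHost" custom calls collapse into a single device-to-host
// copy, followed by one copy back to device.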
TEST_F(HostOffloaderTest, RepeatedMoveToHost) {
const std::string& hlo_string = R"(
HloModule jit_f, entry_computation_layout={(f32[16,256]{0,1})->f32[16,256]{1,0}}
ENTRY main {
param_0 = f32[16,256]{0,1} parameter(0)
custom_call_0 = f32[16,256]{0,1} custom-call(param_0), custom_call_target="MoveToHost"
custom_call_1 = f32[16,256]{0,1} custom-call(custom_call_0), custom_call_target="MoveToHost"
ROOT custom_call_2 = f32[16,256]{0,1} custom-call(custom_call_1), custom_call_target="MoveToDevice"
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHostOffloader(module.get()));
EXPECT_TRUE(changed);
HloInstruction* param;
HloInstruction* copy_to_host;
HloInstruction* copy_to_device;
ASSERT_THAT(
module->entry_computation()->root_instruction(),
GmockMatch(m::Copy(&copy_to_device,
m::Copy(&copy_to_host, m::Parameter(&param, 0)))));
TestShapeHasMemorySpace(param->shape(), Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(copy_to_host->shape(), Layout::kHostMemorySpace);
TestShapeHasMemorySpace(copy_to_device->shape(), Layout::kDefaultMemorySpace);
EXPECT_FALSE(HaveRemainingOffloadAnnotations(module.get()));
}
TEST_F(HostOffloaderTest, RepeatedMoveToDevice) {
const std::string& hlo_string = R"(
HloModule jit_f, entry_computation_layout={(f32[16,256]{0,1})->f32[16,256]{1,0}}
ENTRY main {
param_0 = f32[16,256]{0,1} parameter(0)
custom_call_0 = f32[16,256]{0,1} custom-call(param_0), custom_call_target="MoveToHost"
custom_call_1 = f32[16,256]{0,1} custom-call(custom_call_0), custom_call_target="MoveToDevice"
ROOT custom_call_2 = f32[16,256]{0,1} custom-call(custom_call_1), custom_call_target="MoveToDevice"
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHostOffloader(module.get()));
EXPECT_TRUE(changed);
HloInstruction* param;
HloInstruction* copy_to_host;
HloInstruction* copy_to_device;
ASSERT_THAT(
module->entry_computation()->root_instruction(),
GmockMatch(m::Copy(&copy_to_device,
m::Copy(&copy_to_host, m::Parameter(&param, 0)))));
TestShapeHasMemorySpace(param->shape(), Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(copy_to_host->shape(), Layout::kHostMemorySpace);
TestShapeHasMemorySpace(copy_to_device->shape(), Layout::kDefaultMemorySpace);
EXPECT_FALSE(HaveRemainingOffloadAnnotations(module.get()));
}
TEST_F(HostOffloaderTest, RepeatedMoveToHostNonSequential) {
const std::string& hlo_string = R"(
HloModule jit_f, entry_computation_layout={(f32[16,256]{0,1})->f32[16,256]{1,0}}
ENTRY main {
param_0 = f32[16,256]{0,1} parameter(0)
custom_call_0 = f32[16,256]{0,1} custom-call(param_0), custom_call_target="MoveToHost"
custom_call_1 = f32[16,256]{0,1} custom-call(param_0), custom_call_target="MoveToHost"
ROOT custom_call_2 = f32[16,256]{0,1} custom-call(custom_call_0), custom_call_target="MoveToDevice"
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHostOffloader(module.get()));
EXPECT_TRUE(changed);
HloInstruction* param;
HloInstruction* copy_to_host;
HloInstruction* copy_to_device;
ASSERT_THAT(
module->entry_computation()->root_instruction(),
GmockMatch(m::Copy(&copy_to_device,
m::Copy(&copy_to_host, m::Parameter(&param, 0)))));
TestShapeHasMemorySpace(param->shape(), Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(copy_to_host->shape(), Layout::kHostMemorySpace);
TestShapeHasMemorySpace(copy_to_device->shape(), Layout::kDefaultMemorySpace);
EXPECT_FALSE(HaveRemainingOffloadAnnotations(module.get()));
}
TEST_F(HostOffloaderTest, RepeatedMoveToDeviceNonSequential) {
const std::string& hlo_string = R"(
HloModule jit_f, entry_computation_layout={(f32[16,256]{0,1})->f32[16,256]{1,0}}
ENTRY main {
param_0 = f32[16,256]{0,1} parameter(0)
custom_call_0 = f32[16,256]{0,1} custom-call(param_0), custom_call_target="MoveToHost"
custom_call_1 = f32[16,256]{0,1} custom-call(custom_call_0), custom_call_target="MoveToDevice"
ROOT custom_call_2 = f32[16,256]{0,1} custom-call(custom_call_0), custom_call_target="MoveToDevice"
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHostOffloader(module.get()));
EXPECT_TRUE(changed);
HloInstruction* param;
HloInstruction* copy_to_host;
HloInstruction* copy_to_device;
ASSERT_THAT(
module->entry_computation()->root_instruction(),
GmockMatch(m::Copy(&copy_to_device,
m::Copy(&copy_to_host, m::Parameter(&param, 0)))));
TestShapeHasMemorySpace(param->shape(), Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(copy_to_host->shape(), Layout::kHostMemorySpace);
TestShapeHasMemorySpace(copy_to_device->shape(), Layout::kDefaultMemorySpace);
EXPECT_FALSE(HaveRemainingOffloadAnnotations(module.get()));
}
TEST_F(HostOffloaderTest, BasicAsyncHostOffloadedCall_RemoveRedundantCopies) {
const std::string& hlo_string = R"(
HloModule m, entry_computation_layout={(f32[4096]{0:S(5)})->(f32[4096]{0:S(5)}, f32[4096]{0:S(5)})}
%async_computation {
%param_0 = f32[4096] parameter(0)
ROOT %offloaded-custom-call = (f32[4096], f32[4096]) custom-call(%param_0), custom_call_target="HostExecute"
}, execution_thread="host"
ENTRY %main {
%a = f32[4096] parameter(0)
%async-start = ((f32[4096]), (f32[4096], f32[4096]), u32[]) async-start(%a), async_execution_thread="host", calls=%async_computation
%async-done = (f32[4096], f32[4096]) custom-call-done(%async-start)
%gte_0 = f32[4096] get-tuple-element(%async-done), index=0
%gte_1 = f32[4096] get-tuple-element(%async-done), index=1
%gte_0_host = f32[4096] custom-call(%gte_0), custom_call_target="MoveToHost"
%gte_1_host = f32[4096] custom-call(%gte_1), custom_call_target="MoveToHost"
ROOT %tuple = (f32[4096], f32[4096]) tuple(%gte_0_host, %gte_1_host)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHostOffloader(module.get()));
EXPECT_TRUE(changed);
HloInstruction* async_start = FindInstruction(module.get(), "async-start");
ASSERT_NE(async_start, nullptr);
HloInstruction* async_done = FindInstruction(module.get(), "async-done");
ASSERT_NE(async_done, nullptr);
HloInstruction* gte_0 = FindInstruction(module.get(), "gte_0");
ASSERT_NE(gte_0, nullptr);
TestShapeHasMemorySpace(gte_0->shape(), Layout::kHostMemorySpace);
HloInstruction* gte_1 = FindInstruction(module.get(), "gte_1");
ASSERT_NE(gte_1, nullptr);
TestShapeHasMemorySpace(gte_1->shape(), Layout::kHostMemorySpace);
HloInstruction* gte_0_host = FindInstruction(module.get(), "gte_0_host");
ASSERT_EQ(gte_0_host, nullptr);
HloInstruction* gte_1_host = FindInstruction(module.get(), "gte_1_host");
ASSERT_EQ(gte_1_host, nullptr);
HloInstruction* tuple = FindInstruction(module.get(), "tuple");
ASSERT_NE(tuple, nullptr);
std::vector<HloInstruction*> expected = {gte_0, gte_1};
EXPECT_THAT(tuple->operands(),
::testing::UnorderedElementsAreArray(expected));
}
TEST_F(HostOffloaderTest,
BasicAsyncHostOffloadedCall_NoChangesWhenEntryLayoutExpectsHBM) {
const std::string& hlo_string = R"(
HloModule m, entry_computation_layout={(f32[4096]{0:S(5)})->(f32[4096]{0:S(0)}, f32[4096]{0:S(0)})}
%async_computation {
%param_0 = f32[4096] parameter(0)
ROOT %offloaded-custom-call = (f32[4096], f32[4096]) custom-call(%param_0), custom_call_target="HostExecute"
}, execution_thread="host"
ENTRY %main {
%a = f32[4096] parameter(0)
%async-start = ((f32[4096]), (f32[4096], f32[4096]), u32[]) async-start(%a), async_execution_thread="host", calls=%async_computation
%async-done = (f32[4096], f32[4096]) custom-call-done(%async-start)
%gte_0 = f32[4096] get-tuple-element(%async-done), index=0
%gte_1 = f32[4096] get-tuple-element(%async-done), index=1
ROOT %tuple = (f32[4096], f32[4096]) tuple(%gte_0, %gte_1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK(RunHostOffloader(module.get()));
HloInstruction* async_start = FindInstruction(module.get(), "async-start");
ASSERT_NE(async_start, nullptr);
HloInstruction* async_done = FindInstruction(module.get(), "async-done");
ASSERT_NE(async_done, nullptr);
HloInstruction* gte_0 = FindInstruction(module.get(), "gte_0");
ASSERT_NE(gte_0, nullptr);
TestShapeHasMemorySpace(gte_0->shape(), Layout::kDefaultMemorySpace);
HloInstruction* gte_1 = FindInstruction(module.get(), "gte_1");
ASSERT_NE(gte_1, nullptr);
TestShapeHasMemorySpace(gte_1->shape(), Layout::kDefaultMemorySpace);
}
TEST_F(HostOffloaderTest,
BasicAsyncHostOffloadedCall_RemoveOnlyRedundantCopies) {
const std::string& hlo_string = R"(
HloModule m, entry_computation_layout={(f32[4096]{0:S(5)})->(f32[4096]{0:S(5)}, f32[4096]{0:S(5)})}
%add {
%lhs = f32[] parameter(0)
%rhs = f32[] parameter(1)
ROOT %add_res = f32[] add(%lhs, %rhs)
}
%async_computation {
%param_0 = f32[4096] parameter(0)
ROOT %offloaded-custom-call = (f32[4096], f32[4096]) custom-call(%param_0), custom_call_target="HostExecute"
}, execution_thread="host"
ENTRY %main {
%a = f32[4096] parameter(0)
%async-start = ((f32[4096]), (f32[4096], f32[4096]), u32[]) async-start(%a), async_execution_thread="host", calls=%async_computation
%async-done = (f32[4096], f32[4096]) custom-call-done(%async-start)
%gte_0 = f32[4096] get-tuple-element(%async-done), index=0
%gte_1 = f32[4096] get-tuple-element(%async-done), index=1
%sum = f32[4096] add(%gte_0, %gte_0)
%gte_0_host = f32[4096] custom-call(%gte_0), custom_call_target="MoveToHost"
%gte_1_host = f32[4096] custom-call(%gte_1), custom_call_target="MoveToHost"
ROOT %tuple = (f32[4096]{0:S(5)}, f32[4096]) tuple(%gte_0_host, %gte_1_host)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHostOffloader(module.get()));
EXPECT_TRUE(changed);
HloInstruction* async_start = FindInstruction(module.get(), "async-start");
ASSERT_NE(async_start, nullptr);
HloInstruction* async_done = FindInstruction(module.get(), "async-done");
ASSERT_NE(async_done, nullptr);
HloInstruction* gte_0 = FindInstruction(module.get(), "gte_0");
ASSERT_NE(gte_0, nullptr);
TestShapeHasMemorySpace(gte_0->shape(), Layout::kDefaultMemorySpace);
HloInstruction* gte_1 = FindInstruction(module.get(), "gte_1");
ASSERT_NE(gte_1, nullptr);
TestShapeHasMemorySpace(gte_1->shape(), Layout::kHostMemorySpace);
HloInstruction* gte_0_host = FindInstruction(module.get(), "gte_0_host");
ASSERT_EQ(gte_0_host, nullptr);
HloInstruction* copy = FindInstruction(module.get(), "copy");
ASSERT_NE(copy, nullptr);
EXPECT_EQ(copy->operands()[0], gte_0);
HloInstruction* gte_1_host = FindInstruction(module.get(), "gte_1_host");
ASSERT_EQ(gte_1_host, nullptr);
}
TEST_F(HostOffloaderTest,
AsyncHostOffloadedCall_nonEntryPoint_RemoveRedundantCopies) {
const std::string& hlo_string = R"(
HloModule m, entry_computation_layout={(f32[4096]{0:S(5)})->(f32[4096]{0:S(5)}, f32[4096]{0:S(5)})}
%async_computation {
%param_0 = f32[4096] parameter(0)
ROOT %offloaded-custom-call = (f32[4096], f32[4096]) custom-call(%param_0), custom_call_target="HostExecute"
}, execution_thread="host"
%non_async_computation {
%param_0 = f32[4096] parameter(0)
%async-start = ((f32[4096]), (f32[4096], f32[4096]), u32[]) async-start(%param_0), async_execution_thread="host", calls=%async_computation
%async-done = (f32[4096], f32[4096]) custom-call-done(%async-start)
%gte_0 = f32[4096] get-tuple-element(%async-done), index=0
%gte_1 = f32[4096] get-tuple-element(%async-done), index=1
%gte_0_host = f32[4096] custom-call(%gte_0), custom_call_target="MoveToHost"
%gte_1_host = f32[4096] custom-call(%gte_1), custom_call_target="MoveToHost"
ROOT %tuple_non_async = (f32[4096]{0:S(5)}, f32[4096]) tuple(%gte_0_host, %gte_1_host)
}
ENTRY %main {
%a = f32[4096] parameter(0)
%call = (f32[4096], f32[4096]) call(%a), to_apply=%non_async_computation
%call_0 = f32[4096] get-tuple-element(%call), index=0
%call_1 = f32[4096] get-tuple-element(%call), index=1
ROOT %tuple = (f32[4096], f32[4096]) tuple(%call_0, %call_1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHostOffloader(module.get()));
EXPECT_TRUE(changed);
HloInstruction* async_start = FindInstruction(module.get(), "async-start");
ASSERT_NE(async_start, nullptr);
HloInstruction* async_done = FindInstruction(module.get(), "async-done");
ASSERT_NE(async_done, nullptr);
HloInstruction* gte_0 = FindInstruction(module.get(), "gte_0");
ASSERT_NE(gte_0, nullptr);
TestShapeHasMemorySpace(gte_0->shape(), Layout::kHostMemorySpace);
HloInstruction* gte_1 = FindInstruction(module.get(), "gte_1");
ASSERT_NE(gte_1, nullptr);
TestShapeHasMemorySpace(gte_1->shape(), Layout::kHostMemorySpace);
HloInstruction* gte_0_host = FindInstruction(module.get(), "gte_0_host");
ASSERT_EQ(gte_0_host, nullptr);
HloInstruction* gte_1_host = FindInstruction(module.get(), "gte_1_host");
ASSERT_EQ(gte_1_host, nullptr);
HloInstruction* tuple_non_async =
FindInstruction(module.get(), "tuple_non_async");
ASSERT_NE(tuple_non_async, nullptr);
std::vector<HloInstruction*> expected = {gte_0, gte_1};
EXPECT_THAT(tuple_non_async->operands(),
::testing::UnorderedElementsAreArray(expected));
HloInstruction* tuple = FindInstruction(module.get(), "tuple");
ASSERT_NE(tuple, nullptr);
TestShapeHasMemorySpace(tuple->shape().tuple_shapes(0),
Layout::kHostMemorySpace);
TestShapeHasMemorySpace(tuple->shape().tuple_shapes(1),
Layout::kHostMemorySpace);
}
TEST_F(HostOffloaderTest,
AsyncHostOffloadedCall_passedToCall_RemoveRedundantCopies) {
const std::string& hlo_string = R"(
HloModule m, entry_computation_layout={(f32[4096]{0:S(5)})->(f32[4096]{0:S(5)}, f32[4096]{0:S(5)})}
%async_computation {
%param_0 = f32[4096] parameter(0)
ROOT %offloaded-custom-call = (f32[4096], f32[4096]) custom-call(%param_0), custom_call_target="HostExecute"
}, execution_thread="host"
%non_async_computation {
%param_0_non_async = f32[4096] parameter(0)
%param_1_non_async = f32[4096] parameter(1)
ROOT %tuple_non_async = (f32[4096], f32[4096]) tuple(%param_0_non_async, %param_1_non_async)
}
ENTRY %main {
%a = f32[4096] parameter(0)
%async-start = ((f32[4096]), (f32[4096], f32[4096]), u32[]) async-start(%a), async_execution_thread="host", calls=%async_computation
%async-done = (f32[4096], f32[4096]) custom-call-done(%async-start)
%gte_0 = f32[4096] get-tuple-element(%async-done), index=0
%gte_1 = f32[4096] get-tuple-element(%async-done), index=1
%call = (f32[4096], f32[4096]) call(%gte_0, %gte_1), to_apply=%non_async_computation
%call_0 = f32[4096] get-tuple-element(%call), index=0
%call_1 = f32[4096] get-tuple-element(%call), index=1
%call_0_host = f32[4096] custom-call(%call_0), custom_call_target="MoveToHost"
%call_1_host = f32[4096] custom-call(%call_1), custom_call_target="MoveToHost"
ROOT %tuple = (f32[4096], f32[4096]) tuple(%call_0_host, %call_1_host)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHostOffloader(module.get()));
EXPECT_TRUE(changed);
HloInstruction* async_start = FindInstruction(module.get(), "async-start");
ASSERT_NE(async_start, nullptr);
HloInstruction* async_done = FindInstruction(module.get(), "async-done");
ASSERT_NE(async_done, nullptr);
HloInstruction* gte_0 = FindInstruction(module.get(), "gte_0");
ASSERT_NE(gte_0, nullptr);
TestShapeHasMemorySpace(gte_0->shape(), Layout::kHostMemorySpace);
HloInstruction* gte_1 = FindInstruction(module.get(), "gte_1");
ASSERT_NE(gte_1, nullptr);
TestShapeHasMemorySpace(gte_1->shape(), Layout::kHostMemorySpace);
HloInstruction* call_0 = FindInstruction(module.get(), "call_0");
ASSERT_NE(call_0, nullptr);
HloInstruction* call_1 = FindInstruction(module.get(), "call_1");
ASSERT_NE(call_1, nullptr);
HloInstruction* call_0_host = FindInstruction(module.get(), "call_0_host");
ASSERT_EQ(call_0_host, nullptr);
HloInstruction* call_1_host = FindInstruction(module.get(), "call_1_host");
ASSERT_EQ(call_1_host, nullptr);
HloInstruction* param_0_non_async =
FindInstruction(module.get(), "param_0_non_async");
ASSERT_NE(param_0_non_async, nullptr);
TestShapeHasMemorySpace(param_0_non_async->shape(), Layout::kHostMemorySpace);
HloInstruction* param_1_non_async =
FindInstruction(module.get(), "param_1_non_async");
ASSERT_NE(param_1_non_async, nullptr);
TestShapeHasMemorySpace(param_1_non_async->shape(), Layout::kHostMemorySpace);
HloInstruction* tuple_non_async =
FindInstruction(module.get(), "tuple_non_async");
ASSERT_NE(tuple_non_async, nullptr);
std::vector<HloInstruction*> expected_operands = {param_0_non_async,
param_1_non_async};
EXPECT_THAT(tuple_non_async->operands(),
::testing::UnorderedElementsAreArray(expected_operands));
HloInstruction* tuple = FindInstruction(module.get(), "tuple");
ASSERT_NE(tuple, nullptr);
TestShapeHasMemorySpace(tuple->shape().tuple_shapes(0),
Layout::kHostMemorySpace);
TestShapeHasMemorySpace(tuple->shape().tuple_shapes(1),
Layout::kHostMemorySpace);
std::vector<HloInstruction*> expected = {call_0, call_1};
EXPECT_THAT(tuple->operands(),
::testing::UnorderedElementsAreArray(expected));
}
TEST_F(HostOffloaderTest,
AsyncHostOffloadedCall_passedToAsyncHostOffloadedCall_NoCopiesRemoved) {
const std::string& hlo_string = R"(
HloModule m, entry_computation_layout={(f32[4096]{0:S(5)})->(f32[4096]{0:S(5)}, f32[4096]{0:S(5)}, f32[4096]{0:S(0)}, f32[4096]{0:S(0)})}
%async_computation {
%param_0 = f32[4096] parameter(0)
ROOT %offloaded-custom-call = (f32[4096], f32[4096]) custom-call(%param_0), custom_call_target="HostExecute"
}, execution_thread="host"
%extra_async_computation {
%param_0_extra_async = f32[4096] parameter(0)
%param_1_extra_async = f32[4096] parameter(1)
ROOT %offloaded-extra-custom-call = (f32[4096], f32[4096]) custom-call(%param_0_extra_async, %param_1_extra_async), custom_call_target="HostExecute"
}, execution_thread="host"
ENTRY %main {
%a = f32[4096] parameter(0)
%async-start = ((f32[4096]), (f32[4096], f32[4096]), u32[]) async-start(%a), async_execution_thread="host", calls=%async_computation
%async-done = (f32[4096], f32[4096]) custom-call-done(%async-start)
%gte_0 = f32[4096] get-tuple-element(%async-done), index=0
%gte_1 = f32[4096] get-tuple-element(%async-done), index=1
%extra-async-start = ((f32[4096], f32[4096]), (f32[4096], f32[4096]), u32[]) async-start(%gte_0, %gte_1), async_execution_thread="host", calls=%extra_async_computation
%extra-async-done = (f32[4096], f32[4096]) custom-call-done(%extra-async-start)
%call_0 = f32[4096] get-tuple-element(%extra-async-done), index=0
%call_1 = f32[4096] get-tuple-element(%extra-async-done), index=1
%gte_0_host = f32[4096] custom-call(%gte_0), custom_call_target="MoveToHost"
%gte_1_host = f32[4096] custom-call(%gte_1), custom_call_target="MoveToHost"
ROOT %tuple = (f32[4096], f32[4096], f32[4096], f32[4096]) tuple(%gte_0_host, %gte_1_host, %call_0, %call_1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHostOffloader(module.get()));
EXPECT_TRUE(changed);
HloInstruction* async_start = FindInstruction(module.get(), "async-start");
ASSERT_NE(async_start, nullptr);
HloInstruction* async_done = FindInstruction(module.get(), "async-done");
ASSERT_NE(async_done, nullptr);
HloInstruction* gte_0 = FindInstruction(module.get(), "gte_0");
ASSERT_NE(gte_0, nullptr);
TestShapeHasMemorySpace(gte_0->shape(), Layout::kDefaultMemorySpace);
HloInstruction* gte_1 = FindInstruction(module.get(), "gte_1");
ASSERT_NE(gte_1, nullptr);
TestShapeHasMemorySpace(gte_1->shape(), Layout::kDefaultMemorySpace);
}
TEST_F(HostOffloaderTest, OffloadPassedToEntryComputationRoot) {
const std::string& hlo_string = R"(
HloModule m, entry_computation_layout={()->(s32[]{:T(128)})}
ENTRY %main {
c = s32[] constant(1)
custom-call.331 = s32[]{:T(128)} custom-call(c), custom_call_target="MoveToHost"
custom-call.332 = s32[]{:T(128)} custom-call(custom-call.331), custom_call_target="MoveToDevice"
ROOT tuple = (s32[]{:T(128)}) tuple(custom-call.332)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHostOffloader(module.get()));
EXPECT_TRUE(changed);
VLOG(1) << "module after: " << module->ToString();
}
TEST_F(HostOffloaderTest, MoveToHostInsideWhileLoopBodyShareSameBroadcast) {
const absl::string_view hlo_string = R"(
HloModule MoveToHostFoundOutsideAndInsideOfWhileLoop, entry_computation_layout={(s32[],f32[1,1,128,128],f32[1,1,128,128])->(f32[8,1,128,128]{3,2,1,0:T(8,128)S(5)}, f32[8,1,128,128]{3,2,1,0:T(8,128)S(5)}, f32[1,1,128,128], f32[1,1,128,128], s32[], s32[])}
while_condition {
condition_param = (f32[8,1,128,128], f32[8,1,128,128], f32[1,1,128,128], f32[1,1,128,128], s32[], s32[]) parameter(0)
condition_current_iteration_index = s32[] get-tuple-element(condition_param), index=5
condition_iteration_count = s32[] constant(16)
ROOT condition_result = pred[] compare(condition_current_iteration_index, condition_iteration_count), direction=LT
}
while_body {
while_body_input_tuple = (f32[8,1,128,128], f32[8,1,128,128], f32[1,1,128,128], f32[1,1,128,128], s32[], s32[]) parameter(0)
host_tensor_1 = f32[8,1,128,128] get-tuple-element(while_body_input_tuple), index=0
host_tensor_2 = f32[8,1,128,128] get-tuple-element(while_body_input_tuple), index=1
update_1 = f32[1,1,128,128] get-tuple-element(while_body_input_tuple), index=2
update_2 = f32[1,1,128,128] get-tuple-element(while_body_input_tuple), index=3
offset_dus = s32[] get-tuple-element(while_body_input_tuple), index=4
while_body_num_iter = s32[] get-tuple-element(while_body_input_tuple), index=5
mth_tensor_1 = f32[8,1,128,128] custom-call(host_tensor_1), custom_call_target="MoveToHost"
mth_tensor_2 = f32[8,1,128,128] custom-call(host_tensor_2), custom_call_target="MoveToHost"
constant_zero = s32[] constant(0)
host_dus_1 = f32[8,1,128,128]{3,2,1,0:T(8,128)} dynamic-update-slice(mth_tensor_1, update_1, offset_dus, constant_zero, constant_zero, constant_zero)
host_dus_2 = f32[8,1,128,128]{3,2,1,0:T(8,128)} dynamic-update-slice(mth_tensor_2, update_2, offset_dus, constant_zero, constant_zero, constant_zero)
ROOT while_output_tuple = tuple(host_dus_1,host_dus_2, update_1, update_2, offset_dus, while_body_num_iter)
}
ENTRY main {
offset = s32[] parameter(0)
update = f32[1,1,128,128] parameter(1)
update2 = f32[1,1,128,128] parameter(2)
constant = f32[] constant(1.0)
broadcast = f32[8,1,128,128] broadcast(constant)
shared_host_memory = f32[8,1,128,128] custom-call(broadcast), custom_call_target="MoveToHost"
tuple_for_while = (f32[8,1,128,128], f32[8,1,128,128], f32[1,1,128,128], f32[1,1,128,128], s32[], s32[]) tuple(shared_host_memory, shared_host_memory, update, update2, offset, offset)
ROOT while = (f32[8,1,128,128], f32[8,1,128,128], f32[1,1,128,128], f32[1,1,128,128], s32[], s32[]) while(tuple_for_while), condition=while_condition, body=while_body
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHostOffloader(module.get()));
EXPECT_TRUE(changed);
}
TEST_F(HostOffloaderTest, RemoveRedundantCopiesBackToHostOutputIsNonTuple) {
const absl::string_view hlo_string = R"(
HloModule jit_main, input_output_alias={ {0}: (0, {}, may-alias), {1}: (1, {}, may-alias) }, entry_computation_layout={(f32[1048576]{0:T(1024)}, f32[25769803776]{0:T(1024)S(5)})->(f32[1048576]{0:T(1024)}, f32[25769803776]{0:T(1024)S(5)})}, allow_spmd_sharding_propagation_to_parameters={false,false}, allow_spmd_sharding_propagation_to_output={false,false}
%host_fn.6 (Arg_0.7: f32[25769803776]) -> f32[25769803776] {
%Arg_0.7 = f32[25769803776]{0} parameter(0), metadata={op_name="jit(main)/jit(main)/pjit"}
%constant.8 = f32[] constant(1)
%broadcast.9 = f32[25769803776]{0} broadcast(f32[] %constant.8), dimensions={}, metadata={op_name="jit(main)/jit(main)/jit(host_fn)/add" source_file="third_party/py/jax/tests/memories_test.py" source_line=1448}
ROOT %add.10 = f32[25769803776]{0} add(f32[25769803776]{0} %Arg_0.7, f32[25769803776]{0} %broadcast.9), frontend_attributes={_xla_compute_type="host"}, metadata={op_name="jit(main)/jit(main)/jit(host_fn)/add" source_file="third_party/py/jax/tests/memories_test.py" source_line=1448}
}, execution_thread="host"
ENTRY %main.17 (Arg_0.1: f32[1048576], Arg_1.2: f32[25769803776]) -> (f32[1048576], f32[25769803776]) {
%Arg_0.1 = f32[1048576]{0:T(1024)} parameter(0), sharding={replicated}, metadata={op_name="a"}
%constant.3 = f32[]{:T(128)} constant(1)
%broadcast.4 = f32[1048576]{0:T(1024)} broadcast(f32[]{:T(128)} %constant.3), dimensions={}, metadata={op_name="jit(main)/jit(main)/add" source_file="third_party/py/jax/tests/memories_test.py" source_line=1454}
%add.5 = f32[1048576]{0:T(1024)} add(f32[1048576]{0:T(1024)} %Arg_0.1, f32[1048576]{0:T(1024)} %broadcast.4), metadata={op_name="jit(main)/jit(main)/add" source_file="third_party/py/jax/tests/memories_test.py" source_line=1454}
%custom-call = f32[1048576]{0:T(1024)} custom-call(f32[1048576]{0:T(1024)} %add.5), custom_call_target="MoveToDevice"
%Arg_1.2 = f32[25769803776]{0:T(1024)} parameter(1), sharding={replicated}, metadata={op_name="b"}
%host-async-start = ((f32[25769803776]{0:T(1024)}), f32[25769803776]{0:T(1024)}, u32[]{:T(128)}) custom-call-start(f32[25769803776]{0:T(1024)} %Arg_1.2), async_execution_thread="host", custom_call_target="HostExecute", called_computations={%host_fn.6}, backend_config={"flag_configs":[],"scoped_memory_configs":[],"device_type":"DEVICE_TYPE_HOST","used_scoped_memory_configs":[]}
%host-async-done = f32[25769803776]{0:T(1024)} custom-call-done(((f32[25769803776]{0:T(1024)}), f32[25769803776]{0:T(1024)}, u32[]{:T(128)}) %host-async-start), backend_config={"flag_configs":[],"scoped_memory_configs":[],"device_type":"DEVICE_TYPE_HOST","used_scoped_memory_configs":[]}
%redundant-move-to-host = f32[25769803776]{0:T(1024)} custom-call(f32[25769803776]{0:T(1024)} %host-async-done), custom_call_target="MoveToHost"
ROOT %output_tuple = (f32[1048576]{0:T(1024)}, f32[25769803776]{0:T(1024)}) tuple(f32[1048576]{0:T(1024)} %custom-call, f32[25769803776]{0:T(1024)} %redundant-move-to-host), sharding={{replicated}, {replicated}}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHostOffloader(module.get()));
EXPECT_TRUE(changed);
VLOG(1) << module->ToString();
HloInstruction* async_start =
FindInstruction(module.get(), "host-async-start");
ASSERT_NE(async_start, nullptr);
TestShapeHasMemorySpace(ShapeUtil::GetSubshape(async_start->shape(), {0, 0}),
Layout::kHostMemorySpace);
TestShapeHasMemorySpace(ShapeUtil::GetSubshape(async_start->shape(), {1}),
Layout::kHostMemorySpace);
HloInstruction* async_done = FindInstruction(module.get(), "host-async-done");
ASSERT_NE(async_done, nullptr);
TestShapeHasMemorySpace(async_done->shape(), Layout::kHostMemorySpace);
HloInstruction* output_tuple = FindInstruction(module.get(), "output_tuple");
ASSERT_NE(output_tuple, nullptr);
TestShapeHasMemorySpace(ShapeUtil::GetSubshape(output_tuple->shape(), {1}),
Layout::kHostMemorySpace);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/host_offloader.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/host_offloader_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
5fd54df9-c8ba-413c-8b2b-4506dc2a7011 | cpp | google/quiche | prr_sender | quiche/quic/core/congestion_control/prr_sender.cc | quiche/quic/core/congestion_control/prr_sender_test.cc | #include "quiche/quic/core/congestion_control/prr_sender.h"
#include "quiche/quic/core/quic_packets.h"
namespace quic {
PrrSender::PrrSender()
: bytes_sent_since_loss_(0),
bytes_delivered_since_loss_(0),
ack_count_since_loss_(0),
bytes_in_flight_before_loss_(0) {}
void PrrSender::OnPacketSent(QuicByteCount sent_bytes) {
bytes_sent_since_loss_ += sent_bytes;
}
void PrrSender::OnPacketLost(QuicByteCount prior_in_flight) {
bytes_sent_since_loss_ = 0;
bytes_in_flight_before_loss_ = prior_in_flight;
bytes_delivered_since_loss_ = 0;
ack_count_since_loss_ = 0;
}
void PrrSender::OnPacketAcked(QuicByteCount acked_bytes) {
bytes_delivered_since_loss_ += acked_bytes;
++ack_count_since_loss_;
}
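// Implements the CanSend decision from Proportional Rate Reduction
// (RFC 6937): while in recovery, pace transmissions so that bytes in
// flight converge toward the slow start threshold instead of collapsing.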
bool PrrSender::CanSend(QuicByteCount congestion_window,
QuicByteCount bytes_in_flight,
QuicByteCount slowstart_threshold) const {
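// Always allow the first transmission after a loss, and never stall when
// less than one full segment is in flight.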
if (bytes_sent_since_loss_ == 0 || bytes_in_flight < kMaxSegmentSize) {
return true;
}
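// PRR-SSRB: bytes in flight has already drained below the congestion
// window, so allow roughly one extra segment per ack to rebuild it.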
if (congestion_window > bytes_in_flight) {
if (bytes_delivered_since_loss_ + ack_count_since_loss_ * kMaxSegmentSize <=
bytes_sent_since_loss_) {
return false;
}
return true;
}
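// Proportional mode: permit a send only while
// sent * prior_in_flight < delivered * ssthresh, i.e. send at a rate
// scaled down by ssthresh / prior_in_flight.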
if (bytes_delivered_since_loss_ * slowstart_threshold >
bytes_sent_since_loss_ * bytes_in_flight_before_loss_) {
return true;
}
return false;
}
} | #include "quiche/quic/core/congestion_control/prr_sender.h"
#include <algorithm>
#include "quiche/quic/core/crypto/crypto_protocol.h"
#include "quiche/quic/core/quic_constants.h"
#include "quiche/quic/platform/api/quic_test.h"
namespace quic {
namespace test {
namespace {
const QuicByteCount kMaxSegmentSize = kDefaultTCPMSS;
}
class PrrSenderTest : public QuicTest {};
TEST_F(PrrSenderTest, SingleLossResultsInSendOnEveryOtherAck) {
PrrSender prr;
QuicPacketCount num_packets_in_flight = 50;
QuicByteCount bytes_in_flight = num_packets_in_flight * kMaxSegmentSize;
const QuicPacketCount ssthresh_after_loss = num_packets_in_flight / 2;
const QuicByteCount congestion_window = ssthresh_after_loss * kMaxSegmentSize;
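// After a single loss, PRR should allow a send on every other ack until
// bytes in flight drains down to the new congestion window.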
prr.OnPacketLost(bytes_in_flight);
prr.OnPacketAcked(kMaxSegmentSize);
bytes_in_flight -= kMaxSegmentSize;
EXPECT_TRUE(prr.CanSend(congestion_window, bytes_in_flight,
ssthresh_after_loss * kMaxSegmentSize));
prr.OnPacketSent(kMaxSegmentSize);
EXPECT_FALSE(prr.CanSend(congestion_window, bytes_in_flight,
ssthresh_after_loss * kMaxSegmentSize));
for (uint64_t i = 0; i < ssthresh_after_loss - 1; ++i) {
prr.OnPacketAcked(kMaxSegmentSize);
bytes_in_flight -= kMaxSegmentSize;
EXPECT_FALSE(prr.CanSend(congestion_window, bytes_in_flight,
ssthresh_after_loss * kMaxSegmentSize));
prr.OnPacketAcked(kMaxSegmentSize);
bytes_in_flight -= kMaxSegmentSize;
EXPECT_TRUE(prr.CanSend(congestion_window, bytes_in_flight,
ssthresh_after_loss * kMaxSegmentSize));
prr.OnPacketSent(kMaxSegmentSize);
bytes_in_flight += kMaxSegmentSize;
}
EXPECT_EQ(congestion_window, bytes_in_flight);
for (int i = 0; i < 10; ++i) {
prr.OnPacketAcked(kMaxSegmentSize);
bytes_in_flight -= kMaxSegmentSize;
EXPECT_TRUE(prr.CanSend(congestion_window, bytes_in_flight,
ssthresh_after_loss * kMaxSegmentSize));
prr.OnPacketSent(kMaxSegmentSize);
bytes_in_flight += kMaxSegmentSize;
EXPECT_EQ(congestion_window, bytes_in_flight);
EXPECT_FALSE(prr.CanSend(congestion_window, bytes_in_flight,
ssthresh_after_loss * kMaxSegmentSize));
}
}
TEST_F(PrrSenderTest, BurstLossResultsInSlowStart) {
PrrSender prr;
QuicByteCount bytes_in_flight = 20 * kMaxSegmentSize;
const QuicPacketCount num_packets_lost = 13;
const QuicPacketCount ssthresh_after_loss = 10;
const QuicByteCount congestion_window = ssthresh_after_loss * kMaxSegmentSize;
bytes_in_flight -= num_packets_lost * kMaxSegmentSize;
prr.OnPacketLost(bytes_in_flight);
for (int i = 0; i < 3; ++i) {
prr.OnPacketAcked(kMaxSegmentSize);
bytes_in_flight -= kMaxSegmentSize;
for (int j = 0; j < 2; ++j) {
EXPECT_TRUE(prr.CanSend(congestion_window, bytes_in_flight,
ssthresh_after_loss * kMaxSegmentSize));
prr.OnPacketSent(kMaxSegmentSize);
bytes_in_flight += kMaxSegmentSize;
}
EXPECT_FALSE(prr.CanSend(congestion_window, bytes_in_flight,
ssthresh_after_loss * kMaxSegmentSize));
}
for (int i = 0; i < 10; ++i) {
prr.OnPacketAcked(kMaxSegmentSize);
bytes_in_flight -= kMaxSegmentSize;
EXPECT_TRUE(prr.CanSend(congestion_window, bytes_in_flight,
ssthresh_after_loss * kMaxSegmentSize));
prr.OnPacketSent(kMaxSegmentSize);
bytes_in_flight += kMaxSegmentSize;
}
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/congestion_control/prr_sender.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/congestion_control/prr_sender_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
ed923afa-a952-4b03-96af-6414be706916 | cpp | tensorflow/tensorflow | clustering_ops | tensorflow/core/ops/clustering_ops.cc | tensorflow/core/kernels/clustering_ops_test.cc | #include "tensorflow/core/framework/common_shape_fns.h"
#include "tensorflow/core/framework/op.h"
namespace tensorflow {
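// Samples num_to_sample rows of points with the k-means++ criterion; a
// positive num_retries_per_sample draws extra candidates per sample and
// keeps the best one.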
REGISTER_OP("KmeansPlusPlusInitialization")
.Input("points: float32")
.Input("num_to_sample: int64")
.Input("seed: int64")
.Input("num_retries_per_sample: int64")
.Output("samples: float32")
.SetShapeFn(shape_inference::UnknownShape);
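// Runs a k-MC^2 Markov chain over the given distances (each point's
// distance to its closest existing center) and returns the index of the
// point chosen as the next seed.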
REGISTER_OP("KMC2ChainInitialization")
.Input("distances: float32")
.Input("seed: int64")
.Output("index: int64")
.SetShapeFn(shape_inference::ScalarShape);
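// For each point, returns the indices of its k nearest centers together
// with the corresponding distances.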
REGISTER_OP("NearestNeighbors")
.Input("points: float32")
.Input("centers: float32")
.Input("k: int64")
.Output("nearest_center_indices: int64")
.Output("nearest_center_distances: float32")
.SetShapeFn(shape_inference::UnknownShape);
} | #include "tensorflow/core/common_runtime/kernel_benchmark_testlib.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/graph/node_builder.h"
#include "tensorflow/core/lib/random/simple_philox.h"
#include "tensorflow/core/lib/strings/stringprintf.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
namespace tensorflow {
namespace {
constexpr int k100Dim = 100;
constexpr int k10Points = 10;
constexpr int k100Points = 100;
constexpr int k1kPoints = 1000;
constexpr int k10kPoints = 10000;
constexpr int k1MPoints = 1000000;
constexpr int k2Centers = 2;
constexpr int k5Centers = 5;
constexpr int k10Centers = 10;
constexpr int k20Centers = 20;
constexpr int k50Centers = 50;
constexpr int k100Centers = 100;
constexpr int k200Centers = 200;
constexpr int k500Centers = 500;
constexpr int k1kCenters = 1000;
constexpr int k10kCenters = 10000;
constexpr int k0RetriesPerSample = 0;
constexpr int k3RetriesPerSample = 3;
Graph* SetUpKmeansPlusPlusInitialization(int num_dims, int num_points,
int num_to_sample,
int retries_per_sample) {
Graph* g = new Graph(OpRegistry::Global());
Tensor points(DT_FLOAT, TensorShape({num_points, num_dims}));
Tensor sample_size(DT_INT64, TensorShape({}));
Tensor seed(DT_INT64, TensorShape({}));
Tensor num_retries_per_sample(DT_INT64, TensorShape({}));
points.flat<float>().setRandom();
sample_size.flat<int64_t>().setConstant(num_to_sample);
seed.flat<int64_t>().setConstant(12345);
num_retries_per_sample.flat<int64_t>().setConstant(retries_per_sample);
TF_CHECK_OK(NodeBuilder("kmeans_plus_plus_initialization_op",
"KmeansPlusPlusInitialization")
.Input(test::graph::Constant(g, points))
.Input(test::graph::Constant(g, sample_size))
.Input(test::graph::Constant(g, seed))
.Input(test::graph::Constant(g, num_retries_per_sample))
.Finalize(g, nullptr));
return g;
}
template <int num_points, int num_to_sample, int num_dims,
int retries_per_sample>
void BM_KmeansPlusPlusInitialization(::testing::benchmark::State& state) {
Graph* g = SetUpKmeansPlusPlusInitialization(
num_dims, num_points, num_to_sample, retries_per_sample);
test::Benchmark("cpu", g, false).Run(state);
state.SetItemsProcessed(static_cast<int64_t>(state.iterations()) *
num_points * num_dims * num_to_sample);
}
#define BENCHMARK_KMEANS_PLUS_PLUS(p, c, d, r) \
void BM_KmeansPlusPlusInitialization_##p##_##c##_##d##_##r( \
::testing::benchmark::State& state) { \
BM_KmeansPlusPlusInitialization<p, c, d, r>(state); \
} \
BENCHMARK(BM_KmeansPlusPlusInitialization_##p##_##c##_##d##_##r) \
->UseRealTime();
#define RUN_BM_KmeansPlusPlusInitialization(retries) \
BENCHMARK_KMEANS_PLUS_PLUS(k10Points, k2Centers, k100Dim, retries); \
BENCHMARK_KMEANS_PLUS_PLUS(k10Points, k5Centers, k100Dim, retries); \
BENCHMARK_KMEANS_PLUS_PLUS(k10Points, k10Centers, k100Dim, retries); \
BENCHMARK_KMEANS_PLUS_PLUS(k100Points, k10Centers, k100Dim, retries); \
BENCHMARK_KMEANS_PLUS_PLUS(k100Points, k20Centers, k100Dim, retries); \
BENCHMARK_KMEANS_PLUS_PLUS(k100Points, k50Centers, k100Dim, retries); \
BENCHMARK_KMEANS_PLUS_PLUS(k100Points, k100Centers, k100Dim, retries); \
BENCHMARK_KMEANS_PLUS_PLUS(k1kPoints, k100Centers, k100Dim, retries); \
BENCHMARK_KMEANS_PLUS_PLUS(k1kPoints, k200Centers, k100Dim, retries); \
BENCHMARK_KMEANS_PLUS_PLUS(k1kPoints, k500Centers, k100Dim, retries); \
BENCHMARK_KMEANS_PLUS_PLUS(k1kPoints, k1kCenters, k100Dim, retries); \
BENCHMARK_KMEANS_PLUS_PLUS(k10kPoints, k100Centers, k100Dim, retries); \
BENCHMARK_KMEANS_PLUS_PLUS(k10kPoints, k200Centers, k100Dim, retries); \
BENCHMARK_KMEANS_PLUS_PLUS(k10kPoints, k500Centers, k100Dim, retries); \
BENCHMARK_KMEANS_PLUS_PLUS(k10kPoints, k1kCenters, k100Dim, retries); \
BENCHMARK_KMEANS_PLUS_PLUS(k1MPoints, k100Centers, k100Dim, retries); \
BENCHMARK_KMEANS_PLUS_PLUS(k1MPoints, k200Centers, k100Dim, retries); \
BENCHMARK_KMEANS_PLUS_PLUS(k1MPoints, k500Centers, k100Dim, retries); \
BENCHMARK_KMEANS_PLUS_PLUS(k1MPoints, k1kCenters, k100Dim, retries)
RUN_BM_KmeansPlusPlusInitialization(k0RetriesPerSample);
RUN_BM_KmeansPlusPlusInitialization(k3RetriesPerSample);
#undef RUN_BM_KmeansPlusPlusInitialization
#undef BENCHMARK_KMEANS_PLUS_PLUS
Graph* SetUpKMC2Initialization(int num_points) {
Graph* g = new Graph(OpRegistry::Global());
Tensor distances(DT_FLOAT, TensorShape({num_points}));
Tensor seed(DT_INT64, TensorShape({}));
distances.flat<float>().setRandom();
seed.flat<int64_t>().setConstant(12345);
TF_CHECK_OK(
NodeBuilder("KMC2ChainInitializationOp", "KMC2ChainInitialization")
.Input(test::graph::Constant(g, distances))
.Input(test::graph::Constant(g, seed))
.Finalize(g, nullptr));
return g;
}
template <int num_points, int num_to_sample, int num_dims>
void BM_KMC2Initialization(::testing::benchmark::State& state) {
Graph* g = SetUpKMC2Initialization(num_points);
test::Benchmark("cpu", g, false).Run(state);
state.SetItemsProcessed(static_cast<int64_t>(state.iterations()) *
num_points * num_dims * num_to_sample);
}
#define BENCHMARK_KMC2(p, c, d) \
void BM_KMC2Initialization_##p##_##c##_##d( \
::testing::benchmark::State& state) { \
BM_KMC2Initialization<p, c, d>(state); \
} \
BENCHMARK(BM_KMC2Initialization_##p##_##c##_##d)->UseRealTime();
#define RUN_BM_KMC2Initialization \
BENCHMARK_KMC2(k10Points, k2Centers, k100Dim); \
BENCHMARK_KMC2(k10Points, k5Centers, k100Dim); \
BENCHMARK_KMC2(k10Points, k10Centers, k100Dim); \
BENCHMARK_KMC2(k100Points, k10Centers, k100Dim); \
BENCHMARK_KMC2(k100Points, k20Centers, k100Dim); \
BENCHMARK_KMC2(k100Points, k50Centers, k100Dim); \
BENCHMARK_KMC2(k100Points, k100Centers, k100Dim); \
BENCHMARK_KMC2(k1kPoints, k100Centers, k100Dim); \
BENCHMARK_KMC2(k1kPoints, k200Centers, k100Dim); \
BENCHMARK_KMC2(k1kPoints, k500Centers, k100Dim); \
BENCHMARK_KMC2(k1kPoints, k1kCenters, k100Dim); \
BENCHMARK_KMC2(k10kPoints, k100Centers, k100Dim); \
BENCHMARK_KMC2(k10kPoints, k200Centers, k100Dim); \
BENCHMARK_KMC2(k10kPoints, k500Centers, k100Dim); \
BENCHMARK_KMC2(k10kPoints, k1kCenters, k100Dim); \
BENCHMARK_KMC2(k1MPoints, k100Centers, k100Dim); \
BENCHMARK_KMC2(k1MPoints, k200Centers, k100Dim); \
BENCHMARK_KMC2(k1MPoints, k500Centers, k100Dim); \
BENCHMARK_KMC2(k1MPoints, k1kCenters, k100Dim)
RUN_BM_KMC2Initialization;
#undef RUN_BM_KMC2Initialization
#undef BENCHMARK_KMC2
Graph* SetUpNearestNeighbors(int num_dims, int num_points, int num_centers,
int k) {
Graph* g = new Graph(OpRegistry::Global());
Tensor points(DT_FLOAT, TensorShape({num_points, num_dims}));
Tensor centers(DT_FLOAT, TensorShape({num_centers, num_dims}));
Tensor top(DT_INT64, TensorShape({}));
points.flat<float>().setRandom();
centers.flat<float>().setRandom();
top.flat<int64_t>().setConstant(k);
TF_CHECK_OK(NodeBuilder("nearest_centers_op", "NearestNeighbors")
.Input(test::graph::Constant(g, points))
.Input(test::graph::Constant(g, centers))
.Input(test::graph::Constant(g, top))
.Finalize(g, nullptr));
return g;
}
template <int num_dims, int num_points, int num_centers, int k>
void BM_NearestNeighbors(::testing::benchmark::State& state) {
Graph* g = SetUpNearestNeighbors(num_dims, num_points, num_centers, k);
test::Benchmark("cpu", g, false).Run(state);
state.SetItemsProcessed(static_cast<int64_t>(state.iterations()) *
num_points * num_dims * num_centers);
}
constexpr int kTop1 = 1;
constexpr int kTop2 = 2;
constexpr int kTop5 = 5;
constexpr int kTop10 = 10;
#define BENCHMARK_NEAREST_NEIGHBORS(d, p, c, k) \
void BM_NearestNeighbors##d##_##p##_##c##_##k( \
::testing::benchmark::State& state) { \
BM_NearestNeighbors<d, p, c, k>(state); \
} \
BENCHMARK(BM_NearestNeighbors##d##_##p##_##c##_##k)->UseRealTime();
#define RUN_BM_NearestNeighbors(k) \
BENCHMARK_NEAREST_NEIGHBORS(k100Dim, k1kPoints, k100Centers, k); \
BENCHMARK_NEAREST_NEIGHBORS(k100Dim, k1kPoints, k1kCenters, k); \
BENCHMARK_NEAREST_NEIGHBORS(k100Dim, k1kPoints, k10kCenters, k); \
BENCHMARK_NEAREST_NEIGHBORS(k100Dim, k1MPoints, k100Centers, k); \
BENCHMARK_NEAREST_NEIGHBORS(k100Dim, k1MPoints, k1kCenters, k); \
BENCHMARK_NEAREST_NEIGHBORS(k100Dim, k1MPoints, k10kCenters, k)
RUN_BM_NearestNeighbors(kTop1);
RUN_BM_NearestNeighbors(kTop2);
RUN_BM_NearestNeighbors(kTop5);
RUN_BM_NearestNeighbors(kTop10);
#undef RUN_BM_NearestNeighbors
#undef BENCHMARK_NEAREST_NEIGHBORS
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/ops/clustering_ops.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/clustering_ops_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
8ed8fe03-8fe6-41ff-86a0-f7f2a159a47c | cpp | google/quiche | decode_http2_structures | quiche/http2/decoder/decode_http2_structures.cc | quiche/http2/decoder/decode_http2_structures_test.cc | #include "quiche/http2/decoder/decode_http2_structures.h"
#include <cstdint>
#include <cstring>
#include "quiche/http2/decoder/decode_buffer.h"
#include "quiche/http2/http2_constants.h"
#include "quiche/common/platform/api/quiche_logging.h"
namespace http2 {
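// Decodes the fixed 9-octet HTTP/2 frame header (RFC 7540, Section 4.1):
// 24-bit payload length, 8-bit type, 8-bit flags, and a 31-bit stream id
// (the reserved high bit is dropped by DecodeUInt31).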
void DoDecode(Http2FrameHeader* out, DecodeBuffer* b) {
QUICHE_DCHECK_NE(nullptr, out);
QUICHE_DCHECK_NE(nullptr, b);
QUICHE_DCHECK_LE(Http2FrameHeader::EncodedSize(), b->Remaining());
out->payload_length = b->DecodeUInt24();
out->type = static_cast<Http2FrameType>(b->DecodeUInt8());
out->flags = static_cast<Http2FrameFlag>(b->DecodeUInt8());
out->stream_id = b->DecodeUInt31();
}
void DoDecode(Http2PriorityFields* out, DecodeBuffer* b) {
QUICHE_DCHECK_NE(nullptr, out);
QUICHE_DCHECK_NE(nullptr, b);
QUICHE_DCHECK_LE(Http2PriorityFields::EncodedSize(), b->Remaining());
uint32_t stream_id_and_flag = b->DecodeUInt32();
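// The high bit of this word is the E (exclusive) flag; the low 31 bits
// are the stream dependency.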
out->stream_dependency = stream_id_and_flag & StreamIdMask();
if (out->stream_dependency == stream_id_and_flag) {
out->is_exclusive = false;
} else {
out->is_exclusive = true;
}
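// The wire encoding stores weight - 1, so add one to recover 1..256.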
out->weight = b->DecodeUInt8() + 1;
}
void DoDecode(Http2RstStreamFields* out, DecodeBuffer* b) {
QUICHE_DCHECK_NE(nullptr, out);
QUICHE_DCHECK_NE(nullptr, b);
QUICHE_DCHECK_LE(Http2RstStreamFields::EncodedSize(), b->Remaining());
out->error_code = static_cast<Http2ErrorCode>(b->DecodeUInt32());
}
void DoDecode(Http2SettingFields* out, DecodeBuffer* b) {
QUICHE_DCHECK_NE(nullptr, out);
QUICHE_DCHECK_NE(nullptr, b);
QUICHE_DCHECK_LE(Http2SettingFields::EncodedSize(), b->Remaining());
out->parameter = static_cast<Http2SettingsParameter>(b->DecodeUInt16());
out->value = b->DecodeUInt32();
}
void DoDecode(Http2PushPromiseFields* out, DecodeBuffer* b) {
QUICHE_DCHECK_NE(nullptr, out);
QUICHE_DCHECK_NE(nullptr, b);
QUICHE_DCHECK_LE(Http2PushPromiseFields::EncodedSize(), b->Remaining());
out->promised_stream_id = b->DecodeUInt31();
}
void DoDecode(Http2PingFields* out, DecodeBuffer* b) {
QUICHE_DCHECK_NE(nullptr, out);
QUICHE_DCHECK_NE(nullptr, b);
QUICHE_DCHECK_LE(Http2PingFields::EncodedSize(), b->Remaining());
memcpy(out->opaque_bytes, b->cursor(), Http2PingFields::EncodedSize());
b->AdvanceCursor(Http2PingFields::EncodedSize());
}
void DoDecode(Http2GoAwayFields* out, DecodeBuffer* b) {
QUICHE_DCHECK_NE(nullptr, out);
QUICHE_DCHECK_NE(nullptr, b);
QUICHE_DCHECK_LE(Http2GoAwayFields::EncodedSize(), b->Remaining());
out->last_stream_id = b->DecodeUInt31();
out->error_code = static_cast<Http2ErrorCode>(b->DecodeUInt32());
}
void DoDecode(Http2WindowUpdateFields* out, DecodeBuffer* b) {
QUICHE_DCHECK_NE(nullptr, out);
QUICHE_DCHECK_NE(nullptr, b);
QUICHE_DCHECK_LE(Http2WindowUpdateFields::EncodedSize(), b->Remaining());
out->window_size_increment = b->DecodeUInt31();
}
void DoDecode(Http2PriorityUpdateFields* out, DecodeBuffer* b) {
QUICHE_DCHECK_NE(nullptr, out);
QUICHE_DCHECK_NE(nullptr, b);
QUICHE_DCHECK_LE(Http2PriorityUpdateFields::EncodedSize(), b->Remaining());
out->prioritized_stream_id = b->DecodeUInt31();
}
void DoDecode(Http2AltSvcFields* out, DecodeBuffer* b) {
QUICHE_DCHECK_NE(nullptr, out);
QUICHE_DCHECK_NE(nullptr, b);
QUICHE_DCHECK_LE(Http2AltSvcFields::EncodedSize(), b->Remaining());
out->origin_length = b->DecodeUInt16();
}
} | #include "quiche/http2/decoder/decode_http2_structures.h"
#include <stddef.h>
#include <string>
#include "absl/strings/string_view.h"
#include "quiche/http2/decoder/decode_buffer.h"
#include "quiche/http2/decoder/decode_status.h"
#include "quiche/http2/http2_constants.h"
#include "quiche/http2/test_tools/http2_frame_builder.h"
#include "quiche/http2/test_tools/http2_random.h"
#include "quiche/http2/test_tools/http2_structures_test_util.h"
#include "quiche/common/platform/api/quiche_logging.h"
#include "quiche/common/platform/api/quiche_test.h"
namespace http2 {
namespace test {
namespace {
template <typename T, size_t N>
absl::string_view ToStringPiece(T (&data)[N]) {
return absl::string_view(reinterpret_cast<const char*>(data), N * sizeof(T));
}
template <class S>
std::string SerializeStructure(const S& s) {
Http2FrameBuilder fb;
fb.Append(s);
EXPECT_EQ(S::EncodedSize(), fb.size());
return fb.buffer();
}
template <class S>
class StructureDecoderTest : public quiche::test::QuicheTest {
protected:
typedef S Structure;
StructureDecoderTest() : random_(), random_decode_count_(100) {}
void Randomize(S* p) { ::http2::test::Randomize(p, &random_); }
void DecodeLeadingStructure(const S* expected, absl::string_view data) {
ASSERT_LE(S::EncodedSize(), data.size());
DecodeBuffer db(data);
Randomize(&structure_);
DoDecode(&structure_, &db);
EXPECT_EQ(db.Offset(), S::EncodedSize());
if (expected != nullptr) {
EXPECT_EQ(structure_, *expected);
}
}
template <size_t N>
void DecodeLeadingStructure(const char (&data)[N]) {
DecodeLeadingStructure(nullptr, absl::string_view(data, N));
}
void EncodeThenDecode(const S& in_s) {
std::string bytes = SerializeStructure(in_s);
EXPECT_EQ(S::EncodedSize(), bytes.size());
DecodeLeadingStructure(&in_s, bytes);
}
void TestDecodingRandomizedStructures(size_t count) {
for (size_t i = 0; i < count && !HasFailure(); ++i) {
Structure input;
Randomize(&input);
EncodeThenDecode(input);
}
}
void TestDecodingRandomizedStructures() {
TestDecodingRandomizedStructures(random_decode_count_);
}
Http2Random random_;
const size_t random_decode_count_;
uint32_t decode_offset_ = 0;
S structure_;
size_t fast_decode_count_ = 0;
size_t slow_decode_count_ = 0;
};
class FrameHeaderDecoderTest : public StructureDecoderTest<Http2FrameHeader> {};
TEST_F(FrameHeaderDecoderTest, DecodesLiteral) {
{
const char kData[] = {
'\x00', '\x00', '\x05',
'\x01',
'\x08',
'\x00', '\x00', '\x00', '\x01',
'\x04',
'\x00', '\x00', '\x00', '\x00',
};
DecodeLeadingStructure(kData);
if (!HasFailure()) {
EXPECT_EQ(5u, structure_.payload_length);
EXPECT_EQ(Http2FrameType::HEADERS, structure_.type);
EXPECT_EQ(Http2FrameFlag::PADDED, structure_.flags);
EXPECT_EQ(1u, structure_.stream_id);
}
}
{
const char kData[] = {
'\xff', '\xff', '\xff',
'\xff',
'\xff',
'\xff', '\xff', '\xff', '\xff',
};
DecodeLeadingStructure(kData);
if (!HasFailure()) {
EXPECT_EQ((1u << 24) - 1, structure_.payload_length);
EXPECT_EQ(static_cast<Http2FrameType>(255), structure_.type);
EXPECT_EQ(255, structure_.flags);
EXPECT_EQ(0x7FFFFFFFu, structure_.stream_id);
}
}
}
TEST_F(FrameHeaderDecoderTest, DecodesRandomized) {
TestDecodingRandomizedStructures();
}
class PriorityFieldsDecoderTest
: public StructureDecoderTest<Http2PriorityFields> {};
TEST_F(PriorityFieldsDecoderTest, DecodesLiteral) {
{
const char kData[] = {
'\x80', '\x00', '\x00', '\x05',
'\xff',
};
DecodeLeadingStructure(kData);
if (!HasFailure()) {
EXPECT_EQ(5u, structure_.stream_dependency);
EXPECT_EQ(256u, structure_.weight);
EXPECT_EQ(true, structure_.is_exclusive);
}
}
{
const char kData[] = {
'\x7f', '\xff',
'\xff', '\xff',
'\x00',
};
DecodeLeadingStructure(kData);
if (!HasFailure()) {
EXPECT_EQ(StreamIdMask(), structure_.stream_dependency);
EXPECT_EQ(1u, structure_.weight);
EXPECT_FALSE(structure_.is_exclusive);
}
}
}
TEST_F(PriorityFieldsDecoderTest, DecodesRandomized) {
TestDecodingRandomizedStructures();
}
class RstStreamFieldsDecoderTest
: public StructureDecoderTest<Http2RstStreamFields> {};
TEST_F(RstStreamFieldsDecoderTest, DecodesLiteral) {
{
const char kData[] = {
'\x00', '\x00', '\x00', '\x01',
};
DecodeLeadingStructure(kData);
if (!HasFailure()) {
EXPECT_TRUE(structure_.IsSupportedErrorCode());
EXPECT_EQ(Http2ErrorCode::PROTOCOL_ERROR, structure_.error_code);
}
}
{
const char kData[] = {
'\xff', '\xff', '\xff',
'\xff',
};
DecodeLeadingStructure(kData);
if (!HasFailure()) {
EXPECT_FALSE(structure_.IsSupportedErrorCode());
EXPECT_EQ(static_cast<Http2ErrorCode>(0xffffffff), structure_.error_code);
}
}
}
TEST_F(RstStreamFieldsDecoderTest, DecodesRandomized) {
TestDecodingRandomizedStructures();
}
class SettingFieldsDecoderTest
: public StructureDecoderTest<Http2SettingFields> {};
TEST_F(SettingFieldsDecoderTest, DecodesLiteral) {
{
const char kData[] = {
'\x00', '\x01',
'\x00', '\x00', '\x40', '\x00',
};
DecodeLeadingStructure(kData);
if (!HasFailure()) {
EXPECT_TRUE(structure_.IsSupportedParameter());
EXPECT_EQ(Http2SettingsParameter::HEADER_TABLE_SIZE,
structure_.parameter);
EXPECT_EQ(1u << 14, structure_.value);
}
}
{
const char kData[] = {
'\x00', '\x00',
'\xff', '\xff', '\xff', '\xff',
};
DecodeLeadingStructure(kData);
if (!HasFailure()) {
EXPECT_FALSE(structure_.IsSupportedParameter());
EXPECT_EQ(static_cast<Http2SettingsParameter>(0), structure_.parameter);
}
}
}
TEST_F(SettingFieldsDecoderTest, DecodesRandomized) {
TestDecodingRandomizedStructures();
}
class PushPromiseFieldsDecoderTest
: public StructureDecoderTest<Http2PushPromiseFields> {};
TEST_F(PushPromiseFieldsDecoderTest, DecodesLiteral) {
{
const char kData[] = {
'\x00', '\x01', '\x8a', '\x92',
};
DecodeLeadingStructure(kData);
if (!HasFailure()) {
EXPECT_EQ(101010u, structure_.promised_stream_id);
}
}
{
const char kData[] = {
'\xff', '\xff', '\xff',
'\xff',
};
DecodeLeadingStructure(kData);
if (!HasFailure()) {
EXPECT_EQ(StreamIdMask(), structure_.promised_stream_id);
}
}
}
TEST_F(PushPromiseFieldsDecoderTest, DecodesRandomized) {
TestDecodingRandomizedStructures();
}
class PingFieldsDecoderTest : public StructureDecoderTest<Http2PingFields> {};
TEST_F(PingFieldsDecoderTest, DecodesLiteral) {
{
const char kData[] = {
'\x00', '\x01', '\x02', '\x03', '\x04', '\x05', '\x06', '\x07',
};
DecodeLeadingStructure(kData);
if (!HasFailure()) {
EXPECT_EQ(absl::string_view(kData, 8),
ToStringPiece(structure_.opaque_bytes));
}
}
{
const char kData[] = {
'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
};
DecodeLeadingStructure(kData);
if (!HasFailure()) {
EXPECT_EQ(absl::string_view(kData, 8),
ToStringPiece(structure_.opaque_bytes));
}
}
{
const char kData[] = {
'\xff', '\xff', '\xff', '\xff', '\xff', '\xff', '\xff', '\xff',
};
DecodeLeadingStructure(kData);
if (!HasFailure()) {
EXPECT_EQ(absl::string_view(kData, 8),
ToStringPiece(structure_.opaque_bytes));
}
}
}
TEST_F(PingFieldsDecoderTest, DecodesRandomized) {
TestDecodingRandomizedStructures();
}
class GoAwayFieldsDecoderTest : public StructureDecoderTest<Http2GoAwayFields> {
};
TEST_F(GoAwayFieldsDecoderTest, DecodesLiteral) {
{
const char kData[] = {
'\x00', '\x00', '\x00', '\x00',
'\x00', '\x00', '\x00', '\x00',
};
DecodeLeadingStructure(kData);
if (!HasFailure()) {
EXPECT_EQ(0u, structure_.last_stream_id);
EXPECT_TRUE(structure_.IsSupportedErrorCode());
EXPECT_EQ(Http2ErrorCode::HTTP2_NO_ERROR, structure_.error_code);
}
}
{
const char kData[] = {
'\x00', '\x00', '\x00', '\x01',
'\x00', '\x00', '\x00', '\x0d',
};
DecodeLeadingStructure(kData);
if (!HasFailure()) {
EXPECT_EQ(1u, structure_.last_stream_id);
EXPECT_TRUE(structure_.IsSupportedErrorCode());
EXPECT_EQ(Http2ErrorCode::HTTP_1_1_REQUIRED, structure_.error_code);
}
}
{
const char kData[] = {
'\xff', '\xff',
'\xff', '\xff',
'\xff', '\xff',
'\xff', '\xff',
};
DecodeLeadingStructure(kData);
if (!HasFailure()) {
EXPECT_EQ(StreamIdMask(), structure_.last_stream_id);
EXPECT_FALSE(structure_.IsSupportedErrorCode());
EXPECT_EQ(static_cast<Http2ErrorCode>(0xffffffff), structure_.error_code);
}
}
}
TEST_F(GoAwayFieldsDecoderTest, DecodesRandomized) {
TestDecodingRandomizedStructures();
}
class WindowUpdateFieldsDecoderTest
: public StructureDecoderTest<Http2WindowUpdateFields> {};
TEST_F(WindowUpdateFieldsDecoderTest, DecodesLiteral) {
{
const char kData[] = {
'\x00', '\x01', '\x00', '\x00',
};
DecodeLeadingStructure(kData);
if (!HasFailure()) {
EXPECT_EQ(1u << 16, structure_.window_size_increment);
}
}
{
const char kData[] = {
'\x00', '\x00', '\x00', '\x00',
};
DecodeLeadingStructure(kData);
if (!HasFailure()) {
EXPECT_EQ(0u, structure_.window_size_increment);
}
}
{
const char kData[] = {
'\xff', '\xff', '\xff', '\xff',
};
DecodeLeadingStructure(kData);
if (!HasFailure()) {
EXPECT_EQ(StreamIdMask(), structure_.window_size_increment);
}
}
}
TEST_F(WindowUpdateFieldsDecoderTest, DecodesRandomized) {
TestDecodingRandomizedStructures();
}
class AltSvcFieldsDecoderTest : public StructureDecoderTest<Http2AltSvcFields> {
};
TEST_F(AltSvcFieldsDecoderTest, DecodesLiteral) {
{
const char kData[] = {
'\x00', '\x00',
};
DecodeLeadingStructure(kData);
if (!HasFailure()) {
EXPECT_EQ(0, structure_.origin_length);
}
}
{
const char kData[] = {
'\x00', '\x14',
};
DecodeLeadingStructure(kData);
if (!HasFailure()) {
EXPECT_EQ(20, structure_.origin_length);
}
}
{
const char kData[] = {
'\xff', '\xff',
};
DecodeLeadingStructure(kData);
if (!HasFailure()) {
EXPECT_EQ(65535, structure_.origin_length);
}
}
}
TEST_F(AltSvcFieldsDecoderTest, DecodesRandomized) {
TestDecodingRandomizedStructures();
}
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/http2/decoder/decode_http2_structures.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/http2/decoder/decode_http2_structures_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
01ae2344-ebd7-4587-9659-10c56b01874f | cpp | google/libaddressinput | md5 | cpp/src/util/md5.cc | cpp/test/util/md5_unittest.cc | #include "md5.h"
#include <cstddef>
#include <string>
#include <string.h>
namespace {
struct Context {
uint32_t buf[4];
uint32_t bits[2];
uint8_t in[64];
};
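// Reinterprets each group of four bytes as a little-endian uint32_t and
// stores the result back in place; MD5 is defined over little-endian
// 32-bit words.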
void byteReverse(uint8_t* buf, unsigned longs) {
do {
uint32_t temp = static_cast<uint32_t>(
static_cast<unsigned>(buf[3]) << 8 |
buf[2]) << 16 |
(static_cast<unsigned>(buf[1]) << 8 | buf[0]);
*reinterpret_cast<uint32_t*>(buf) = temp;
buf += 4;
} while (--longs);
}
#define F1(x, y, z) (z ^ (x & (y ^ z)))
#define F2(x, y, z) F1(z, x, y)
#define F3(x, y, z) (x ^ y ^ z)
#define F4(x, y, z) (y ^ (x | ~z))
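// One MD5 step: mix the round function and message word into w, rotate
// left by s, then add x.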
#define MD5STEP(f, w, x, y, z, data, s) \
(w += f(x, y, z) + data, w = w << s | w >> (32 - s), w += x)
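// The core block transform: four rounds of 16 steps each over one
// 64-byte block, accumulated into the 128-bit state in buf.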
void MD5Transform(uint32_t buf[4], const uint32_t in[16]) {
uint32_t a, b, c, d;
a = buf[0];
b = buf[1];
c = buf[2];
d = buf[3];
MD5STEP(F1, a, b, c, d, in[0] + 0xd76aa478, 7);
MD5STEP(F1, d, a, b, c, in[1] + 0xe8c7b756, 12);
MD5STEP(F1, c, d, a, b, in[2] + 0x242070db, 17);
MD5STEP(F1, b, c, d, a, in[3] + 0xc1bdceee, 22);
MD5STEP(F1, a, b, c, d, in[4] + 0xf57c0faf, 7);
MD5STEP(F1, d, a, b, c, in[5] + 0x4787c62a, 12);
MD5STEP(F1, c, d, a, b, in[6] + 0xa8304613, 17);
MD5STEP(F1, b, c, d, a, in[7] + 0xfd469501, 22);
MD5STEP(F1, a, b, c, d, in[8] + 0x698098d8, 7);
MD5STEP(F1, d, a, b, c, in[9] + 0x8b44f7af, 12);
MD5STEP(F1, c, d, a, b, in[10] + 0xffff5bb1, 17);
MD5STEP(F1, b, c, d, a, in[11] + 0x895cd7be, 22);
MD5STEP(F1, a, b, c, d, in[12] + 0x6b901122, 7);
MD5STEP(F1, d, a, b, c, in[13] + 0xfd987193, 12);
MD5STEP(F1, c, d, a, b, in[14] + 0xa679438e, 17);
MD5STEP(F1, b, c, d, a, in[15] + 0x49b40821, 22);
MD5STEP(F2, a, b, c, d, in[1] + 0xf61e2562, 5);
MD5STEP(F2, d, a, b, c, in[6] + 0xc040b340, 9);
MD5STEP(F2, c, d, a, b, in[11] + 0x265e5a51, 14);
MD5STEP(F2, b, c, d, a, in[0] + 0xe9b6c7aa, 20);
MD5STEP(F2, a, b, c, d, in[5] + 0xd62f105d, 5);
MD5STEP(F2, d, a, b, c, in[10] + 0x02441453, 9);
MD5STEP(F2, c, d, a, b, in[15] + 0xd8a1e681, 14);
MD5STEP(F2, b, c, d, a, in[4] + 0xe7d3fbc8, 20);
MD5STEP(F2, a, b, c, d, in[9] + 0x21e1cde6, 5);
MD5STEP(F2, d, a, b, c, in[14] + 0xc33707d6, 9);
MD5STEP(F2, c, d, a, b, in[3] + 0xf4d50d87, 14);
MD5STEP(F2, b, c, d, a, in[8] + 0x455a14ed, 20);
MD5STEP(F2, a, b, c, d, in[13] + 0xa9e3e905, 5);
MD5STEP(F2, d, a, b, c, in[2] + 0xfcefa3f8, 9);
MD5STEP(F2, c, d, a, b, in[7] + 0x676f02d9, 14);
MD5STEP(F2, b, c, d, a, in[12] + 0x8d2a4c8a, 20);
MD5STEP(F3, a, b, c, d, in[5] + 0xfffa3942, 4);
MD5STEP(F3, d, a, b, c, in[8] + 0x8771f681, 11);
MD5STEP(F3, c, d, a, b, in[11] + 0x6d9d6122, 16);
MD5STEP(F3, b, c, d, a, in[14] + 0xfde5380c, 23);
MD5STEP(F3, a, b, c, d, in[1] + 0xa4beea44, 4);
MD5STEP(F3, d, a, b, c, in[4] + 0x4bdecfa9, 11);
MD5STEP(F3, c, d, a, b, in[7] + 0xf6bb4b60, 16);
MD5STEP(F3, b, c, d, a, in[10] + 0xbebfbc70, 23);
MD5STEP(F3, a, b, c, d, in[13] + 0x289b7ec6, 4);
MD5STEP(F3, d, a, b, c, in[0] + 0xeaa127fa, 11);
MD5STEP(F3, c, d, a, b, in[3] + 0xd4ef3085, 16);
MD5STEP(F3, b, c, d, a, in[6] + 0x04881d05, 23);
MD5STEP(F3, a, b, c, d, in[9] + 0xd9d4d039, 4);
MD5STEP(F3, d, a, b, c, in[12] + 0xe6db99e5, 11);
MD5STEP(F3, c, d, a, b, in[15] + 0x1fa27cf8, 16);
MD5STEP(F3, b, c, d, a, in[2] + 0xc4ac5665, 23);
MD5STEP(F4, a, b, c, d, in[0] + 0xf4292244, 6);
MD5STEP(F4, d, a, b, c, in[7] + 0x432aff97, 10);
MD5STEP(F4, c, d, a, b, in[14] + 0xab9423a7, 15);
MD5STEP(F4, b, c, d, a, in[5] + 0xfc93a039, 21);
MD5STEP(F4, a, b, c, d, in[12] + 0x655b59c3, 6);
MD5STEP(F4, d, a, b, c, in[3] + 0x8f0ccc92, 10);
MD5STEP(F4, c, d, a, b, in[10] + 0xffeff47d, 15);
MD5STEP(F4, b, c, d, a, in[1] + 0x85845dd1, 21);
MD5STEP(F4, a, b, c, d, in[8] + 0x6fa87e4f, 6);
MD5STEP(F4, d, a, b, c, in[15] + 0xfe2ce6e0, 10);
MD5STEP(F4, c, d, a, b, in[6] + 0xa3014314, 15);
MD5STEP(F4, b, c, d, a, in[13] + 0x4e0811a1, 21);
MD5STEP(F4, a, b, c, d, in[4] + 0xf7537e82, 6);
MD5STEP(F4, d, a, b, c, in[11] + 0xbd3af235, 10);
MD5STEP(F4, c, d, a, b, in[2] + 0x2ad7d2bb, 15);
MD5STEP(F4, b, c, d, a, in[9] + 0xeb86d391, 21);
buf[0] += a;
buf[1] += b;
buf[2] += c;
buf[3] += d;
}
}
namespace i18n {
namespace addressinput {
void MD5Init(MD5Context* context) {
struct Context* ctx = reinterpret_cast<struct Context*>(context);
ctx->buf[0] = 0x67452301;
ctx->buf[1] = 0xefcdab89;
ctx->buf[2] = 0x98badcfe;
ctx->buf[3] = 0x10325476;
ctx->bits[0] = 0;
ctx->bits[1] = 0;
}
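// Streams `data` into the context: advances the 64-bit bit count, tops up a
// partially filled block, transforms each complete 64-byte block, and
// buffers the tail for the next call.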
void MD5Update(MD5Context* context, const std::string& data) {
struct Context* ctx = reinterpret_cast<struct Context*>(context);
const uint8_t* buf = reinterpret_cast<const uint8_t*>(data.data());
size_t len = data.size();
uint32_t t = ctx->bits[0];
if ((ctx->bits[0] = t + (static_cast<uint32_t>(len) << 3)) < t)
ctx->bits[1]++;
ctx->bits[1] += static_cast<uint32_t>(len >> 29);
t = (t >> 3) & 0x3f;
if (t) {
uint8_t* p = static_cast<uint8_t*>(ctx->in + t);
t = 64 - t;
if (len < t) {
memcpy(p, buf, len);
return;
}
memcpy(p, buf, t);
byteReverse(ctx->in, 16);
MD5Transform(ctx->buf, reinterpret_cast<uint32_t*>(ctx->in));
buf += t;
len -= t;
}
while (len >= 64) {
memcpy(ctx->in, buf, 64);
byteReverse(ctx->in, 16);
MD5Transform(ctx->buf, reinterpret_cast<uint32_t*>(ctx->in));
buf += 64;
len -= 64;
}
memcpy(ctx->in, buf, len);
}
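// Finalizes the digest: appends the 0x80 pad byte, zero-pads to 56 bytes
// (flushing an extra block if needed), appends the 64-bit bit count, runs a
// last transform, then wipes the context.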
void MD5Final(MD5Digest* digest, MD5Context* context) {
struct Context* ctx = reinterpret_cast<struct Context*>(context);
unsigned count;
uint8_t* p;
count = (ctx->bits[0] >> 3) & 0x3F;
p = ctx->in + count;
*p++ = 0x80;
count = 64 - 1 - count;
if (count < 8) {
memset(p, 0, count);
byteReverse(ctx->in, 16);
MD5Transform(ctx->buf, reinterpret_cast<uint32_t*>(ctx->in));
memset(ctx->in, 0, 56);
} else {
memset(p, 0, count - 8);
}
byteReverse(ctx->in, 14);
memcpy(&ctx->in[14 * sizeof(ctx->bits[0])], &ctx->bits[0],
sizeof(ctx->bits[0]));
memcpy(&ctx->in[15 * sizeof(ctx->bits[1])], &ctx->bits[1],
sizeof(ctx->bits[1]));
MD5Transform(ctx->buf, reinterpret_cast<uint32_t*>(ctx->in));
byteReverse(reinterpret_cast<uint8_t*>(ctx->buf), 4);
memcpy(digest->a, ctx->buf, 16);
memset(ctx, 0, sizeof(*ctx));
}
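// Computes the digest of everything hashed so far without mutating
// `context`, by finalizing a temporary copy of it.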
void MD5IntermediateFinal(MD5Digest* digest, const MD5Context* context) {
MD5Context context_copy;
memcpy(&context_copy, context, sizeof(context_copy));
MD5Final(digest, &context_copy);
}
std::string MD5DigestToBase16(const MD5Digest& digest) {
static char const zEncode[] = "0123456789abcdef";
std::string ret;
ret.resize(32);
for (int i = 0, j = 0; i < 16; i++, j += 2) {
uint8_t a = digest.a[i];
ret[j] = zEncode[(a >> 4) & 0xf];
ret[j + 1] = zEncode[a & 0xf];
}
return ret;
}
void MD5Sum(const void* data, size_t length, MD5Digest* digest) {
MD5Context ctx;
MD5Init(&ctx);
MD5Update(&ctx, std::string(reinterpret_cast<const char*>(data), length));
MD5Final(digest, &ctx);
}
std::string MD5String(const std::string& str) {
MD5Digest digest;
MD5Sum(str.data(), str.length(), &digest);
return MD5DigestToBase16(digest);
}
}
}

#include "util/md5.h"
#include <cstring>
#include <memory>
#include <string>
#include <gtest/gtest.h>
namespace {
using i18n::addressinput::MD5Context;
using i18n::addressinput::MD5Digest;
using i18n::addressinput::MD5DigestToBase16;
using i18n::addressinput::MD5Final;
using i18n::addressinput::MD5Init;
using i18n::addressinput::MD5IntermediateFinal;
using i18n::addressinput::MD5String;
using i18n::addressinput::MD5Sum;
using i18n::addressinput::MD5Update;
TEST(MD5, DigestToBase16) {
MD5Digest digest;
int data[] = {
0xd4, 0x1d, 0x8c, 0xd9,
0x8f, 0x00, 0xb2, 0x04,
0xe9, 0x80, 0x09, 0x98,
0xec, 0xf8, 0x42, 0x7e
};
for (int i = 0; i < 16; ++i)
digest.a[i] = data[i] & 0xff;
std::string actual = MD5DigestToBase16(digest);
std::string expected = "d41d8cd98f00b204e9800998ecf8427e";
EXPECT_EQ(expected, actual);
}
TEST(MD5, MD5SumEmptyData) {
MD5Digest digest;
const char data[] = "";
MD5Sum(data, strlen(data), &digest);
int expected[] = {
0xd4, 0x1d, 0x8c, 0xd9,
0x8f, 0x00, 0xb2, 0x04,
0xe9, 0x80, 0x09, 0x98,
0xec, 0xf8, 0x42, 0x7e
};
for (int i = 0; i < 16; ++i)
EXPECT_EQ(expected[i], digest.a[i] & 0xFF);
}
TEST(MD5, MD5SumOneByteData) {
MD5Digest digest;
const char data[] = "a";
MD5Sum(data, strlen(data), &digest);
int expected[] = {
0x0c, 0xc1, 0x75, 0xb9,
0xc0, 0xf1, 0xb6, 0xa8,
0x31, 0xc3, 0x99, 0xe2,
0x69, 0x77, 0x26, 0x61
};
for (int i = 0; i < 16; ++i)
EXPECT_EQ(expected[i], digest.a[i] & 0xFF);
}
TEST(MD5, MD5SumLongData) {
const int length = 10 * 1024 * 1024 + 1;
std::unique_ptr<char[]> data(new char[length]);
for (int i = 0; i < length; ++i)
data[i] = i & 0xFF;
MD5Digest digest;
MD5Sum(data.get(), length, &digest);
int expected[] = {
0x90, 0xbd, 0x6a, 0xd9,
0x0a, 0xce, 0xf5, 0xad,
0xaa, 0x92, 0x20, 0x3e,
0x21, 0xc7, 0xa1, 0x3e
};
for (int i = 0; i < 16; ++i)
EXPECT_EQ(expected[i], digest.a[i] & 0xFF);
}
TEST(MD5, ContextWithEmptyData) {
MD5Context ctx;
MD5Init(&ctx);
MD5Digest digest;
MD5Final(&digest, &ctx);
int expected[] = {
0xd4, 0x1d, 0x8c, 0xd9,
0x8f, 0x00, 0xb2, 0x04,
0xe9, 0x80, 0x09, 0x98,
0xec, 0xf8, 0x42, 0x7e
};
for (int i = 0; i < 16; ++i)
EXPECT_EQ(expected[i], digest.a[i] & 0xFF);
}
TEST(MD5, ContextWithLongData) {
MD5Context ctx;
MD5Init(&ctx);
const int length = 10 * 1024 * 1024 + 1;
std::unique_ptr<char[]> data(new char[length]);
for (int i = 0; i < length; ++i)
data[i] = i & 0xFF;
int total = 0;
while (total < length) {
int len = 4097;
if (len > length - total)
len = length - total;
MD5Update(&ctx,
std::string(reinterpret_cast<char*>(data.get() + total), len));
total += len;
}
EXPECT_EQ(length, total);
MD5Digest digest;
MD5Final(&digest, &ctx);
int expected[] = {
0x90, 0xbd, 0x6a, 0xd9,
0x0a, 0xce, 0xf5, 0xad,
0xaa, 0x92, 0x20, 0x3e,
0x21, 0xc7, 0xa1, 0x3e
};
for (int i = 0; i < 16; ++i)
EXPECT_EQ(expected[i], digest.a[i] & 0xFF);
}
TEST(MD5, MD5StringTestSuite1) {
std::string actual = MD5String("");
std::string expected = "d41d8cd98f00b204e9800998ecf8427e";
EXPECT_EQ(expected, actual);
}
TEST(MD5, MD5StringTestSuite2) {
std::string actual = MD5String("a");
std::string expected = "0cc175b9c0f1b6a831c399e269772661";
EXPECT_EQ(expected, actual);
}
TEST(MD5, MD5StringTestSuite3) {
std::string actual = MD5String("abc");
std::string expected = "900150983cd24fb0d6963f7d28e17f72";
EXPECT_EQ(expected, actual);
}
TEST(MD5, MD5StringTestSuite4) {
std::string actual = MD5String("message digest");
std::string expected = "f96b697d7cb7938d525a2f31aaf161d0";
EXPECT_EQ(expected, actual);
}
TEST(MD5, MD5StringTestSuite5) {
std::string actual = MD5String("abcdefghijklmnopqrstuvwxyz");
std::string expected = "c3fcd3d76192e4007dfb496cca67e13b";
EXPECT_EQ(expected, actual);
}
TEST(MD5, MD5StringTestSuite6) {
std::string actual = MD5String("ABCDEFGHIJKLMNOPQRSTUVWXYZ"
"abcdefghijklmnopqrstuvwxyz"
"0123456789");
std::string expected = "d174ab98d277d9f5a5611c2c9f419d9f";
EXPECT_EQ(expected, actual);
}
TEST(MD5, MD5StringTestSuite7) {
std::string actual = MD5String("12345678901234567890"
"12345678901234567890"
"12345678901234567890"
"12345678901234567890");
std::string expected = "57edf4a22be3c955ac49da2e2107b67a";
EXPECT_EQ(expected, actual);
}
TEST(MD5, ContextWithStringData) {
MD5Context ctx;
MD5Init(&ctx);
MD5Update(&ctx, "abc");
MD5Digest digest;
MD5Final(&digest, &ctx);
std::string actual = MD5DigestToBase16(digest);
std::string expected = "900150983cd24fb0d6963f7d28e17f72";
EXPECT_EQ(expected, actual);
}
TEST(MD5, IntermediateFinal) {
MD5Context check_header_context;
MD5Init(&check_header_context);
MD5Context check_full_context;
MD5Init(&check_full_context);
MD5Context context;
MD5Init(&context);
static const char kHeader[] = "header data";
static const char kBody[] = "payload data";
MD5Update(&context, kHeader);
MD5Update(&check_header_context, kHeader);
MD5Update(&check_full_context, kHeader);
MD5Digest check_header_digest;
MD5Final(&check_header_digest, &check_header_context);
MD5Digest header_digest;
MD5IntermediateFinal(&header_digest, &context);
MD5Update(&context, kBody);
MD5Update(&check_full_context, kBody);
MD5Digest check_full_digest;
MD5Final(&check_full_digest, &check_full_context);
MD5Digest digest;
MD5Final(&digest, &context);
EXPECT_TRUE(!memcmp(&header_digest, &check_header_digest,
sizeof(header_digest)));
EXPECT_TRUE(!memcmp(&digest, &check_full_digest, sizeof(digest)));
EXPECT_TRUE(memcmp(&digest, &header_digest, sizeof(digest)));
}
}

https://github.com/google/libaddressinput/blob/2610f7b1043d6784ada41392fc9392d1ea09ea07/cpp/src/util/md5.cc | https://github.com/google/libaddressinput/blob/2610f7b1043d6784ada41392fc9392d1ea09ea07/cpp/test/util/md5_unittest.cc | 2610f7b1043d6784ada41392fc9392d1ea09ea07

6288d502-0a37-48dc-8f80-6013bf3a35e7 | cpp | tensorflow/tensorflow | int_type | tensorflow/core/lib/gtl/int_type.h | third_party/xla/xla/tsl/lib/gtl/int_type_test.cc

#ifndef TENSORFLOW_CORE_LIB_GTL_INT_TYPE_H_
#define TENSORFLOW_CORE_LIB_GTL_INT_TYPE_H_
#include "xla/tsl/lib/gtl/int_type.h"
namespace tensorflow {
namespace gtl {
using ::tsl::gtl::IntType;
}
}
#endif

#include "xla/tsl/lib/gtl/int_type.h"
#include <memory>
#include <unordered_map>
#include "tsl/platform/test.h"
#include "tsl/platform/types.h"
namespace tsl {
TSL_LIB_GTL_DEFINE_INT_TYPE(Int8_IT, int8);
TSL_LIB_GTL_DEFINE_INT_TYPE(UInt8_IT, uint8);
TSL_LIB_GTL_DEFINE_INT_TYPE(Int16_IT, int16);
TSL_LIB_GTL_DEFINE_INT_TYPE(UInt16_IT, uint16);
TSL_LIB_GTL_DEFINE_INT_TYPE(Int32_IT, int32);
TSL_LIB_GTL_DEFINE_INT_TYPE(Int64_IT, int64_t);
TSL_LIB_GTL_DEFINE_INT_TYPE(UInt32_IT, uint32);
TSL_LIB_GTL_DEFINE_INT_TYPE(UInt64_IT, uint64);
TSL_LIB_GTL_DEFINE_INT_TYPE(Long_IT, long);
template <typename IntType_Type>
class IntTypeTest : public ::testing::Test {};
typedef ::testing::Types<Int8_IT, UInt8_IT, Int16_IT, UInt16_IT, Int32_IT,
Int64_IT, UInt64_IT, Long_IT>
SupportedIntTypes;
TYPED_TEST_SUITE(IntTypeTest, SupportedIntTypes);
TYPED_TEST(IntTypeTest, TestInitialization) {
constexpr TypeParam a;
constexpr TypeParam b(1);
constexpr TypeParam c(b);
EXPECT_EQ(0, a);
EXPECT_EQ(1, b);
EXPECT_EQ(1, c);
}
TYPED_TEST(IntTypeTest, TestOperators) {
TypeParam a(0);
TypeParam b(1);
TypeParam c(2);
constexpr TypeParam d(3);
constexpr TypeParam e(4);
EXPECT_EQ(0, (a++).value());
EXPECT_EQ(2, (++a).value());
EXPECT_EQ(2, (a--).value());
EXPECT_EQ(0, (--a).value());
EXPECT_EQ(true, !a);
EXPECT_EQ(false, !b);
static_assert(!d == false, "Unary operator! failed");
EXPECT_EQ(a.value(), +a);
static_assert(+d == d.value(), "Unary operator+ failed");
EXPECT_EQ(-a.value(), -a);
static_assert(-d == -d.value(), "Unary operator- failed");
EXPECT_EQ(~a.value(), ~a);
EXPECT_EQ(~b.value(), ~b);
static_assert(~d == ~d.value(), "Unary operator~ failed");
c = a = b;
EXPECT_EQ(1, a.value());
EXPECT_EQ(1, c.value());
c = b = 2;
EXPECT_EQ(2, b.value());
EXPECT_EQ(2, c.value());
c = a += b;
EXPECT_EQ(3, a.value());
EXPECT_EQ(3, c.value());
c = a -= b;
EXPECT_EQ(1, a.value());
EXPECT_EQ(1, c.value());
c = a *= b;
EXPECT_EQ(2, a.value());
EXPECT_EQ(2, c.value());
c = a /= b;
EXPECT_EQ(1, a.value());
EXPECT_EQ(1, c.value());
c = a <<= b;
EXPECT_EQ(4, a.value());
EXPECT_EQ(4, c.value());
c = a >>= b;
EXPECT_EQ(1, a.value());
EXPECT_EQ(1, c.value());
c = a %= b;
EXPECT_EQ(1, a.value());
EXPECT_EQ(1, c.value());
c = a += 2;
EXPECT_EQ(3, a.value());
EXPECT_EQ(3, c.value());
c = a -= 2;
EXPECT_EQ(1, a.value());
EXPECT_EQ(1, c.value());
c = a *= 2;
EXPECT_EQ(2, a.value());
EXPECT_EQ(2, c.value());
c = a /= 2;
EXPECT_EQ(1, a.value());
EXPECT_EQ(1, c.value());
c = a <<= 2;
EXPECT_EQ(4, a.value());
EXPECT_EQ(4, c.value());
c = a >>= 2;
EXPECT_EQ(1, a.value());
EXPECT_EQ(1, c.value());
c = a %= 2;
EXPECT_EQ(1, a.value());
EXPECT_EQ(1, c.value());
a = 0;
b = 1;
EXPECT_FALSE(a == b);
EXPECT_TRUE(a == 0);
EXPECT_FALSE(1 == a);
static_assert(d == d, "operator== failed");
static_assert(d == 3, "operator== failed");
static_assert(3 == d, "operator== failed");
EXPECT_TRUE(a != b);
EXPECT_TRUE(a != 1);
EXPECT_FALSE(0 != a);
static_assert(d != e, "operator!= failed");
static_assert(d != 4, "operator!= failed");
static_assert(4 != d, "operator!= failed");
EXPECT_TRUE(a < b);
EXPECT_TRUE(a < 1);
EXPECT_FALSE(0 < a);
static_assert(d < e, "operator< failed");
static_assert(d < 4, "operator< failed");
static_assert(3 < e, "operator< failed");
EXPECT_TRUE(a <= b);
EXPECT_TRUE(a <= 1);
EXPECT_TRUE(0 <= a);
static_assert(d <= e, "operator<= failed");
static_assert(d <= 4, "operator<= failed");
static_assert(3 <= e, "operator<= failed");
EXPECT_FALSE(a > b);
EXPECT_FALSE(a > 1);
EXPECT_FALSE(0 > a);
static_assert(e > d, "operator> failed");
static_assert(e > 3, "operator> failed");
static_assert(4 > d, "operator> failed");
EXPECT_FALSE(a >= b);
EXPECT_FALSE(a >= 1);
EXPECT_TRUE(0 >= a);
static_assert(e >= d, "operator>= failed");
static_assert(e >= 3, "operator>= failed");
static_assert(4 >= d, "operator>= failed");
a = 1;
b = 3;
EXPECT_EQ(4, (a + b).value());
EXPECT_EQ(4, (a + 3).value());
EXPECT_EQ(4, (1 + b).value());
static_assert((d + e).value() == 7, "Binary operator+ failed");
static_assert((d + 4).value() == 7, "Binary operator+ failed");
static_assert((3 + e).value() == 7, "Binary operator+ failed");
EXPECT_EQ(2, (b - a).value());
EXPECT_EQ(2, (b - 1).value());
EXPECT_EQ(2, (3 - a).value());
static_assert((e - d).value() == 1, "Binary operator- failed");
static_assert((e - 3).value() == 1, "Binary operator- failed");
static_assert((4 - d).value() == 1, "Binary operator- failed");
EXPECT_EQ(3, (a * b).value());
EXPECT_EQ(3, (a * 3).value());
EXPECT_EQ(3, (1 * b).value());
static_assert((d * e).value() == 12, "Binary operator* failed");
static_assert((d * 4).value() == 12, "Binary operator* failed");
static_assert((3 * e).value() == 12, "Binary operator* failed");
EXPECT_EQ(0, (a / b).value());
EXPECT_EQ(0, (a / 3).value());
EXPECT_EQ(0, (1 / b).value());
static_assert((d / e).value() == 0, "Binary operator/ failed");
static_assert((d / 4).value() == 0, "Binary operator/ failed");
static_assert((3 / e).value() == 0, "Binary operator/ failed");
EXPECT_EQ(8, (a << b).value());
EXPECT_EQ(8, (a << 3).value());
EXPECT_EQ(8, (1 << b).value());
static_assert((d << e).value() == 48, "Binary operator<< failed");
static_assert((d << 4).value() == 48, "Binary operator<< failed");
static_assert((3 << e).value() == 48, "Binary operator<< failed");
b = 8;
EXPECT_EQ(4, (b >> a).value());
EXPECT_EQ(4, (b >> 1).value());
EXPECT_EQ(4, (8 >> a).value());
static_assert((d >> e).value() == 0, "Binary operator>> failed");
static_assert((d >> 4).value() == 0, "Binary operator>> failed");
static_assert((3 >> e).value() == 0, "Binary operator>> failed");
b = 3;
a = 2;
EXPECT_EQ(1, (b % a).value());
EXPECT_EQ(1, (b % 2).value());
EXPECT_EQ(1, (3 % a).value());
static_assert((e % d).value() == 1, "Binary operator% failed");
static_assert((e % 3).value() == 1, "Binary operator% failed");
static_assert((4 % d).value() == 1, "Binary operator% failed");
}
TYPED_TEST(IntTypeTest, TestHashFunctor) {
std::unordered_map<TypeParam, char, typename TypeParam::Hasher> map;
TypeParam a(0);
map[a] = 'c';
EXPECT_EQ('c', map[a]);
map[++a] = 'o';
EXPECT_EQ('o', map[a]);
TypeParam b(a);
EXPECT_EQ(typename TypeParam::Hasher()(a), typename TypeParam::Hasher()(b));
}
TYPED_TEST(IntTypeTest, TestValueAccessor) {
constexpr typename TypeParam::ValueType i = -1;
constexpr TypeParam int_type(i);
EXPECT_EQ(i, int_type.value());
static_assert(int_type.value() == i, "value() failed");
EXPECT_EQ(static_cast<int>(i), int_type.template value<int>());
EXPECT_EQ(static_cast<int8>(i), int_type.template value<int8>());
EXPECT_EQ(static_cast<int16>(i), int_type.template value<int16>());
EXPECT_EQ(static_cast<int32>(i), int_type.template value<int32>());
EXPECT_EQ(static_cast<uint32>(i), int_type.template value<uint32>());
EXPECT_EQ(static_cast<int64_t>(i), int_type.template value<int64_t>());
EXPECT_EQ(static_cast<uint64>(i), int_type.template value<uint64>());
EXPECT_EQ(static_cast<long>(i), int_type.template value<long>());
static_assert(int_type.template value<int>() == static_cast<int>(i),
"value<Value>() failed");
}
TYPED_TEST(IntTypeTest, TestMove) {
struct NotCopyable {
TypeParam inttype;
std::unique_ptr<int> ptr;
static NotCopyable Make(int i) {
NotCopyable f;
f.inttype = TypeParam(i);
f.ptr.reset(new int(i));
return f;
}
};
NotCopyable foo = NotCopyable::Make(123);
EXPECT_EQ(123, foo.inttype);
EXPECT_EQ(123, *foo.ptr);
foo = NotCopyable::Make(321);
EXPECT_EQ(321, foo.inttype);
EXPECT_EQ(321, *foo.ptr);
}
}

https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/lib/gtl/int_type.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/lib/gtl/int_type_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea

d141a428-7f90-4be4-a9af-5ed15f002cff | cpp | tensorflow/tensorflow | log | tensorflow/lite/experimental/shlo/ops/log.cc | tensorflow/lite/experimental/shlo/ops/log_test.cc

#include "tensorflow/lite/experimental/shlo/ops/log.h"
#include <cmath>
#include "absl/status/status.h"
#include "tensorflow/lite/experimental/shlo/bf16.h"
#include "tensorflow/lite/experimental/shlo/dispatch.h"
#include "tensorflow/lite/experimental/shlo/f16.h"
#include "tensorflow/lite/experimental/shlo/ops/unary_elementwise.h"
#include "tensorflow/lite/experimental/shlo/ops/util.h"
#include "tensorflow/lite/experimental/shlo/tensor.h"
namespace shlo_ref {
struct Log {
template <class T>
T operator()(T v) const {
return std::log(v);
}
};
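// F16 and BF16 lack a std::log overload, so compute in float and narrow back.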
template <>
F16 Log::operator()<F16>(F16 val) const {
return F16(operator()(static_cast<float>(val)));
}
template <>
BF16 Log::operator()<BF16>(BF16 val) const {
return BF16(operator()(static_cast<float>(val)));
}
LogOp Create(LogOp::Attributes) { return {}; }
absl::Status Prepare(LogOp& op, const Tensor& input, Tensor& output) {
SHLO_REF_RETURN_ON_ERROR(Propagate(input.shape(), output.shape()));
SHLO_REF_RETURN_ON_ERROR(CheckSupportedTypes(
CheckCtx("log"), input, IsFloatTensor, IsQuantizedPerTensorTensor));
SHLO_REF_RETURN_ON_ERROR(
CheckSameBaselineType(CheckCtx("log"), input, output));
return absl::OkStatus();
}
absl::Status Evaluate(LogOp& op, const Tensor& input, Tensor& output) {
Log log;
if (input.IsPerTensorQuantized()) {
DISPATCH_QUANTIZED(
detail::DequantizeOpQuantizePerTensor,
input.quantized_per_tensor_element_type().StorageType(),
input.quantized_per_tensor_element_type().ExpressedType(), log, input,
output)
} else if (IsFloatTensor(input)) {
DISPATCH_FLOAT(detail::EvaluateNoQuantization, input.tensor_element_type(),
log, input, output);
}
return absl::FailedPreconditionError(
"stablehlo.log: Unsupported tensor type.");
}
}

#include "tensorflow/lite/experimental/shlo/ops/log.h"
#include <cmath>
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/algorithm/container.h"
#include "tensorflow/lite/experimental/shlo/bf16.h"
#include "tensorflow/lite/experimental/shlo/f16.h"
#include "tensorflow/lite/experimental/shlo/ops/test_util.h"
#include "tensorflow/lite/experimental/shlo/ops/unary_elementwise_test_util.h"
#include "tensorflow/lite/experimental/shlo/quantize.h"
#include "tensorflow/lite/experimental/shlo/quantized_tensor_element_type.h"
#include "tensorflow/lite/experimental/shlo/shape.h"
#include "tensorflow/lite/experimental/shlo/status_matcher.h"
#include "tensorflow/lite/experimental/shlo/tensor.h"
using testing::ElementsAreArray;
using testing::NanSensitiveFloatEq;
using testing::Pointwise;
namespace shlo_ref {
template <>
struct ParamName<LogOp> {
static std::string Get() { return "Log"; }
};
namespace {
struct Log {
template <class T>
T operator()(T v) const {
return std::log(v);
}
} log_ref;
template <>
F16 Log::operator()<F16>(F16 val) const {
return F16(operator()(static_cast<float>(val)));
}
template <>
BF16 Log::operator()<BF16>(BF16 val) const {
return BF16(operator()(static_cast<float>(val)));
}
INSTANTIATE_TYPED_TEST_SUITE_P(Log, UnaryElementwiseOpShapePropagationTest,
LogOp, TestParamNames);
INSTANTIATE_TYPED_TEST_SUITE_P(
Log, UnaryElementwiseSameBaselineElementTypeConstraintTest,
UnaryElementwiseConstraint1Types<LogOp>, TestParamNames);
using UnsupportedTypes = WithOpTypes<
LogOp, ConcatTypes<BoolTestType, IntTestTypes, PerAxisQuantizedTestTypes>>;
INSTANTIATE_TYPED_TEST_SUITE_P(Log, UnaryElementwiseUnsupportedTypeTest,
UnsupportedTypes, TestParamNames);
template <class T>
struct LogTest : ::testing::Test {};
TYPED_TEST_SUITE(LogTest, FloatTestTypes, TestParamNames);
TYPED_TEST(LogTest, FloatTensorsWork) {
using StorageT = typename TypeParam::StorageT;
const Shape shape({2, 3, 4});
Vector<StorageT> input_data = RandomBuffer<TypeParam::kStorage>(
shape, static_cast<StorageT>(0.1));
Vector<StorageT> output_data(shape.NumElements());
Tensor input_tensor{
.type = TensorType{.shape = shape, .element_type = TypeParam::kStorage},
.data = input_data.data()};
Tensor output_tensor{
.type = TensorType{.shape = shape, .element_type = TypeParam::kStorage},
.data = output_data.data()};
Vector<StorageT> expected_data(shape.NumElements());
absl::c_transform(input_data, expected_data.begin(), log_ref);
auto op = Create(LogOp::Attributes{});
ASSERT_OK(Prepare(op, input_tensor, output_tensor));
ASSERT_OK(Evaluate(op, input_tensor, output_tensor));
EXPECT_THAT(output_data, Pointwise(NanSensitiveFloatEq(), expected_data));
}
template <class T>
struct QuantizedLogTest : ::testing::Test {};
TYPED_TEST_SUITE(QuantizedLogTest, QuantizedTestTypes, TestParamNames);
TYPED_TEST(QuantizedLogTest, PerTensorWorks) {
using StorageT = typename TypeParam::StorageT;
using ExpressedT = typename TypeParam::ExpressedT;
const Shape shape({2, 3, 4});
const ExpressedT scale = static_cast<ExpressedT>(1.5);
const StorageT zero_point = static_cast<StorageT>(5);
Vector<StorageT> input_data =
RandomBuffer<TypeParam::kStorage>(shape, zero_point + 1);
Vector<StorageT> output_data(shape.NumElements());
const QuantizedElementTypePerTensor tensor_type =
QuantizedElementTypePerTensor(TypeParam::kStorage, zero_point,
TypeParam::kExpressed, scale);
Tensor input_tensor{
.type = QuantizedPerTensorTensorType{.shape = shape,
.element_type = tensor_type},
.data = input_data.data()};
Tensor output_tensor{
.type = QuantizedPerTensorTensorType{.shape = shape,
.element_type = tensor_type},
.data = output_data.data()};
Vector<StorageT> expected_data(shape.NumElements());
absl::c_transform(
input_data, expected_data.begin(), [zero_point, scale](auto v) {
const ExpressedT dequantized_input = Dequantize(v, zero_point, scale);
const ExpressedT dequantized_res = log_ref(dequantized_input);
return Quantize<TypeParam::kStorage, TypeParam::kExpressed>(
dequantized_res, zero_point, static_cast<ExpressedT>(1.) / scale);
});
auto op = Create(LogOp::Attributes{});
ASSERT_OK(Prepare(op, input_tensor, output_tensor));
ASSERT_OK(Evaluate(op, input_tensor, output_tensor));
EXPECT_THAT(output_data, ElementsAreArray(expected_data));
}
}
}

https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/shlo/ops/log.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/shlo/ops/log_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea

17b11f88-09df-48cc-859d-7fed7fb5dec6 | cpp | abseil/abseil-cpp | cordz_update_scope | absl/strings/internal/cordz_update_scope.h | absl/strings/internal/cordz_update_scope_test.cc

#ifndef ABSL_STRINGS_INTERNAL_CORDZ_UPDATE_SCOPE_H_
#define ABSL_STRINGS_INTERNAL_CORDZ_UPDATE_SCOPE_H_
#include "absl/base/config.h"
#include "absl/base/optimization.h"
#include "absl/base/thread_annotations.h"
#include "absl/strings/internal/cord_internal.h"
#include "absl/strings/internal/cordz_info.h"
#include "absl/strings/internal/cordz_update_tracker.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace cord_internal {
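// RAII guard that acquires the lock on a (possibly null) CordzInfo for the
// duration of a cord update and can record the updated root CordRep.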
class ABSL_SCOPED_LOCKABLE CordzUpdateScope {
public:
CordzUpdateScope(CordzInfo* info, CordzUpdateTracker::MethodIdentifier method)
ABSL_EXCLUSIVE_LOCK_FUNCTION(info)
: info_(info) {
if (ABSL_PREDICT_FALSE(info_)) {
info->Lock(method);
}
}
CordzUpdateScope(CordzUpdateScope&& rhs) = delete;
CordzUpdateScope(const CordzUpdateScope&) = delete;
CordzUpdateScope& operator=(CordzUpdateScope&& rhs) = delete;
CordzUpdateScope& operator=(const CordzUpdateScope&) = delete;
~CordzUpdateScope() ABSL_UNLOCK_FUNCTION() {
if (ABSL_PREDICT_FALSE(info_)) {
info_->Unlock();
}
}
void SetCordRep(CordRep* rep) const {
if (ABSL_PREDICT_FALSE(info_)) {
info_->SetCordRep(rep);
}
}
CordzInfo* info() const { return info_; }
private:
CordzInfo* info_;
};
}
ABSL_NAMESPACE_END
}
#endif

#include "absl/strings/internal/cordz_update_scope.h"
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/base/config.h"
#include "absl/strings/cordz_test_helpers.h"
#include "absl/strings/internal/cord_rep_flat.h"
#include "absl/strings/internal/cordz_info.h"
#include "absl/strings/internal/cordz_update_tracker.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace cord_internal {
namespace {
auto constexpr kTrackCordMethod = CordzUpdateTracker::kConstructorString;
TEST(CordzUpdateScopeTest, ScopeNullptr) {
CordzUpdateScope scope(nullptr, kTrackCordMethod);
}
TEST(CordzUpdateScopeTest, ScopeSampledCord) {
TestCordData cord;
CordzInfo::TrackCord(cord.data, kTrackCordMethod, 1);
CordzUpdateScope scope(cord.data.cordz_info(), kTrackCordMethod);
cord.data.cordz_info()->SetCordRep(nullptr);
}
}
ABSL_NAMESPACE_END
}
}

https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/strings/internal/cordz_update_scope.h | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/strings/internal/cordz_update_scope_test.cc | 03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4

98768ebc-6ed4-449c-809b-5a25249a04e5 | cpp | tensorflow/tensorflow | fb_storage | tensorflow/lite/experimental/acceleration/mini_benchmark/fb_storage.cc | tensorflow/lite/experimental/acceleration/mini_benchmark/fb_storage_test.cc

#include "tensorflow/lite/experimental/acceleration/mini_benchmark/fb_storage.h"
#include <fcntl.h>
#include <errno.h>
#include <string.h>
#ifndef _WIN32
#include <sys/file.h>
#include <unistd.h>
#endif
#include <fstream>
#include <sstream>
#include <string>
#include "absl/strings/string_view.h"
#include "tensorflow/lite/core/c/c_api_types.h"
#ifndef TEMP_FAILURE_RETRY
#ifdef __ANDROID__
#error "TEMP_FAILURE_RETRY not set although on Android"
#else
#define TEMP_FAILURE_RETRY(exp) exp
#endif
#endif
namespace tflite {
namespace acceleration {
FileStorage::FileStorage(absl::string_view path, ErrorReporter* error_reporter)
: path_(path), error_reporter_(error_reporter) {}
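// Reads the entire storage file into buffer_ under an exclusive flock();
// if the file does not exist yet, it is created empty and the (empty) read
// succeeds.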
MinibenchmarkStatus FileStorage::ReadFileIntoBuffer() {
#ifndef _WIN32
buffer_.clear();
int fd = TEMP_FAILURE_RETRY(open(path_.c_str(), O_RDONLY | O_CLOEXEC, 0600));
int open_error_no = errno;
if (fd < 0) {
int fd = TEMP_FAILURE_RETRY(
open(path_.c_str(), O_WRONLY | O_APPEND | O_CREAT | O_CLOEXEC, 0600));
if (fd >= 0) {
close(fd);
return kMinibenchmarkSuccess;
}
int create_error_no = errno;
TF_LITE_REPORT_ERROR(
error_reporter_,
"Could not open %s for reading: %s, creating failed as well: %s",
path_.c_str(), std::strerror(open_error_no),
std::strerror(create_error_no));
return kMinibenchmarkCantCreateStorageFile;
}
int lock_status = flock(fd, LOCK_EX);
int lock_error_no = errno;
if (lock_status < 0) {
close(fd);
TF_LITE_REPORT_ERROR(error_reporter_, "Could not flock %s: %s",
path_.c_str(), std::strerror(lock_error_no));
return kMinibenchmarkFlockingStorageFileFailed;
}
char buffer[512];
while (true) {
int bytes_read = TEMP_FAILURE_RETRY(read(fd, buffer, 512));
int read_error_no = errno;
if (bytes_read == 0) {
close(fd);
return kMinibenchmarkSuccess;
} else if (bytes_read < 0) {
close(fd);
TF_LITE_REPORT_ERROR(error_reporter_, "Error reading %s: %s",
path_.c_str(), std::strerror(read_error_no));
return kMinibenchmarkErrorReadingStorageFile;
} else {
buffer_.append(buffer, bytes_read);
}
}
#else
return kMinibenchmarkUnsupportedPlatform;
#endif
}
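// Appends `data` under an exclusive flock(), looping over short writes and
// fsync()ing before close so the record is durable once this returns.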
MinibenchmarkStatus FileStorage::AppendDataToFile(absl::string_view data) {
#ifndef _WIN32
int fd = TEMP_FAILURE_RETRY(
open(path_.c_str(), O_WRONLY | O_APPEND | O_CREAT | O_CLOEXEC, 0600));
if (fd < 0) {
int error_no = errno;
TF_LITE_REPORT_ERROR(error_reporter_, "Could not open %s for writing: %s",
path_.c_str(), std::strerror(error_no));
return kMinibenchmarkFailedToOpenStorageFileForWriting;
}
int lock_status = flock(fd, LOCK_EX);
int lock_error_no = errno;
if (lock_status < 0) {
close(fd);
TF_LITE_REPORT_ERROR(error_reporter_, "Could not flock %s: %s",
path_.c_str(), std::strerror(lock_error_no));
return kMinibenchmarkFlockingStorageFileFailed;
}
absl::string_view bytes = data;
while (!bytes.empty()) {
ssize_t bytes_written =
TEMP_FAILURE_RETRY(write(fd, bytes.data(), bytes.size()));
if (bytes_written < 0) {
int error_no = errno;
close(fd);
TF_LITE_REPORT_ERROR(error_reporter_, "Could not write to %s: %s",
path_.c_str(), std::strerror(error_no));
return kMinibenchmarkErrorWritingStorageFile;
}
bytes.remove_prefix(bytes_written);
}
if (TEMP_FAILURE_RETRY(fsync(fd)) < 0) {
int error_no = errno;
close(fd);
TF_LITE_REPORT_ERROR(error_reporter_, "Failed to fsync %s: %s",
path_.c_str(), std::strerror(error_no));
return kMinibenchmarkErrorFsyncingStorageFile;
}
if (TEMP_FAILURE_RETRY(close(fd)) < 0) {
int error_no = errno;
TF_LITE_REPORT_ERROR(error_reporter_, "Failed to close %s: %s",
path_.c_str(), std::strerror(error_no));
return kMinibenchmarkErrorClosingStorageFile;
}
return kMinibenchmarkSuccess;
#else
return kMinibenchmarkUnsupportedPlatform;
#endif
}
const char kFlatbufferStorageIdentifier[] = "STO1";
}
}

#include "tensorflow/lite/experimental/acceleration/mini_benchmark/fb_storage.h"
#include <sys/stat.h>
#include <unistd.h>
#include <algorithm>
#include <cstdlib>
#include <string>
#include <thread>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "flatbuffers/flatbuffers.h"
#include "tensorflow/lite/acceleration/configuration/configuration_generated.h"
#include "tensorflow/lite/core/c/c_api_types.h"
namespace tflite {
namespace acceleration {
namespace {
std::string GetTemporaryDirectory() {
#ifdef __ANDROID__
return "/data/local/tmp";
#else
if (getenv("TEST_TMPDIR")) {
return getenv("TEST_TMPDIR");
}
if (getenv("TEMP")) {
return getenv("TEMP");
}
return ".";
#endif
}
std::string GetStoragePath() {
std::string path = GetTemporaryDirectory() + "/storage.fb";
unlink(path.c_str());
return path;
}
TEST(FlatbufferStorageTest, AppendAndReadOneItem) {
std::string path = GetStoragePath();
flatbuffers::FlatBufferBuilder fbb;
flatbuffers::Offset<BenchmarkEvent> o =
CreateBenchmarkEvent(fbb, 0, BenchmarkEventType_START);
FlatbufferStorage<BenchmarkEvent> storage(path);
EXPECT_EQ(storage.Read(), kMinibenchmarkSuccess);
EXPECT_EQ(storage.Count(), 0);
EXPECT_EQ(storage.Append(&fbb, o), kMinibenchmarkSuccess);
ASSERT_EQ(storage.Count(), 1);
EXPECT_EQ(storage.Get(0)->event_type(), BenchmarkEventType_START);
storage = FlatbufferStorage<BenchmarkEvent>(path);
EXPECT_EQ(storage.Read(), kMinibenchmarkSuccess);
ASSERT_EQ(storage.Count(), 1);
EXPECT_EQ(storage.Get(0)->event_type(), BenchmarkEventType_START);
}
TEST(FlatbufferStorageTest, AppendAndReadThreeItems) {
std::string path = GetStoragePath();
FlatbufferStorage<BenchmarkEvent> storage(path);
EXPECT_EQ(storage.Read(), kMinibenchmarkSuccess);
EXPECT_EQ(storage.Count(), 0);
for (auto event : {BenchmarkEventType_START, BenchmarkEventType_ERROR,
BenchmarkEventType_END}) {
flatbuffers::FlatBufferBuilder fbb;
flatbuffers::Offset<BenchmarkEvent> object =
CreateBenchmarkEvent(fbb, 0, event);
EXPECT_EQ(storage.Append(&fbb, object), kMinibenchmarkSuccess);
}
ASSERT_EQ(storage.Count(), 3);
EXPECT_EQ(storage.Get(0)->event_type(), BenchmarkEventType_START);
EXPECT_EQ(storage.Get(1)->event_type(), BenchmarkEventType_ERROR);
EXPECT_EQ(storage.Get(2)->event_type(), BenchmarkEventType_END);
storage = FlatbufferStorage<BenchmarkEvent>(path);
EXPECT_EQ(storage.Read(), kMinibenchmarkSuccess);
ASSERT_EQ(storage.Count(), 3);
EXPECT_EQ(storage.Get(0)->event_type(), BenchmarkEventType_START);
EXPECT_EQ(storage.Get(1)->event_type(), BenchmarkEventType_ERROR);
EXPECT_EQ(storage.Get(2)->event_type(), BenchmarkEventType_END);
}
TEST(FlatbufferStorageTest, PathDoesntExist) {
std::string path = GetTemporaryDirectory() + "/nosuchdirectory/storage.pb";
FlatbufferStorage<BenchmarkEvent> storage(path);
EXPECT_EQ(storage.Read(), kMinibenchmarkCantCreateStorageFile);
}
#ifndef __ANDROID__
TEST(FlatbufferStorageTest, WriteFailureResetsStorage) {
std::string path = GetStoragePath();
flatbuffers::FlatBufferBuilder fbb;
flatbuffers::Offset<BenchmarkEvent> o =
CreateBenchmarkEvent(fbb, 0, BenchmarkEventType_START);
FlatbufferStorage<BenchmarkEvent> storage(path);
EXPECT_EQ(storage.Append(&fbb, o), kMinibenchmarkSuccess);
ASSERT_EQ(storage.Count(), 1);
chmod(path.c_str(), 0444);
EXPECT_EQ(storage.Append(&fbb, o),
kMinibenchmarkFailedToOpenStorageFileForWriting);
ASSERT_EQ(storage.Count(), 0);
}
#endif
TEST(FlatbufferStorageTest, Locking) {
std::string path = GetStoragePath();
std::vector<std::thread> threads;
const int kNumThreads = 4;
const int kIterations = 10;
threads.reserve(kNumThreads);
for (int i = 0; i < kNumThreads; i++) {
threads.push_back(std::thread([path]() {
for (int j = 0; j < kIterations; j++) {
FlatbufferStorage<BenchmarkEvent> storage(path);
flatbuffers::FlatBufferBuilder fbb;
flatbuffers::Offset<BenchmarkEvent> o =
CreateBenchmarkEvent(fbb, 0, BenchmarkEventType_START);
EXPECT_EQ(storage.Append(&fbb, o), kMinibenchmarkSuccess);
}
}));
}
std::for_each(threads.begin(), threads.end(),
[](std::thread& t) { t.join(); });
FlatbufferStorage<BenchmarkEvent> storage(path);
EXPECT_EQ(storage.Read(), kMinibenchmarkSuccess);
EXPECT_EQ(storage.Count(), kNumThreads * kIterations);
}
}
}
}

https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/acceleration/mini_benchmark/fb_storage.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/acceleration/mini_benchmark/fb_storage_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea

83a42cf8-3caa-495f-ade3-622f32a471a3 | cpp | tensorflow/tensorflow | ragged_tensor_from_variant_op | tensorflow/core/kernels/ragged_tensor_from_variant_op.cc | tensorflow/core/kernels/ragged_tensor_from_variant_op_test.cc

#include <utility>
#include <vector>
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/register_types.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/variant.h"
#include "tensorflow/core/framework/variant_encode_decode.h"
#include "tensorflow/core/kernels/ragged_tensor_variant.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/status.h"
namespace tensorflow {
namespace {
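// Decodes each element of `encoded_variant` into a RaggedTensorVariant and
// validates its ragged rank, values dtype, values rank, and splits
// dtype/rank against the expected op attributes.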
Status RaggedComponentsFromVariant(
const Tensor& encoded_variant, int input_ragged_rank,
int output_ragged_rank, DataType value_dtype, DataType split_dtype,
std::vector<RaggedTensorVariant>* decoded_ragged) {
const auto& flat_variants = encoded_variant.flat<Variant>();
decoded_ragged->reserve(flat_variants.size());
for (int i = 0; i < flat_variants.size(); i++) {
const auto& flat_variant = flat_variants(i);
const RaggedTensorVariant* decoded =
flat_variant.get<RaggedTensorVariant>();
if (decoded == nullptr) {
return errors::InvalidArgument(
"Input Variant element at index ", i,
" doesn't hold a RaggedTensorVariant: ", flat_variant.DebugString());
}
decoded_ragged->push_back(*decoded);
decoded = &decoded_ragged->back();
if (decoded->ragged_rank() != input_ragged_rank) {
return errors::InvalidArgument(
"Encoded input RaggedTensorVariant has ragged_rank=",
decoded->ragged_rank(), ". Expected ragged_rank=", input_ragged_rank,
".");
}
if (decoded->values().dtype() != value_dtype) {
return errors::InvalidArgument(
"Expected values Tensor dtype: ", DataTypeString(value_dtype),
", found: ", DataTypeString(decoded->values().dtype()));
}
if (decoded->values().dims() < 1 && output_ragged_rank != 0) {
return errors::InvalidArgument(
"Ragged values must have rank >= 1; encoded scalar element at index ",
i, " has values Tensor: ", decoded->values().DebugString());
}
for (const auto& splits : decoded->nested_splits()) {
if (splits.dtype() != split_dtype) {
return errors::InvalidArgument(
"Expected row_splits Tensor dtype: ", DataTypeString(split_dtype),
", found: ", DataTypeString(splits.dtype()));
}
if (splits.dims() != 1) {
return errors::InvalidArgument(
"Ragged splits must have rank 1; encoded scalar element at index ",
i, " has splits Tensor ", splits.DebugString());
}
}
}
return absl::OkStatus();
}
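// Stacks ragged-rank-0 components (plain dense tensors) along a new leading
// dimension; every component must have the same values shape.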
template <typename VALUE_TYPE>
Status StackNonRaggedTensors(
const std::vector<RaggedTensorVariant>& ragged_components,
RaggedTensorVariant* output_ragged) {
if (ragged_components.empty()) {
output_ragged->set_values(Tensor(DataTypeToEnum<VALUE_TYPE>::value, {0}));
return absl::OkStatus();
}
TensorShape component_values_shape = ragged_components[0].values().shape();
TensorShape result_shape = component_values_shape;
result_shape.InsertDim(0, ragged_components.size());
output_ragged->set_values(
Tensor(DataTypeToEnum<VALUE_TYPE>::value, result_shape));
auto output_values_flat = output_ragged->mutable_values()->flat<VALUE_TYPE>();
int values_index = 0;
for (int i = 0; i < ragged_components.size(); i++) {
auto& component_values = ragged_components[i].values();
if (component_values.shape() != component_values_shape) {
return errors::InvalidArgument(
"All flat_values must have compatible shapes. Shape at index 0: ",
component_values_shape, ". Shape at index ", i, ": ",
component_values.shape());
}
auto component_values_flat = component_values.flat<VALUE_TYPE>();
for (int j = 0; j < component_values_flat.size(); j++) {
output_values_flat(values_index++) = component_values_flat(j);
}
}
return absl::OkStatus();
}
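// Stacks the decoded components back into one ragged tensor: uniform splits
// for the leading dense dims of the variant shape, a splits vector joining
// the components, the components' own splits concatenated with running
// offsets, and their values concatenated along dim 0.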
template <typename VALUE_TYPE, typename SPLIT_TYPE>
Status NestedStackRaggedTensors(
const std::vector<RaggedTensorVariant>& ragged_components,
const std::vector<int>& nested_dim_sizes, const int input_ragged_rank,
const int output_ragged_rank, RaggedTensorVariant* output_ragged) {
output_ragged->mutable_nested_splits()->reserve(output_ragged_rank);
const int dims = nested_dim_sizes.size();
if (output_ragged_rank == 0) {
if (input_ragged_rank > 0) {
return errors::InvalidArgument(
"Expected input_ragged_rank=0 if output_ragged_rank==0. "
"Got input_ragged_rank=",
input_ragged_rank);
}
return StackNonRaggedTensors<VALUE_TYPE>(ragged_components, output_ragged);
}
for (int i = 0; i < dims - 1; i++) {
int dims_splits_size = nested_dim_sizes[i] + 1;
output_ragged->append_splits(Tensor(DataTypeToEnum<SPLIT_TYPE>::value,
TensorShape({dims_splits_size})));
auto splits_vec = output_ragged->mutable_splits(i)->vec<SPLIT_TYPE>();
int split_diff = nested_dim_sizes[i + 1];
for (int j = 0; j < dims_splits_size; j++) {
splits_vec(j) = j * split_diff;
}
}
int splits_size = ragged_components.size() + 1;
output_ragged->append_splits(
Tensor(DataTypeToEnum<SPLIT_TYPE>::value, TensorShape({splits_size})));
auto dims_splits_vec =
output_ragged->mutable_splits(dims - 1)->vec<SPLIT_TYPE>();
dims_splits_vec(0) = 0;
for (int i = 0; i < ragged_components.size(); i++) {
int split_val = ragged_components[i].values().shape().dim_size(0);
if (input_ragged_rank != 0 && ragged_components[i].ragged_rank() > 0) {
split_val = ragged_components[i].splits(0).NumElements() - 1;
}
dims_splits_vec(i + 1) = dims_splits_vec(i) + split_val;
}
for (int i = 0; i < input_ragged_rank; i++) {
int split_index = dims + i;
int split_size = 1;
for (int j = 0; j < ragged_components.size(); j++) {
if (!ragged_components[j].nested_splits().empty()) {
split_size += ragged_components[j].splits(i).NumElements() - 1;
}
}
output_ragged->append_splits(
Tensor(DataTypeToEnum<SPLIT_TYPE>::value, TensorShape({split_size})));
auto splits_vec =
output_ragged->mutable_splits(split_index)->vec<SPLIT_TYPE>();
splits_vec(0) = 0;
SPLIT_TYPE last_split_value = 0;
int index = 1;
for (int j = 0; j < ragged_components.size(); j++) {
if (ragged_components[j].nested_splits().empty()) {
continue;
}
auto component_splits_vec =
ragged_components[j].splits(i).vec<SPLIT_TYPE>();
for (int k = 1; k < component_splits_vec.size(); k++, index++) {
splits_vec(index) = component_splits_vec(k) + last_split_value;
}
last_split_value = splits_vec(index - 1);
}
}
TensorShape component_values_shape;
if (ragged_components.empty()) {
component_values_shape = TensorShape({0});
} else {
component_values_shape = ragged_components[0].values().shape();
}
int values_size = component_values_shape.dim_size(0);
for (int i = 1; i < ragged_components.size(); i++) {
if (ragged_components[i].values().dims() != component_values_shape.dims()) {
return errors::InvalidArgument(
"Rank of values must match for all "
"components; values shape at index 0: ",
component_values_shape.DebugString(), ", values shape at index ", i,
": ", ragged_components[i].values().shape().DebugString());
}
values_size += ragged_components[i].values().shape().dim_size(0);
}
component_values_shape.set_dim(0, values_size);
output_ragged->set_values(
Tensor(DataTypeToEnum<VALUE_TYPE>::value, component_values_shape));
auto output_values_flat =
output_ragged->mutable_values()->flat_outer_dims<VALUE_TYPE, 2>();
int values_index = 0;
TensorShape expected_value_shape = component_values_shape;
expected_value_shape.RemoveDim(0);
for (int i = 0; i < ragged_components.size(); i++) {
TensorShape value_shape = ragged_components[i].values().shape();
value_shape.RemoveDim(0);
if (value_shape != expected_value_shape) {
return errors::InvalidArgument(
"All flat_values must have compatible shapes. Shape at index 0: ",
expected_value_shape, ". Shape at index ", i, ": ", value_shape,
". If you are using tf.map_fn, then you may need to specify an "
"explicit fn_output_signature with appropriate ragged_rank, and/or "
"convert output tensors to RaggedTensors.");
}
auto component_values_flat =
ragged_components[i].values().flat_outer_dims<VALUE_TYPE, 2>();
int num_inner_elements = ragged_components[i].values().NumElements();
if (ragged_components[i].values().dim_size(0) > 0) {
num_inner_elements /= ragged_components[i].values().dim_size(0);
}
for (int j = 0; j < ragged_components[i].values().dim_size(0);
j++, values_index++) {
for (int k = 0; k < num_inner_elements; k++) {
output_values_flat(values_index, k) = component_values_flat(j, k);
}
}
}
return absl::OkStatus();
}
}
template <typename VALUE_TYPE, typename SPLIT_TYPE>
class RaggedTensorFromVariantOp : public OpKernel {
public:
explicit RaggedTensorFromVariantOp(OpKernelConstruction* context)
: OpKernel(context) {
OP_REQUIRES_OK(context, context->GetAttr("input_ragged_rank",
&input_ragged_rank_attr_));
OP_REQUIRES_OK(
context, context->GetAttr("output_ragged_rank", &output_ragged_rank_));
}
void Compute(OpKernelContext* context) override {
const Tensor& encoded_variant = context->input(0);
auto input_ragged_rank_ = input_ragged_rank_attr_;
if (input_ragged_rank_ == -1) {
input_ragged_rank_ = output_ragged_rank_ - encoded_variant.dims();
if (output_ragged_rank_ == 0 && input_ragged_rank_ < 0) {
input_ragged_rank_ = 0;
}
OP_REQUIRES(context, input_ragged_rank_ >= 0,
errors::InvalidArgument(
"Inferred input_ragged_rank (output_ragged_rank - "
"encoded_variant.dims()) must be >= 0, found "
"output_ragged_rank: ",
output_ragged_rank_,
", encoded_variant.dims(): ", encoded_variant.dims(),
", inferred input_ragged_rank: ", input_ragged_rank_));
}
OP_REQUIRES(
context,
(output_ragged_rank_ == 0 && input_ragged_rank_ == 0) ||
(output_ragged_rank_ ==
encoded_variant.dims() + input_ragged_rank_),
errors::InvalidArgument(
"output_ragged_rank must be equal to input_ragged_rank + "
"encoded_ragged.dims(); output_ragged_rank: ",
output_ragged_rank_, ", input_ragged_rank: ", input_ragged_rank_,
", encoded_variant.dims(): ", encoded_variant.dims(), "."));
const auto value_dtype = DataTypeToEnum<VALUE_TYPE>::v();
const auto split_dtype = DataTypeToEnum<SPLIT_TYPE>::v();
std::vector<RaggedTensorVariant> decoded_components;
OP_REQUIRES_OK(context,
RaggedComponentsFromVariant(
encoded_variant, input_ragged_rank_, output_ragged_rank_,
value_dtype, split_dtype, &decoded_components));
if (encoded_variant.dims() == 0) {
ReturnRaggedTensor(context, decoded_components[0]);
return;
}
std::vector<int> encoded_dim_sizes(encoded_variant.dims(), 0);
for (int i = 0; i < encoded_variant.dims(); i++) {
encoded_dim_sizes[i] = encoded_variant.dim_size(i);
}
RaggedTensorVariant output_ragged;
OP_REQUIRES_OK(
context, NestedStackRaggedTensors<VALUE_TYPE, SPLIT_TYPE>(
decoded_components, encoded_dim_sizes, input_ragged_rank_,
output_ragged_rank_, &output_ragged));
ReturnRaggedTensor(context, output_ragged);
}
private:
int input_ragged_rank_attr_;
int output_ragged_rank_;
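// Writes one output per nested splits vector, followed by the flat values.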
void ReturnRaggedTensor(OpKernelContext* context,
const RaggedTensorVariant& ragged_tensor) {
int ragged_rank = ragged_tensor.ragged_rank();
OpOutputList splits_out;
OP_REQUIRES_OK(context,
context->output_list("output_nested_splits", &splits_out));
for (int i = 0; i < ragged_rank; i++) {
splits_out.set(i, ragged_tensor.splits(i));
}
context->set_output(ragged_rank, ragged_tensor.values());
}
};
#define REGISTER_KERNELS_WITH_SPLIT_TYPE(value_type, split_type) \
REGISTER_KERNEL_BUILDER(Name("RaggedTensorFromVariant") \
.Device(DEVICE_CPU) \
.TypeConstraint<value_type>("Tvalues") \
.TypeConstraint<split_type>("Tsplits"), \
RaggedTensorFromVariantOp<value_type, split_type>) \
REGISTER_KERNEL_BUILDER(Name("RaggedTensorFromVariant") \
.Device(DEVICE_GPU) \
.TypeConstraint<value_type>("Tvalues") \
.TypeConstraint<split_type>("Tsplits") \
.HostMemory("encoded_ragged") \
.HostMemory("output_nested_splits") \
.HostMemory("output_dense_values"), \
RaggedTensorFromVariantOp<value_type, split_type>) \
REGISTER_KERNEL_BUILDER(Name("RaggedTensorFromVariant") \
.Device(DEVICE_TPU) \
.TypeConstraint<value_type>("Tvalues") \
.TypeConstraint<split_type>("Tsplits") \
.HostMemory("encoded_ragged") \
.HostMemory("output_nested_splits") \
.HostMemory("output_dense_values"), \
RaggedTensorFromVariantOp<value_type, split_type>);
#define REGISTER_KERNELS(value_type) \
REGISTER_KERNELS_WITH_SPLIT_TYPE(value_type, int32) \
REGISTER_KERNELS_WITH_SPLIT_TYPE(value_type, int64_t)
TF_CALL_POD_TYPES(REGISTER_KERNELS);
TF_CALL_tstring(REGISTER_KERNELS);
TF_CALL_QUANTIZED_TYPES(REGISTER_KERNELS);
TF_CALL_quint16(REGISTER_KERNELS);
TF_CALL_qint16(REGISTER_KERNELS);
#undef REGISTER_KERNELS
#undef REGISTER_KERNELS_WITH_SPLIT_TYPE
}

#include <utility>
#include <vector>
#include "absl/strings/match.h"
#include "tensorflow/core/framework/fake_input.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/shape_inference.h"
#include "tensorflow/core/framework/shape_inference_testutil.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/variant.h"
#include "tensorflow/core/framework/variant_encode_decode.h"
#include "tensorflow/core/kernels/ops_testutil.h"
#include "tensorflow/core/kernels/ragged_tensor_variant.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
class RaggedTensorFromVariantKernelTest : public ::tensorflow::OpsTestBase {
protected:
template <typename VALUE_TYPE, typename SPLIT_TYPE>
void BuildDecodeRaggedTensorGraph(
const int input_ragged_rank, const int output_ragged_rank,
const TensorShape& variant_shape,
const std::vector<Variant>& variant_values) {
const auto value_dtype = DataTypeToEnum<VALUE_TYPE>::v();
const auto split_dtype = DataTypeToEnum<SPLIT_TYPE>::v();
TF_ASSERT_OK(NodeDefBuilder("tested_op", "RaggedTensorFromVariant")
.Input(FakeInput(DT_VARIANT))
.Attr("input_ragged_rank", input_ragged_rank)
.Attr("output_ragged_rank", output_ragged_rank)
.Attr("Tvalues", value_dtype)
.Attr("Tsplits", split_dtype)
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
AddInputFromArray<Variant>(variant_shape, variant_values);
}
template <typename VALUE_TYPE, typename SPLIT_TYPE>
RaggedTensorVariant CreateVariantFromRagged(
const std::vector<std::vector<SPLIT_TYPE>>& ragged_splits,
const TensorShape& ragged_values_shape,
const std::vector<VALUE_TYPE>& ragged_values) {
RaggedTensorVariant encoded;
for (auto ragged_split : ragged_splits) {
int splits_size = ragged_split.size();
Tensor splits(DataTypeToEnum<SPLIT_TYPE>::v(),
TensorShape({splits_size}));
test::FillValues<SPLIT_TYPE>(&splits, ragged_split);
encoded.append_splits(splits);
}
Tensor values(DataTypeToEnum<VALUE_TYPE>::v(), ragged_values_shape);
test::FillValues<VALUE_TYPE>(&values, ragged_values);
encoded.set_values(values);
return encoded;
}
};
TEST_F(RaggedTensorFromVariantKernelTest, ScalarInput) {
const std::vector<int64_t> split_1 = {0, 1, 2, 3, 4, 5};
const std::vector<int64_t> split_2 = {0, 1, 2, 5, 6, 7};
const std::vector<int> values = {0, 1, 1, 2, 2, 3, 4};
auto encoded_variant = CreateVariantFromRagged<int, int64_t>(
{split_1, split_2}, TensorShape({7}), values);
Tensor expected_splits_1(DT_INT64, TensorShape({6}));
Tensor expected_splits_2(DT_INT64, TensorShape({6}));
Tensor expected_values(DT_INT32, TensorShape({7}));
test::FillValues<int64_t>(&expected_splits_1, split_1);
test::FillValues<int64_t>(&expected_splits_2, split_2);
test::FillValues<int>(&expected_values, values);
int input_ragged_rank = 2;
int output_ragged_rank = 2;
BuildDecodeRaggedTensorGraph<int, int64_t>(
input_ragged_rank, output_ragged_rank, TensorShape({}),
{encoded_variant});
TF_ASSERT_OK(RunOpKernel());
test::ExpectTensorEqual<int64_t>(*GetOutput(0), expected_splits_1);
test::ExpectTensorEqual<int64_t>(*GetOutput(1), expected_splits_2);
test::ExpectTensorEqual<int>(*GetOutput(2), expected_values);
}
TEST_F(RaggedTensorFromVariantKernelTest, OneInputElement) {
const std::vector<int64_t> split_1 = {0, 1, 2, 3, 4, 5};
const std::vector<int64_t> split_2 = {0, 1, 2, 5, 6, 7};
const std::vector<int> values = {0, 1, 1, 2, 2, 3, 4};
const std::vector<int64_t> batched_splits_1 = {0, 5};
auto encoded_variant = CreateVariantFromRagged<int, int64_t>(
{split_1, split_2}, TensorShape({7}), values);
Tensor expected_splits_1(DT_INT64, TensorShape({2}));
Tensor expected_splits_2(DT_INT64, TensorShape({6}));
Tensor expected_splits_3(DT_INT64, TensorShape({6}));
Tensor expected_values(DT_INT32, TensorShape({7}));
test::FillValues<int64_t>(&expected_splits_1, batched_splits_1);
test::FillValues<int64_t>(&expected_splits_2, split_1);
test::FillValues<int64_t>(&expected_splits_3, split_2);
test::FillValues<int>(&expected_values, values);
int input_ragged_rank = 2;
int output_ragged_rank = 3;
BuildDecodeRaggedTensorGraph<int, int64_t>(
input_ragged_rank, output_ragged_rank, TensorShape({1}),
{encoded_variant});
TF_ASSERT_OK(RunOpKernel());
test::ExpectTensorEqual<int64_t>(*GetOutput(0), expected_splits_1);
test::ExpectTensorEqual<int64_t>(*GetOutput(1), expected_splits_2);
test::ExpectTensorEqual<int64_t>(*GetOutput(2), expected_splits_3);
test::ExpectTensorEqual<int>(*GetOutput(3), expected_values);
}
TEST_F(RaggedTensorFromVariantKernelTest, TensorIn2DOut) {
const std::vector<int> values_1 = {1, 2, 3};
const std::vector<int> values_2 = {};
const std::vector<int> values_3 = {4, 5};
const std::vector<int> values_4 = {6};
const std::vector<int64_t> batched_splits_1 = {0, 2, 4};
const std::vector<int64_t> batched_splits_2 = {0, 3, 3, 5, 6};
const std::vector<int> batched_values = {1, 2, 3, 4, 5, 6};
auto component_variant_1 =
CreateVariantFromRagged<int, int64_t>({}, TensorShape({3}), values_1);
auto component_variant_2 =
CreateVariantFromRagged<int, int64_t>({}, TensorShape({0}), values_2);
auto component_variant_3 =
CreateVariantFromRagged<int, int64_t>({}, TensorShape({2}), values_3);
auto component_variant_4 =
CreateVariantFromRagged<int, int64_t>({}, TensorShape({1}), values_4);
Tensor expected_splits_1(DT_INT64, TensorShape({3}));
Tensor expected_splits_2(DT_INT64, TensorShape({5}));
Tensor expected_values(DT_INT32, TensorShape({6}));
test::FillValues<int64_t>(&expected_splits_1, batched_splits_1);
test::FillValues<int64_t>(&expected_splits_2, batched_splits_2);
test::FillValues<int>(&expected_values, batched_values);
int input_ragged_rank = 0;
int output_ragged_rank = 2;
BuildDecodeRaggedTensorGraph<int, int64_t>(
input_ragged_rank, output_ragged_rank, TensorShape({2, 2}),
{component_variant_1, component_variant_2, component_variant_3,
component_variant_4});
TF_ASSERT_OK(RunOpKernel());
test::ExpectTensorEqual<int64_t>(*GetOutput(0), expected_splits_1);
test::ExpectTensorEqual<int64_t>(*GetOutput(1), expected_splits_2);
test::ExpectTensorEqual<int>(*GetOutput(2), expected_values);
}
TEST_F(RaggedTensorFromVariantKernelTest, NonEmpty1DIn3DOut) {
const std::vector<int64_t> component_split_1_1 = {0, 1};
const std::vector<int64_t> component_split_2_1 = {0, 1, 2};
const std::vector<int64_t> component_split_3_1 = {0, 2};
const std::vector<int64_t> component_split_4_1 = {0, 2, 3};
const std::vector<int64_t> component_split_5_1 = {0, 1, 3};
const std::vector<int> component_values_1 = {0};
const std::vector<int> component_values_2 = {0, 1};
const std::vector<int> component_values_3 = {0, 1};
const std::vector<int> component_values_4 = {0, 1, 2};
const std::vector<int> component_values_5 = {0, 1, 2};
const std::vector<int64_t> batched_splits_1 = {0, 5, 10};
const std::vector<int64_t> batched_splits_2 = {0, 1, 3, 4, 6, 8,
10, 12, 13, 14, 16};
const std::vector<int64_t> batched_splits_3 = {
0, 1, 2, 3, 5, 7, 8, 9, 11, 13, 14, 15, 17, 18, 20, 21, 22};
const std::vector<int> batched_values = {0, 0, 1, 0, 1, 0, 1, 2, 0, 1, 2,
0, 1, 2, 0, 1, 2, 0, 0, 1, 0, 1};
Tensor expected_splits_1(DT_INT64, TensorShape({3}));
Tensor expected_splits_2(DT_INT64, TensorShape({11}));
Tensor expected_splits_3(DT_INT64, TensorShape({17}));
Tensor expected_values(DT_INT32, TensorShape({22}));
test::FillValues<int64_t>(&expected_splits_1, batched_splits_1);
test::FillValues<int64_t>(&expected_splits_2, batched_splits_2);
test::FillValues<int64_t>(&expected_splits_3, batched_splits_3);
test::FillValues<int>(&expected_values, batched_values);
auto variant_component_1 = CreateVariantFromRagged<int, int64_t>(
{component_split_1_1}, TensorShape({1}), component_values_1);
auto variant_component_2 = CreateVariantFromRagged<int, int64_t>(
{component_split_2_1}, TensorShape({2}), component_values_2);
auto variant_component_3 = CreateVariantFromRagged<int, int64_t>(
{component_split_3_1}, TensorShape({2}), component_values_3);
auto variant_component_4 = CreateVariantFromRagged<int, int64_t>(
{component_split_4_1}, TensorShape({3}), component_values_4);
auto variant_component_5 = CreateVariantFromRagged<int, int64_t>(
{component_split_5_1}, TensorShape({3}), component_values_5);
int input_ragged_rank = 1;
int output_ragged_rank = 3;
BuildDecodeRaggedTensorGraph<int, int64_t>(
input_ragged_rank, output_ragged_rank, TensorShape({2, 5}),
{variant_component_1, variant_component_2, variant_component_3,
variant_component_4, variant_component_5, variant_component_4,
variant_component_5, variant_component_1, variant_component_3,
variant_component_2});
TF_ASSERT_OK(RunOpKernel());
test::ExpectTensorEqual<int64_t>(*GetOutput(0), expected_splits_1);
test::ExpectTensorEqual<int64_t>(*GetOutput(1), expected_splits_2);
test::ExpectTensorEqual<int64_t>(*GetOutput(2), expected_splits_3);
test::ExpectTensorEqual<int>(*GetOutput(3), expected_values);
}
TEST_F(RaggedTensorFromVariantKernelTest,
NonEmpty2DIn4DOutInferredInputRaggedRank) {
const std::vector<int64_t> component_split_1_1 = {0, 1, 3, 4, 6, 8};
const std::vector<int64_t> component_split_1_2 = {0, 1, 2, 3, 5, 7, 8, 9, 11};
const std::vector<int64_t> component_split_2_1 = {0, 2, 4, 5, 6, 8};
const std::vector<int64_t> component_split_2_2 = {0, 2, 3, 4, 6,
7, 9, 10, 11};
const std::vector<int> component_values_1 = {0, 0, 1, 0, 1, 0, 1, 2, 0, 1, 2};
const std::vector<int> component_values_2 = {0, 1, 2, 0, 1, 2, 0, 0, 1, 0, 1};
const std::vector<int64_t> batched_splits_1 = {0, 2, 4};
const std::vector<int64_t> batched_splits_2 = {0, 5, 10, 15, 20};
const std::vector<int64_t> batched_splits_3 = {0, 1, 3, 4, 6, 8, 10,
12, 13, 14, 16, 18, 20, 21,
22, 24, 25, 27, 28, 30, 32};
const std::vector<int64_t> batched_splits_4 = {
0, 1, 2, 3, 5, 7, 8, 9, 11, 13, 14, 15, 17, 18, 20, 21, 22,
24, 25, 26, 28, 29, 31, 32, 33, 34, 35, 36, 38, 40, 41, 42, 44};
const std::vector<int> batched_values = {
0, 0, 1, 0, 1, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 0, 1, 0, 1,
0, 1, 2, 0, 1, 2, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 2, 0, 1, 2};
Tensor expected_splits_1(DT_INT64, TensorShape({3}));
Tensor expected_splits_2(DT_INT64, TensorShape({5}));
Tensor expected_splits_3(DT_INT64, TensorShape({21}));
Tensor expected_splits_4(DT_INT64, TensorShape({33}));
Tensor expected_values(DT_INT32, TensorShape({44}));
test::FillValues<int64_t>(&expected_splits_1, batched_splits_1);
test::FillValues<int64_t>(&expected_splits_2, batched_splits_2);
test::FillValues<int64_t>(&expected_splits_3, batched_splits_3);
test::FillValues<int64_t>(&expected_splits_4, batched_splits_4);
test::FillValues<int>(&expected_values, batched_values);
auto variant_component_1 = CreateVariantFromRagged<int, int64_t>(
{component_split_1_1, component_split_1_2}, TensorShape({11}),
component_values_1);
auto variant_component_2 = CreateVariantFromRagged<int, int64_t>(
{component_split_2_1, component_split_2_2}, TensorShape({11}),
component_values_2);
int input_ragged_rank = -1;
int output_ragged_rank = 4;
BuildDecodeRaggedTensorGraph<int, int64_t>(
input_ragged_rank, output_ragged_rank, TensorShape({2, 2}),
{variant_component_1, variant_component_2, variant_component_2,
variant_component_1});
TF_ASSERT_OK(RunOpKernel());
test::ExpectTensorEqual<int64_t>(*GetOutput(0), expected_splits_1);
test::ExpectTensorEqual<int64_t>(*GetOutput(1), expected_splits_2);
test::ExpectTensorEqual<int64_t>(*GetOutput(2), expected_splits_3);
test::ExpectTensorEqual<int64_t>(*GetOutput(3), expected_splits_4);
test::ExpectTensorEqual<int>(*GetOutput(4), expected_values);
}
TEST_F(RaggedTensorFromVariantKernelTest, EmptyRow1DIn2DOut) {
const std::vector<int64_t> component_split_1_1 = {0, 3, 3};
const std::vector<int> component_values_1 = {1, 2, 3};
const std::vector<int64_t> component_split_2_1 = {0};
const std::vector<int64_t> batched_splits_1 = {0, 2, 2};
const std::vector<int64_t> batched_splits_2 = {0, 3, 3};
const std::vector<int> batched_values = {1, 2, 3};
Tensor expected_splits_1(DT_INT64, TensorShape({3}));
Tensor expected_splits_2(DT_INT64, TensorShape({3}));
Tensor expected_values(DT_INT32, TensorShape({3}));
test::FillValues<int64_t>(&expected_splits_1, batched_splits_1);
test::FillValues<int64_t>(&expected_splits_2, batched_splits_2);
test::FillValues<int>(&expected_values, batched_values);
auto variant_component_1 = CreateVariantFromRagged<int, int64_t>(
{component_split_1_1}, TensorShape({3}), component_values_1);
auto variant_component_2 = CreateVariantFromRagged<int, int64_t>(
{component_split_2_1}, TensorShape({0}), {});
int input_ragged_rank = 1;
int output_ragged_rank = 2;
BuildDecodeRaggedTensorGraph<int, int64_t>(
input_ragged_rank, output_ragged_rank, TensorShape({2}),
{variant_component_1, variant_component_2});
TF_ASSERT_OK(RunOpKernel());
test::ExpectTensorEqual<int64_t>(*GetOutput(0), expected_splits_1);
test::ExpectTensorEqual<int64_t>(*GetOutput(1), expected_splits_2);
test::ExpectTensorEqual<int>(*GetOutput(2), expected_values);
}
TEST_F(RaggedTensorFromVariantKernelTest, NDValues1DIn2DOut) {
const std::vector<int64_t> component_split_1_1 = {0, 1};
const std::vector<int> component_values_1 = {1, 2};
const std::vector<int64_t> component_split_2_1 = {0, 1, 2};
const std::vector<int> component_values_2 = {1, 2, 3, 4};
const std::vector<int64_t> batched_splits_1 = {0, 1, 3};
const std::vector<int64_t> batched_splits_2 = {0, 1, 2, 3};
const std::vector<int> batched_values = {1, 2, 1, 2, 3, 4};
Tensor expected_splits_1(DT_INT64, TensorShape({3}));
Tensor expected_splits_2(DT_INT64, TensorShape({4}));
Tensor expected_values(DT_INT32, TensorShape({3, 2}));
test::FillValues<int64_t>(&expected_splits_1, batched_splits_1);
test::FillValues<int64_t>(&expected_splits_2, batched_splits_2);
test::FillValues<int>(&expected_values, batched_values);
auto variant_component_1 = CreateVariantFromRagged<int, int64_t>(
{component_split_1_1}, TensorShape({1, 2}), component_values_1);
auto variant_component_2 = CreateVariantFromRagged<int, int64_t>(
{component_split_2_1}, TensorShape({2, 2}), component_values_2);
int input_ragged_rank = 1;
int output_ragged_rank = 2;
BuildDecodeRaggedTensorGraph<int, int64_t>(
input_ragged_rank, output_ragged_rank, TensorShape({2}),
{variant_component_1, variant_component_2});
TF_ASSERT_OK(RunOpKernel());
test::ExpectTensorEqual<int64_t>(*GetOutput(0), expected_splits_1);
test::ExpectTensorEqual<int64_t>(*GetOutput(1), expected_splits_2);
test::ExpectTensorEqual<int>(*GetOutput(2), expected_values);
}
TEST_F(RaggedTensorFromVariantKernelTest, NonEmpty1DIn3DOutInt32Splits) {
const std::vector<int> component_split_1_1 = {0, 1};
const std::vector<int> component_split_2_1 = {0, 1, 2};
const std::vector<int> component_split_3_1 = {0, 2};
const std::vector<int> component_split_4_1 = {0, 2, 3};
const std::vector<int> component_split_5_1 = {0, 1, 3};
const std::vector<int> component_values_1 = {0};
const std::vector<int> component_values_2 = {0, 1};
const std::vector<int> component_values_3 = {0, 1};
const std::vector<int> component_values_4 = {0, 1, 2};
const std::vector<int> component_values_5 = {0, 1, 2};
const std::vector<int> batched_splits_1 = {0, 5, 10};
const std::vector<int> batched_splits_2 = {0, 1, 3, 4, 6, 8,
10, 12, 13, 14, 16};
const std::vector<int> batched_splits_3 = {0, 1, 2, 3, 5, 7, 8, 9, 11,
13, 14, 15, 17, 18, 20, 21, 22};
const std::vector<int> batched_values = {0, 0, 1, 0, 1, 0, 1, 2, 0, 1, 2,
0, 1, 2, 0, 1, 2, 0, 0, 1, 0, 1};
Tensor expected_splits_1(DT_INT32, TensorShape({3}));
Tensor expected_splits_2(DT_INT32, TensorShape({11}));
Tensor expected_splits_3(DT_INT32, TensorShape({17}));
Tensor expected_values(DT_INT32, TensorShape({22}));
test::FillValues<int>(&expected_splits_1, batched_splits_1);
test::FillValues<int>(&expected_splits_2, batched_splits_2);
test::FillValues<int>(&expected_splits_3, batched_splits_3);
test::FillValues<int>(&expected_values, batched_values);
auto variant_component_1 = CreateVariantFromRagged<int, int>(
{component_split_1_1}, TensorShape({1}), component_values_1);
auto variant_component_2 = CreateVariantFromRagged<int, int>(
{component_split_2_1}, TensorShape({2}), component_values_2);
auto variant_component_3 = CreateVariantFromRagged<int, int>(
{component_split_3_1}, TensorShape({2}), component_values_3);
auto variant_component_4 = CreateVariantFromRagged<int, int>(
{component_split_4_1}, TensorShape({3}), component_values_4);
auto variant_component_5 = CreateVariantFromRagged<int, int>(
{component_split_5_1}, TensorShape({3}), component_values_5);
int input_ragged_rank = 1;
int output_ragged_rank = 3;
BuildDecodeRaggedTensorGraph<int, int>(
input_ragged_rank, output_ragged_rank, TensorShape({2, 5}),
{variant_component_1, variant_component_2, variant_component_3,
variant_component_4, variant_component_5, variant_component_4,
variant_component_5, variant_component_1, variant_component_3,
variant_component_2});
TF_ASSERT_OK(RunOpKernel());
test::ExpectTensorEqual<int>(*GetOutput(0), expected_splits_1);
test::ExpectTensorEqual<int>(*GetOutput(1), expected_splits_2);
test::ExpectTensorEqual<int>(*GetOutput(2), expected_splits_3);
test::ExpectTensorEqual<int>(*GetOutput(3), expected_values);
}
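// A rank-4 encoded input with output_ragged_rank = 2 makes the inferred input
// ragged rank negative (2 - 4), which the kernel must reject.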
TEST_F(RaggedTensorFromVariantKernelTest, InvalidInferredInputRaggedRank) {
auto component_variant_1 =
CreateVariantFromRagged<int, int64_t>({}, TensorShape({3}), {1, 2, 3});
auto component_variant_2 =
CreateVariantFromRagged<int, int64_t>({}, TensorShape({0}), {});
auto component_variant_3 =
CreateVariantFromRagged<int, int64_t>({}, TensorShape({2}), {1, 2});
auto component_variant_4 =
CreateVariantFromRagged<int, int64_t>({}, TensorShape({1}), {1});
int input_ragged_rank = -1;
int output_ragged_rank = 2;
BuildDecodeRaggedTensorGraph<int, int64_t>(
input_ragged_rank, output_ragged_rank, TensorShape({1, 1, 1, 4}),
{component_variant_1, component_variant_2, component_variant_3,
component_variant_4});
EXPECT_TRUE(
absl::StartsWith(RunOpKernel().message(),
"Inferred input_ragged_rank (output_ragged_rank - "
"encoded_variant.dims()) must be >= 0"));
}
TEST_F(RaggedTensorFromVariantKernelTest, InputDimsAndRaggedRankAttrsMismatch) {
const std::vector<int64_t> component_split_1_1 = {0, 1};
const std::vector<int64_t> component_split_2_1 = {0, 1, 2};
const std::vector<int> component_values_1 = {0};
const std::vector<int> component_values_2 = {0, 1};
auto variant_component_1 = CreateVariantFromRagged<int, int64_t>(
{component_split_1_1}, TensorShape({1}), component_values_1);
auto variant_component_2 = CreateVariantFromRagged<int, int64_t>(
{component_split_2_1}, TensorShape({2}), component_values_2);
int input_ragged_rank = 1;
int output_ragged_rank = 4;
BuildDecodeRaggedTensorGraph<int, int64_t>(
input_ragged_rank, output_ragged_rank, TensorShape({2}),
{variant_component_1, variant_component_2});
EXPECT_TRUE(absl::StartsWith(RunOpKernel().message(),
"output_ragged_rank must be equal to "
"input_ragged_rank + encoded_ragged.dims()"));
}
TEST_F(RaggedTensorFromVariantKernelTest, InputDoesNotHoldRaggedTensorVariant) {
int input_ragged_rank = 1;
int output_ragged_rank = 2;
BuildDecodeRaggedTensorGraph<int, int64_t>(
input_ragged_rank, output_ragged_rank, TensorShape({2}), {1, 2});
EXPECT_TRUE(absl::StartsWith(
RunOpKernel().message(),
"Input Variant element at index 0 doesn't hold a RaggedTensorVariant"));
}
TEST_F(RaggedTensorFromVariantKernelTest,
InputScalarElementDoesNotMatchInputRaggedRank) {
const std::vector<int64_t> component_split_1_1 = {0, 1};
const std::vector<int> component_values_1 = {1, 2};
auto variant_component_1 = CreateVariantFromRagged<int, int64_t>(
{component_split_1_1}, TensorShape({1, 2}), component_values_1);
int input_ragged_rank = 2;
int output_ragged_rank = 3;
BuildDecodeRaggedTensorGraph<int, int64_t>(
input_ragged_rank, output_ragged_rank, TensorShape({1}),
{variant_component_1});
EXPECT_TRUE(
absl::StartsWith(RunOpKernel().message(),
"Encoded input RaggedTensorVariant has ragged_rank=1. "
"Expected ragged_rank=2."));
}
TEST_F(RaggedTensorFromVariantKernelTest, RaggedSplitTypeMismatch) {
const std::vector<int64_t> component_split_1_1 = {0, 1};
const std::vector<int> component_values_1 = {0};
auto variant_component_1 = CreateVariantFromRagged<int, int64_t>(
{component_split_1_1}, TensorShape({1}), component_values_1);
int input_ragged_rank = 1;
int output_ragged_rank = 2;
BuildDecodeRaggedTensorGraph<int, int>(input_ragged_rank, output_ragged_rank,
TensorShape({1}),
{variant_component_1});
EXPECT_TRUE(absl::StartsWith(
RunOpKernel().message(),
"Expected row_splits Tensor dtype: int32, found: int64"));
}
TEST_F(RaggedTensorFromVariantKernelTest, RaggedSplitRankNotOne) {
RaggedTensorVariant encoded(Tensor(DT_INT32, {2}),
{Tensor(DT_INT64, {2, 1})});
test::FillValues<int64_t>(encoded.mutable_splits(0), {1, 2});
test::FillValues<int>(encoded.mutable_values(), {1, 2});
int input_ragged_rank = 1;
int output_ragged_rank = 2;
BuildDecodeRaggedTensorGraph<int, int64_t>(
input_ragged_rank, output_ragged_rank, TensorShape({1}), {encoded});
EXPECT_TRUE(absl::StartsWith(RunOpKernel().message(),
"Ragged splits must have rank 1"));
}
TEST_F(RaggedTensorFromVariantKernelTest, RaggedValuesTypeMismatch) {
const std::vector<int64_t> component_split_1_1 = {0, 1};
const std::vector<int> component_values_1 = {0};
auto variant_component_1 = CreateVariantFromRagged<int, int64_t>(
{component_split_1_1}, TensorShape({1}), component_values_1);
int input_ragged_rank = 1;
int output_ragged_rank = 2;
BuildDecodeRaggedTensorGraph<tstring, int64_t>(
input_ragged_rank, output_ragged_rank, TensorShape({1}),
{variant_component_1});
EXPECT_TRUE(
absl::StartsWith(RunOpKernel().message(),
"Expected values Tensor dtype: string, found: int32"));
}
TEST_F(RaggedTensorFromVariantKernelTest, RaggedValuesRankNotGreaterThanOne) {
auto variant_component_1 =
CreateVariantFromRagged<int, int64_t>({{0, 1}}, TensorShape({}), {1});
int input_ragged_rank = 1;
int output_ragged_rank = 2;
BuildDecodeRaggedTensorGraph<int, int64_t>(
input_ragged_rank, output_ragged_rank, TensorShape({1}),
{variant_component_1});
EXPECT_TRUE(absl::StartsWith(RunOpKernel().message(),
"Ragged values must have rank >= 1"));
}
TEST_F(RaggedTensorFromVariantKernelTest, RaggedValuesRankMismatch) {
const std::vector<int64_t> component_split_1_1 = {0, 1};
const std::vector<int64_t> component_split_2_1 = {0, 1, 2};
const std::vector<int> component_values_1 = {0};
const std::vector<int> component_values_2 = {0, 1, 2, 3};
auto variant_component_1 = CreateVariantFromRagged<int, int64_t>(
{component_split_1_1}, TensorShape({1}), component_values_1);
auto variant_component_2 = CreateVariantFromRagged<int, int64_t>(
{component_split_2_1}, TensorShape({2, 2}), component_values_2);
int input_ragged_rank = 1;
int output_ragged_rank = 2;
BuildDecodeRaggedTensorGraph<int, int64_t>(
input_ragged_rank, output_ragged_rank, TensorShape({2}),
{variant_component_1, variant_component_2});
EXPECT_TRUE(absl::StartsWith(RunOpKernel().message(),
"Rank of values must match for all components"));
}
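// With output_ragged_rank = 0, the components are stacked into a single dense
// values Tensor and no splits outputs are produced.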
TEST_F(RaggedTensorFromVariantKernelTest, OutputRaggedRank0) {
auto variant_component_1 =
CreateVariantFromRagged<int, int64_t>({}, TensorShape({4}), {0, 1, 2, 3});
auto variant_component_2 =
CreateVariantFromRagged<int, int64_t>({}, TensorShape({4}), {4, 5, 6, 7});
int input_ragged_rank = -1;
int output_ragged_rank = 0;
Tensor expected_values(DT_INT32, TensorShape({2, 4}));
test::FillValues<int>(&expected_values, {0, 1, 2, 3, 4, 5, 6, 7});
BuildDecodeRaggedTensorGraph<int, int64_t>(
input_ragged_rank, output_ragged_rank, TensorShape({2}),
{variant_component_1, variant_component_2});
TF_ASSERT_OK(RunOpKernel());
test::ExpectTensorEqual<int>(*GetOutput(0), expected_values);
}
TEST_F(RaggedTensorFromVariantKernelTest, OutputRaggedRank0Empty) {
int input_ragged_rank = -1;
int output_ragged_rank = 0;
Tensor expected_values(DT_INT32, TensorShape({0}));
test::FillValues<int>(&expected_values, {});
BuildDecodeRaggedTensorGraph<int, int64_t>(
input_ragged_rank, output_ragged_rank, TensorShape({0}), {});
TF_ASSERT_OK(RunOpKernel());
test::ExpectTensorEqual<int>(*GetOutput(0), expected_values);
}
TEST_F(RaggedTensorFromVariantKernelTest, ShapeFnTest) {
ShapeInferenceTestOp op("RaggedTensorFromVariant");
(*op.node_def.mutable_attr())["input_ragged_rank"].set_i(0);
(*op.node_def.mutable_attr())["output_ragged_rank"].set_i(1);
INFER_OK(op, "?", "[?];?");
INFER_OK(op, "[?]", "[?];?");
INFER_ERROR("Shape must be rank 1 but is rank 2", op, "[?,?]");
(*op.node_def.mutable_attr())["input_ragged_rank"].set_i(1);
(*op.node_def.mutable_attr())["output_ragged_rank"].set_i(1);
INFER_OK(op, "?", "[?];?");
INFER_ERROR("Shape must be rank 0 but is rank 1", op, "[?]");
INFER_ERROR("Shape must be rank 0 but is rank 2", op, "[?,?]");
(*op.node_def.mutable_attr())["output_ragged_rank"].set_i(2);
INFER_OK(op, "?", "[?];[?];?");
INFER_OK(op, "[?]", "[?];[?];?");
INFER_ERROR("Shape must be rank 1 but is rank 2", op, "[?,?]");
(*op.node_def.mutable_attr())["output_ragged_rank"].set_i(3);
INFER_OK(op, "?", "[?];[?];[?];?");
INFER_ERROR("Shape must be rank 2 but is rank 1", op, "[?]");
INFER_OK(op, "[?,?]", "[?];[?];[?];?");
INFER_ERROR("Shape must be rank 2 but is rank 3", op, "[?,?,?]");
(*op.node_def.mutable_attr())["input_ragged_rank"].set_i(3);
(*op.node_def.mutable_attr())["output_ragged_rank"].set_i(3);
INFER_OK(op, "?", "[?];[?];[?];?");
INFER_ERROR("Shape must be rank 0 but is rank 1", op, "[?]");
(*op.node_def.mutable_attr())["output_ragged_rank"].set_i(4);
INFER_OK(op, "?", "[?];[?];[?];[?];?");
INFER_OK(op, "[?]", "[?];[?];[?];[?];?");
INFER_ERROR("Shape must be rank 1 but is rank 2", op, "[?,?]");
(*op.node_def.mutable_attr())["output_ragged_rank"].set_i(5);
INFER_OK(op, "?", "[?];[?];[?];[?];[?];?");
INFER_ERROR("Shape must be rank 2 but is rank 1", op, "[?]");
INFER_OK(op, "[?,?]", "[?];[?];[?];[?];[?];?");
(*op.node_def.mutable_attr())["output_ragged_rank"].set_i(6);
INFER_OK(op, "?", "[?];[?];[?];[?];[?];[?];?");
INFER_ERROR("Shape must be rank 3 but is rank 1", op, "[?]");
INFER_ERROR("Shape must be rank 3 but is rank 2", op, "[?,?]");
INFER_OK(op, "[?,?,?]", "[?];[?];[?];[?];[?];[?];?");
}
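// Components whose values have uneven outer dimensions (2, 1, 0, and 2 rows)
// are concatenated into one values Tensor, with a single splits vector
// recording the row boundaries.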
TEST_F(RaggedTensorFromVariantKernelTest, 2DValuesTensorIn1DOut) {
const std::vector<int64_t> batched_splits_1 = {0, 2, 3, 3, 5};
const std::vector<int> batched_values = {1, 1, 1, 1, 2, 2, 2, 2, 3, 3,
3, 3, 4, 4, 4, 4, 5, 5, 5, 5};
auto variant_component_1 = CreateVariantFromRagged<int, int64_t>(
{}, TensorShape({2, 2, 2}), {1, 1, 1, 1, 2, 2, 2, 2});
auto variant_component_2 = CreateVariantFromRagged<int, int64_t>(
{}, TensorShape({1, 2, 2}), {3, 3, 3, 3});
auto variant_component_3 =
CreateVariantFromRagged<int, int64_t>({}, TensorShape({0, 2, 2}), {});
auto variant_component_4 = CreateVariantFromRagged<int, int64_t>(
{}, TensorShape({2, 2, 2}), {4, 4, 4, 4, 5, 5, 5, 5});
Tensor expected_splits_1(DT_INT64, TensorShape({5}));
Tensor expected_values(DT_INT32, TensorShape({5, 2, 2}));
test::FillValues<int64_t>(&expected_splits_1, batched_splits_1);
test::FillValues<int>(&expected_values, batched_values);
int input_ragged_rank = 0;
int output_ragged_rank = 1;
BuildDecodeRaggedTensorGraph<int, int64_t>(
input_ragged_rank, output_ragged_rank, TensorShape({4}),
{variant_component_1, variant_component_2, variant_component_3,
variant_component_4});
TF_ASSERT_OK(RunOpKernel());
test::ExpectTensorEqual<int64_t>(*GetOutput(0), expected_splits_1);
test::ExpectTensorEqual<int>(*GetOutput(1), expected_values);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/ragged_tensor_from_variant_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/ragged_tensor_from_variant_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
0870f6bf-356a-4ba4-973a-d1a6c1553518 | cpp | tensorflow/tensorflow | xplane_utils | third_party/xla/xla/tsl/profiler/utils/xplane_utils.cc | third_party/xla/xla/tsl/profiler/utils/xplane_utils_test.cc | #include "xla/tsl/profiler/utils/xplane_utils.h"
#include <algorithm>
#include <cstdint>
#include <limits>
#include <optional>
#include <set>
#include <string>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/log.h"
#include "absl/strings/match.h"
#include "absl/strings/string_view.h"
#include "xla/tsl/profiler/utils/math_utils.h"
#include "xla/tsl/profiler/utils/tf_xplane_visitor.h"
#include "xla/tsl/profiler/utils/timespan.h"
#include "xla/tsl/profiler/utils/xplane_builder.h"
#include "xla/tsl/profiler/utils/xplane_schema.h"
#include "xla/tsl/profiler/utils/xplane_visitor.h"
#include "xla/tsl/util/stats_calculator.h"
#include "tsl/platform/fingerprint.h"
#include "tsl/platform/types.h"
#include "tsl/profiler/lib/context_types.h"
#include "tsl/profiler/protobuf/xplane.pb.h"
namespace tsl {
namespace profiler {
namespace {
template <typename T, typename Pred>
std::vector<int> FindAll(const protobuf::RepeatedPtrField<T>& array,
const Pred& pred) {
std::vector<int> indices;
for (int i = 0; i < array.size(); ++i) {
if (pred(&array.Get(i))) indices.push_back(i);
}
return indices;
}
template <typename T, typename Pred>
int Find(const protobuf::RepeatedPtrField<T>& array, const Pred& pred) {
std::vector<int> indices = FindAll(array, pred);
if (indices.size() > 1) {
LOG(WARNING) << "Found multiple " << T().GetTypeName()
<< " when only one was expected.";
}
return indices.empty() ? -1 : indices.front();
}
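// Removes the elements at the given indices (sorted ascending, as produced by
// FindAll) while preserving the relative order of the remaining elements,
// then truncates the freed tail of the array.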
template <typename T>
void RemoveAt(protobuf::RepeatedPtrField<T>* array,
const std::vector<int>& indices) {
if (indices.empty()) return;
if (array->size() == indices.size()) {
array->Clear();
return;
}
auto remove_iter = indices.begin();
int i = *(remove_iter++);
for (int j = i + 1; j < array->size(); ++j) {
if (remove_iter != indices.end() && *remove_iter == j) {
++remove_iter;
} else {
array->SwapElements(j, i++);
}
}
array->DeleteSubrange(i, array->size() - i);
}
template <typename T>
void Remove(protobuf::RepeatedPtrField<T>* array, const T* elem) {
int i = Find(*array, [elem](const T* e) { return elem == e; });
RemoveAt(array, {i});
}
template <typename T, typename Pred>
void RemoveIf(protobuf::RepeatedPtrField<T>* array, Pred&& pred) {
std::vector<int> indices = FindAll(*array, pred);
RemoveAt(array, indices);
}
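// Copies the display name, name, metadata, and stats of `src_event_metadata`
// into `dst_event_metadata`, but only for fields still empty in the
// destination. Stats are re-keyed against `dst_plane`'s stat metadata, and
// string ref values are re-interned in the destination plane.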
void CopyEventMetadata(const XEventMetadata& src_event_metadata,
const XPlaneVisitor& src_plane,
XEventMetadata& dst_event_metadata,
XPlaneBuilder& dst_plane) {
if (dst_event_metadata.display_name().empty() &&
!src_event_metadata.display_name().empty()) {
dst_event_metadata.set_display_name(src_event_metadata.display_name());
}
if (dst_event_metadata.name().empty() && !src_event_metadata.name().empty()) {
dst_event_metadata.set_name(src_event_metadata.name());
}
if (dst_event_metadata.metadata().empty() &&
!src_event_metadata.metadata().empty()) {
dst_event_metadata.set_metadata(src_event_metadata.metadata());
}
if (dst_event_metadata.stats().empty() &&
!src_event_metadata.stats().empty()) {
XEventMetadataVisitor src_event_metadata_visitor(&src_plane,
&src_event_metadata);
src_event_metadata_visitor.ForEachStat([&](const XStatVisitor& src_stat) {
XStatMetadata& metadata =
*dst_plane.GetOrCreateStatMetadata(src_stat.Name());
XStat& dst_stat = *dst_event_metadata.add_stats();
dst_stat = src_stat.RawStat();
if (src_stat.ValueCase() == XStat::kRefValue) {
XStatMetadata& value_metadata =
*dst_plane.GetOrCreateStatMetadata(src_stat.StrOrRefValue());
dst_stat.set_ref_value(value_metadata.id());
}
dst_stat.set_metadata_id(metadata.id());
});
}
DCHECK_EQ(src_event_metadata.stats_size(), dst_event_metadata.stats_size());
}
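// Copies `src_event` onto `dst_line`, creating or reusing event metadata in
// `dst_plane` by event name. Aggregated events keep their occurrence count
// instead of an offset; other events are shifted by `time_offset_ps`.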
void CopyEvent(const XEventVisitor& src_event, const XPlaneVisitor& src,
const XPlane& src_plane, int64_t time_offset_ps,
XPlaneBuilder& dst_plane, XLineBuilder& dst_line) {
XEventMetadata* dst_event_metadata =
dst_plane.GetOrCreateEventMetadata(src_event.Name());
CopyEventMetadata(*src_event.metadata(), src, *dst_event_metadata, dst_plane);
XEventBuilder dst_event = dst_line.AddEvent(*dst_event_metadata);
if (src_event.IsAggregatedEvent()) {
dst_event.SetNumOccurrences(src_event.NumOccurrences());
} else {
dst_event.SetOffsetPs(src_event.OffsetPs() + time_offset_ps);
}
dst_event.SetDurationPs(src_event.DurationPs());
src_event.ForEachStat([&](const XStatVisitor& stat) {
dst_event.AddStat(*dst_plane.GetOrCreateStatMetadata(stat.Name()),
stat.RawStat(), src_plane);
});
}
bool IsOpLineName(absl::string_view line_name) {
return line_name == kXlaOpLineName || line_name == kTensorFlowOpLineName;
}
}  // namespace
const XPlane* FindPlaneWithName(const XSpace& space, absl::string_view name) {
int i = Find(space.planes(),
[name](const XPlane* plane) { return plane->name() == name; });
return (i != -1) ? &space.planes(i) : nullptr;
}
std::vector<const XPlane*> FindPlanesWithNames(
const XSpace& space, const std::vector<absl::string_view>& names) {
absl::flat_hash_set<absl::string_view> names_set(names.begin(), names.end());
std::vector<int> indices =
FindAll(space.planes(), [&names_set](const XPlane* plane) {
return names_set.contains(plane->name());
});
std::vector<const XPlane*> planes;
planes.reserve(indices.size());
for (int i : indices) {
planes.push_back(&space.planes(i));
}
return planes;
}
XPlane* FindMutablePlaneWithName(XSpace* space, absl::string_view name) {
int i = Find(space->planes(),
[name](const XPlane* plane) { return plane->name() == name; });
return (i != -1) ? space->mutable_planes(i) : nullptr;
}
XPlane* FindOrAddMutablePlaneWithName(XSpace* space, absl::string_view name) {
XPlane* plane = FindMutablePlaneWithName(space, name);
if (plane == nullptr) {
plane = space->add_planes();
plane->set_name(name.data(), name.size());
}
return plane;
}
std::vector<const XPlane*> FindPlanesWithPrefix(const XSpace& space,
absl::string_view prefix) {
return FindPlanes(space, [&](const XPlane& plane) {
return absl::StartsWith(plane.name(), prefix);
});
}
std::vector<XPlane*> FindMutablePlanesWithPrefix(XSpace* space,
absl::string_view prefix) {
return FindMutablePlanes(space, [&](XPlane& plane) {
return absl::StartsWith(plane.name(), prefix);
});
}
const XLine* FindLineWithId(const XPlane& plane, int64_t id) {
int i =
Find(plane.lines(), [id](const XLine* line) { return line->id() == id; });
return (i != -1) ? &plane.lines(i) : nullptr;
}
std::vector<const XLine*> FindLinesWithId(const XPlane& plane, int64_t id) {
std::vector<int> indices = FindAll(
plane.lines(), [id](const XLine* line) { return line->id() == id; });
std::vector<const XLine*> lines;
lines.reserve(indices.size());
for (int index : indices) {
lines.push_back(&plane.lines(index));
}
return lines;
}
const XLine* FindLineWithName(const XPlane& plane, absl::string_view name) {
int i = Find(plane.lines(),
[name](const XLine* line) { return line->name() == name; });
return (i != -1) ? &plane.lines(i) : nullptr;
}
XStat* FindOrAddMutableStat(const XStatMetadata& stat_metadata, XEvent* event) {
for (auto& stat : *event->mutable_stats()) {
if (stat.metadata_id() == stat_metadata.id()) {
return &stat;
}
}
XStat* stat = event->add_stats();
stat->set_metadata_id(stat_metadata.id());
return stat;
}
void RemovePlane(XSpace* space, const XPlane* plane) {
DCHECK(plane != nullptr);
Remove(space->mutable_planes(), plane);
}
void RemovePlanes(XSpace* space, const std::vector<const XPlane*>& planes) {
absl::flat_hash_set<const XPlane*> planes_set(planes.begin(), planes.end());
RemoveIf(space->mutable_planes(), [&planes_set](const XPlane* plane) {
return planes_set.contains(plane);
});
}
void RemoveLine(XPlane* plane, const XLine* line) {
DCHECK(line != nullptr);
Remove(plane->mutable_lines(), line);
}
void RemoveEvents(XLine* line,
const absl::flat_hash_set<const XEvent*>& events) {
RemoveIf(line->mutable_events(),
[&](const XEvent* event) { return events.contains(event); });
}
void RemoveEmptyPlanes(XSpace* space) {
RemoveIf(space->mutable_planes(),
[&](const XPlane* plane) { return plane->lines().empty(); });
}
void RemoveEmptyLines(XPlane* plane) {
RemoveIf(plane->mutable_lines(),
[&](const XLine* line) { return line->events().empty(); });
}
bool XEventsComparator::operator()(const XEvent* a, const XEvent* b) const {
return XEventTimespan(*a) < XEventTimespan(*b);
}
void SortXPlane(XPlane* plane) {
for (XLine& line : *plane->mutable_lines()) {
auto& events = *line.mutable_events();
std::sort(events.pointer_begin(), events.pointer_end(),
XEventsComparator());
}
}
void SortXSpace(XSpace* space) {
for (XPlane& plane : *space->mutable_planes()) SortXPlane(&plane);
}
void NormalizeTimestamps(XPlane* plane, uint64 start_time_ns) {
for (XLine& line : *plane->mutable_lines()) {
if (line.timestamp_ns() >= static_cast<int64_t>(start_time_ns)) {
line.set_timestamp_ns(line.timestamp_ns() - start_time_ns);
}
}
}
void NormalizeTimestamps(XSpace* space, uint64 start_time_ns) {
for (XPlane& plane : *space->mutable_planes()) {
NormalizeTimestamps(&plane, start_time_ns);
}
}
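// Merges `src_plane` into `dst_plane`, matching lines by id. When both planes
// have events on the same line, the merged line keeps the earlier timestamp
// and the events of the later-starting side are shifted accordingly.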
void MergePlanes(const XPlane& src_plane, XPlane* dst_plane) {
RemoveEmptyLines(dst_plane);
XPlaneVisitor src(&src_plane);
XPlaneBuilder dst(dst_plane);
src.ForEachStat([&](const XStatVisitor& stat) {
XStatMetadata* stat_metadata = dst.GetOrCreateStatMetadata(stat.Name());
dst.SetOrAddStat(*stat_metadata, stat.RawStat(), src_plane);
});
src.ForEachLine([&](const XLineVisitor& line) {
XLineBuilder dst_line = dst.GetOrCreateLine(line.Id());
int64_t time_offset_ps = 0LL;
if (dst_line.NumEvents() == 0) {
dst_line.SetTimestampNs(line.TimestampNs());
dst_line.SetName(line.Name());
dst_line.SetDisplayNameIfEmpty(line.DisplayName());
} else {
if (line.TimestampNs() <= dst_line.TimestampNs()) {
dst_line.SetTimestampNsAndAdjustEventOffsets(line.TimestampNs());
} else {
time_offset_ps =
NanoToPico(line.TimestampNs() - dst_line.TimestampNs());
}
dst_line.SetNameIfEmpty(line.Name());
}
line.ForEachEvent([&](const XEventVisitor& event) {
CopyEvent(event, src, src_plane, time_offset_ps, dst, dst_line);
});
});
}
void MergePlanes(const std::vector<const XPlane*>& src_planes,
XPlane* dst_plane) {
for (const XPlane* src_plane : src_planes) {
MergePlanes(*src_plane, dst_plane);
}
}
int64_t GetStartTimestampNs(const XPlane& plane) {
if (plane.lines().empty()) return 0LL;
int64_t plane_timestamp = std::numeric_limits<int64_t>::max();
for (const auto& line : plane.lines()) {
plane_timestamp = std::min(plane_timestamp, line.timestamp_ns());
}
return plane_timestamp;
}
bool IsEmpty(const XSpace& space) {
for (const auto& plane : space.planes()) {
for (const auto& line : plane.lines()) {
if (!line.events().empty()) {
return false;
}
}
}
return true;
}
bool IsXSpaceGrouped(const XSpace& space) {
for (const auto& plane : space.planes()) {
XPlaneVisitor xplane = tsl::profiler::CreateTfXPlaneVisitor(&plane);
const XStatMetadata* group_id_stat =
xplane.GetStatMetadataByType(StatType::kGroupId);
if (group_id_stat) return true;
}
return false;
}
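// Attaches flow stats to events on `xplane`: events carrying a correlation id
// get a kGpuLaunch flow (outgoing on host planes, incoming on device planes),
// and, when `connect_traceme` is set, events carrying producer/consumer type
// and id stats get the corresponding TraceMe flows.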
void AddFlowsToXplane(int32_t host_id, bool is_host_plane, bool connect_traceme,
XPlane* xplane) {
if (!xplane) return;
XPlaneBuilder plane(xplane);
XStatMetadata* correlation_id_stats_metadata =
plane.GetStatMetadata(GetStatTypeStr(StatType::kCorrelationId));
XStatMetadata* producer_type_stats_metadata =
plane.GetStatMetadata(GetStatTypeStr(StatType::kProducerType));
XStatMetadata* consumer_type_stats_metadata =
plane.GetStatMetadata(GetStatTypeStr(StatType::kConsumerType));
XStatMetadata* producer_id_stats_metadata =
plane.GetStatMetadata(GetStatTypeStr(StatType::kProducerId));
XStatMetadata* consumer_id_stats_metadata =
plane.GetStatMetadata(GetStatTypeStr(StatType::kConsumerId));
XStatMetadata* flow_stats_metadata =
plane.GetOrCreateStatMetadata(GetStatTypeStr(StatType::kFlow));
XFlow::FlowDirection direction = is_host_plane
? XFlow::FlowDirection::kFlowOut
: XFlow::FlowDirection::kFlowIn;
plane.ForEachLine([&](XLineBuilder line) {
line.ForEachEvent([&](XEventBuilder event) {
std::optional<uint64_t> correlation_id;
std::optional<uint64_t> producer_type;
std::optional<uint64_t> consumer_type;
std::optional<uint64_t> producer_id;
std::optional<uint64_t> consumer_id;
event.ForEachStat([&](XStat* stat) {
if (correlation_id_stats_metadata &&
stat->metadata_id() == correlation_id_stats_metadata->id()) {
correlation_id = stat->uint64_value();
} else if (connect_traceme) {
if (producer_type_stats_metadata &&
stat->metadata_id() == producer_type_stats_metadata->id()) {
producer_type = XStatsBuilder<XPlane>::IntOrUintValue(*stat);
} else if (consumer_type_stats_metadata &&
stat->metadata_id() ==
consumer_type_stats_metadata->id()) {
consumer_type = XStatsBuilder<XPlane>::IntOrUintValue(*stat);
} else if (producer_id_stats_metadata &&
stat->metadata_id() == producer_id_stats_metadata->id()) {
producer_id = XStatsBuilder<XPlane>::IntOrUintValue(*stat);
} else if (consumer_id_stats_metadata &&
stat->metadata_id() == consumer_id_stats_metadata->id()) {
consumer_id = XStatsBuilder<XPlane>::IntOrUintValue(*stat);
}
}
});
if (correlation_id) {
XFlow flow(XFlow::GetFlowId(host_id, *correlation_id), direction,
ContextType::kGpuLaunch);
event.AddStatValue(*flow_stats_metadata, flow.ToStatValue());
}
if (connect_traceme) {
if (producer_type && producer_id) {
auto context_type = GetSafeContextType(*producer_type);
XFlow flow(XFlow::GetFlowId(host_id, *producer_id, context_type),
XFlow::FlowDirection::kFlowOut, context_type);
event.AddStatValue(*flow_stats_metadata, flow.ToStatValue());
}
if (consumer_type && consumer_id) {
auto context_type = GetSafeContextType(*consumer_type);
XFlow flow(XFlow::GetFlowId(host_id, *consumer_id, context_type),
XFlow::FlowDirection::kFlowIn, context_type);
event.AddStatValue(*flow_stats_metadata, flow.ToStatValue());
}
}
});
});
}
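// Returns a fingerprint of the sorted set of distinct XLA module names on the
// plane's module line, or 0 if the plane has no such line or it is empty.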
uint64_t GetDevicePlaneFingerprint(const XPlane& plane) {
const XLine* xla_module_line = FindLineWithName(plane, kXlaModuleLineName);
if (!xla_module_line) return 0ULL;
XPlaneVisitor xplane(&plane);
XLineVisitor xline(&xplane, xla_module_line);
std::set<uint64_t> ordered_module_fps;
xline.ForEachEvent([&](const XEventVisitor& xevent) {
ordered_module_fps.insert(Fingerprint64(xevent.Name()));
});
if (ordered_module_fps.empty()) return 0ULL;
uint64_t output = 0ULL;
for (const auto& fp : ordered_module_fps) {
output = FingerprintCat64(output, fp);
}
return output;
}
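// Returns the event on the tracked line whose timespan includes `event`.
// The scan resumes from the last hit, so callers are expected to issue
// queries in non-decreasing time order.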
std::optional<XEventVisitor> XEventContextTracker::GetContainingEvent(
const Timespan& event) {
if (!line_) return std::nullopt;
if (current_index_ != -1) {
XEventVisitor current_event(plane_, line_, &line_->events(current_index_));
if (current_event.GetTimespan().Includes(event)) {
return current_event;
}
}
for (int i = current_index_ + 1; i < line_->events_size(); ++i) {
XEventVisitor current_event(plane_, line_, &line_->events(i));
if (current_event.TimestampPs() > event.end_ps()) break;
if (current_event.EndTimestampPs() < event.begin_ps()) continue;
current_index_ = i;
if (current_event.GetTimespan().Includes(event)) {
return current_event;
}
break;
}
return std::nullopt;
}
std::optional<XEventVisitor> XEventContextTracker::GetOverlappingEvent(
const Timespan& event) {
if (!line_) return std::nullopt;
if (current_index_ != -1) {
XEventVisitor current_event(plane_, line_, &line_->events(current_index_));
if (current_event.GetTimespan().Overlaps(event)) {
return current_event;
}
}
for (int i = current_index_ + 1; i < line_->events_size(); ++i) {
XEventVisitor current_event(plane_, line_, &line_->events(i));
if (current_event.TimestampPs() > event.end_ps()) break;
if (current_event.EndTimestampPs() < event.begin_ps()) continue;
current_index_ = i;
if (current_event.GetTimespan().Overlaps(event)) {
return current_event;
}
break;
}
return std::nullopt;
}
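// Builds an aggregated view of `full_trace` in `aggregated_trace`: step lines
// are copied through unchanged, while XLA/TF op lines are collapsed into one
// event per (line, group id, event metadata) carrying the occurrence count,
// total and minimum durations, and a self-duration stat derived by
// subtracting time spent in enclosed child events.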
void AggregateXPlane(const XPlane& full_trace, XPlane& aggregated_trace) {
struct EventStat {
tsl::Stat<int64_t> stat;
int64_t children_duration;
};
using StatByEvent = absl::flat_hash_map<int64_t /*event_id*/, EventStat>;
using StatByGroup = absl::flat_hash_map<int64_t /*group_id*/, StatByEvent>;
absl::flat_hash_map<int64_t /*line_id*/, StatByGroup> stats;
const XPlaneVisitor& plane = CreateTfXPlaneVisitor(&full_trace);
XPlaneBuilder aggregated_plane(&aggregated_trace);
aggregated_plane.SetName(plane.Name());
uint64_t first_op_start_ps = kint64max;
uint64_t last_op_end_ps = 0;
plane.ForEachLine([&](const XLineVisitor& line) {
if (line.Name() == kStepLineName ||
line.Name() == kSparseCoreStepLineName) {
XLineBuilder aggregated_line =
aggregated_plane.GetOrCreateLine(line.Id());
aggregated_line.SetName(kStepLineName);
line.ForEachEvent([&](const XEventVisitor& event) {
CopyEvent(event, plane, full_trace, 0LL, aggregated_plane,
aggregated_line);
});
}
if (!IsOpLineName(line.Name())) return;
XLineBuilder aggregated_line = aggregated_plane.GetOrCreateLine(line.Id());
aggregated_line.SetName(line.Name());
std::vector<XEventVisitor> event_stack;
line.ForEachEvent([&](XEventVisitor event) {
first_op_start_ps = first_op_start_ps <= event.TimestampPs()
? first_op_start_ps
: event.TimestampPs();
last_op_end_ps = last_op_end_ps >= event.EndTimestampPs()
? last_op_end_ps
: event.EndTimestampPs();
const auto& group_stat = event.GetStat(StatType::kGroupId);
int64_t group_id =
group_stat.has_value() ? group_stat->IntOrUintValue() : kint64max;
StatByEvent& line_stats = stats[line.Id()][group_id];
line_stats[event.Id()].stat.UpdateStat(event.DurationPs());
DCHECK(event_stack.empty() || !(event < event_stack.back()));
while (!event_stack.empty() &&
!event_stack.back().GetTimespan().Includes(event.GetTimespan())) {
event_stack.pop_back();
}
if (!event_stack.empty()) {
line_stats[event_stack.back().Id()].children_duration +=
event.DurationPs();
}
event_stack.push_back(std::move(event));
});
});
uint64_t total_time_ps =
(last_op_end_ps && last_op_end_ps > first_op_start_ps)
? last_op_end_ps - first_op_start_ps
: 0;
aggregated_plane.AddStatValue(
*aggregated_plane.GetOrCreateStatMetadata(
GetStatTypeStr(StatType::kTotalProfileDurationPs)),
total_time_ps);
XStatMetadata* kMinDurationPs = aggregated_plane.GetOrCreateStatMetadata(
GetStatTypeStr(StatType::kMinDurationPs));
XStatMetadata* kSelfDurationPs = aggregated_plane.GetOrCreateStatMetadata(
GetStatTypeStr(StatType::kSelfDurationPs));
XStatMetadata* kGroupId = aggregated_plane.GetOrCreateStatMetadata(
GetStatTypeStr(StatType::kGroupId));
for (const auto& [line_id, stats_by_group] : stats) {
XLineBuilder aggregated_line = aggregated_plane.GetOrCreateLine(line_id);
for (const auto& [group_id, stat_by_event] : stats_by_group) {
for (const auto& [event_id, event_stat] : stat_by_event) {
const auto& src_event_metadata = *plane.GetEventMetadata(event_id);
XEventMetadata& event_metadata =
*aggregated_plane.GetOrCreateEventMetadata(
src_event_metadata.name());
CopyEventMetadata(src_event_metadata, plane, event_metadata,
aggregated_plane);
XEventBuilder aggregated_event =
aggregated_line.AddEvent(event_metadata);
aggregated_event.SetNumOccurrences(event_stat.stat.count());
aggregated_event.SetDurationPs(event_stat.stat.sum());
if (group_id != kint64max) {
aggregated_event.AddStatValue(*kGroupId, group_id);
}
if (event_stat.stat.count() > 1) {
aggregated_event.AddStatValue(*kMinDurationPs, event_stat.stat.min());
}
if (event_stat.children_duration != 0) {
aggregated_event.AddStatValue(
*kSelfDurationPs,
event_stat.stat.sum() - event_stat.children_duration);
}
}
}
}
}
bool IsCustomPlane(const XPlane& plane) {
constexpr absl::string_view kLegacyCustomPlanePrefix = "/custom:";
return absl::StartsWith(plane.name(), kCustomPlanePrefix) ||
absl::StartsWith(plane.name(), kLegacyCustomPlanePrefix);
}
bool IsHostPlane(const XPlane& plane) {
return plane.name() == kHostThreadsPlaneName ||
plane.name() == kHostCpusPlaneName ||
plane.name() == kTFStreamzPlaneName ||
plane.name() == kMetadataPlaneName ||
plane.name() == kSyscallsPlaneName ||
plane.name() == kPythonTracerPlaneName ||
plane.name() == kCuptiDriverApiPlaneName;
}
bool IsDevicePlane(const XPlane& plane) {
if (IsHostPlane(plane)) return false;
return absl::StartsWith(plane.name(), "/device") ||
absl::StartsWith(plane.name(), kTpuNonCorePlaneNamePrefix) ||
IsCustomPlane(plane);
}
}
} | #include "xla/tsl/profiler/utils/xplane_utils.h"
#include <cstdint>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/strings/string_view.h"
#include "absl/types/optional.h"
#include "xla/tsl/profiler/utils/math_utils.h"
#include "xla/tsl/profiler/utils/tf_xplane_visitor.h"
#include "xla/tsl/profiler/utils/xplane_builder.h"
#include "xla/tsl/profiler/utils/xplane_schema.h"
#include "xla/tsl/profiler/utils/xplane_visitor.h"
#include "tsl/platform/test.h"
#include "tsl/platform/types.h"
#include "tsl/profiler/protobuf/xplane.pb.h"
namespace tsl {
namespace profiler {
namespace {
using ::testing::Property;
using ::testing::SizeIs;
using ::testing::UnorderedElementsAre;
#if defined(PLATFORM_GOOGLE)
using ::testing::EqualsProto;
using ::testing::proto::IgnoringFields;
using ::testing::proto::IgnoringRepeatedFieldOrdering;
using ::testing::proto::Partially;
#endif
XEvent CreateEvent(int64_t offset_ps, int64_t duration_ps) {
XEvent event;
event.set_offset_ps(offset_ps);
event.set_duration_ps(duration_ps);
return event;
}
TEST(XPlaneUtilsTest, AddAndRemovePlanes) {
XSpace space;
auto* p1 = FindOrAddMutablePlaneWithName(&space, "p1");
EXPECT_EQ(p1, FindPlaneWithName(space, "p1"));
auto* p2 = FindOrAddMutablePlaneWithName(&space, "p2");
EXPECT_EQ(p2, FindPlaneWithName(space, "p2"));
auto* p3 = FindOrAddMutablePlaneWithName(&space, "p3");
EXPECT_EQ(p3, FindPlaneWithName(space, "p3"));
RemovePlane(&space, p2);
EXPECT_EQ(space.planes_size(), 2);
EXPECT_EQ(p1, FindPlaneWithName(space, "p1"));
EXPECT_EQ(p3, FindPlaneWithName(space, "p3"));
RemovePlane(&space, p1);
EXPECT_EQ(space.planes_size(), 1);
EXPECT_EQ(p3, FindPlaneWithName(space, "p3"));
RemovePlane(&space, p3);
EXPECT_EQ(space.planes_size(), 0);
}
TEST(XPlaneUtilsTest, RemoveEmptyPlanes) {
XSpace space;
RemoveEmptyPlanes(&space);
EXPECT_EQ(space.planes_size(), 0);
auto* plane1 = space.add_planes();
plane1->set_name("p1");
plane1->add_lines()->set_name("p1l1");
plane1->add_lines()->set_name("p1l2");
auto* plane2 = space.add_planes();
plane2->set_name("p2");
auto* plane3 = space.add_planes();
plane3->set_name("p3");
plane3->add_lines()->set_name("p3l1");
auto* plane4 = space.add_planes();
plane4->set_name("p4");
RemoveEmptyPlanes(&space);
ASSERT_EQ(space.planes_size(), 2);
EXPECT_EQ(space.planes(0).name(), "p1");
EXPECT_EQ(space.planes(1).name(), "p3");
}
TEST(XPlaneUtilsTest, RemoveEmptyLines) {
XPlane plane;
RemoveEmptyLines(&plane);
EXPECT_EQ(plane.lines_size(), 0);
auto* line1 = plane.add_lines();
line1->set_name("l1");
line1->add_events();
line1->add_events();
auto* line2 = plane.add_lines();
line2->set_name("l2");
auto* line3 = plane.add_lines();
line3->set_name("l3");
line3->add_events();
auto* line4 = plane.add_lines();
line4->set_name("l4");
RemoveEmptyLines(&plane);
ASSERT_EQ(plane.lines_size(), 2);
EXPECT_EQ(plane.lines(0).name(), "l1");
EXPECT_EQ(plane.lines(1).name(), "l3");
}
TEST(XPlaneUtilsTest, RemoveLine) {
XPlane plane;
const XLine* line1 = plane.add_lines();
const XLine* line2 = plane.add_lines();
const XLine* line3 = plane.add_lines();
RemoveLine(&plane, line2);
ASSERT_EQ(plane.lines_size(), 2);
EXPECT_EQ(&plane.lines(0), line1);
EXPECT_EQ(&plane.lines(1), line3);
}
TEST(XPlaneUtilsTest, RemoveEvents) {
XLine line;
const XEvent* event1 = line.add_events();
const XEvent* event2 = line.add_events();
const XEvent* event3 = line.add_events();
const XEvent* event4 = line.add_events();
RemoveEvents(&line, {event1, event3});
ASSERT_EQ(line.events_size(), 2);
EXPECT_EQ(&line.events(0), event2);
EXPECT_EQ(&line.events(1), event4);
}
TEST(XPlaneUtilsTest, SortXPlaneTest) {
XPlane plane;
XLine* line = plane.add_lines();
*line->add_events() = CreateEvent(200, 100);
*line->add_events() = CreateEvent(100, 100);
*line->add_events() = CreateEvent(120, 50);
*line->add_events() = CreateEvent(120, 30);
SortXPlane(&plane);
ASSERT_EQ(plane.lines_size(), 1);
ASSERT_EQ(plane.lines(0).events_size(), 4);
EXPECT_EQ(plane.lines(0).events(0).offset_ps(), 100);
EXPECT_EQ(plane.lines(0).events(0).duration_ps(), 100);
EXPECT_EQ(plane.lines(0).events(1).offset_ps(), 120);
EXPECT_EQ(plane.lines(0).events(1).duration_ps(), 50);
EXPECT_EQ(plane.lines(0).events(2).offset_ps(), 120);
EXPECT_EQ(plane.lines(0).events(2).duration_ps(), 30);
EXPECT_EQ(plane.lines(0).events(3).offset_ps(), 200);
EXPECT_EQ(plane.lines(0).events(3).duration_ps(), 100);
}
namespace {
XLineBuilder CreateXLine(XPlaneBuilder* plane, absl::string_view name,
absl::string_view display, int64_t id,
int64_t timestamp_ns) {
XLineBuilder line = plane->GetOrCreateLine(id);
line.SetName(name);
line.SetTimestampNs(timestamp_ns);
line.SetDisplayNameIfEmpty(display);
return line;
}
XEventBuilder CreateXEvent(XPlaneBuilder* plane, XLineBuilder line,
absl::string_view event_name,
std::optional<absl::string_view> display,
int64_t offset_ns, int64_t duration_ns) {
XEventMetadata* event_metadata = plane->GetOrCreateEventMetadata(event_name);
if (display) event_metadata->set_display_name(std::string(*display));
XEventBuilder event = line.AddEvent(*event_metadata);
event.SetOffsetNs(offset_ns);
event.SetDurationNs(duration_ns);
return event;
}
template <typename T, typename V>
void CreateXStats(XPlaneBuilder* plane, T* stats_owner,
absl::string_view stats_name, V stats_value) {
stats_owner->AddStatValue(*plane->GetOrCreateStatMetadata(stats_name),
stats_value);
}
void CheckXLine(const XLine& line, absl::string_view name,
absl::string_view display, int64_t start_time_ns,
int64_t events_size) {
EXPECT_EQ(line.name(), name);
EXPECT_EQ(line.display_name(), display);
EXPECT_EQ(line.timestamp_ns(), start_time_ns);
EXPECT_EQ(line.events_size(), events_size);
}
void CheckXEvent(const XEvent& event, const XPlane& plane,
absl::string_view name, absl::string_view display,
int64_t offset_ns, int64_t duration_ns, int64_t stats_size) {
const XEventMetadata& event_metadata =
plane.event_metadata().at(event.metadata_id());
EXPECT_EQ(event_metadata.name(), name);
EXPECT_EQ(event_metadata.display_name(), display);
EXPECT_EQ(event.offset_ps(), NanoToPico(offset_ns));
EXPECT_EQ(event.duration_ps(), NanoToPico(duration_ns));
EXPECT_EQ(event.stats_size(), stats_size);
}
}  // namespace
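// Two line ids are shared between the source and destination planes, so
// merging must reconcile their start timestamps: the merged line keeps the
// earlier timestamp and the later-starting side's events are shifted.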
TEST(XPlaneUtilsTest, MergeXPlaneTest) {
XPlane src_plane, dst_plane;
constexpr int64_t kLineIdOnlyInSrcPlane = 1LL;
constexpr int64_t kLineIdOnlyInDstPlane = 2LL;
constexpr int64_t kLineIdInBothPlanes = 3LL;
constexpr int64_t kLineIdInBothPlanes2 = 4LL;
{
XPlaneBuilder src(&src_plane);
CreateXStats(&src, &src, "plane_stat1", 1);
CreateXStats(&src, &src, "plane_stat3", 3.0);
auto l1 = CreateXLine(&src, "l1", "d1", kLineIdOnlyInSrcPlane, 100);
auto e1 = CreateXEvent(&src, l1, "event1", "display1", 1, 2);
CreateXStats(&src, &e1, "event_stat1", 2.0);
auto e2 = CreateXEvent(&src, l1, "event2", std::nullopt, 3, 4);
CreateXStats(&src, &e2, "event_stat2", 3);
auto l2 = CreateXLine(&src, "l2", "d2", kLineIdInBothPlanes, 200);
auto e3 = CreateXEvent(&src, l2, "event3", std::nullopt, 5, 7);
CreateXStats(&src, &e3, "event_stat3", 2.0);
auto e4 = CreateXEvent(&src, l2, "event4", std::nullopt, 6, 8);
CreateXStats(&src, &e4, "event_stat4", 3);
CreateXStats(&src, &e4, "event_stat5", 3);
auto l5 = CreateXLine(&src, "l5", "d5", kLineIdInBothPlanes2, 700);
CreateXEvent(&src, l5, "event51", std::nullopt, 9, 10);
CreateXEvent(&src, l5, "event52", std::nullopt, 11, 12);
}
{
XPlaneBuilder dst(&dst_plane);
CreateXStats(&dst, &dst, "plane_stat2", 2);
CreateXStats(&dst, &dst, "plane_stat3", 4);
auto l3 = CreateXLine(&dst, "l3", "d3", kLineIdOnlyInDstPlane, 300);
auto e5 = CreateXEvent(&dst, l3, "event5", std::nullopt, 11, 2);
CreateXStats(&dst, &e5, "event_stat6", 2.0);
auto e6 = CreateXEvent(&dst, l3, "event6", std::nullopt, 13, 4);
CreateXStats(&dst, &e6, "event_stat7", 3);
auto l2 = CreateXLine(&dst, "l4", "d4", kLineIdInBothPlanes, 400);
auto e7 = CreateXEvent(&dst, l2, "event7", std::nullopt, 15, 7);
CreateXStats(&dst, &e7, "event_stat8", 2.0);
auto e8 = CreateXEvent(&dst, l2, "event8", "display8", 16, 8);
CreateXStats(&dst, &e8, "event_stat9", 3);
auto l6 = CreateXLine(&dst, "l6", "d6", kLineIdInBothPlanes2, 300);
CreateXEvent(&dst, l6, "event61", std::nullopt, 21, 10);
CreateXEvent(&dst, l6, "event62", std::nullopt, 22, 12);
}
MergePlanes(src_plane, &dst_plane);
XPlaneVisitor plane(&dst_plane);
EXPECT_EQ(dst_plane.lines_size(), 4);
EXPECT_EQ(dst_plane.stats_size(), 3);
plane.ForEachStat([&](const XStatVisitor& stat) {
if (stat.Name() == "plane_stat1") {
EXPECT_EQ(stat.IntValue(), 1);
} else if (stat.Name() == "plane_stat2") {
EXPECT_EQ(stat.IntValue(), 2);
} else if (stat.Name() == "plane_stat3") {
EXPECT_EQ(stat.DoubleValue(), 3.0);
} else {
ADD_FAILURE() << "unexpected plane stat: " << stat.Name();
}
});
EXPECT_EQ(dst_plane.stat_metadata_size(), 12);
{
const XLine& line = dst_plane.lines(0);
CheckXLine(line, "l3", "d3", 300, 2);
CheckXEvent(line.events(0), dst_plane, "event5", "", 11, 2, 1);
CheckXEvent(line.events(1), dst_plane, "event6", "", 13, 4, 1);
}
{
const XLine& line = dst_plane.lines(1);
CheckXLine(line, "l4", "d4", 200, 4);
CheckXEvent(line.events(0), dst_plane, "event7", "", 215, 7, 1);
CheckXEvent(line.events(1), dst_plane, "event8", "display8", 216, 8, 1);
CheckXEvent(line.events(2), dst_plane, "event3", "", 5, 7, 1);
CheckXEvent(line.events(3), dst_plane, "event4", "", 6, 8, 2);
}
{
const XLine& line = dst_plane.lines(2);
CheckXLine(line, "l6", "d6", 300, 4);
CheckXEvent(line.events(0), dst_plane, "event61", "", 21, 10, 0);
CheckXEvent(line.events(1), dst_plane, "event62", "", 22, 12, 0);
CheckXEvent(line.events(2), dst_plane, "event51", "", 409, 10, 0);
CheckXEvent(line.events(3), dst_plane, "event52", "", 411, 12, 0);
}
{
const XLine& line = dst_plane.lines(3);
CheckXLine(line, "l1", "d1", 100, 2);
CheckXEvent(line.events(0), dst_plane, "event1", "display1", 1, 2, 1);
CheckXEvent(line.events(1), dst_plane, "event2", "", 3, 4, 1);
}
}
TEST(XPlaneUtilsTest, FindPlanesWithPrefix) {
XSpace xspace;
FindOrAddMutablePlaneWithName(&xspace, "test-prefix:0");
FindOrAddMutablePlaneWithName(&xspace, "test-prefix:1");
FindOrAddMutablePlaneWithName(&xspace, "test-prefix:2");
FindOrAddMutablePlaneWithName(&xspace, "test-prefix:3");
XPlane* p4 = FindOrAddMutablePlaneWithName(&xspace, "test-do-not-include:0");
std::vector<const XPlane*> xplanes =
FindPlanesWithPrefix(xspace, "test-prefix");
ASSERT_EQ(4, xplanes.size());
for (const XPlane* plane : xplanes) {
ASSERT_NE(p4, plane);
}
}
TEST(XPlaneUtilsTest, FindMutablePlanesWithPrefix) {
XSpace xspace;
FindOrAddMutablePlaneWithName(&xspace, "test-prefix:0");
FindOrAddMutablePlaneWithName(&xspace, "test-prefix:1");
FindOrAddMutablePlaneWithName(&xspace, "test-prefix:2");
FindOrAddMutablePlaneWithName(&xspace, "test-prefix:3");
XPlane* p4 = FindOrAddMutablePlaneWithName(&xspace, "test-do-not-include:0");
std::vector<XPlane*> xplanes =
FindMutablePlanesWithPrefix(&xspace, "test-prefix");
ASSERT_EQ(4, xplanes.size());
for (XPlane* plane : xplanes) {
ASSERT_NE(p4, plane);
}
}
TEST(XPlaneUtilsTest, FindPlanesWithPredicate) {
XSpace xspace;
FindOrAddMutablePlaneWithName(&xspace, "test-prefix:0");
XPlane* p1 = FindOrAddMutablePlaneWithName(&xspace, "test-prefix:1");
std::vector<const XPlane*> xplanes = FindPlanes(
xspace,
[](const XPlane& xplane) { return xplane.name() == "test-prefix:1"; });
ASSERT_EQ(1, xplanes.size());
ASSERT_EQ(p1, xplanes[0]);
}
TEST(XPlaneUtilsTest, FindMutablePlanesWithPredicate) {
XSpace xspace;
FindOrAddMutablePlaneWithName(&xspace, "test-prefix:0");
XPlane* p1 = FindOrAddMutablePlaneWithName(&xspace, "test-prefix:1");
std::vector<XPlane*> xplanes = FindMutablePlanes(
&xspace, [](XPlane& xplane) { return xplane.name() == "test-prefix:1"; });
ASSERT_EQ(1, xplanes.size());
ASSERT_EQ(p1, xplanes[0]);
}
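// Builds a step line plus overlapping framework-op events and checks that
// aggregation collapses repeated events into occurrence counts with total,
// minimum, and self durations.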
TEST(XPlaneUtilsTest, TestAggregateXPlanes) {
XPlane xplane;
XPlaneBuilder builder(&xplane);
auto& event_metadata1 = *builder.GetOrCreateEventMetadata("EventMetadata1");
auto& event_metadata2 = *builder.GetOrCreateEventMetadata("EventMetadata2");
auto& event_metadata3 = *builder.GetOrCreateEventMetadata("EventMetadata3");
auto& event_metadata4 = *builder.GetOrCreateEventMetadata("EventMetadata4");
auto& step_event_metadata1 =
*builder.GetOrCreateEventMetadata("StepEventMetadata1");
auto& step_event_metadata2 =
*builder.GetOrCreateEventMetadata("StepEventMetadata2");
XLineBuilder step_line = builder.GetOrCreateLine(1);
step_line.SetName(kStepLineName);
XEventBuilder step1 = step_line.AddEvent(step_event_metadata1);
step1.SetOffsetNs(0);
step1.SetDurationNs(10);
XEventBuilder step2 = step_line.AddEvent(step_event_metadata2);
step2.SetOffsetNs(10);
step2.SetDurationNs(10);
XLineBuilder line = builder.GetOrCreateLine(2);
line.SetName(kTensorFlowOpLineName);
XEventBuilder event1 = line.AddEvent(event_metadata1);
event1.SetOffsetNs(0);
event1.SetDurationNs(5);
XEventBuilder event3 = line.AddEvent(event_metadata3);
event3.SetOffsetNs(0);
event3.SetDurationNs(2);
XEventBuilder event2 = line.AddEvent(event_metadata2);
event2.SetOffsetNs(5);
event2.SetDurationNs(5);
XEventBuilder event4 = line.AddEvent(event_metadata2);
event4.SetOffsetNs(10);
event4.SetDurationNs(5);
XEventBuilder event5 = line.AddEvent(event_metadata4);
event5.SetOffsetNs(15);
event5.SetDurationNs(6);
XEventBuilder event6 = line.AddEvent(event_metadata1);
event6.SetOffsetNs(15);
event6.SetDurationNs(4);
XEventBuilder event7 = line.AddEvent(event_metadata3);
event7.SetOffsetNs(15);
event7.SetDurationNs(3);
XPlane aggregated_xplane;
AggregateXPlane(xplane, aggregated_xplane);
#if defined(PLATFORM_GOOGLE)
ASSERT_THAT(
aggregated_xplane,
IgnoringFields(
{"tensorflow.profiler.XEvent.metadata_id",
"tensorflow.profiler.XPlane.event_metadata"},
IgnoringRepeatedFieldOrdering(EqualsProto(
R"pb(lines {
id: 1
name: "Steps"
events { metadata_id: 1 offset_ps: 0 duration_ps: 10000 }
events {
metadata_id: 2
offset_ps: 10000
duration_ps: 10000
}
}
lines {
id: 2
name: "Framework Ops"
events {
metadata_id: 3
duration_ps: 10000
stats { metadata_id: 2 int64_value: 5000 }
num_occurrences: 2
}
events {
metadata_id: 4
duration_ps: 5000
stats { metadata_id: 2 int64_value: 2000 }
num_occurrences: 2
}
events {
metadata_id: 5
duration_ps: 9000
stats { metadata_id: 2 int64_value: 4000 }
stats { metadata_id: 3 int64_value: 4000 }
num_occurrences: 2
}
events {
metadata_id: 6
duration_ps: 6000
stats { metadata_id: 3 int64_value: 2000 }
num_occurrences: 1
}
}
stat_metadata {
key: 1
value { id: 1 name: "total_profile_duration_ps" }
}
stat_metadata {
key: 2
value { id: 2 name: "min_duration_ps" }
}
stat_metadata {
key: 3
value { id: 3 name: "self_duration_ps" }
}
stat_metadata {
key: 4
value { id: 4 name: "group_id" }
}
stats { metadata_id: 1 uint64_value: 21000 }
)pb"))));
std::vector<std::string> event_metadata_names;
for (const auto& [id, event_metadata] : aggregated_xplane.event_metadata()) {
event_metadata_names.push_back(event_metadata.name());
}
EXPECT_THAT(event_metadata_names,
UnorderedElementsAre("EventMetadata1", "EventMetadata2",
"EventMetadata3", "EventMetadata4",
"StepEventMetadata1", "StepEventMetadata2"));
#endif
}
TEST(XPlaneUtilsTest, TestInstantEventDoesNotFail) {
XPlane xplane;
XPlaneBuilder xplane_builder(&xplane);
XEventMetadata* event_metadata1 = xplane_builder.GetOrCreateEventMetadata(1);
XEventMetadata* event_metadata2 = xplane_builder.GetOrCreateEventMetadata(2);
XLineBuilder line = xplane_builder.GetOrCreateLine(1);
line.SetName(kTensorFlowOpLineName);
XEventBuilder event1 = line.AddEvent(*event_metadata1);
XEventBuilder event2 = line.AddEvent(*event_metadata2);
event1.SetOffsetNs(1);
event1.SetDurationNs(0);
event2.SetOffsetNs(1);
event2.SetDurationNs(0);
XPlane aggregated_xplane;
AggregateXPlane(xplane, aggregated_xplane);
EXPECT_THAT(aggregated_xplane.lines(),
UnorderedElementsAre(Property(&XLine::events, SizeIs(2))));
}
TEST(XPlaneUtilsTest, TestEventMetadataStatsAreCopied) {
XPlane xplane;
XPlaneBuilder xplane_builder(&xplane);
XEventMetadata* event_metadata = xplane_builder.GetOrCreateEventMetadata(1);
XStatsBuilder<XEventMetadata> stats(event_metadata, &xplane_builder);
stats.AddStatValue(
*xplane_builder.GetOrCreateStatMetadata(GetStatTypeStr(StatType::kTfOp)),
"TestFunction");
XLineBuilder line = xplane_builder.GetOrCreateLine(1);
line.SetName(kTensorFlowOpLineName);
XEventBuilder event = line.AddEvent(*event_metadata);
event.SetDurationNs(0);
event.SetOffsetNs(0);
XPlane aggregated_xplane;
AggregateXPlane(xplane, aggregated_xplane);
XPlaneVisitor visitor = CreateTfXPlaneVisitor(&aggregated_xplane);
XEventMetadataVisitor metadata_visitor(&visitor, visitor.GetEventMetadata(1));
std::optional<XStatVisitor> stat = metadata_visitor.GetStat(StatType::kTfOp);
ASSERT_TRUE(stat.has_value());
EXPECT_EQ(stat->Name(), "tf_op");
EXPECT_EQ(stat->StrOrRefValue(), "TestFunction");
}
TEST(XPlaneUtilsTest, TestEventMetadataStatsAreCopiedForRefValue) {
XPlane xplane;
XPlaneBuilder xplane_builder(&xplane);
XEventMetadata* event_metadata = xplane_builder.GetOrCreateEventMetadata(1);
XStatsBuilder<XEventMetadata> stats(event_metadata, &xplane_builder);
stats.AddStatValue(
*xplane_builder.GetOrCreateStatMetadata(GetStatTypeStr(StatType::kTfOp)),
*xplane_builder.GetOrCreateStatMetadata("TestFunction"));
XLineBuilder line = xplane_builder.GetOrCreateLine(1);
line.SetName(kTensorFlowOpLineName);
XEventBuilder event = line.AddEvent(*event_metadata);
event.SetDurationNs(0);
event.SetOffsetNs(0);
XPlane aggregated_xplane;
AggregateXPlane(xplane, aggregated_xplane);
XPlaneVisitor visitor = CreateTfXPlaneVisitor(&aggregated_xplane);
XEventMetadataVisitor metadata_visitor(&visitor, visitor.GetEventMetadata(1));
std::optional<XStatVisitor> stat = metadata_visitor.GetStat(StatType::kTfOp);
ASSERT_TRUE(stat.has_value());
EXPECT_EQ(stat->Name(), "tf_op");
EXPECT_EQ(stat->StrOrRefValue(), "TestFunction");
}
TEST(XplaneutilsTest, TestIsXSpaceGrouped) {
XSpace space;
{
XPlaneBuilder p1(space.add_planes());
auto l1 = CreateXLine(&p1, "l1", "d1", 1, 100);
auto e1 = CreateXEvent(&p1, l1, "event1", "display1", 1, 2);
CreateXStats(&p1, &e1, "event_stat1", 2.0);
}
EXPECT_FALSE(IsXSpaceGrouped(space));
{
XPlaneBuilder p2(space.add_planes());
auto l2 = CreateXLine(&p2, "l2", "d2", 1, 100);
auto e2 = CreateXEvent(&p2, l2, "event2", "display2", 1, 2);
CreateXStats(&p2, &e2, "group_id", 1);
}
LOG(ERROR) << space.DebugString();
EXPECT_TRUE(IsXSpaceGrouped(space));
}
TEST(XplaneutilsTest, TestIsHostPlane) {
XSpace xspace;
auto xplane_host_thread = FindOrAddMutablePlaneWithName(&xspace, "/host:CPU");
auto xplane_host_cpu = FindOrAddMutablePlaneWithName(&xspace, "Host CPUs");
auto xplane_tfstreamz =
FindOrAddMutablePlaneWithName(&xspace, "/host:tfstreamz");
auto xplane_metadata =
FindOrAddMutablePlaneWithName(&xspace, "/host:metadata");
auto xplane_syscalls = FindOrAddMutablePlaneWithName(&xspace, "Syscalls");
auto xplane_python_tracer =
FindOrAddMutablePlaneWithName(&xspace, "/host:python-tracer");
auto xplane_custom_prefix =
FindOrAddMutablePlaneWithName(&xspace, "/device:CUSTOM:123");
auto xplane_legacy_custom =
FindOrAddMutablePlaneWithName(&xspace, "/custom:456");
auto xplane_cupti = FindOrAddMutablePlaneWithName(&xspace, "/host:CUPTI");
EXPECT_TRUE(IsHostPlane(*xplane_host_thread));
EXPECT_TRUE(IsHostPlane(*xplane_host_cpu));
EXPECT_TRUE(IsHostPlane(*xplane_tfstreamz));
EXPECT_TRUE(IsHostPlane(*xplane_metadata));
EXPECT_TRUE(IsHostPlane(*xplane_syscalls));
EXPECT_TRUE(IsHostPlane(*xplane_python_tracer));
EXPECT_FALSE(IsHostPlane(*xplane_custom_prefix));
EXPECT_FALSE(IsHostPlane(*xplane_legacy_custom));
EXPECT_TRUE(IsHostPlane(*xplane_cupti));
}
TEST(XplaneutilsTest, TestIsDevicePlane) {
XSpace xspace;
auto xplane_host_thread = FindOrAddMutablePlaneWithName(&xspace, "/host:CPU");
auto xplane_device_thread =
FindOrAddMutablePlaneWithName(&xspace, "/device:TPU");
auto xplane_task_env_thread =
FindOrAddMutablePlaneWithName(&xspace, "Task Environment");
auto xplane_custom_prefix =
FindOrAddMutablePlaneWithName(&xspace, "/device:CUSTOM:123");
auto xplane_legacy_custom =
FindOrAddMutablePlaneWithName(&xspace, "/custom:456");
EXPECT_FALSE(IsDevicePlane(*xplane_host_thread));
EXPECT_FALSE(IsDevicePlane(*xplane_task_env_thread));
EXPECT_TRUE(IsDevicePlane(*xplane_device_thread));
EXPECT_TRUE(IsDevicePlane(*xplane_custom_prefix));
EXPECT_TRUE(IsDevicePlane(*xplane_legacy_custom));
}
TEST(XplaneUtilsTest, XPlaneGroupingPropagatesStep) {
XPlane xplane;
XPlaneBuilder builder(&xplane);
XStatMetadata* kGroupId =
builder.GetOrCreateStatMetadata(GetStatTypeStr(StatType::kGroupId));
XLineBuilder line = builder.GetOrCreateLine(1);
line.SetName(kStepLineName);
XEventMetadata* event_metadata = builder.GetOrCreateEventMetadata(1);
event_metadata->set_name("Step 1");
XEventBuilder event_builder = line.AddEvent(*event_metadata);
event_builder.AddStatValue(*kGroupId, 1);
event_builder.SetDurationNs(100);
event_builder.SetOffsetNs(100);
XEventMetadata* event_metadata2 = builder.GetOrCreateEventMetadata(2);
event_metadata2->set_name("Step 2");
XEventBuilder event_builder2 = line.AddEvent(*event_metadata2);
event_builder2.AddStatValue(*kGroupId, 2);
event_builder2.SetDurationNs(100);
event_builder2.SetOffsetNs(300);
XPlane aggregated_xplane;
AggregateXPlane(xplane, aggregated_xplane);
#if defined(PLATFORM_GOOGLE)
EXPECT_THAT(aggregated_xplane, Partially(EqualsProto(xplane)));
#endif
}
TEST(XplaneUtilsTest, XPlaneGroupingPropagatesGroupId) {
XPlane xplane;
XPlaneBuilder builder(&xplane);
XEventMetadata* event_metadata1 = builder.GetOrCreateEventMetadata(1);
event_metadata1->set_name("EventMetadata1");
XStatMetadata* kGroupId =
builder.GetOrCreateStatMetadata(GetStatTypeStr(StatType::kGroupId));
XLineBuilder line = builder.GetOrCreateLine(1);
line.SetName(kXlaOpLineName);
XEventBuilder event_builder = line.AddEvent(*event_metadata1);
event_builder.SetDurationNs(100);
event_builder.SetOffsetNs(100);
event_builder.AddStatValue(*kGroupId, 1);
XEventBuilder event_builder2 = line.AddEvent(*event_metadata1);
event_builder2.AddStatValue(*kGroupId, 2);
event_builder2.SetDurationNs(100);
event_builder2.SetOffsetNs(300);
XPlane aggregated_xplane;
AggregateXPlane(xplane, aggregated_xplane);
EXPECT_THAT(aggregated_xplane.lines(),
UnorderedElementsAre(Property(&XLine::events, SizeIs(2))));
XPlaneVisitor visitor = CreateTfXPlaneVisitor(&aggregated_xplane);
visitor.ForEachLine([&](const XLineVisitor& line) {
line.ForEachEvent([&](const XEventVisitor& event) {
EXPECT_TRUE(event.GetStat(StatType::kGroupId).has_value());
});
});
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/profiler/utils/xplane_utils.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/profiler/utils/xplane_utils_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
044f3244-444c-4c46-bd49-7ff09be2cf0e | cpp | google/quiche | aes_128_gcm_encrypter | quiche/quic/core/crypto/aes_128_gcm_encrypter.cc | quiche/quic/core/crypto/aes_128_gcm_encrypter_test.cc | #include "quiche/quic/core/crypto/aes_128_gcm_encrypter.h"
#include "openssl/evp.h"
namespace quic {
namespace {
const size_t kKeySize = 16;
const size_t kNonceSize = 12;
}
Aes128GcmEncrypter::Aes128GcmEncrypter()
: AesBaseEncrypter(EVP_aead_aes_128_gcm, kKeySize, kAuthTagSize, kNonceSize,
true) {
static_assert(kKeySize <= kMaxKeySize, "key size too big");
static_assert(kNonceSize <= kMaxNonceSize, "nonce size too big");
}
Aes128GcmEncrypter::~Aes128GcmEncrypter() {}
} | #include "quiche/quic/core/crypto/aes_128_gcm_encrypter.h"
#include <memory>
#include <string>
#include <vector>
#include "absl/base/macros.h"
#include "absl/strings/escaping.h"
#include "quiche/quic/core/quic_utils.h"
#include "quiche/quic/platform/api/quic_test.h"
#include "quiche/quic/test_tools/quic_test_utils.h"
#include "quiche/common/test_tools/quiche_test_utils.h"
namespace {
struct TestGroupInfo {
size_t key_len;
size_t iv_len;
size_t pt_len;
size_t aad_len;
size_t tag_len;
};
struct TestVector {
const char* key;
const char* iv;
const char* pt;
const char* aad;
const char* ct;
const char* tag;
};
const TestGroupInfo test_group_info[] = {
{128, 96, 0, 0, 128}, {128, 96, 0, 128, 128}, {128, 96, 128, 0, 128},
{128, 96, 408, 160, 128}, {128, 96, 408, 720, 128}, {128, 96, 104, 0, 128},
};
const TestVector test_group_0[] = {
{"11754cd72aec309bf52f7687212e8957", "3c819d9a9bed087615030b65", "", "", "",
"250327c674aaf477aef2675748cf6971"},
{"ca47248ac0b6f8372a97ac43508308ed", "ffd2b598feabc9019262d2be", "", "", "",
"60d20404af527d248d893ae495707d1a"},
{nullptr, nullptr, nullptr, nullptr, nullptr, nullptr}};
const TestVector test_group_1[] = {
{"77be63708971c4e240d1cb79e8d77feb", "e0e00f19fed7ba0136a797f3", "",
"7a43ec1d9c0a5a78a0b16533a6213cab", "",
"209fcc8d3675ed938e9c7166709dd946"},
{"7680c5d3ca6154758e510f4d25b98820", "f8f105f9c3df4965780321f8", "",
"c94c410194c765e3dcc7964379758ed3", "",
"94dca8edfcf90bb74b153c8d48a17930"},
{nullptr, nullptr, nullptr, nullptr, nullptr, nullptr}};
const TestVector test_group_2[] = {
{"7fddb57453c241d03efbed3ac44e371c", "ee283a3fc75575e33efd4887",
"d5de42b461646c255c87bd2962d3b9a2", "", "2ccda4a5415cb91e135c2a0f78c9b2fd",
"b36d1df9b9d5e596f83e8b7f52971cb3"},
{"ab72c77b97cb5fe9a382d9fe81ffdbed", "54cc7dc2c37ec006bcc6d1da",
"007c5e5b3e59df24a7c355584fc1518d", "", "0e1bde206a07a9c2c1b65300f8c64997",
"2b4401346697138c7a4891ee59867d0c"},
{nullptr, nullptr, nullptr, nullptr, nullptr, nullptr}};
const TestVector test_group_3[] = {
{"fe47fcce5fc32665d2ae399e4eec72ba", "5adb9609dbaeb58cbd6e7275",
"7c0e88c88899a779228465074797cd4c2e1498d259b54390b85e3eef1c02df60e743f1"
"b840382c4bccaf3bafb4ca8429bea063",
"88319d6e1d3ffa5f987199166c8a9b56c2aeba5a",
"98f4826f05a265e6dd2be82db241c0fbbbf9ffb1c173aa83964b7cf539304373636525"
"3ddbc5db8778371495da76d269e5db3e",
"291ef1982e4defedaa2249f898556b47"},
{"ec0c2ba17aa95cd6afffe949da9cc3a8", "296bce5b50b7d66096d627ef",
"b85b3753535b825cbe5f632c0b843c741351f18aa484281aebec2f45bb9eea2d79d987"
"b764b9611f6c0f8641843d5d58f3a242",
"f8d00f05d22bf68599bcdeb131292ad6e2df5d14",
"a7443d31c26bdf2a1c945e29ee4bd344a99cfaf3aa71f8b3f191f83c2adfc7a0716299"
"5506fde6309ffc19e716eddf1a828c5a",
"890147971946b627c40016da1ecf3e77"},
{nullptr, nullptr, nullptr, nullptr, nullptr, nullptr}};
const TestVector test_group_4[] = {
{"2c1f21cf0f6fb3661943155c3e3d8492", "23cb5ff362e22426984d1907",
"42f758836986954db44bf37c6ef5e4ac0adaf38f27252a1b82d02ea949c8a1a2dbc0d6"
"8b5615ba7c1220ff6510e259f06655d8",
"5d3624879d35e46849953e45a32a624d6a6c536ed9857c613b572b0333e701557a713e"
"3f010ecdf9a6bd6c9e3e44b065208645aff4aabee611b391528514170084ccf587177f"
"4488f33cfb5e979e42b6e1cfc0a60238982a7aec",
"81824f0e0d523db30d3da369fdc0d60894c7a0a20646dd015073ad2732bd989b14a222"
"b6ad57af43e1895df9dca2a5344a62cc",
"57a3ee28136e94c74838997ae9823f3a"},
{"d9f7d2411091f947b4d6f1e2d1f0fb2e", "e1934f5db57cc983e6b180e7",
"73ed042327f70fe9c572a61545eda8b2a0c6e1d6c291ef19248e973aee6c312012f490"
"c2c6f6166f4a59431e182663fcaea05a",
"0a8a18a7150e940c3d87b38e73baee9a5c049ee21795663e264b694a949822b639092d"
"0e67015e86363583fcf0ca645af9f43375f05fdb4ce84f411dcbca73c2220dea03a201"
"15d2e51398344b16bee1ed7c499b353d6c597af8",
"aaadbd5c92e9151ce3db7210b8714126b73e43436d242677afa50384f2149b831f1d57"
"3c7891c2a91fbc48db29967ec9542b23",
"21b51ca862cb637cdd03b99a0f93b134"},
{nullptr, nullptr, nullptr, nullptr, nullptr, nullptr}};
const TestVector test_group_5[] = {
{"fe9bb47deb3a61e423c2231841cfd1fb", "4d328eb776f500a2f7fb47aa",
"f1cc3818e421876bb6b8bbd6c9", "", "b88c5c1977b35b517b0aeae967",
"43fd4727fe5cdb4b5b42818dea7ef8c9"},
{"6703df3701a7f54911ca72e24dca046a", "12823ab601c350ea4bc2488c",
"793cd125b0b84a043e3ac67717", "", "b2051c80014f42f08735a7b0cd",
"38e6bcd29962e5f2c13626b85a877101"},
{nullptr, nullptr, nullptr, nullptr, nullptr, nullptr}};
const TestVector* const test_group_array[] = {
test_group_0, test_group_1, test_group_2,
test_group_3, test_group_4, test_group_5,
};
}
namespace quic {
namespace test {
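// Test-only helper (not part of the production API): encrypts `plaintext`
// with an explicit nonce and returns a heap-allocated buffer holding
// ciphertext || GCM auth tag, sized via GetCiphertextSize(). The
// GetCiphertextSize tests below confirm the 16-byte tag overhead.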
QuicData* EncryptWithNonce(Aes128GcmEncrypter* encrypter,
absl::string_view nonce,
absl::string_view associated_data,
absl::string_view plaintext) {
size_t ciphertext_size = encrypter->GetCiphertextSize(plaintext.length());
std::unique_ptr<char[]> ciphertext(new char[ciphertext_size]);
if (!encrypter->Encrypt(nonce, associated_data, plaintext,
reinterpret_cast<unsigned char*>(ciphertext.get()))) {
return nullptr;
}
return new QuicData(ciphertext.release(), ciphertext_size, true);
}
class Aes128GcmEncrypterTest : public QuicTest {};
TEST_F(Aes128GcmEncrypterTest, Encrypt) {
for (size_t i = 0; i < ABSL_ARRAYSIZE(test_group_array); i++) {
SCOPED_TRACE(i);
const TestVector* test_vectors = test_group_array[i];
const TestGroupInfo& test_info = test_group_info[i];
for (size_t j = 0; test_vectors[j].key != nullptr; j++) {
std::string key;
std::string iv;
std::string pt;
std::string aad;
std::string ct;
std::string tag;
ASSERT_TRUE(absl::HexStringToBytes(test_vectors[j].key, &key));
ASSERT_TRUE(absl::HexStringToBytes(test_vectors[j].iv, &iv));
ASSERT_TRUE(absl::HexStringToBytes(test_vectors[j].pt, &pt));
ASSERT_TRUE(absl::HexStringToBytes(test_vectors[j].aad, &aad));
ASSERT_TRUE(absl::HexStringToBytes(test_vectors[j].ct, &ct));
ASSERT_TRUE(absl::HexStringToBytes(test_vectors[j].tag, &tag));
EXPECT_EQ(test_info.key_len, key.length() * 8);
EXPECT_EQ(test_info.iv_len, iv.length() * 8);
EXPECT_EQ(test_info.pt_len, pt.length() * 8);
EXPECT_EQ(test_info.aad_len, aad.length() * 8);
EXPECT_EQ(test_info.pt_len, ct.length() * 8);
EXPECT_EQ(test_info.tag_len, tag.length() * 8);
Aes128GcmEncrypter encrypter;
ASSERT_TRUE(encrypter.SetKey(key));
std::unique_ptr<QuicData> encrypted(
EncryptWithNonce(&encrypter, iv,
aad.length() ? aad : absl::string_view(), pt));
ASSERT_TRUE(encrypted.get());
ASSERT_EQ(ct.length() + tag.length(), encrypted->length());
quiche::test::CompareCharArraysWithHexError(
"ciphertext", encrypted->data(), ct.length(), ct.data(), ct.length());
quiche::test::CompareCharArraysWithHexError(
"authentication tag", encrypted->data() + ct.length(), tag.length(),
tag.data(), tag.length());
}
}
}
TEST_F(Aes128GcmEncrypterTest, EncryptPacket) {
std::string key;
std::string iv;
std::string aad;
std::string pt;
std::string ct;
ASSERT_TRUE(absl::HexStringToBytes("d95a145250826c25a77b6a84fd4d34fc", &key));
ASSERT_TRUE(absl::HexStringToBytes("50c4431ebb18283448e276e2", &iv));
ASSERT_TRUE(
absl::HexStringToBytes("875d49f64a70c9cbe713278f44ff000005", &aad));
ASSERT_TRUE(absl::HexStringToBytes("aa0003a250bd000000000001", &pt));
ASSERT_TRUE(absl::HexStringToBytes(
"7dd4708b989ee7d38a013e3656e9b37beefd05808fe1ab41e3b4f2c0", &ct));
uint64_t packet_num = 0x13278f44;
std::vector<char> out(ct.size());
size_t out_size;
Aes128GcmEncrypter encrypter;
ASSERT_TRUE(encrypter.SetKey(key));
ASSERT_TRUE(encrypter.SetIV(iv));
ASSERT_TRUE(encrypter.EncryptPacket(packet_num, aad, pt, out.data(),
&out_size, out.size()));
EXPECT_EQ(out_size, out.size());
quiche::test::CompareCharArraysWithHexError("ciphertext", out.data(),
out.size(), ct.data(), ct.size());
}
TEST_F(Aes128GcmEncrypterTest, GetMaxPlaintextSize) {
Aes128GcmEncrypter encrypter;
EXPECT_EQ(1000u, encrypter.GetMaxPlaintextSize(1016));
EXPECT_EQ(100u, encrypter.GetMaxPlaintextSize(116));
EXPECT_EQ(10u, encrypter.GetMaxPlaintextSize(26));
}
TEST_F(Aes128GcmEncrypterTest, GetCiphertextSize) {
Aes128GcmEncrypter encrypter;
EXPECT_EQ(1016u, encrypter.GetCiphertextSize(1000));
EXPECT_EQ(116u, encrypter.GetCiphertextSize(100));
EXPECT_EQ(26u, encrypter.GetCiphertextSize(10));
}
TEST_F(Aes128GcmEncrypterTest, GenerateHeaderProtectionMask) {
Aes128GcmEncrypter encrypter;
std::string key;
std::string sample;
std::string expected_mask;
ASSERT_TRUE(absl::HexStringToBytes("d9132370cb18476ab833649cf080d970", &key));
ASSERT_TRUE(
absl::HexStringToBytes("d1d7998068517adb769b48b924a32c47", &sample));
ASSERT_TRUE(absl::HexStringToBytes("b132c37d6164da4ea4dc9b763aceec27",
&expected_mask));
ASSERT_TRUE(encrypter.SetHeaderProtectionKey(key));
std::string mask = encrypter.GenerateHeaderProtectionMask(sample);
quiche::test::CompareCharArraysWithHexError(
"header protection mask", mask.data(), mask.size(), expected_mask.data(),
expected_mask.size());
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/crypto/aes_128_gcm_encrypter.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/crypto/aes_128_gcm_encrypter_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
e70ccc92-ee3a-4f91-a06b-cda54f120d37 | cpp | google/arolla | chain_slot_listener | arolla/io/chain_slot_listener.h | arolla/io/chain_slot_listener_test.cc | #ifndef AROLLA_IO_CHAIN_SLOT_LISTENER_H_
#define AROLLA_IO_CHAIN_SLOT_LISTENER_H_
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/base/nullability.h"
#include "absl/container/flat_hash_map.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "arolla/io/slot_listener.h"
#include "arolla/memory/frame.h"
#include "arolla/qtype/qtype.h"
#include "arolla/qtype/typed_slot.h"
#include "arolla/util/status_macros_backport.h"
namespace arolla {
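// Combines several SlotListeners for the same Output type into one. Name
// lookups go to the first listener that recognizes the name; binding
// partially binds every listener and invokes each bound listener in order.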
template <class Output>
class ChainSlotListener final : public SlotListener<Output> {
struct PrivateConstructorTag {};
public:
template <class... ListenerTs>
static absl::StatusOr<std::unique_ptr<SlotListener<Output>>> Build(
std::unique_ptr<ListenerTs>... listeners) {
std::vector<std::unique_ptr<SlotListener<Output>>> listeners_vec;
(listeners_vec.push_back(std::move(listeners)), ...);
return Build(std::move(listeners_vec));
}
static absl::StatusOr<std::unique_ptr<SlotListener<Output>>> Build(
std::vector<std::unique_ptr<SlotListener<Output>>> listeners) {
return std::make_unique<ChainSlotListener>(PrivateConstructorTag{},
std::move(listeners));
}
absl::Nullable<const QType*> GetQTypeOf(
absl::string_view name,
absl::Nullable<const QType*> desired_qtype) const final {
for (const auto& listener : listeners_) {
if (auto qtype = listener->GetQTypeOf(name, desired_qtype);
qtype != nullptr) {
return qtype;
}
}
return nullptr;
}
std::vector<std::string> SuggestAvailableNames() const final {
std::vector<std::string> names;
for (const auto& listener : listeners_) {
auto available = listener->SuggestAvailableNames();
names.insert(names.end(), available.begin(), available.end());
}
return names;
}
explicit ChainSlotListener(
PrivateConstructorTag,
std::vector<std::unique_ptr<SlotListener<Output>>> listeners)
: listeners_(std::move(listeners)) {}
private:
absl::StatusOr<BoundSlotListener<Output>> BindImpl(
const absl::flat_hash_map<std::string, TypedSlot>& slots) const final {
std::vector<BoundSlotListener<Output>> bound_listeners;
bound_listeners.reserve(listeners_.size());
for (const auto& listener : listeners_) {
ASSIGN_OR_RETURN(std::optional<BoundSlotListener<Output>> bound_listener,
listener->PartialBind(slots));
if (bound_listener.has_value()) {
bound_listeners.push_back(*std::move(bound_listener));
}
}
if (bound_listeners.empty()) {
return BoundSlotListener<Output>(
[](ConstFramePtr, Output*) { return absl::OkStatus(); });
}
if (bound_listeners.size() == 1) {
return std::move(bound_listeners[0]);
}
return BoundSlotListener<Output>(
[bound_listeners(std::move(bound_listeners))](
ConstFramePtr frame, Output* output) -> absl::Status {
for (const auto& listener : bound_listeners) {
RETURN_IF_ERROR(listener(frame, output));
}
return absl::OkStatus();
});
}
std::vector<std::unique_ptr<SlotListener<Output>>> listeners_;
};
}
#endif | #include "arolla/io/chain_slot_listener.h"
#include <cstdint>
#include <functional>
#include <memory>
#include <utility>
#include <vector>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/status/status.h"
#include "absl/status/status_matchers.h"
#include "absl/status/statusor.h"
#include "arolla/io/accessors_slot_listener.h"
#include "arolla/io/slot_listener.h"
#include "arolla/memory/frame.h"
#include "arolla/memory/memory_allocation.h"
#include "arolla/qtype/qtype.h"
#include "arolla/qtype/qtype_traits.h"
namespace arolla {
namespace {
using ::absl_testing::StatusIs;
using ::testing::ElementsAre;
using ::testing::Eq;
using ::testing::HasSubstr;
struct TestStruct {
int a;
double b;
int c;
};
TEST(SlotListenerTest, ChainSlotListenerErrors) {
ASSERT_OK_AND_ASSIGN(auto loader1,
CreateAccessorsSlotListener<TestStruct>(
"a", [](int a, TestStruct* s) { s->a = a; },
"b", [](double b, TestStruct* s) { s->b = b; }));
ASSERT_OK_AND_ASSIGN(
auto loader2,
CreateAccessorsSlotListener<TestStruct>(
"a", [](double b, TestStruct* s) { s->b = b; }));
ASSERT_OK_AND_ASSIGN(auto chain_listener,
ChainSlotListener<TestStruct>::Build(
std::move(loader1), std::move(loader2)));
FrameLayout::Builder layout_builder;
auto a_slot = layout_builder.AddSlot<int>();
auto b_slot = layout_builder.AddSlot<double>();
FrameLayout memory_layout = std::move(layout_builder).Build();
EXPECT_THAT(
chain_listener->Bind({
{"a", TypedSlot::FromSlot(a_slot)},
{"b", TypedSlot::FromSlot(b_slot)},
}),
StatusIs(absl::StatusCode::kFailedPrecondition,
HasSubstr("slots/types match errors:slot types mismatch: "
"a{expected:FLOAT64, actual:INT32}")));
}
TEST(SlotListenerTest, ChainSlotListener) {
auto create_listener1 = []() {
return CreateAccessorsSlotListener<TestStruct>(
"a", [](int a, TestStruct* s) { s->a = a; })
.value();
};
auto create_listener2 = []() {
return CreateAccessorsSlotListener<TestStruct>(
"b", [](double b, TestStruct* s) { s->b = b; },
"a", [](int a, TestStruct* s) { s->c = a; })
.value();
};
ASSERT_OK_AND_ASSIGN(auto chain_listener1,
ChainSlotListener<TestStruct>::Build(
create_listener1(), create_listener2()));
std::vector<std::unique_ptr<SlotListener<TestStruct>>> listeners;
listeners.push_back(create_listener1());
listeners.push_back(create_listener2());
ASSERT_OK_AND_ASSIGN(
auto chain_listener2,
ChainSlotListener<TestStruct>::Build(std::move(listeners)));
FrameLayout::Builder layout_builder;
auto a_slot = layout_builder.AddSlot<int>();
auto b_slot = layout_builder.AddSlot<double>();
FrameLayout memory_layout = std::move(layout_builder).Build();
for (auto chain_listener : {chain_listener1.get(), chain_listener2.get()}) {
EXPECT_THAT(chain_listener->GetQTypeOf("a"), Eq(GetQType<int>()));
EXPECT_THAT(chain_listener->GetQTypeOf("b"), Eq(GetQType<double>()));
EXPECT_THAT(chain_listener->SuggestAvailableNames(),
ElementsAre("a", "b", "a"));
ASSERT_OK_AND_ASSIGN(BoundSlotListener<TestStruct> bound_chain_listener,
chain_listener->Bind({
{"a", TypedSlot::FromSlot(a_slot)},
{"b", TypedSlot::FromSlot(b_slot)},
}));
MemoryAllocation alloc(&memory_layout);
alloc.frame().Set(a_slot, 5);
alloc.frame().Set(b_slot, 3.5);
TestStruct s;
ASSERT_OK(bound_chain_listener(alloc.frame(), &s));
EXPECT_EQ(s.a, 5);
EXPECT_EQ(s.b, 3.5);
EXPECT_EQ(s.c, 5);
}
}
}
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/io/chain_slot_listener.h | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/io/chain_slot_listener_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
4649cce1-3b5d-4cf7-8780-4bdd02283558 | cpp | tensorflow/tensorflow | rpc_helper | third_party/xla/xla/python/ifrt_proxy/client/rpc_helper.cc | third_party/xla/xla/python/ifrt_proxy/client/rpc_helper_test.cc | #include "xla/python/ifrt_proxy/client/rpc_helper.h"
#include <array>
#include <cstdint>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/base/thread_annotations.h"
#include "absl/functional/bind_front.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/synchronization/mutex.h"
#include "absl/time/clock.h"
#include "absl/time/time.h"
#include "xla/python/ifrt/future.h"
#include "xla/python/ifrt_proxy/client/client_session.h"
#include "xla/python/ifrt_proxy/common/ifrt_service.pb.h"
#include "xla/python/ifrt_proxy/common/test_utils.h"
#include "xla/python/ifrt_proxy/common/types.h"
#include "xla/tsl/profiler/utils/xplane_schema.h"
#include "tsl/platform/env.h"
#include "tsl/platform/random.h"
#include "tsl/platform/status_to_from_proto.h"
#include "tsl/platform/threadpool.h"
#include "tsl/profiler/lib/traceme.h"
#include "tsl/profiler/lib/traceme_encode.h"
namespace xla {
namespace ifrt {
namespace proxy {
namespace {
using ::tsl::profiler::XFlow;
constexpr absl::Duration kPeriodicFlushInterval = absl::Microseconds(50);
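// Wraps a random XFlow id so that the TraceMe spans for a request and its
// asynchronous response can be connected as a single flow in the profiler.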
class XFlowHelper {
public:
explicit XFlowHelper(absl::string_view name)
      : xflow_id_(tsl::random::New64() >> 8),
name_(name) {}
typedef enum { kSend, kRecv, kRecvSend } Direction;
template <Direction D>
tsl::profiler::TraceMe Span() const {
return tsl::profiler::TraceMe([xflow_id = xflow_id_, name = name_] {
return Encode<D>(xflow_id, name);
});
}
template <Direction D>
void InstantActivity() const {
return tsl::profiler::TraceMe::InstantActivity(
[xflow_id = xflow_id_, name = name_] {
return Encode<D>(xflow_id, name);
});
}
private:
template <Direction D>
static std::string Encode(uint64_t xflow_id, absl::string_view name) {
static constexpr absl::string_view flow_dir_str =
D == kSend ? "send" : (D == kRecv ? "recv" : "recv_send");
const XFlow flow(xflow_id, D == kRecvSend ? XFlow::kFlowInOut
: (D == kRecv ? XFlow::kFlowIn
: XFlow::kFlowOut));
return tsl::profiler::TraceMeEncode(
name, {{"dir", flow_dir_str}, {"flow", flow.ToStatValue()}});
};
const uint64_t xflow_id_;
const absl::string_view name_;
};
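// Thread-safe accumulator for array handles whose delete/destruct RPCs are
// deferred; Consume() drains the pending handles into at most one
// DeleteArrayRequest and one DestructArrayRequest.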
class BatchedOps {
public:
using BatchOperation = RpcHelper::BatchOperation;
void Add(BatchOperation op, ArrayHandle handle) {
absl::MutexLock l(&mu_);
batched_[op].push_back(handle);
}
struct IfrtRequests {
std::unique_ptr<IfrtRequest> delete_req;
std::unique_ptr<IfrtRequest> destruct_req;
};
IfrtRequests Consume() {
IfrtRequests result;
absl::MutexLock l(&mu_);
if (!batched_[BatchOperation::kDeleteArray].empty()) {
result.delete_req = std::make_unique<IfrtRequest>();
for (const auto& arr_handle : batched_[BatchOperation::kDeleteArray]) {
result.delete_req->mutable_delete_array_request()->add_array_handle(
arr_handle.handle);
}
batched_[BatchOperation::kDeleteArray].clear();
}
if (!batched_[BatchOperation::kDestructArray].empty()) {
result.destruct_req = std::make_unique<IfrtRequest>();
for (const auto& arr_handle : batched_[BatchOperation::kDestructArray]) {
result.destruct_req->mutable_destruct_array_request()->add_array_handle(
arr_handle.handle);
}
batched_[BatchOperation::kDestructArray].clear();
}
return result;
}
private:
absl::Mutex mu_;
std::array<std::vector<ArrayHandle>, BatchOperation::kSentinelDoNotUse>
batched_ ABSL_GUARDED_BY(mu_);
};
}
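// Owns the ClientSession and decides when batched operations are sent: they
// are flushed before every immediate request and by a background thread every
// kPeriodicFlushInterval (50us), so batching never delays an unrelated RPC.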
class RpcHelper::Batcher {
public:
explicit Batcher(std::shared_ptr<ClientSession> session)
: session_(std::move(session)) {
    thread_pool_.emplace(tsl::Env::Default(), "IfrtProxyRpcHelperBatcher",
                         /*num_threads=*/1);
thread_pool_->Schedule(absl::bind_front(&Batcher::PeriodicFlusher, this));
}
Future<ClientSession::Response> Immediate(
std::unique_ptr<IfrtRequest> request) {
absl::MutexLock l(&mu_);
if (finished_) {
LOG(WARNING) << "After RpcHelper::Finish(): " << request->DebugString();
return Future<ClientSession::Response>(
absl::FailedPreconditionError("RpcHelper::Finish() already called."));
}
Flush();
return session_->Enqueue(std::move(request));
}
void Batch(BatchOperation op, ArrayHandle handle) {
batched_.Add(op, handle);
}
void Finish(absl::Status s) {
{
absl::MutexLock l(&mu_);
finished_ = true;
auto remaining = batched_.Consume();
if (remaining.delete_req != nullptr) {
LOG(WARNING) << "RpcHelper::Batch: Finish() called while there are "
"still batched delete operations";
}
if (remaining.destruct_req != nullptr) {
LOG(WARNING) << "RpcHelper::Batch: Finish() called while there are "
"still batched destruct operations";
}
}
thread_pool_.reset();
session_->Finish(s);
}
private:
void PeriodicFlusher() {
while (true) {
absl::SleepFor(kPeriodicFlushInterval);
absl::MutexLock l(&mu_);
if (finished_) {
return;
}
{
bool periodic_flush_paused = false;
TestHookCall(TestHookName::kRpcBatcherPausePeriodicFlush,
&periodic_flush_paused);
if (periodic_flush_paused) {
continue;
}
}
tsl::profiler::TraceMe traceme("proxy_periodic_flush");
Flush();
}
}
void Flush() ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
auto reqs = batched_.Consume();
if (reqs.delete_req != nullptr) {
XFlowHelper x_flow_helper("batch_delete");
auto traceme = x_flow_helper.Span<XFlowHelper::kSend>();
session_->Enqueue(std::move(reqs.delete_req))
.OnReady(
absl::bind_front(HandleBatchResponse, session_, x_flow_helper));
}
if (reqs.destruct_req != nullptr) {
XFlowHelper x_flow_helper("batch_destruct");
auto traceme = x_flow_helper.Span<XFlowHelper::kSend>();
session_->Enqueue(std::move(reqs.destruct_req))
.OnReady(
absl::bind_front(HandleBatchResponse, session_, x_flow_helper));
}
}
static void HandleBatchResponse(
std::shared_ptr<ClientSession> session, XFlowHelper x_flow_helper,
absl::StatusOr<std::shared_ptr<IfrtResponse>> r) {
if (!r.ok()) {
x_flow_helper.InstantActivity<XFlowHelper::kRecv>();
LOG(WARNING) << "Batched response from ifrt proxy server: " << r.status();
return;
}
if (r.value()->has_delete_array_response()) {
auto traceme = x_flow_helper.Span<XFlowHelper::kRecvSend>();
auto ifrt_req = std::make_unique<IfrtRequest>();
ifrt_req->mutable_check_future_request()->set_future_handle(
r.value()->delete_array_response().deletion_future_handle());
session->Enqueue(std::move(ifrt_req))
.OnReady(
absl::bind_front(HandleBatchResponse, session, x_flow_helper));
} else if (r.value()->has_destruct_array_response() ||
r.value()->has_check_future_response()) {
x_flow_helper.InstantActivity<XFlowHelper::kRecv>();
} else {
LOG(ERROR) << "Unrecognized response from server for batched request: "
<< (*r)->DebugString();
}
}
const std::shared_ptr<ClientSession> session_;
BatchedOps batched_;
absl::Mutex mu_;
bool finished_ ABSL_GUARDED_BY(mu_) = false;
std::optional<tsl::thread::ThreadPool> thread_pool_;
};
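// Generic RPC plumbing: wraps a typed request into an IfrtRequest, enqueues
// it, and converts the IfrtResponse into a Future of the typed response,
// mapping transport failures to UNAVAILABLE and malformed or mismatched
// responses to INTERNAL errors.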
template <typename Req, typename Resp>
Future<std::shared_ptr<Resp>> DoRpc(RpcHelper::Batcher* batcher,
void (IfrtRequest::*set_req)(Req*),
Resp* (IfrtResponse::*get_resp)(),
bool (IfrtResponse::*has_resp)() const,
std::unique_ptr<Req> req,
absl::string_view profiling_name) {
auto ifrt_req = std::make_unique<IfrtRequest>();
(ifrt_req.get()->*set_req)(req.release());
XFlowHelper x_flow_helper(profiling_name);
auto traceme = x_flow_helper.Span<XFlowHelper::kSend>();
auto promise = Future<std::shared_ptr<Resp>>::CreatePromise();
auto on_ready = [promise, has_resp, get_resp, x_flow_helper](
absl::StatusOr<std::shared_ptr<IfrtResponse>> r) mutable {
auto traceme = x_flow_helper.Span<XFlowHelper::kRecv>();
if (!r.ok()) {
LOG_EVERY_N_SEC(ERROR, 10)
<< "Connection to IFRT proxy server was terminated: " << r.status();
promise.Set(absl::UnavailableError(
absl::StrCat("Connection to IFRT proxy server was terminated: ",
r.status().ToString())));
return;
}
std::shared_ptr<IfrtResponse> response = *std::move(r);
if (!response->has_response_metadata()) {
promise.Set(absl::InternalError(
absl::StrCat("IFRT server sent a message without metadata: ",
response->DebugString())));
return;
}
const absl::Status metadata_status =
tsl::StatusFromProto(response->response_metadata().status());
const bool has_expected_response = (response.get()->*has_resp)();
const auto has_some_response =
response->response_case() != IfrtResponse::RESPONSE_NOT_SET;
if (metadata_status.ok() && !has_some_response) {
promise.Set(absl::InternalError(
absl::StrCat("OK response with no actual response set: ",
response->DebugString())));
return;
}
if (!has_expected_response && has_some_response) {
promise.Set(absl::InternalError(absl::StrCat(
"Response with wrong type (expected ", Resp::GetDescriptor()->name(),
"): ", response->DebugString())));
return;
}
if (!has_some_response) {
promise.Set(metadata_status);
} else {
promise.Set(
std::make_shared<Resp>(*std::move((response.get()->*get_resp)())));
}
};
batcher->Immediate(std::move(ifrt_req)).OnReady(on_ready);
return Future<std::shared_ptr<Resp>>(promise);
}
#define RPC(METHOD, PROPERTY) \
RpcHelper::ResponseFuture<METHOD##Response> RpcHelper::METHOD( \
std::unique_ptr<METHOD##Request> req) { \
return DoRpc( \
batcher_.get(), &IfrtRequest::set_allocated_##PROPERTY##_request, \
&IfrtResponse::mutable_##PROPERTY##_response, \
&IfrtResponse::has_##PROPERTY##_response, std::move(req), #PROPERTY); \
}
RPC(Init, init);
RPC(GetDefaultDeviceAssignment, get_default_device_assignment);
RPC(CheckFuture, check_future);
RPC(CheckValueReady, check_value_ready);
RPC(MakeArrayFromHostBuffer, make_array_from_host_buffer);
RPC(AssembleArrayFromSingleDeviceArrays,
assemble_array_from_single_device_arrays);
RPC(RemapArrays, remap_arrays);
RPC(DisassembleIntoSingleDeviceArrays, disassemble_into_single_device_arrays);
RPC(CopyToHostBuffer, copy_to_host_buffer);
RPC(IsArrayDeleted, is_array_deleted);
RPC(DestructArray, destruct_array);
RPC(CopyArrays, copy_arrays);
RPC(Reshard, reshard);
RPC(FullyReplicatedShard, fully_replicated_shard);
RPC(DeleteArray, delete_array);
RPC(Compile, compile);
RPC(LoadedExecutableMetadata, loaded_executable_metadata);
RPC(LoadedExecutableExecute, loaded_executable_execute);
RPC(LoadedExecutableDelete, loaded_executable_delete);
RPC(LoadedExecutableIsDeleted, loaded_executable_is_deleted);
RPC(LoadedExecutableDestruct, loaded_executable_destruct);
RPC(LoadedHostCallbackPoll, loaded_host_callback_poll);
RPC(LoadedHostCallbackReturn, loaded_host_callback_return);
Future<> RpcHelper::CheckFuture(uint64_t handle) {
auto req = std::make_unique<CheckFutureRequest>();
req->set_future_handle(handle);
auto promise = Future<>::CreatePromise();
CheckFuture(std::move(req))
.OnReady(
[promise](absl::StatusOr<std::shared_ptr<CheckFutureResponse>>
response) mutable { promise.Set(response.status()); });
return Future<>(std::move(promise));
}
RpcHelper::RpcHelper(IfrtProxyVersion version,
std::shared_ptr<ClientSession> session)
: batcher_(std::make_unique<Batcher>(std::move(session))),
version_(std::move(version)) {}
RpcHelper::~RpcHelper() { Disconnect(); }
void RpcHelper::Batch(BatchOperation op, ArrayHandle handle) {
return batcher_->Batch(op, handle);
}
void RpcHelper::Disconnect() {
batcher_->Finish(absl::CancelledError("Disconnected by client"));
}
}
}
} | #include "xla/python/ifrt_proxy/client/rpc_helper.h"
#include <memory>
#include <utility>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/synchronization/mutex.h"
#include "absl/time/time.h"
#include "xla/python/ifrt/future.h"
#include "xla/python/ifrt_proxy/client/client_session.h"
#include "xla/python/ifrt_proxy/client/mock_client_session.h"
#include "xla/python/ifrt_proxy/client/version.h"
#include "xla/python/ifrt_proxy/common/ifrt_service.pb.h"
#include "xla/python/ifrt_proxy/common/test_utils.h"
#include "xla/python/ifrt_proxy/common/types.h"
#include "xla/python/ifrt_proxy/common/types.pb.h"
#include "tsl/platform/test.h"
using ::testing::_;
using ::testing::UnorderedElementsAre;
namespace xla {
namespace ifrt {
namespace proxy {
namespace {
constexpr absl::Duration kMaxFlushTimeout = absl::Seconds(10);
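// Installs a test hook that keeps the batcher's periodic flusher idle, then
// blocks until the flusher has observed the hook at least once, so a test can
// queue batched operations without a flush racing in.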
void PausePeriodicFlushes() {
struct AtomicBool {
absl::Mutex mu;
bool b = false;
};
auto called_at_least_once = std::make_shared<AtomicBool>();
auto periodic_flusher_pause_hook = [called_at_least_once](bool* paused) {
*paused = true;
absl::MutexLock l(&called_at_least_once->mu);
called_at_least_once->b = true;
};
TestHookSet(TestHookName::kRpcBatcherPausePeriodicFlush,
std::move(periodic_flusher_pause_hook));
absl::MutexLock l(&called_at_least_once->mu);
CHECK(called_at_least_once->mu.AwaitWithTimeout(
absl::Condition(&called_at_least_once->b), kMaxFlushTimeout));
}
void ResumePeriodicFlushes() {
TestHookClear(TestHookName::kRpcBatcherPausePeriodicFlush);
}
class RpcHelperTest : public ::testing::Test {
public:
RpcHelperTest() : requests_(kMaxFlushTimeout) {
session_ = std::make_shared<MockClientSession>();
IfrtProxyVersion version;
version.set_protocol_version(kClientMaxVersion);
rpc_helper_ = std::make_shared<RpcHelper>(version, session_);
EXPECT_CALL(*session_, Finish(_)).Times(1);
ON_CALL(*session_, Enqueue)
.WillByDefault([this](std::unique_ptr<IfrtRequest> req) {
requests_.Push(std::move(req));
return Future<ClientSession::Response>(
absl::InternalError("Fake error response"));
});
}
std::shared_ptr<MockClientSession> session_;
std::shared_ptr<RpcHelper> rpc_helper_;
TestQueue<std::unique_ptr<IfrtRequest>> requests_;
};
TEST_F(RpcHelperTest, BatchedPeriodicFlush) {
PausePeriodicFlushes();
rpc_helper_->Batch(RpcHelper::kDestructArray, ArrayHandle{1});
rpc_helper_->Batch(RpcHelper::kDeleteArray, ArrayHandle{2});
rpc_helper_->Batch(RpcHelper::kDestructArray, ArrayHandle{3});
rpc_helper_->Batch(RpcHelper::kDeleteArray, ArrayHandle{4});
rpc_helper_->Batch(RpcHelper::kDestructArray, ArrayHandle{9});
rpc_helper_->Batch(RpcHelper::kDeleteArray, ArrayHandle{8});
rpc_helper_->Batch(RpcHelper::kDestructArray, ArrayHandle{7});
rpc_helper_->Batch(RpcHelper::kDeleteArray, ArrayHandle{6});
ResumePeriodicFlushes();
auto delete_req = requests_.Pop();
auto destruct_req = requests_.Pop();
if (destruct_req->has_delete_array_request()) {
destruct_req.swap(delete_req);
}
EXPECT_THAT(destruct_req->destruct_array_request().array_handle(),
UnorderedElementsAre(1, 3, 9, 7));
EXPECT_THAT(delete_req->delete_array_request().array_handle(),
UnorderedElementsAre(2, 4, 8, 6));
}
TEST_F(RpcHelperTest, BatchedNoPeriodicFlush) {
PausePeriodicFlushes();
rpc_helper_->Batch(RpcHelper::kDestructArray, ArrayHandle{1});
rpc_helper_->Batch(RpcHelper::kDeleteArray, ArrayHandle{2});
rpc_helper_->Batch(RpcHelper::kDestructArray, ArrayHandle{3});
rpc_helper_->Batch(RpcHelper::kDeleteArray, ArrayHandle{4});
rpc_helper_->Batch(RpcHelper::kDestructArray, ArrayHandle{9});
rpc_helper_->Batch(RpcHelper::kDeleteArray, ArrayHandle{8});
rpc_helper_->Batch(RpcHelper::kDestructArray, ArrayHandle{7});
rpc_helper_->Batch(RpcHelper::kDeleteArray, ArrayHandle{6});
{
auto dummy_request = std::make_unique<CheckFutureRequest>();
dummy_request->set_future_handle(1);
rpc_helper_->CheckFuture(std::move(dummy_request));
requests_.AllowNonEmptyDestruction(true);
}
auto delete_req = requests_.Pop();
auto destruct_req = requests_.Pop();
if (destruct_req->has_delete_array_request()) {
destruct_req.swap(delete_req);
}
EXPECT_THAT(destruct_req->destruct_array_request().array_handle(),
UnorderedElementsAre(1, 3, 9, 7));
EXPECT_THAT(delete_req->delete_array_request().array_handle(),
UnorderedElementsAre(2, 4, 8, 6));
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/python/ifrt_proxy/client/rpc_helper.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/python/ifrt_proxy/client/rpc_helper_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
1f1605fc-b75a-439b-9e49-349ab0cdafed | cpp | tensorflow/tensorflow | rng_util | tensorflow/lite/kernels/rng_util.cc | tensorflow/lite/kernels/rng_util_test.cc | #include "tensorflow/lite/kernels/rng_util.h"
#include <array>
#include <cstdint>
namespace tflite {
namespace rng {
static constexpr uint32_t kThreefryParity = 0x1BD11BDA;
static constexpr uint64_t kPhiloxM4x32A = 0xD2511F53;
static constexpr uint64_t kPhiloxM4x32B = 0xCD9E8D57;
static constexpr uint32_t kPhiloxW32A = 0x9E3779B9;
static constexpr uint32_t kPhiloxW32B = 0xBB67AE85;
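// Threefry-2x32 with 20 rounds: each apply_round() call performs four
// add/rotate/xor rounds using one row of the rotation table, followed by an
// injection of two key-schedule words (cycling through key_0/key_1/key_2)
// plus the round-group counter, per Threefry from Salmon et al., SC'11.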
std::array<uint32_t, 2> Threefry2x32(uint32_t key_0, uint32_t key_1,
std::array<uint32_t, 2> ctr) {
constexpr std::array<std::array<int, 4>, 2> rotations{
std::array<int, 4>{13, 15, 26, 6}, std::array<int, 4>{17, 29, 16, 24}};
uint32_t key_2 = key_0 ^ key_1 ^ kThreefryParity;
ctr[0] += key_0;
ctr[1] += key_1;
auto apply_round = [&](int r, uint32_t ks0, uint32_t ks1, int b) {
for (int rot : rotations[r]) {
ctr[0] += ctr[1];
ctr[1] = (ctr[1] << rot) | (ctr[1] >> (32 - rot));
ctr[1] ^= ctr[0];
}
ctr[0] += ks0;
ctr[1] += ks1 + b;
};
apply_round(0, key_1, key_2, 1);
apply_round(1, key_2, key_0, 2);
apply_round(0, key_0, key_1, 3);
apply_round(1, key_1, key_2, 4);
apply_round(0, key_2, key_0, 5);
return ctr;
}
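// Philox-4x32 with 10 rounds: each round multiplies the even counter words by
// kPhiloxM4x32A/B (32x32->64), mixes the high halves back into the odd words
// with XORs against the keys, and advances the two key words by the Weyl
// constants kPhiloxW32A/B (also from Salmon et al., SC'11).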
std::array<uint32_t, 4> Philox4x32(uint32_t key_0, uint32_t key_1,
std::array<uint32_t, 4> ctr) {
struct u32pair {
uint32_t low;
uint32_t high;
};
union prod {
u32pair hilo;
uint64_t prod;
};
for (int i = 0; i < 10; ++i) {
prod p0, p1;
p0.prod = kPhiloxM4x32A * static_cast<uint64_t>(ctr[0]);
p1.prod = kPhiloxM4x32B * static_cast<uint64_t>(ctr[2]);
ctr = {{p1.hilo.high ^ ctr[1] ^ key_0, p1.hilo.low,
p0.hilo.high ^ ctr[3] ^ key_1, p0.hilo.low}};
key_0 += kPhiloxW32A;
key_1 += kPhiloxW32B;
}
return ctr;
}
}
} | #include "tensorflow/lite/kernels/rng_util.h"
#include <array>
#include <cstdint>
#include <limits>
#include <gtest/gtest.h>
namespace tflite {
namespace {
using tflite::rng::Philox4x32;
using tflite::rng::Threefry2x32;
TEST(RngUtilTest, Threefry2x32Test) {
std::array<uint32_t, 2> results = Threefry2x32(0, 0, {0, 0});
std::array<uint32_t, 2> expected = {0x6B200159u, 0x99BA4EFEu};
ASSERT_EQ(results, expected);
uint32_t u32_max = std::numeric_limits<uint32_t>::max();
results = Threefry2x32(u32_max, u32_max, {u32_max, u32_max});
expected = {0x1CB996FCu, 0xBB002BE7u};
ASSERT_EQ(results, expected);
results = Threefry2x32(0x13198A2Eu, 0x03707344u, {0x243F6A88u, 0x85A308D3u});
expected = {0xC4923A9Cu, 0x483DF7A0u};
ASSERT_EQ(results, expected);
}
TEST(RngUtilTest, Philox4x32Test) {
std::array<uint32_t, 4> results = Philox4x32(0, 0, {0, 0, 0, 0});
std::array<uint32_t, 4> expected = {0x6627E8D5u, 0xE169C58Du, 0xBC57AC4Cu,
0x9B00DBD8u};
ASSERT_EQ(results, expected);
uint32_t u32_max = std::numeric_limits<uint32_t>::max();
results = Philox4x32(u32_max, u32_max, {u32_max, u32_max, u32_max, u32_max});
expected = {0x408F276Du, 0x41C83B0Eu, 0xA20BC7C6u, 0x6D5451FDu};
ASSERT_EQ(results, expected);
results = Philox4x32(0xA4093822u, 0x299F31D0u,
{0x243F6A88u, 0x85A308D3u, 0x13198A2Eu, 0x03707344u});
expected = {0xD16CFE09u, 0x94FDCCEBu, 0x5001E420u, 0x24126EA1u};
ASSERT_EQ(results, expected);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/rng_util.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/rng_util_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
39095eb1-c166-4ed7-a88c-6c222886e4c3 | cpp | google/arolla | dense_ops | arolla/dense_array/ops/dense_ops.h | arolla/dense_array/ops/dense_ops_test.cc | #ifndef AROLLA_DENSE_ARRAY_OPS_DENSE_OPS_H_
#define AROLLA_DENSE_ARRAY_OPS_DENSE_OPS_H_
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <type_traits>
#include <utility>
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "arolla/dense_array/bitmap.h"
#include "arolla/dense_array/dense_array.h"
#include "arolla/dense_array/ops/universal_dense_op.h"
#include "arolla/dense_array/ops/util.h"
#include "arolla/memory/buffer.h"
#include "arolla/memory/optional_value.h"
#include "arolla/memory/raw_buffer_factory.h"
#include "arolla/util/meta.h"
#include "arolla/util/unit.h"
#include "arolla/util/view_types.h"
namespace arolla {
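// Flags that relax the contract of a pointwise DenseArray operation so that a
// cheaper implementation can be chosen:
//   kRunOnMissing     - fn may be evaluated on missing slots (the results are
//                       masked out by the presence bitmap anyway).
//   kNoBitmapOffset   - all inputs are guaranteed to have
//                       bitmap_bit_offset == 0.
//   kNoSizeValidation - the caller guarantees equal argument sizes;
//                       multi-argument ops then return DenseArray<T> directly
//                       instead of absl::StatusOr<DenseArray<T>>.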
struct DenseOpFlags {
static constexpr int kRunOnMissing = 1 << 0;
static constexpr int kNoBitmapOffset = 1 << 1;
static constexpr int kNoSizeValidation = 1 << 2;
};
namespace dense_ops_internal {
template <class Fn>
struct SpanOp {
Fn fn;
template <class Res, class... Ts>
void operator()(absl::Span<Res> res, absl::Span<const Ts>... args) const {
for (size_t i = 0; i < res.size(); ++i) {
res[i] = fn(args[i]...);
}
}
};
template <class ResT, class SpanOpT>
class UnaryOpImpl {
public:
explicit UnaryOpImpl(
SpanOpT op, RawBufferFactory* buffer_factory = GetHeapBufferFactory())
: op_(op), buffer_factory_(buffer_factory) {}
template <class ArgT>
DenseArray<ResT> operator()(const DenseArray<ArgT>& arg) const {
typename Buffer<ResT>::Builder builder(arg.size(), buffer_factory_);
op_(builder.GetMutableSpan(), arg.values.span());
return {std::move(builder).Build(), arg.bitmap, arg.bitmap_bit_offset};
}
private:
SpanOpT op_;
RawBufferFactory* buffer_factory_;
};
template <class ResT, bool NoBitmapOffset, class SpanOpT>
class BinaryOpImpl {
public:
explicit BinaryOpImpl(
SpanOpT op, RawBufferFactory* buffer_factory = GetHeapBufferFactory())
: op_(op), buffer_factory_(buffer_factory) {}
template <class Arg1T, class Arg2T>
DenseArray<ResT> operator()(const DenseArray<Arg1T>& arg1,
const DenseArray<Arg2T>& arg2) const {
DCHECK_EQ(arg1.size(), arg2.size());
DCHECK(!NoBitmapOffset ||
(arg1.bitmap_bit_offset == 0 && arg2.bitmap_bit_offset == 0));
typename Buffer<ResT>::Builder builder(arg1.size(), buffer_factory_);
op_(builder.GetMutableSpan(), arg1.values.span(), arg2.values.span());
if (arg2.bitmap.empty()) {
return {std::move(builder).Build(), arg1.bitmap, arg1.bitmap_bit_offset};
} else if (arg1.bitmap.empty()) {
return {std::move(builder).Build(), arg2.bitmap, arg2.bitmap_bit_offset};
} else {
bitmap::RawBuilder bitmap_builder(
std::min(arg1.bitmap.size(), arg2.bitmap.size()), buffer_factory_);
int res_bit_offset = 0;
if constexpr (NoBitmapOffset) {
bitmap::Intersect(arg1.bitmap, arg2.bitmap,
bitmap_builder.GetMutableSpan());
} else {
res_bit_offset =
std::min(arg1.bitmap_bit_offset, arg2.bitmap_bit_offset);
bitmap::Intersect(arg1.bitmap, arg2.bitmap, arg1.bitmap_bit_offset,
arg2.bitmap_bit_offset,
bitmap_builder.GetMutableSpan());
}
return {std::move(builder).Build(), std::move(bitmap_builder).Build(),
res_bit_offset};
}
}
private:
SpanOpT op_;
RawBufferFactory* buffer_factory_;
};
template <class ResT, class SpanOpT>
class SimpleOpImpl {
public:
explicit SimpleOpImpl(
SpanOpT op, RawBufferFactory* buffer_factory = GetHeapBufferFactory())
: op_(op), buffer_factory_(buffer_factory) {}
template <class Arg1T, class... ArgsT>
DenseArray<ResT> operator()(const DenseArray<Arg1T>& arg1,
const DenseArray<ArgsT>&... args) const {
DCHECK(((arg1.size() == args.size()) && ... && true));
DCHECK(arg1.bitmap_bit_offset == 0 &&
((args.bitmap_bit_offset == 0) && ... && true));
typename Buffer<ResT>::Builder builder(arg1.size(), buffer_factory_);
op_(builder.GetMutableSpan(), arg1.values.span(), args.values.span()...);
if ((args.bitmap.empty() && ... && true)) {
return {std::move(builder).Build(), arg1.bitmap};
} else {
size_t bitmap_size = bitmap::BitmapSize(arg1.size());
bitmap::RawBuilder bitmap_builder(bitmap_size, buffer_factory_);
bitmap::Word* bitmap = bitmap_builder.GetMutableSpan().begin();
bool initialized = false;
auto intersect_fn = [&](const bitmap::Bitmap& b) {
if (b.empty()) return;
const bitmap::Word* ptr = b.begin();
if (initialized) {
for (int64_t i = 0; i < bitmap_size; ++i) {
bitmap[i] &= ptr[i];
}
} else {
std::memcpy(bitmap, ptr, bitmap_size * sizeof(bitmap::Word));
initialized = true;
}
};
intersect_fn(arg1.bitmap);
(intersect_fn(args.bitmap), ...);
return {std::move(builder).Build(), std::move(bitmap_builder).Build()};
}
}
private:
SpanOpT op_;
RawBufferFactory* buffer_factory_;
};
template <class ResT, class Op>
class OpWithSizeValidation {
public:
explicit OpWithSizeValidation(Op op) : op_(std::move(op)) {}
template <class... Args>
absl::StatusOr<DenseArray<ResT>> operator()(const Args&... args) const {
if (!AreSizesEqual(args...)) {
return SizeMismatchError({args.size()...});
}
return op_(args...);
}
private:
template <class A, class... As>
static bool AreSizesEqual(const A& a, const As&... as) {
return ((a.size() == as.size()) && ...);
}
Op op_;
};
template <class TypeList>
struct ArgsAnalyzer {};
template <class... Ts>
struct ArgsAnalyzer<meta::type_list<Ts...>> {
static constexpr bool kHasOptionalArg =
(meta::is_wrapped_with<OptionalValue, Ts>::value || ...);
static constexpr bool kHasStringArg =
(std::is_same_v<view_type_t<Ts>, absl::string_view> || ...);
static constexpr bool kHasUnitArg =
(std::is_same_v<view_type_t<Ts>, Unit> || ...);
};
template <class Fn, int flags>
struct ImplChooser {
static constexpr bool kRunOnMissing = flags & DenseOpFlags::kRunOnMissing;
static constexpr bool kNoBitmapOffset = flags & DenseOpFlags::kNoBitmapOffset;
static constexpr bool kNoSizeValidation =
flags & DenseOpFlags::kNoSizeValidation;
static constexpr int kArgCount = meta::function_traits<Fn>::arity;
using args = ArgsAnalyzer<typename meta::function_traits<Fn>::arg_types>;
using fn_return_t = typename meta::function_traits<Fn>::return_type;
static constexpr bool kComplicatedFn =
meta::is_wrapped_with<absl::StatusOr, fn_return_t>::value ||
meta::is_wrapped_with<OptionalValue, fn_return_t>::value ||
std::is_same_v<view_type_t<fn_return_t>, absl::string_view> ||
args::kHasOptionalArg || args::kHasStringArg || args::kHasUnitArg ||
!(flags & DenseOpFlags::kRunOnMissing);
static constexpr bool kWithSizeValidationOp =
!kNoSizeValidation && kArgCount > 1;
static constexpr bool kCanUseUnaryOp = kArgCount == 1 && !kComplicatedFn;
static constexpr bool kCanUseBinaryOp =
kArgCount == 2 && !kComplicatedFn && !kWithSizeValidationOp;
static constexpr bool kCanUseSimpleOp = kArgCount > 2 && !kComplicatedFn &&
kNoBitmapOffset &&
!kWithSizeValidationOp;
};
template <class Fn, class ResT, int flags, class = void>
struct ImplSwitcher {
using Chooser = ImplChooser<Fn, flags>;
using Impl = UniversalDenseOp<Fn, ResT, !Chooser::kRunOnMissing,
Chooser::kNoBitmapOffset>;
static Impl Create(Fn fn, RawBufferFactory* buffer_factory) {
return Impl(fn, buffer_factory);
}
};
template <class Fn, class ResT, int flags>
struct ImplSwitcher<
Fn, ResT, flags,
std::enable_if_t<ImplChooser<Fn, flags>::kCanUseUnaryOp, void>> {
using Impl = UnaryOpImpl<ResT, SpanOp<Fn>>;
static Impl Create(Fn fn, RawBufferFactory* buffer_factory) {
return Impl({fn}, buffer_factory);
}
};
template <class Fn, class ResT, int flags>
struct ImplSwitcher<
Fn, ResT, flags,
std::enable_if_t<ImplChooser<Fn, flags>::kCanUseBinaryOp, void>> {
using Chooser = ImplChooser<Fn, flags>;
using Impl = BinaryOpImpl<ResT, Chooser::kNoBitmapOffset, SpanOp<Fn>>;
static Impl Create(Fn fn, RawBufferFactory* buffer_factory) {
return Impl({fn}, buffer_factory);
}
};
template <class Fn, class ResT, int flags>
struct ImplSwitcher<
Fn, ResT, flags,
std::enable_if_t<ImplChooser<Fn, flags>::kCanUseSimpleOp, void>> {
using Chooser = ImplChooser<Fn, flags>;
using Impl = SimpleOpImpl<ResT, SpanOp<Fn>>;
static Impl Create(Fn fn, RawBufferFactory* buffer_factory) {
return Impl({fn}, buffer_factory);
}
};
template <class Fn, class ResT, int flags>
struct ImplSwitcher<
Fn, ResT, flags,
std::enable_if_t<ImplChooser<Fn, flags>::kWithSizeValidationOp, void>> {
using BaseSwitcher =
ImplSwitcher<Fn, ResT, flags | DenseOpFlags::kNoSizeValidation>;
using Impl = OpWithSizeValidation<ResT, typename BaseSwitcher::Impl>;
template <class... Args>
static Impl Create(Args&&... args) {
return Impl(BaseSwitcher::Create(args...));
}
};
template <class Fn>
using result_base_t = strip_optional_t<meta::strip_template_t<
absl::StatusOr, typename meta::function_traits<Fn>::return_type>>;
}
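// DenseOp resolves to the cheapest implementation for a pointwise functor
// based on its signature and the flags above (a plain span loop when
// possible, UniversalDenseOp for optional/string/Unit arguments or StatusOr
// results). A minimal usage sketch (assuming arr1 and arr2 are equally sized
// DenseArray<int>; with default flags a binary op is size-validated):
//
//   auto op = CreateDenseOp([](int a, int b) { return a + b; });
//   absl::StatusOr<DenseArray<int>> res = op(arr1, arr2);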
template <class Fn, class ResT = dense_ops_internal::result_base_t<Fn>,
int flags = 0>
using DenseOp =
typename dense_ops_internal::ImplSwitcher<Fn, ResT, flags>::Impl;
template <class Fn, class ResT = dense_ops_internal::result_base_t<Fn>>
DenseOp<Fn, ResT> CreateDenseOp(
Fn fn, RawBufferFactory* buf_factory = GetHeapBufferFactory()) {
return dense_ops_internal::ImplSwitcher<Fn, ResT, 0>::Create(fn, buf_factory);
}
template <int flags, class Fn,
class ResT = dense_ops_internal::result_base_t<Fn>>
DenseOp<Fn, ResT, flags> CreateDenseOp(
Fn fn, RawBufferFactory* buf_factory = GetHeapBufferFactory()) {
return dense_ops_internal::ImplSwitcher<Fn, ResT, flags>::Create(fn,
buf_factory);
}
template <class ResT, class SpanOpT>
auto CreateDenseUnaryOpFromSpanOp(
SpanOpT op, RawBufferFactory* buf_factory = GetHeapBufferFactory()) {
return dense_ops_internal::UnaryOpImpl<ResT, SpanOpT>(op, buf_factory);
}
template <class ResT, class SpanOpT>
auto CreateDenseBinaryOpFromSpanOp(
SpanOpT op, RawBufferFactory* buf_factory = GetHeapBufferFactory()) {
using ImplBase = dense_ops_internal::BinaryOpImpl<ResT, false, SpanOpT>;
using Impl = dense_ops_internal::OpWithSizeValidation<ResT, ImplBase>;
return Impl(ImplBase(op, buf_factory));
}
template <class ResT, int flags, class SpanOpT>
auto CreateDenseBinaryOpFromSpanOp(
SpanOpT op, RawBufferFactory* buf_factory = GetHeapBufferFactory()) {
static constexpr bool kNoBitmapOffset = flags & DenseOpFlags::kNoBitmapOffset;
static constexpr bool kNoSizeValidation =
flags & DenseOpFlags::kNoSizeValidation;
using ImplBase =
dense_ops_internal::BinaryOpImpl<ResT, kNoBitmapOffset, SpanOpT>;
if constexpr (kNoSizeValidation) {
return ImplBase(op, buf_factory);
} else {
using Impl = dense_ops_internal::OpWithSizeValidation<ResT, ImplBase>;
return Impl(ImplBase(op, buf_factory));
}
}
template <class Fn, class T, class... As>
absl::Status DenseArraysForEach(Fn&& fn, const DenseArray<T>& arg0,
const As&... args) {
if (!((arg0.size() == args.size()) && ...)) {
return SizeMismatchError({arg0.size(), args.size()...});
}
using fn_arg_types =
typename meta::function_traits<std::decay_t<Fn>>::arg_types;
using value_types = meta::tail_t<meta::tail_t<fn_arg_types>>;
dense_ops_internal::DenseOpsUtil<value_types>::IterateFromZero(
fn, arg0.size(), arg0, args...);
return absl::OkStatus();
}
template <class Fn, class T, class... As>
absl::Status DenseArraysForEachPresent(Fn&& fn, const DenseArray<T>& arg0,
const As&... args) {
if (!((arg0.size() == args.size()) && ...)) {
return SizeMismatchError({arg0.size(), args.size()...});
}
using fn_arg_types =
typename meta::function_traits<std::decay_t<Fn>>::arg_types;
using value_types = meta::tail_t<fn_arg_types>;
dense_ops_internal::DenseOpsUtil<value_types>::IterateFromZero(
[&fn](int64_t id, bool valid, auto&&... vals) {
if (valid) {
fn(id, std::forward<decltype(vals)>(vals)...);
}
},
arg0.size(), arg0, args...);
return absl::OkStatus();
}
}
#endif | #include "arolla/dense_array/ops/dense_ops.h"
#include <cstdint>
#include <optional>
#include <tuple>
#include <utility>
#include <vector>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/status/status_matchers.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "arolla/dense_array/bitmap.h"
#include "arolla/dense_array/dense_array.h"
#include "arolla/memory/buffer.h"
#include "arolla/memory/optional_value.h"
#include "arolla/memory/raw_buffer_factory.h"
#include "arolla/qexpr/operators/math/batch_arithmetic.h"
#include "arolla/util/bytes.h"
#include "arolla/util/text.h"
namespace arolla::testing {
using ::absl_testing::IsOkAndHolds;
using ::absl_testing::StatusIs;
using ::arolla::dense_ops_internal::BinaryOpImpl;
using ::arolla::dense_ops_internal::OpWithSizeValidation;
using ::arolla::dense_ops_internal::SimpleOpImpl;
using ::arolla::dense_ops_internal::SpanOp;
using ::arolla::dense_ops_internal::UnaryOpImpl;
using ::arolla::dense_ops_internal::UniversalDenseOp;
using ::testing::ElementsAre;
using ::testing::ElementsAreArray;
struct BoolOrFn {
bool operator()(bool a, bool b) const { return a || b; }
};
struct AddFn {
int operator()(int a, int b) const { return a + b; }
};
struct UnionAddFn {
OptionalValue<int> operator()(OptionalValue<int> a,
OptionalValue<int> b) const {
return {a.present || b.present,
(a.present ? a.value : 0) + (b.present ? b.value : 0)};
}
};
TEST(UnaryDenseOp, PlusOne) {
DenseArray<int64_t> arr = CreateDenseArray<int64_t>({1, {}, 2, 3});
auto fn = [](int64_t a) -> int { return a + 1; };
UnaryOpImpl<int, SpanOp<decltype(fn)>> op =
CreateDenseOp<DenseOpFlags::kRunOnMissing>(fn);
DenseArray<int> res = op(arr);
EXPECT_EQ(res.bitmap.span().data(), arr.bitmap.span().data());
EXPECT_THAT(op(arr), ElementsAre(2, std::nullopt, 3, 4));
}
TEST(BinaryDenseOp, AddFullDense) {
DenseArray<int> arr1{CreateBuffer<int>({1, 4, 2, 3})};
DenseArray<int> arr2 = CreateDenseArray<int>({3, 6, {}, 2});
BinaryOpImpl<int, false, SpanOp<AddFn>> op =
CreateDenseOp<DenseOpFlags::kNoSizeValidation |
DenseOpFlags::kRunOnMissing>(AddFn());
{
DenseArray<int> res = op(arr1, arr2);
EXPECT_EQ(res.bitmap.span().data(), arr2.bitmap.span().data());
EXPECT_THAT(res, ElementsAre(4, 10, std::nullopt, 5));
}
{
DenseArray<int> res = op(arr2, arr1);
EXPECT_EQ(res.bitmap.span().data(), arr2.bitmap.span().data());
EXPECT_THAT(res, ElementsAre(4, 10, std::nullopt, 5));
}
}
TEST(BinaryDenseOp, AddDenseDense) {
DenseArray<int> arr1 = CreateDenseArray<int>({1, {}, 2, 3});
DenseArray<int> arr2 = CreateDenseArray<int>({3, 6, {}, 2});
BinaryOpImpl<int, false, SpanOp<AddFn>> op =
CreateDenseOp<DenseOpFlags::kNoSizeValidation |
DenseOpFlags::kRunOnMissing>(AddFn());
EXPECT_THAT(op(arr1, arr2), ElementsAre(4, std::nullopt, std::nullopt, 5));
}
TEST(BinaryDenseOp, BoolOrDenseDenseOnUninitializedMemory) {
UnsafeArenaBufferFactory factory(1024);
Buffer<bool>::Builder builder1(bitmap::kWordBitCount * 3, &factory);
builder1.GetMutableSpan()[0] = true;
DenseArray<bool> arr1{std::move(builder1).Build(),
bitmap::Bitmap::Create({1, 0, 0})};
Buffer<bool>::Builder builder2(bitmap::kWordBitCount * 3, &factory);
builder2.GetMutableSpan()[1] = true;
DenseArray<bool> arr2{std::move(builder2).Build(),
bitmap::Bitmap::Create({2, 0, 0})};
BinaryOpImpl<bool, false, SpanOp<BoolOrFn>> op =
CreateDenseOp<DenseOpFlags::kNoSizeValidation |
DenseOpFlags::kRunOnMissing>(BoolOrFn());
EXPECT_THAT(op(arr1, arr2), ElementsAreArray(std::vector<OptionalValue<bool>>(
bitmap::kWordBitCount * 3, std::nullopt)));
}
TEST(BinaryDenseOp, EigenAdd) {
DenseArray<int> arr1 = CreateDenseArray<int>({1, {}, 2, 3});
DenseArray<int> arr2 = CreateDenseArray<int>({3, 6, {}, 2});
auto op = CreateDenseBinaryOpFromSpanOp<int>(BatchAdd<int>());
EXPECT_THAT(op(arr1, arr2),
IsOkAndHolds(ElementsAre(4, std::nullopt, std::nullopt, 5)));
EXPECT_THAT(
op(arr1, CreateDenseArray<int>({3})),
StatusIs(absl::StatusCode::kInvalidArgument,
::testing::HasSubstr("argument sizes mismatch: (4, 1)")));
}
TEST(BinaryDenseOp, FromSpanOpWithoutSizeValidation) {
DenseArray<int> arr1 = CreateDenseArray<int>({1, {}, 2, 3});
DenseArray<int> arr2 = CreateDenseArray<int>({3, 6, {}, 2});
auto op = CreateDenseBinaryOpFromSpanOp<int, DenseOpFlags::kNoSizeValidation>(
BatchAdd<int>());
EXPECT_THAT(op(arr1, arr2), ElementsAre(4, std::nullopt, std::nullopt, 5));
}
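// First case: both bitmaps start at bit offset 0, so kNoBitmapOffset applies
// and the result can reuse arr1's offset. Second case: arr1 gets a nonzero
// bit offset, which requires the generic offset-aware implementation.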
TEST(BinaryDenseOp, BitOffset) {
DenseArray<int> arr1 = CreateDenseArray<int>({1, {false, 2}, 3, 4});
DenseArray<int> arr2 = CreateDenseArray<int>({5, 5, {false, 5}, 5});
{
BinaryOpImpl<int, true, SpanOp<AddFn>> op =
CreateDenseOp<DenseOpFlags::kNoBitmapOffset |
DenseOpFlags::kNoSizeValidation |
DenseOpFlags::kRunOnMissing>(AddFn());
auto res = op(arr1, arr2);
EXPECT_EQ(res.bitmap_bit_offset, arr1.bitmap_bit_offset);
EXPECT_THAT(res, ElementsAre(6, std::nullopt, std::nullopt, 9));
}
arr1.bitmap_bit_offset = 1;
EXPECT_THAT(arr1, ElementsAre(std::nullopt, 2, 3, std::nullopt));
{
BinaryOpImpl<int, false, SpanOp<AddFn>> op =
CreateDenseOp<DenseOpFlags::kNoSizeValidation |
DenseOpFlags::kRunOnMissing>(AddFn());
auto res = op(arr1, arr2);
EXPECT_THAT(res, ElementsAre(std::nullopt, 7, std::nullopt, std::nullopt));
}
}
TEST(BinaryDenseOp, DifferentTypes) {
DenseArray<int> arr1 = CreateDenseArray<int>({1, {}, 2, 3});
DenseArray<float> arr2 = CreateDenseArray<float>({3.f, 6.f, {}, 2.f});
auto fn = [](int a, float b) { return a > b; };
BinaryOpImpl<bool, false, SpanOp<decltype(fn)>> op =
CreateDenseOp<DenseOpFlags::kNoSizeValidation |
DenseOpFlags::kRunOnMissing>(fn);
EXPECT_THAT(op(arr1, arr2),
ElementsAre(false, std::nullopt, std::nullopt, true));
}
TEST(BinaryDenseOp, SizeValidation) {
auto fn = [](int a, float b) { return a > b; };
using ImplBase = BinaryOpImpl<bool, false, SpanOp<decltype(fn)>>;
using Impl = OpWithSizeValidation<bool, ImplBase>;
Impl op = CreateDenseOp<DenseOpFlags::kRunOnMissing>(fn);
{
DenseArray<int> arr1 = CreateDenseArray<int>({1, {}, 2, 3});
DenseArray<float> arr2 = CreateDenseArray<float>({3.f, 6.f, {}, 2.f});
EXPECT_THAT(op(arr1, arr2), IsOkAndHolds(ElementsAre(false, std::nullopt,
std::nullopt, true)));
}
{
DenseArray<int> arr1 = CreateDenseArray<int>({1, {}, 2, 3});
DenseArray<float> arr2 = CreateDenseArray<float>({3.f, 6.f, {}});
EXPECT_THAT(
op(arr1, arr2),
StatusIs(absl::StatusCode::kInvalidArgument,
::testing::HasSubstr("argument sizes mismatch: (4, 3)")));
}
}
TEST(SimpleDenseOp, Add3) {
auto add3 = [](int a, int b, int c) { return a + b + c; };
DenseArray<int> arr1 = CreateDenseArray<int>({1, {}, 2, 3});
DenseArray<int> arr2 = CreateDenseArray<int>({3, 6, {}, 2});
DenseArray<int> arr3 = CreateDenseArray<int>({4, 3, 2, 1});
SimpleOpImpl<int, SpanOp<decltype(add3)>> op =
CreateDenseOp<DenseOpFlags::kNoBitmapOffset |
DenseOpFlags::kNoSizeValidation |
DenseOpFlags::kRunOnMissing>(add3);
EXPECT_THAT(op(arr1, arr2, arr3),
ElementsAre(8, std::nullopt, std::nullopt, 6));
EXPECT_THAT(op(arr1, arr3, arr2),
ElementsAre(8, std::nullopt, std::nullopt, 6));
EXPECT_THAT(op(arr3, arr1, arr2),
ElementsAre(8, std::nullopt, std::nullopt, 6));
EXPECT_THAT(op(arr3, arr3, arr3), ElementsAre(12, 9, 6, 3));
}
TEST(SimpleDenseOp, SizeValidation) {
auto add3 = [](int a, int b, int c) { return a + b + c; };
DenseArray<int> arr1 = CreateDenseArray<int>({1, {}, 2, 3});
DenseArray<int> arr2 = CreateDenseArray<int>({3, 6, {}, 2});
DenseArray<int> arr3 = CreateDenseArray<int>({4, 3, 2, 1});
using BaseImpl = SimpleOpImpl<int, SpanOp<decltype(add3)>>;
using Op = OpWithSizeValidation<int, BaseImpl>;
Op op = CreateDenseOp<DenseOpFlags::kNoBitmapOffset |
DenseOpFlags::kRunOnMissing>(add3);
EXPECT_THAT(op(arr1, arr2, arr3),
IsOkAndHolds(ElementsAre(8, std::nullopt, std::nullopt, 6)));
EXPECT_THAT(
op(arr1, CreateDenseArray<int>({1, {}}), arr3),
StatusIs(absl::StatusCode::kInvalidArgument,
::testing::HasSubstr("argument sizes mismatch: (4, 2, 4)")));
}
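// UniversalDenseOp is the general fallback implementation: it supports
// OptionalValue arguments and results as well as mismatched bitmap offsets.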
TEST(UniversalDenseOp, Add) {
DenseArray<int> arr1 = CreateDenseArray<int>({1, {}, 2, 3});
DenseArray<int> arr2 = CreateDenseArray<int>({3, 6, {}, 2});
auto res = UniversalDenseOp<AddFn, int, false, false>(AddFn())(arr1, arr2);
EXPECT_THAT(res, ElementsAre(4, std::nullopt, std::nullopt, 5));
}
TEST(UniversalDenseOp, SkipMissed) {
DenseArray<int> arr1 = CreateDenseArray<int>({1, {}, 2, 3});
DenseArray<int> arr2 = CreateDenseArray<int>({3, 6, {}, 2});
UniversalDenseOp<AddFn, int, true, false> op =
CreateDenseOp<DenseOpFlags::kNoSizeValidation>(AddFn());
EXPECT_THAT(op(arr1, arr2), ElementsAre(4, std::nullopt, std::nullopt, 5));
}
TEST(UniversalDenseOp, UnionAdd) {
DenseArray<int> arr1 = CreateDenseArray<int>({1, {}, {}, 3});
DenseArray<int> arr2 = CreateDenseArray<int>({3, 6, {}, {}});
UniversalDenseOp<UnionAddFn, int, false, false> op =
CreateDenseOp<DenseOpFlags::kNoSizeValidation |
DenseOpFlags::kRunOnMissing>(UnionAddFn());
EXPECT_THAT(op(arr1, arr2), ElementsAre(4, 6, std::nullopt, 3));
auto op_with_size_validation = CreateDenseOp(UnionAddFn());
EXPECT_THAT(op_with_size_validation(arr1, arr2),
IsOkAndHolds(ElementsAre(4, 6, std::nullopt, 3)));
EXPECT_THAT(
op_with_size_validation(CreateDenseArray<int>({3, 6}), arr2),
StatusIs(absl::StatusCode::kInvalidArgument,
::testing::HasSubstr("argument sizes mismatch: (2, 4)")));
}
TEST(UniversalDenseOp, BitOffset) {
DenseArray<int> arr1 = CreateDenseArray<int>({1, {false, 2}, 3, 4});
DenseArray<int> arr2 = CreateDenseArray<int>({{}, 5, 5, 5});
{
auto res = UniversalDenseOp<AddFn, int, false, true>(AddFn())(arr1, arr2);
EXPECT_THAT(res, ElementsAre(std::nullopt, std::nullopt, 8, 9));
}
arr1.bitmap_bit_offset = 1;
EXPECT_THAT(arr1, ElementsAre(std::nullopt, 2, 3, std::nullopt));
{
auto res = UniversalDenseOp<AddFn, int, false, false>(AddFn())(arr1, arr2);
EXPECT_THAT(res, ElementsAre(std::nullopt, 7, 8, std::nullopt));
}
}
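// Exhaustive sweep over item counts and bitmap bit offsets, comparing the
// vectorized result against the scalar functor applied item by item.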
TEST(UniversalDenseOp, DifferentSizesAndOffsets) {
for (int64_t item_count = 0; item_count < 1024; ++item_count) {
int bit_offset1 = item_count % bitmap::kWordBitCount;
int bit_offset2 = (4096 - item_count * 3) % bitmap::kWordBitCount;
int64_t bitmap_size1 = bitmap::BitmapSize(item_count + bit_offset1);
int64_t bitmap_size2 = bitmap::BitmapSize(item_count + bit_offset2);
Buffer<int>::Builder values1(item_count);
Buffer<int>::Builder values2(item_count);
Buffer<bitmap::Word>::Builder presence1(bitmap_size1);
Buffer<bitmap::Word>::Builder presence2(bitmap_size2);
for (int i = 0; i < item_count; ++i) {
values1.Set(i, i);
values2.Set(i, 2 * i);
}
for (int i = 0; i < bitmap_size1; ++i) {
presence1.Set(i, 0xfff0ff0f + i);
}
for (int i = 0; i < bitmap_size2; ++i) {
presence2.Set(i, 0xfff0ff0f - i);
}
DenseArray<int> arr1{std::move(values1).Build(),
std::move(presence1).Build(), bit_offset1};
DenseArray<int> arr2{std::move(values2).Build(),
std::move(presence2).Build(), bit_offset2};
ASSERT_OK_AND_ASSIGN(auto res, CreateDenseOp(UnionAddFn())(arr1, arr2));
for (int64_t i = 0; i < item_count; ++i) {
EXPECT_EQ(res[i], UnionAddFn()(arr1[i], arr2[i]));
}
}
}
TEST(UniversalDenseOp, String) {
DenseArray<absl::string_view> arg_str;
DenseArray<int> arg_pos;
DenseArray<int> arg_len;
arg_str.values = CreateBuffer<absl::string_view>(
{"Hello, world!", "Some other string", "?", "abaca",
"Gooooooooooooooooooooooooooooooooooooooooooogle"});
arg_pos.values = CreateBuffer<int>({3, 5, 1, 0, 0});
arg_len.values = CreateBuffer<int>({5, 4, 0, 3, 4});
auto op = CreateDenseOp([](absl::string_view str, int pos, int len) {
return str.substr(pos, len);
});
ASSERT_OK_AND_ASSIGN(DenseArray<absl::string_view> res,
op(arg_str, arg_pos, arg_len));
EXPECT_EQ(res.values[0], "lo, w");
EXPECT_EQ(res.values[1], "othe");
EXPECT_EQ(res.values[2], "");
EXPECT_EQ(res.values[3], "aba");
EXPECT_EQ(res.values[4], "Gooo");
}
TEST(UniversalDenseOp, Text) {
DenseArray<Text> arg_str;
DenseArray<int> arg_pos;
DenseArray<int> arg_len;
arg_str.values = CreateBuffer<absl::string_view>(
{"Hello, world!", "Some other string", "?", "abaca",
"Gooooooooooooooooooooooooooooooooooooooooooogle"});
arg_pos.values = CreateBuffer<int>({3, 5, 1, 0, 0});
arg_len.values = CreateBuffer<int>({5, 4, 0, 3, 4});
auto op = CreateDenseOp([](absl::string_view str, int pos, int len) {
return str.substr(pos, len);
});
ASSERT_OK_AND_ASSIGN(DenseArray<absl::string_view> res,
op(arg_str, arg_pos, arg_len));
EXPECT_EQ(res.values[0], "lo, w");
EXPECT_EQ(res.values[1], "othe");
EXPECT_EQ(res.values[2], "");
EXPECT_EQ(res.values[3], "aba");
EXPECT_EQ(res.values[4], "Gooo");
}
TEST(UniversalDenseOp, OutputText) {
DenseArray<Text> arg_str;
DenseArray<int> arg_pos;
DenseArray<int> arg_len;
arg_str.values = CreateBuffer<absl::string_view>(
{"Hello, world!", "Some other string", "?", "abaca",
"Gooooooooooooooooooooooooooooooooooooooooooogle"});
arg_pos.values = CreateBuffer<int>({3, 5, 1, 0, 0});
arg_len.values = CreateBuffer<int>({5, 4, 0, 3, 4});
auto op = CreateDenseOp<DenseOpFlags::kNoSizeValidation>(
[](absl::string_view str, int pos, int len) {
return Text(str.substr(pos, len));
});
DenseArray<Text> res = op(arg_str, arg_pos, arg_len);
EXPECT_EQ(res.values[0], "lo, w");
EXPECT_EQ(res.values[1], "othe");
EXPECT_EQ(res.values[2], "");
EXPECT_EQ(res.values[3], "aba");
EXPECT_EQ(res.values[4], "Gooo");
}
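// Functors returning absl::StatusOr<T> turn the op's result into
// absl::StatusOr<DenseArray<T>>; any error from the functor is forwarded.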
TEST(UniversalDenseOp, ForwardErrorStatus) {
DenseArray<int> arr = CreateDenseArray<int>({1, {}, 2, 3});
auto status = absl::InternalError("it is an error");
auto op1 =
CreateDenseOp([status](int a) -> absl::StatusOr<int> { return status; });
auto op2 =
CreateDenseOp([status](int a) -> absl::StatusOr<OptionalValue<float>> {
return status;
});
auto op3 = CreateDenseOp<DenseOpFlags::kNoSizeValidation>(
[status](int a) -> absl::StatusOr<int> { return status; });
auto op4 = CreateDenseOp([](int a) -> absl::StatusOr<int> { return a + 1; });
EXPECT_EQ(op1(arr).status(), status);
EXPECT_EQ(op2(arr).status(), status);
EXPECT_EQ(op3(arr).status(), status);
ASSERT_OK_AND_ASSIGN(auto res, op4(arr));
EXPECT_THAT(res, ElementsAre(2, std::nullopt, 3, 4));
}
TEST(DenseOps, DenseArraysForEach) {
using OB = ::arolla::OptionalValue<::arolla::Bytes>;
DenseArray<float> af = CreateDenseArray<float>(
{std::nullopt, 3.0, std::nullopt, 2.0, std::nullopt});
DenseArray<::arolla::Bytes> as = CreateDenseArray<::arolla::Bytes>(
{OB("abc"), OB(std::nullopt), OB("bca"), OB("def"), OB("cab")});
DenseArray<int> ai =
CreateDenseArray<int>({std::nullopt, 10, 20, 30, std::nullopt});
struct Row {
int64_t id;
OptionalValue<float> f;
absl::string_view s;
int i;
auto AsTuple() const { return std::make_tuple(id, f, s, i); }
};
{
std::vector<Row> rows;
auto fn = [&](int64_t id, bool valid, OptionalValue<float> f,
absl::string_view s, int i) {
if (valid) {
rows.push_back({id, f, s, i});
}
};
EXPECT_THAT(
DenseArraysForEach(fn, af, as, DenseArray<int>()),
StatusIs(absl::StatusCode::kInvalidArgument,
::testing::HasSubstr("argument sizes mismatch: (5, 5, 0)")));
EXPECT_OK(DenseArraysForEach(fn, af, as, ai));
ASSERT_EQ(rows.size(), 2);
EXPECT_EQ(rows[0].AsTuple(), (Row{2, {}, "bca", 20}).AsTuple());
EXPECT_EQ(rows[1].AsTuple(), (Row{3, 2.f, "def", 30}).AsTuple());
}
{
std::vector<Row> rows;
auto fn = [&](int64_t id, OptionalValue<float> f, absl::string_view s,
int i) { rows.push_back({id, f, s, i}); };
EXPECT_THAT(
DenseArraysForEachPresent(fn, af, as, DenseArray<int>()),
StatusIs(absl::StatusCode::kInvalidArgument,
::testing::HasSubstr("argument sizes mismatch: (5, 5, 0)")));
EXPECT_OK(DenseArraysForEachPresent(fn, af, as, ai));
ASSERT_EQ(rows.size(), 2);
EXPECT_EQ(rows[0].AsTuple(), (Row{2, {}, "bca", 20}).AsTuple());
EXPECT_EQ(rows[1].AsTuple(), (Row{3, 2.f, "def", 30}).AsTuple());
}
}
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/dense_array/ops/dense_ops.h | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/dense_array/ops/dense_ops_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
f8e4857a-743f-4562-bda6-e2d2ac928b31 | cpp | tensorflow/tensorflow | ifrt_loaded_variable_utils | tensorflow/core/tfrt/ifrt/ifrt_loaded_variable_utils.cc | tensorflow/core/tfrt/ifrt/ifrt_loaded_variable_utils_test.cc | #include "tensorflow/core/tfrt/ifrt/ifrt_loaded_variable_utils.h"
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "tensorflow/compiler/mlir/tfrt/transforms/ifrt/ifrt_types.h"
#include "xla/python/ifrt/array.h"
#include "xla/python/ifrt/client.h"
#include "xla/python/ifrt/future.h"
#include "xla/tsl/concurrency/ref_count.h"
#include "tensorflow/core/framework/resource_handle.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/tfrt/ifrt/ifrt_loaded_variable_registry.h"
#include "tensorflow/core/tfrt/ifrt/ifrt_restore_tensor_registry.h"
#include "tensorflow/core/tfrt/ifrt/sharding_utils.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/threadpool.h"
#include "tfrt/host_context/concurrent_work_queue.h"
namespace tensorflow {
namespace ifrt_serving {
namespace {
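// Shards a restored host tensor into an IFRT array according to the variable's
// device list and HLO sharding.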
absl::StatusOr<tsl::RCReference<xla::ifrt::Array>> LoadIfrtVariable(
std::shared_ptr<xla::ifrt::Client> ifrt_client,
const tsl::thread::ThreadPool& thread_pool,
const tensorflow::Tensor& variable,
const VariableDeviceShardingConfig& sharding_config) {
return tensorflow::ifrt_serving::MakeArrayFromTensor(
*ifrt_client, variable, sharding_config.device_ids,
sharding_config.hlo_sharding, thread_pool);
}
}
absl::StatusOr<ifrt_serving::DtypeAndShape> GetDtypeAndShape(
const ResourceHandle& resource_handle) {
const std::vector<DtypeAndPartialTensorShape>& dtype_and_partial_shapes =
resource_handle.dtypes_and_shapes();
if (dtype_and_partial_shapes.size() != 1) {
return absl::InvalidArgumentError(absl::StrCat(
"Expected 1 dtype and shape, got ", dtype_and_partial_shapes.size()));
}
ifrt_serving::DtypeAndShape dtype_and_shape;
if (!dtype_and_partial_shapes.front().shape.AsTensorShape(
&dtype_and_shape.shape)) {
return absl::InvalidArgumentError(
absl::StrCat("Failed to convert partial shape to full tensor shape: ",
dtype_and_partial_shapes.front().shape.DebugString()));
}
dtype_and_shape.dtype = dtype_and_partial_shapes.front().dtype;
return dtype_and_shape;
}
std::string GetRuntimeNameFromVarHandle(const ResourceHandle& handle) {
return absl::StrCat(handle.container(), "__", handle.name());
}
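// Registers a future-backed loaded variable keyed by (device ids, runtime
// name, sharding), then fulfills the future asynchronously: once the restored
// tensor arrives, the sharding work runs on the checkpoint loader queue.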
absl::Status AsyncLoadRestoredTensorAsIfrtLoadedVariable(
absl::string_view runtime_name,
std::shared_ptr<xla::ifrt::Client> ifrt_client,
const tsl::thread::ThreadPool& thread_pool,
const ifrt_serving::IfrtRestoreTensorRegistry& ifrt_restore_tensor_registry,
ifrt_serving::IfrtLoadedVariableRegistry& ifrt_loaded_variable_registry,
tfrt::ConcurrentWorkQueue* checkpoint_loader_queue,
const VariableDeviceShardingConfig& sharding_config) {
IfrtLoadedVariableRegistry::Key loaded_variable_key{
.device_ids = sharding_config.device_ids,
.input_name = std::string(runtime_name),
.hlo_sharding = sharding_config.hlo_sharding,
};
if (ifrt_loaded_variable_registry.GetLoadedVariable(loaded_variable_key)
.ok()) {
VLOG(1) << "Found alread registered variable for " << runtime_name;
return absl::OkStatus();
}
xla::ifrt::Future<tensorflow::Tensor> restored_tensor_future =
ifrt_restore_tensor_registry.GetRestoredTensor(runtime_name);
if (!restored_tensor_future.IsValid()) {
return absl::InternalError(absl::StrCat(
"LoadVariableOp: failed to fetch variable tensor: ", runtime_name));
}
auto loaded_variable_promise =
xla::ifrt::Future<tsl::RCReference<xla::ifrt::Array>>::CreatePromise();
auto loaded_variable_future =
xla::ifrt::Future<tsl::RCReference<xla::ifrt::Array>>(
loaded_variable_promise);
  // The result is unused; the lookup verifies that a dtype and shape were
  // registered for this tensor before we commit to loading it.
  TF_ASSIGN_OR_RETURN(
      ifrt_serving::DtypeAndShape dtype_and_shape,
      ifrt_restore_tensor_registry.GetDtypeAndShape(runtime_name));
TF_RETURN_IF_ERROR(ifrt_loaded_variable_registry.TryRegisterLoadedVariable(
loaded_variable_key,
[&]() -> absl::StatusOr<
ifrt_serving::IfrtLoadedVariableRegistry::LoadedVariable> {
return ifrt_serving::IfrtLoadedVariableRegistry::LoadedVariable(
{.array = loaded_variable_future});
}));
restored_tensor_future.OnReady(
[ifrt_client = std::move(ifrt_client), &thread_pool = thread_pool,
checkpoint_loader_queue = checkpoint_loader_queue,
sharding_config = sharding_config,
loaded_variable_promise = std::move(loaded_variable_promise)](
absl::StatusOr<tensorflow::Tensor> restored_tensor) mutable {
if (!restored_tensor.ok()) {
loaded_variable_promise.Set(restored_tensor.status());
return;
}
checkpoint_loader_queue->AddTask(
[ifrt_client = ifrt_client, &thread_pool = thread_pool,
sharding_config = std::move(sharding_config),
restored_tensor = std::move(*restored_tensor),
loaded_variable_promise =
std::move(loaded_variable_promise)]() mutable {
absl::StatusOr<tsl::RCReference<xla::ifrt::Array>>
variable_array =
LoadIfrtVariable(ifrt_client, thread_pool,
restored_tensor, sharding_config);
loaded_variable_promise.Set(std::move(variable_array));
});
});
return absl::OkStatus();
}
}
} | #include "tensorflow/core/tfrt/ifrt/ifrt_loaded_variable_utils.h"
#include <cstdint>
#include <memory>
#include <utility>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "xla/hlo/ir/hlo_sharding.h"
#include "xla/python/ifrt/array.h"
#include "xla/python/ifrt/client.h"
#include "xla/python/ifrt/device.h"
#include "xla/python/ifrt/future.h"
#include "xla/python/ifrt/test_util.h"
#include "xla/tsl/concurrency/ref_count.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/xla_data.pb.h"
#include "tensorflow/core/framework/resource_handle.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_matcher.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/tfrt/ifrt/ifrt_config.pb.h"
#include "tensorflow/core/tfrt/ifrt/ifrt_loaded_variable_registry.h"
#include "tensorflow/core/tfrt/ifrt/ifrt_restore_tensor_registry.h"
#include "tsl/platform/env.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
#include "tsl/platform/threadpool.h"
#include "tfrt/host_context/concurrent_work_queue.h"
namespace tensorflow {
namespace ifrt_serving {
namespace {
using tensorflow::test::TensorEq;
using tsl::testing::StatusIs;
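// Registering the restored tensor under a different name must make the async
// load fail with NOT_FOUND.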
TEST(ShardingUtilsTest, ShardTensorToIfrtLoadedVariableNotFoundWrongName) {
auto input_tensor =
test::AsTensor<int32_t>({1, 2, 3, 4}, tensorflow::TensorShape({2, 2}));
Tensor variable_handle(DT_RESOURCE, TensorShape({}));
ResourceHandle resource_handle;
resource_handle.set_name("var_x");
resource_handle.set_dtypes_and_shapes({{
DT_INT32,
TensorShape({2, 2}),
}});
variable_handle.flat<ResourceHandle>()(0) = std::move(resource_handle);
IfrtRestoreTensorRegistry restored_tensor_registry;
TF_ASSERT_OK_AND_ASSIGN(std::shared_ptr<xla::ifrt::Client> client,
xla::ifrt::test_util::GetClient());
constexpr int kMaxParallelism = 16;
tsl::thread::ThreadPool thread_pool(tsl::Env::Default(), tsl::ThreadOptions(),
"Resharding", kMaxParallelism);
IfrtLoadedVariableRegistry loaded_variable_registry;
  auto restore_work_queue = tfrt::CreateMultiThreadedWorkQueue(4, 4);
VariableDeviceShardingConfig sharding_config = {
.device_ids = {0},
.hlo_sharding = xla::HloSharding::Replicate(),
};
auto promise = xla::ifrt::Future<tensorflow::Tensor>::CreatePromise();
auto future = xla::ifrt::Future<tensorflow::Tensor>(promise);
IfrtRestoreTensorRegistry::RestoredTensorInfo restored_tensor_info = {
false,
GetDtypeAndShape(variable_handle.scalar<ResourceHandle>()()).value(),
future};
TF_ASSERT_OK(restored_tensor_registry.TryRegister("var_x_wrong",
restored_tensor_info));
promise.Set(input_tensor);
EXPECT_THAT(
AsyncLoadRestoredTensorAsIfrtLoadedVariable(
"var_x", client, thread_pool, restored_tensor_registry,
loaded_variable_registry, restore_work_queue.get(), sharding_config),
StatusIs(absl::StatusCode::kNotFound));
}
TEST(ShardingUtilsTest, ShardTensorToIfrtLoadedVariableSucceed) {
auto input_tensor =
test::AsTensor<int32_t>({1, 2, 3, 4}, TensorShape({2, 2}));
Tensor variable_handle(DT_RESOURCE, TensorShape({}));
ResourceHandle resource_handle;
resource_handle.set_name("var_x");
resource_handle.set_dtypes_and_shapes({{
DT_INT32,
TensorShape({2, 2}),
}});
variable_handle.flat<ResourceHandle>()(0) = std::move(resource_handle);
IfrtRestoreTensorRegistry restored_tensor_registry;
TF_ASSERT_OK_AND_ASSIGN(std::shared_ptr<xla::ifrt::Client> client,
xla::ifrt::test_util::GetClient());
constexpr int kMaxParallelism = 16;
tsl::thread::ThreadPool thread_pool(tsl::Env::Default(), tsl::ThreadOptions(),
"Resharding", kMaxParallelism);
IfrtLoadedVariableRegistry loaded_variable_registry;
  auto restore_work_queue = tfrt::CreateMultiThreadedWorkQueue(4, 4);
VariableDeviceShardingConfig sharding_config{
.device_ids = {0},
.hlo_sharding = xla::HloSharding::Replicate(),
};
auto promise = xla::ifrt::Future<tensorflow::Tensor>::CreatePromise();
auto future = xla::ifrt::Future<tensorflow::Tensor>(promise);
IfrtRestoreTensorRegistry::RestoredTensorInfo restored_tensor_info = {
false,
GetDtypeAndShape(variable_handle.scalar<ResourceHandle>()()).value(),
future};
TF_ASSERT_OK(
restored_tensor_registry.TryRegister("var_x", restored_tensor_info));
TF_ASSERT_OK(AsyncLoadRestoredTensorAsIfrtLoadedVariable(
"var_x", client, thread_pool, restored_tensor_registry,
loaded_variable_registry, restore_work_queue.get(), sharding_config));
promise.Set(input_tensor);
IfrtLoadedVariableRegistry::Key key{
.device_ids = {0},
.input_name = "var_x",
.hlo_sharding = sharding_config.hlo_sharding,
};
TF_ASSERT_OK_AND_ASSIGN(auto v,
loaded_variable_registry.GetLoadedVariable(key));
TF_ASSERT_OK_AND_ASSIGN(auto assembled_array, v.array.Await());
TF_ASSERT_OK_AND_ASSIGN(auto disassembled_arrays,
assembled_array->DisassembleIntoSingleDeviceArrays(
xla::ifrt::ArrayCopySemantics::kAlwaysCopy));
ASSERT_EQ(disassembled_arrays.size(), 1);
for (int i = 0; i < disassembled_arrays.size(); ++i) {
tensorflow::Tensor host_tensor(input_tensor.dtype(), input_tensor.shape());
TF_ASSERT_OK(
disassembled_arrays[i]
->CopyToHostBuffer(host_tensor.data(), {},
xla::ifrt::ArrayCopySemantics::kAlwaysCopy)
.Await());
EXPECT_THAT(host_tensor, TensorEq(input_tensor));
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tfrt/ifrt/ifrt_loaded_variable_utils.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tfrt/ifrt/ifrt_loaded_variable_utils_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
1b74e393-915d-4eaa-9c6c-9893a7f52c8c | cpp | tensorflow/tensorflow | virtual_scheduler | tensorflow/core/grappler/costs/virtual_scheduler.cc | tensorflow/core/grappler/costs/virtual_scheduler_test.cc | #include "tensorflow/core/grappler/costs/virtual_scheduler.h"
#include <algorithm>
#include <functional>
#include <string>
#include <utility>
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_replace.h"
#include "tensorflow/core/framework/allocation_description.pb.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/framework/tensor_description.pb.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/grappler/clusters/utils.h"
#include "tensorflow/core/grappler/costs/utils.h"
#include "tensorflow/core/grappler/op_types.h"
#include "tensorflow/core/grappler/utils.h"
#include "tensorflow/core/grappler/utils/transitive_fanin.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/strings/numbers.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/util/device_name_utils.h"
namespace tensorflow {
namespace grappler {
const char kAttrInputSrc[] = "input_source_";
const char kAttrSrcDevice[] = "send_device";
const char kAttrDstDevice[] = "recv_device";
const char kAttrTensorName[] = "tensor_name";
const char kChannelDevice[] = "Channel";
const char kStreaming[] = "_streaming";
namespace {
using ::tensorflow::strings::HumanReadableNumBytes;
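// Rounds x to two decimal places (used below for memory-usage percentages).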
float Round2(const float x) {
return ::round(100.0 * x) / 100.0;
}
Costs& FindOrCreateZero(const string& op_name,
std::map<string, Costs>* op_cost) {
auto it = op_cost->find(op_name);
if (it == op_cost->end()) {
it = op_cost->emplace(op_name, Costs::ZeroCosts()).first;
}
return it->second;
}
struct RecvNodeDescriptor {
const NodeDef* node;
const int port_num;
const string device;
RecvNodeDescriptor(const NodeDef* node_, const int port_num_,
const string& device_)
: node(node_), port_num(port_num_), device(device_) {}
};
struct RecvNodeDescriptorHash {
std::size_t operator()(const RecvNodeDescriptor& recv_node) const {
return std::hash<const NodeDef*>()(recv_node.node) ^
std::hash<int>()(recv_node.port_num) ^
std::hash<string>()(recv_node.device);
}
};
struct RecvNodeDescriptorEqual {
bool operator()(const RecvNodeDescriptor& a,
const RecvNodeDescriptor& b) const {
return a.node == b.node && a.port_num == b.port_num && a.device == b.device;
}
};
void UpdateDeviceAnnotationState(const NodeDef* node,
const NodeState& node_state,
DeviceState* device) {
if (node->attr().count(kOutputShapes) == 0) return;
int64_t execution_count = node->attr().count(kExecutionCount) == 0
? 1
: node->attr().at(kExecutionCount).i();
auto& shape_annotation_stats = device->shape_annotation_stats;
shape_annotation_stats.num_ops_annotated += 1;
shape_annotation_stats.num_ops_executed += execution_count;
shape_annotation_stats.num_ops_executed_more_than_once +=
execution_count > 1 ? 1 : 0;
shape_annotation_stats.num_ops_with_incompatible_shapes +=
node_state.shape_incompatible ? 1 : 0;
shape_annotation_stats.num_ops_with_dynamic_shapes +=
(execution_count > 1 && node->attr().count(kOutputSame) == 0) ? 1 : 0;
}
bool IsStreamingPort(const NodeDef& node, const int port) {
if (!node.attr().contains(kStreaming)) return false;
auto& attr_list = node.attr().at(kStreaming).list();
bool is_streaming_port = false;
if (port >= 0 && port < attr_list.b().size()) {
is_streaming_port = attr_list.b(port);
}
return is_streaming_port;
}
}
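// Merge nodes go to the front of the ready list; since the LIFO schedules from
// the back, they run only after the other currently ready nodes.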
void LIFOManager::AddNode(const NodeDef* node) {
if (IsMerge(*node)) {
nodes_.push_front(node);
} else {
nodes_.push_back(node);
}
}
const NodeDef* LIFOManager::GetCurrNode() {
  CHECK(!nodes_.empty()) << "GetCurrNode(), but there's no ready node";
  if (curr_pos_ == nodes_.end()) {
    // Point curr_pos_ at the last element of the ready list.
    curr_pos_ = --(nodes_.end());
  }
  return *curr_pos_;
}
void LIFOManager::RemoveCurrNode() {
GetCurrNode();
nodes_.erase(curr_pos_);
curr_pos_ = nodes_.end();
}
HeapReadyManager::HeapReadyManager() : ReadyNodeManager() {
std::make_heap(nodes_.begin(), nodes_.end());
}
Status HeapReadyManager::Init(
const std::unordered_map<const NodeDef*, NodeState>* node_map) {
node_map_ = node_map;
nodes_.clear();
curr_node_ = nullptr;
greater_ = Greater();
return absl::OkStatus();
}
void HeapReadyManager::AddNode(const NodeDef* node) {
nodes_.push_back(node);
std::push_heap(nodes_.begin(), nodes_.end(), greater_);
}
const NodeDef* HeapReadyManager::GetCurrNode() {
  if (curr_node_) return curr_node_;
  CHECK(!nodes_.empty()) << "GetCurrNode(), but there's no ready node";
curr_node_ = nodes_.front();
std::pop_heap(nodes_.begin(), nodes_.end(), greater_);
nodes_.pop_back();
return curr_node_;
}
void HeapReadyManager::RemoveCurrNode() {
if (curr_node_) {
curr_node_ = nullptr;
} else {
std::pop_heap(nodes_.begin(), nodes_.end(), greater_);
nodes_.pop_back();
}
}
bool HeapReadyManager::Empty() const {
return nodes_.empty() && curr_node_ == nullptr;
}
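// Orders nodes by time_ready, breaking ties by node name so that scheduling is
// deterministic.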
bool FirstReadyCmp(
const std::unordered_map<const NodeDef*, NodeState>* node_map,
const NodeDef* a, const NodeDef* b) {
if (node_map->at(a).time_ready == node_map->at(b).time_ready) {
return a->name().compare(b->name()) > 0;
} else {
return node_map->at(a).time_ready > node_map->at(b).time_ready;
}
}
std::function<bool(const NodeDef*, const NodeDef*)>
FirstReadyManager::Greater() {
auto greater = [this](const NodeDef* a, const NodeDef* b) -> bool {
return FirstReadyCmp(node_map_, a, b);
};
return greater;
}
std::function<bool(const NodeDef*, const NodeDef*)>
PriorityReadyManager::Greater() {
auto greater = [this](const NodeDef* a, const NodeDef* b) -> bool {
auto pri_a = node_priority_.at(a->name());
auto pri_b = node_priority_.at(b->name());
if (pri_a == pri_b) {
return FirstReadyCmp(node_map_, a, b);
}
return pri_a > pri_b;
};
return greater;
}
void PriorityReadyManager::AddNode(const NodeDef* node) {
if (node_priority_.count(node->name()) == 0) {
VLOG(3) << "Priority of node " << node->name() << " not found.";
node_priority_[node->name()] = 0;
}
HeapReadyManager::AddNode(node);
}
Status PriorityReadyManager::SetPriority(
const std::unordered_map<string, int>& node_priority) {
node_priority_ = node_priority;
return absl::OkStatus();
}
CompositeNodeManager::CompositeNodeManager()
: ReadyNodeManager(), send_manager_(), recv_manager_() {}
Status CompositeNodeManager::Init(
const std::unordered_map<const NodeDef*, NodeState>* node_map) {
node_map_ = node_map;
TF_RETURN_IF_ERROR(send_manager_.Init(node_map));
TF_RETURN_IF_ERROR(recv_manager_.Init(node_map));
curr_node_ = nullptr;
return absl::OkStatus();
}
void CompositeNodeManager::AddNode(const NodeDef* node) {
if (IsSend(*node)) {
send_manager_.AddNode(node);
} else if (IsRecv(*node)) {
recv_manager_.AddNode(node);
} else {
const auto& device = node_map_->at(node).device_name;
ops_lifo_map_[device].AddNode(node);
}
}
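// Picks the earliest-ready candidate across the per-device LIFOs and the
// Send/Recv managers; on ties, Send wins over Recv, Recv over other ops, and
// remaining ties fall back to name order.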
const NodeDef* CompositeNodeManager::GetCurrNode() {
if (curr_node_) return curr_node_;
std::vector<std::pair<const NodeDef*, Costs::Duration>> candidates;
for (auto& ops_lifo : ops_lifo_map_) {
if (!ops_lifo.second.Empty()) {
const auto* op = ops_lifo.second.GetCurrNode();
candidates.emplace_back(op, node_map_->at(op).time_ready);
}
}
if (!send_manager_.Empty()) {
const auto* send = send_manager_.GetCurrNode();
candidates.emplace_back(send, node_map_->at(send).time_ready);
}
if (!recv_manager_.Empty()) {
const auto* recv = recv_manager_.GetCurrNode();
candidates.emplace_back(recv, node_map_->at(recv).time_ready);
}
CHECK(!candidates.empty());
auto first_ready = std::min_element(
candidates.begin(), candidates.end(),
[](const std::pair<const NodeDef*, Costs::Duration>& a,
const std::pair<const NodeDef*, Costs::Duration>& b) {
if (a.second == b.second) {
int a_score = 2 * IsSend(*a.first) + IsRecv(*a.first);
int b_score = 2 * IsSend(*b.first) + IsRecv(*b.first);
if (a_score == b_score) {
return a.first->name().compare(b.first->name()) < 0;
} else {
return a_score > b_score;
}
} else {
return a.second < b.second;
}
});
curr_node_ = first_ready->first;
return curr_node_;
}
void CompositeNodeManager::RemoveCurrNode() {
const auto* node = GetCurrNode();
if (IsSend(*node)) {
send_manager_.RemoveCurrNode();
} else if (IsRecv(*node)) {
recv_manager_.RemoveCurrNode();
} else {
const auto device = node_map_->at(node).device_name;
ops_lifo_map_[device].RemoveCurrNode();
}
curr_node_ = nullptr;
}
bool CompositeNodeManager::Empty() const {
bool empty = true;
for (const auto& ops_lifo : ops_lifo_map_) {
empty &= ops_lifo.second.Empty();
}
return empty && send_manager_.Empty() && recv_manager_.Empty();
}
std::unique_ptr<ReadyNodeManager> ReadyNodeManagerFactory(
const string& ready_node_manager) {
if (ready_node_manager == "FIFO") {
return std::make_unique<FIFOManager>();
} else if (ready_node_manager == "LIFO") {
return std::make_unique<LIFOManager>();
} else if (ready_node_manager == "FirstReady") {
return std::make_unique<FirstReadyManager>();
} else if (ready_node_manager == "Composite") {
return std::make_unique<CompositeNodeManager>();
}
LOG(FATAL) << "Not a valid ready node manager: " << ready_node_manager;
return nullptr;
}
SchedulerState::~SchedulerState() {}
SchedulerState::SchedulerState(const bool use_static_shapes,
const bool use_aggressive_shape_inference,
Cluster* cluster,
std::unique_ptr<VirtualPlacer> placer)
: graph_costs_(Costs::ZeroCosts()),
cluster_(cluster),
use_static_shapes_(use_static_shapes),
use_aggressive_shape_inference_(use_aggressive_shape_inference),
placer_(std::move(placer)) {
DCHECK(placer_);
graph_costs_.num_ops_total = 0;
initialized_ = false;
track_mem_usage_snapshot_ = VLOG_IS_ON(1);
}
Status SchedulerState::Init(const GrapplerItem* item,
std::vector<const NodeDef*>* initial_nodes,
bool create_explicit_channel_device) {
initialized_ = false;
node_map_.clear();
device_.clear();
additional_nodes_.clear();
graph_costs_ = Costs::ZeroCosts();
graph_costs_.num_ops_total = 0;
op_to_cost_.clear();
op_counts_.clear();
op_costs_.clear();
initial_nodes->clear();
graph_properties_ = std::make_unique<GraphProperties>(*item);
if (use_static_shapes_) {
TF_RETURN_IF_ERROR(graph_properties_->InferStatically(
true, use_aggressive_shape_inference_, true));
} else {
TF_RETURN_IF_ERROR(graph_properties_->InferDynamically(cluster_));
}
grappler_item_ = item;
const auto& graph = grappler_item_->graph;
const auto& fetch_nodes = grappler_item_->fetch;
std::set<string> feed_nodes;
for (const auto& f : grappler_item_->feed) {
auto iter_and_inserted_flag = feed_nodes.insert(f.first);
QCHECK(iter_and_inserted_flag.second)
<< "Duplicate feed node found: " << f.first;
}
std::unordered_map<string, const NodeDef*> name_to_node;
std::vector<const NodeDef*> fetch_fanin_nodes;
TF_RETURN_IF_ERROR(ComputeTransitiveFanin(graph, fetch_nodes, &name_to_node,
&fetch_fanin_nodes));
std::unordered_map<string, const NodeDef*> name_to_send;
for (const auto& node : graph.node()) {
if (IsSend(node)) {
const auto& attr = node.attr();
name_to_send[attr.at("tensor_name").s()] = &node;
}
}
std::unordered_map<RecvNodeDescriptor, const NodeDef*, RecvNodeDescriptorHash,
RecvNodeDescriptorEqual>
cached_recv_nodes;
for (const auto* curr_node : fetch_fanin_nodes) {
auto& curr_node_state = GetNodeStateOrCreateIt(curr_node);
const string curr_node_device = DeviceName(curr_node);
std::vector<string> inputs;
if (IsRecv(*curr_node)) {
const auto& attr = curr_node->attr();
if (attr.count("tensor_name")) {
const auto& send_node_name = attr.at("tensor_name").s();
auto it = name_to_send.find(send_node_name);
if (it != name_to_send.end()) {
const NodeDef* send = it->second;
inputs = {send->name()};
}
}
} else {
for (const string& input : curr_node->input()) {
inputs.push_back(input);
}
}
for (const string& input_node_name : inputs) {
const string node_name = NodeName(input_node_name);
const NodeDef* input_node = name_to_node[node_name];
if (input_node == nullptr) {
return absl::InvalidArgumentError(
absl::StrCat("Unknown node: ", node_name));
}
const string in_device = DeviceName(input_node);
const auto input_node_port_num = NodePosition(input_node_name);
if (curr_node_device == in_device || IsControlInput(input_node_name)) {
curr_node_state.inputs.push_back(
std::make_pair(input_node, input_node_port_num));
auto& input_node_state = GetNodeStateOrCreateIt(input_node);
input_node_state.outputs[input_node_port_num].push_back(curr_node);
} else {
RecvNodeDescriptor recv_node(input_node, input_node_port_num,
curr_node_device);
auto it = cached_recv_nodes.find(recv_node);
if (it != cached_recv_nodes.end()) {
const NodeDef* recv_op = it->second;
curr_node_state.inputs.push_back(std::make_pair(recv_op, 0));
auto& input_node_state = node_map_.at(recv_op);
input_node_state.outputs[0].push_back(curr_node);
} else {
auto send_and_recv =
CreateSendRecv(input_node, curr_node, input_node, input_node_name,
create_explicit_channel_device);
const auto* send = send_and_recv.first;
const auto* recv = send_and_recv.second;
curr_node_state.inputs.push_back(std::make_pair(recv, 0));
auto& input_node_state = GetNodeStateOrCreateIt(input_node);
input_node_state.outputs[input_node_port_num].push_back(send);
cached_recv_nodes[recv_node] = recv;
}
}
}
const bool given_as_feed =
feed_nodes.find(curr_node->name()) != feed_nodes.end();
const bool has_no_inputs = inputs.empty();
if (given_as_feed || has_no_inputs) {
curr_node_state.time_ready = Costs::Duration();
initial_nodes->push_back(curr_node);
VLOG(3) << "Added ready node: " << curr_node->name();
}
feed_nodes.erase(curr_node->name());
if (IsPersistent(*curr_node)) {
auto& device_state = device_[curr_node_device];
for (int port_num = 0,
port_num_end = curr_node_state.output_properties.size();
port_num < port_num_end; ++port_num) {
device_state.persistent_nodes.insert(
std::make_pair(curr_node, port_num));
}
}
}
if (initial_nodes->empty()) {
return errors::InvalidArgument("No ready nodes in the graph.");
}
if (!feed_nodes.empty()) {
VLOG(1) << "Some feed nodes were not consumed by the fetch fanin: "
<< absl::StrJoin(feed_nodes, ",");
}
initialized_ = true;
return absl::OkStatus();
}
void SchedulerState::MaybeUpdateInputOutput(const NodeDef* node) {
CHECK(!initialized_) << "MaybeUpdateInputOutput is called after Init().";
if ((IsSend(*node) || IsRecv(*node)) && node->attr().count(kAttrInputSrc)) {
auto& node_state = node_map_[node];
auto& inputs = node_state.input_properties;
auto& outputs = node_state.output_properties;
CHECK(inputs.empty());
CHECK(outputs.empty());
const auto& attr = node->attr();
const auto& input_source_name = attr.at(kAttrInputSrc).s();
if (IsControlInput(input_source_name)) {
OpInfo::TensorProperties control_message;
control_message.set_dtype(DT_FLOAT);
control_message.mutable_shape()->add_dim()->set_size(1);
auto* value = control_message.mutable_value();
value->add_float_val(1);
inputs.push_back(control_message);
outputs.push_back(control_message);
} else {
const auto& output_properties =
graph_properties_->GetOutputProperties(NodeName(input_source_name));
if (!output_properties.empty()) {
const auto input_node_port_num = NodePosition(input_source_name);
CHECK_GT(output_properties.size(), input_node_port_num);
inputs.push_back(output_properties[input_node_port_num]);
outputs.push_back(output_properties[input_node_port_num]);
}
}
}
}
string SchedulerState::DeviceName(const NodeDef* node) const {
return placer_->get_canonical_device_name(*node);
}
string SchedulerState::SanitizedDeviceName(const NodeDef* node) const {
return absl::StrReplaceAll(placer_->get_canonical_device_name(*node),
{{":", "_"}});
}
string SchedulerState::ChannelDeviceName(const NodeDef* from,
const NodeDef* to) const {
CHECK(!initialized_) << "ChannelDeviceName is called after Init().";
return absl::StrCat(kChannelDevice, "_from_", SanitizedDeviceName(from),
"_to_", SanitizedDeviceName(to));
}
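// Models a cross-device edge with a synthetic _Send/_Recv pair. The _Send is
// placed on an explicit channel device when requested, so transfer time is
// accounted separately from compute on the endpoint devices.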
std::pair<const NodeDef*, const NodeDef*> SchedulerState::CreateSendRecv(
const NodeDef* from, const NodeDef* to, const NodeDef* input_node,
const string& input_name, bool create_channel_device) {
CHECK(!initialized_) << "CreateSendRecv is called after Init().";
auto input_node_port_num = NodePosition(input_name);
string src_name;
bool control_input = false;
if (input_node_port_num >= 0) {
src_name = absl::StrCat(from->name(), "_", input_node_port_num);
} else {
src_name = absl::StrCat(from->name(), "_minus1");
control_input = true;
}
auto* send = new NodeDef();
send->set_name("Send_" + src_name + "_from_" + SanitizedDeviceName(from) +
"_to_" + SanitizedDeviceName(to));
send->set_op("_Send");
send->add_input(from->name());
auto send_device =
create_channel_device ? ChannelDeviceName(from, to) : DeviceName(from);
send->set_device(send_device);
auto& send_attr = *(send->mutable_attr());
send_attr[kAttrInputSrc].set_s(input_name);
send_attr[kAttrSrcDevice].set_s(DeviceName(from));
send_attr[kAttrDstDevice].set_s(DeviceName(to));
if (input_node->attr().count(kAttrTensorName)) {
send_attr[kAttrTensorName].set_s(
input_node->attr().at(kAttrTensorName).s());
}
auto* recv = new NodeDef();
recv->set_name("Recv_" + src_name + "_on_" + SanitizedDeviceName(to));
recv->set_op("_Recv");
recv->add_input(send->name());
recv->set_device(DeviceName(to));
auto& recv_attr = *(recv->mutable_attr());
recv_attr[kAttrInputSrc].set_s(input_name);
if (input_node->attr().count(kAttrTensorName)) {
recv_attr[kAttrTensorName].set_s(
input_node->attr().at(kAttrTensorName).s());
}
if (from->attr().contains(kStreaming) && !control_input) {
if (input_node_port_num >= from->attr().at(kStreaming).list().b_size()) {
LOG(ERROR)
<< from->name()
<< " port index larger than length of _streaming attribute list.";
} else if (from->attr().at(kStreaming).list().b(input_node_port_num)) {
send_attr[kStreaming].mutable_list()->add_b(true);
recv_attr[kStreaming].mutable_list()->add_b(true);
}
}
auto& send_node_state = GetNodeStateOrCreateIt(send);
send_node_state.device_name = send->device();
send_node_state.inputs.push_back(std::make_pair(from, input_node_port_num));
send_node_state.outputs[0].push_back(recv);
auto& recv_node_state = GetNodeStateOrCreateIt(recv);
recv_node_state.inputs.push_back(std::make_pair(send, 0));
recv_node_state.outputs[0].push_back(to);
additional_nodes_.emplace_back(std::unique_ptr<NodeDef>(send));
additional_nodes_.emplace_back(std::unique_ptr<NodeDef>(recv));
return std::make_pair(send, recv);
}
OpContext SchedulerState::CreateOpContext(const NodeDef* node) const {
DeviceProperties device;
device = placer_->get_device(*node);
if (IsSend(*node)) {
device.set_type(kChannelDevice);
}
OpContext op_context;
const auto& node_state = node_map_.at(node);
op_context.name = node->name();
op_context.device_name = node_state.device_name;
auto& op_info = op_context.op_info;
op_info.set_op(node->op());
*op_info.mutable_attr() = node->attr();
for (auto& input : node_state.input_properties) {
*op_info.add_inputs() = input;
}
for (auto& output : node_state.output_properties) {
*op_info.add_outputs() = output;
}
op_info.mutable_device()->Swap(&device);
if (grappler_item_->graph.has_library()) {
op_context.function_library = &grappler_item_->graph.library();
}
return op_context;
}
NodeState& SchedulerState::GetNodeStateOrCreateIt(const NodeDef* node) {
CHECK(!initialized_) << "GetNodeStateOrCreateIt is called after Init().";
auto it = node_map_.find(node);
if (it != node_map_.end()) {
return it->second;
}
it = node_map_.emplace(node, NodeState()).first;
auto& node_state = it->second;
node_state.input_properties =
graph_properties_->GetInputProperties(node->name());
node_state.output_properties =
graph_properties_->GetOutputProperties(node->name());
node_state.shape_incompatible =
graph_properties_->CheckShapeIncompatible(node->name());
MaybeUpdateInputOutput(node);
if (!IsSend(*node)) {
node_state.device_name = DeviceName(node);
}
for (size_t i = 0; i < node_state.output_properties.size(); ++i) {
node_state.time_no_references[i] = Costs::Duration::max();
node_state.num_outputs_executed[i] = 0;
node_state.outputs[i] = {};
}
node_state.time_no_references[-1] = Costs::Duration::max();
node_state.num_outputs_executed[-1] = 0;
node_state.outputs[-1] = {};
node_state.time_scheduled = Costs::Duration().infinity();
return it->second;
}
void SchedulerState::GetOutputNodes(const NodeDef* node,
const Costs::Duration& curr_time,
std::vector<const NodeDef*>* output_nodes) {
int slot = -1;
if (IsSwitch(*node) && node->attr().count(kOutputSlots) > 0 &&
node->attr().at(kOutputSlots).list().i_size() > 0) {
slot = node->attr().at(kOutputSlots).list().i(0);
for (int i = 1; i < node->attr().at(kOutputSlots).list().i_size(); ++i) {
if (slot != node->attr().at(kOutputSlots).list().i(i)) {
slot = -1;
break;
}
}
}
auto& node_state = node_map_[node];
for (const auto& port_num_output_pair : node_state.outputs) {
if (slot >= 0 && port_num_output_pair.first != slot) continue;
for (auto* output_node : port_num_output_pair.second) {
auto& output_state = node_map_[output_node];
output_state.num_inputs_ready++;
int output_state_inputs_size = output_state.inputs.size();
if (output_state.num_inputs_ready == output_state_inputs_size ||
IsMerge(*output_node)) {
output_state.time_ready = curr_time;
output_nodes->push_back(output_node);
VLOG(3) << " Add output: " << output_node->name();
}
}
}
}
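// Accounts the executed node's costs against graph, per-op, and per-device
// totals, updates device memory usage (allocating its outputs and freeing
// inputs whose last consumer just ran), and returns newly ready output nodes.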
std::vector<const NodeDef*> SchedulerState::MarkNodeExecuted(
const NodeDef* node, const Costs& node_costs, const OpContext& op_context,
bool extract_execution_count_attr,
const std::string& override_device_name) {
auto& node_state = node_map_[node];
bool previously_executed_merge =
IsMerge(*node) && (node_state.time_finished != Costs::Duration::max());
node_state.execution_count = 1;
if (extract_execution_count_attr && node->attr().count(kExecutionCount) > 0) {
node_state.execution_count = node->attr().at(kExecutionCount).i();
}
node_state.node_costs = node_costs;
Costs total_node_costs = node_state.TotalNodeCosts();
graph_costs_ = CombineCosts(graph_costs_, total_node_costs);
const string& op_name = node->op();
auto& op_cost = FindOrCreateZero(op_name, &op_to_cost_);
op_cost = CombineCosts(op_cost, total_node_costs);
if (VLOG_IS_ON(2)) {
string node_description = GetOpDescription(op_context.op_info);
op_counts_[node_description] += 1;
op_costs_[node_description] =
std::make_pair(total_node_costs.execution_time.asMicroSeconds().count(),
!node_costs.inaccurate);
}
std::string device_name = node_state.device_name;
if (!override_device_name.empty()) {
device_name = override_device_name;
}
auto& device = device_[device_name];
device.nodes_executed.push_back(node);
if (node_state.time_scheduled == Costs::Duration().infinity()) {
node_state.time_scheduled =
std::max(device.GetCurrTime(), node_state.time_ready);
device.device_costs.execution_time = node_state.time_scheduled;
}
device.device_costs = CombineCosts(device.device_costs, total_node_costs);
auto curr_time = device.GetCurrTime();
node_state.time_finished = curr_time;
UpdateDeviceAnnotationState(node, node_state, &device);
if (!IsPersistent(*node)) {
for (const auto& port_num_output_pair : node_state.outputs) {
int port_num = port_num_output_pair.first;
if (node_state.outputs[port_num].empty()) {
node_state.time_no_references[port_num] = curr_time;
} else {
if (node_state.node_costs.persistent_output_ports.contains(port_num)) {
continue;
}
if (!IsStreamingPort(*node, port_num)) {
device.memory_usage += GetOrCalculateOutputSize(node_state, port_num);
}
device.nodes_in_memory.insert(std::make_pair(node, port_num));
}
}
}
for (const auto& port : node_costs.persistent_output_ports) {
device.persistent_nodes.insert({node, port});
}
auto& device_op_cost = FindOrCreateZero(op_name, &device.op_to_cost);
device_op_cost = CombineCosts(device_op_cost, total_node_costs);
VLOG(3) << "Op scheduled -- name: " << node->name() << ", op: " << node->op()
<< ", device: " << node->device()
<< ", execution_count: " << node_state.execution_count
<< ", ready: " << node_state.time_ready.count()
<< ", scheduled: " << node_state.time_scheduled.count()
<< ", finished: " << node_state.time_finished.count();
VLOG(5) << " Current device memory usage (before deallocation): "
<< device.memory_usage;
std::vector<const NodeDef*> new_nodes;
if (previously_executed_merge) {
VLOG(1) << "node [ " << node->name() << ", " << node->op() << " ] "
<< "is executed more than once. "
<< "Skip scheduling its output nodes.";
} else {
GetOutputNodes(node, curr_time, &new_nodes);
}
if (!IsPersistent(*node)) {
if (device.memory_usage > device.max_memory_usage) {
device.max_memory_usage = device.memory_usage;
if (track_mem_usage_snapshot_) {
device.mem_usage_snapshot_at_peak = device.nodes_in_memory;
}
}
}
if (track_mem_usage_snapshot_) {
device.temporary_memory_usage_trace.push_back(
{node->name(), device.memory_usage});
}
for (const auto& input_port : node_state.inputs) {
auto* input = input_port.first;
auto port = input_port.second;
auto& input_state = node_map_[input];
input_state.num_outputs_executed[port]++;
int input_state_outputs_size_ = input_state.outputs[port].size();
if (input_state.node_costs.persistent_output_ports.contains(port)) continue;
if (input_state.num_outputs_executed[port] == input_state_outputs_size_ &&
!IsPersistent(*input)) {
input_state.time_no_references[port] = curr_time;
auto& input_device = device_[input_state.device_name];
if (!IsStreamingPort(*input, port)) {
input_device.memory_usage -=
GetOrCalculateOutputSize(input_state, port);
}
input_device.nodes_in_memory.erase(std::make_pair(input, port));
}
}
return new_nodes;
}
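// Logs per-op and per-device statistics and returns the costs of the device on
// the critical path, i.e. the device whose simulated time is largest.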
Costs SchedulerState::Summary() const {
VLOG(1) << graph_costs_.num_ops_total << " ops processed in total, with "
<< graph_costs_.num_ops_with_unknown_shapes
<< " having unknown shapes";
VLOG(1) << "Expected execution time: " << graph_costs_.execution_time.count();
VLOG(1) << "Expected compute time: " << graph_costs_.compute_time.count();
VLOG(1) << "Expected memory time: " << graph_costs_.memory_time.count();
VLOG(1) << "Expected intermediate memory time: "
<< graph_costs_.intermediate_memory_time.count();
VLOG(1) << "Expected max memory: " << graph_costs_.max_memory;
VLOG(1) << "Expected max per-op buffers: " << graph_costs_.max_per_op_buffers;
VLOG(1) << "Expected max per-op streaming buffers: "
<< graph_costs_.max_per_op_streaming;
VLOG(1) << "Per-op execution time / compute time / memory time"
<< " / intermediate memory time:";
for (const auto& op_cost_pair : op_to_cost_) {
const auto& op = op_cost_pair.first;
const auto& cost = op_cost_pair.second.execution_time.count();
const auto& compute_cost = op_cost_pair.second.compute_time.count();
const auto& memory_cost = op_cost_pair.second.memory_time.count();
const auto& intermediate_memory_cost =
op_cost_pair.second.intermediate_memory_time.count();
const bool is_op_cost_accurate = !op_cost_pair.second.inaccurate;
if (cost) {
VLOG(1) << absl::StrFormat(" + %30s : %c %10d / %10d / %10d / %10d", op,
(is_op_cost_accurate ? ' ' : '~'), cost,
compute_cost, memory_cost,
intermediate_memory_cost);
}
}
VLOG(1) << "Devices:";
Costs critical_path_costs = Costs::ZeroCosts();
std::vector<string> device_names;
device_names.reserve(device_.size());
for (auto& it : device_) {
device_names.push_back(it.first);
}
std::sort(device_names.begin(), device_names.end());
for (const auto& name : device_names) {
const auto& state = device_.at(name);
std::map<string, int64_t> op_to_memory;
int64_t persistent_memory_usage = 0;
std::set<string> persistent_ops;
for (const auto& node_port : state.persistent_nodes) {
const auto* node = node_port.first;
const auto port = node_port.second;
int64_t output_size = 0;
auto it = node_map_.find(node);
if (it != node_map_.end()) {
output_size = GetOrCalculateOutputSize(it->second, port);
}
persistent_memory_usage += output_size;
op_to_memory[node->op()] += output_size;
persistent_ops.insert(node->op());
}
int64_t max_memory_usage = persistent_memory_usage + state.max_memory_usage;
critical_path_costs.estimated_max_memory_per_device[name] =
max_memory_usage;
const Costs::NanoSeconds wall_time_ns = state.GetCurrTime();
VLOG(1) << "Device = " << name
<< ", num_nodes = " << state.nodes_executed.size()
<< ", wall_time_ns = " << wall_time_ns.count() << ", memory usage: "
<< "persistent = " << HumanReadableNumBytes(persistent_memory_usage)
<< ", peak = " << HumanReadableNumBytes(state.max_memory_usage)
<< ", total = " << HumanReadableNumBytes(max_memory_usage)
<< ", at the end: " << HumanReadableNumBytes(state.memory_usage);
VLOG(1) << state.device_costs.num_ops_total
<< " ops processed in total, with "
<< state.device_costs.num_ops_with_unknown_shapes
<< " having unknown shapes";
const auto& device_annotation_stats = state.shape_annotation_stats;
if (device_annotation_stats.num_ops_annotated > 0) {
VLOG(1) << device_annotation_stats.num_ops_annotated
<< " ops with shape annotation, with "
<< device_annotation_stats.num_ops_executed_more_than_once
<< " executed more than once, "
<< device_annotation_stats.num_ops_with_dynamic_shapes
<< " with dynamic shapes, "
<< device_annotation_stats.num_ops_with_incompatible_shapes
<< " with incompatible shapes, "
<< device_annotation_stats.num_ops_executed
<< " ops executed in total.";
}
VLOG(1) << "Per-op execution time / compute time / memory time "
<< " / intermediate memory time"
<< " (and memory usage at peak memory usage):";
for (const auto& node_port : state.mem_usage_snapshot_at_peak) {
const auto* node = node_port.first;
const auto port = node_port.second;
auto it = node_map_.find(node);
if (it != node_map_.end()) {
op_to_memory[node->op()] += GetOrCalculateOutputSize(it->second, port);
}
}
Costs::NanoSeconds total_compute_time_ns;
bool is_total_cost_accurate = true;
for (const auto& op_cost_pair : state.op_to_cost) {
const auto& op = op_cost_pair.first;
const auto& cost = op_cost_pair.second.execution_time.count();
const auto& compute_cost = op_cost_pair.second.compute_time.count();
const auto& memory_cost = op_cost_pair.second.memory_time.count();
const auto& intermediate_memory_cost =
op_cost_pair.second.intermediate_memory_time.count();
total_compute_time_ns += op_cost_pair.second.execution_time;
const bool is_op_cost_accurate = !op_cost_pair.second.inaccurate;
if (!is_op_cost_accurate) {
is_total_cost_accurate = false;
}
int64_t op_mem_usage = 0;
auto it = op_to_memory.find(op);
if (it != op_to_memory.end()) {
op_mem_usage = it->second;
}
const float mem_usage_percent =
max_memory_usage > 0 ? Round2(100.0 * op_mem_usage / max_memory_usage)
: 0.0;
if (cost || mem_usage_percent > 1.0) {
VLOG(1) << absl::StrFormat(
" + %30s : %c %10d / %10d / %10d / %10d", op.c_str(),
(is_op_cost_accurate ? ' ' : '~'), cost, compute_cost,
memory_cost, intermediate_memory_cost)
<< " (" << HumanReadableNumBytes(op_mem_usage) << " ["
<< mem_usage_percent << "%] "
<< (persistent_ops.count(op) > 0 ? ": persistent op)" : ")");
}
}
int utilization = 0;
if (wall_time_ns.count() > 0) {
utilization = total_compute_time_ns.count() * 100 / wall_time_ns.count();
}
VLOG(1) << "Device = " << name << ", total_compute_time_ns = "
<< (is_total_cost_accurate ? "" : "~")
<< total_compute_time_ns.count()
<< ", utilization = " << utilization << "%";
if (critical_path_costs.execution_time <= state.GetCurrTime()) {
critical_path_costs = state.device_costs;
critical_path_costs.persistent_memory = persistent_memory_usage;
critical_path_costs.temporary_memory = state.max_memory_usage;
critical_path_costs.max_memory = max_memory_usage;
}
}
if (VLOG_IS_ON(2)) {
VLOG(2) << "Node description, counts, cost:";
for (const auto& item : op_counts_) {
int cost;
bool is_cost_accurate;
std::tie(cost, is_cost_accurate) = op_costs_.at(item.first);
VLOG(2) << "Node: " << item.first << ", Count: " << item.second
<< ", Individual Cost: " << (is_cost_accurate ? "" : "~") << cost
<< " us";
}
}
VLOG(1) << "Critical path execution time: "
<< critical_path_costs.execution_time.count();
return critical_path_costs;
}
Costs SchedulerState::Summary(RunMetadata* metadata) {
if (metadata) GenerateRunMetadata(metadata);
return Summary();
}
void SchedulerState::GenerateRunMetadata(RunMetadata* metadata) {
StepStats* stepstats = metadata->mutable_step_stats();
for (const auto& device : device_) {
GraphDef* device_partition_graph = metadata->add_partition_graphs();
DeviceStepStats* device_stepstats = stepstats->add_dev_stats();
device_stepstats->set_device(device.first);
for (const auto& node_def : device.second.nodes_executed) {
if (node_map_.find(node_def) == node_map_.end()) {
continue;
}
const NodeState& nodestate = node_map_.at(node_def);
NodeExecStats* node_stats = device_stepstats->add_node_stats();
uint64 total_output_size = 0;
uint64_t persistent_output_size = 0;
for (int slot = 0, slot_end = nodestate.output_properties.size();
slot < slot_end; slot++) {
const auto& properties = nodestate.output_properties[slot];
NodeOutput* no = node_stats->add_output();
no->set_slot(slot);
TensorDescription* tensor_descr = no->mutable_tensor_description();
tensor_descr->set_dtype(properties.dtype());
*tensor_descr->mutable_shape() = properties.shape();
const int64_t tensor_size_requested =
CalculateOutputSize(nodestate.output_properties, slot);
const int64_t tensor_size_allocated =
GetOrCalculateOutputSize(nodestate, slot);
total_output_size += tensor_size_allocated;
if (nodestate.node_costs.persistent_output_ports.contains(slot)) {
persistent_output_size += tensor_size_allocated;
}
tensor_descr->mutable_allocation_description()->set_requested_bytes(
tensor_size_requested);
tensor_descr->mutable_allocation_description()->set_allocated_bytes(
tensor_size_allocated);
}
if (node_def->op() != "HloGenericOp") {
node_stats->set_timeline_label(node_def->op());
} else {
string timeline_label;
if (node_def->attr().count("hlo_opcode") > 0) {
absl::StrAppend(&timeline_label,
node_def->attr().at("hlo_opcode").s());
}
if (node_def->attr().count("_hlo_metadata_op_type") > 0) {
absl::StrAppend(&timeline_label, "/",
node_def->attr().at("_hlo_metadata_op_type").s());
}
node_stats->set_timeline_label(timeline_label);
}
node_stats->set_node_name(node_def->name());
node_stats->set_op_start_rel_micros(0);
node_stats->set_all_start_micros(
nodestate.time_scheduled.asMicroSeconds().count());
node_stats->set_op_end_rel_micros(
nodestate.time_finished.asMicroSeconds().count() -
nodestate.time_scheduled.asMicroSeconds().count());
node_stats->set_all_end_rel_micros(
nodestate.time_finished.asMicroSeconds().count() -
nodestate.time_scheduled.asMicroSeconds().count());
node_stats->set_op_start_rel_nanos(0);
node_stats->set_all_start_nanos(nodestate.time_scheduled.count());
node_stats->set_op_end_rel_nanos(nodestate.time_finished.count() -
nodestate.time_scheduled.count());
node_stats->set_all_end_rel_nanos(nodestate.time_finished.count() -
nodestate.time_scheduled.count());
auto* mem_stats = node_stats->mutable_memory_stats();
mem_stats->set_temp_memory_size(0);
int64_t persistent_memory_size = 0;
if (IsPersistent(*node_def)) {
persistent_memory_size = total_output_size;
} else {
persistent_memory_size = persistent_output_size;
}
mem_stats->set_persistent_memory_size(persistent_memory_size);
*device_partition_graph->add_node() = *node_def;
}
}
}
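// Returns, per device, the peak memory usage observed during the simulation.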
const std::unordered_map<string, int64_t> SchedulerState::GetPeakMemoryUsage()
const {
std::unordered_map<string, int64_t> result;
for (const auto& device : device_) {
const string& name = device.first;
const DeviceState& state = device.second;
result[name] = state.max_memory_usage;
}
return result;
}
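// Returns, per device, the total size of outputs held by persistent nodes
// (e.g. variables and constants) for the duration of the simulation.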
const std::unordered_map<string, int64_t>
SchedulerState::GetPersistentMemoryUsage() const {
std::unordered_map<string, int64_t> result;
for (const auto& device : device_) {
const string& name = device.first;
const DeviceState& state = device.second;
int64_t persistent_memory_usage = 0;
for (const auto& node_port : state.persistent_nodes) {
const auto* node = node_port.first;
const auto port = node_port.second;
const auto& node_state = node_map_.at(node);
persistent_memory_usage += GetOrCalculateOutputSize(node_state, port);
}
result[name] = persistent_memory_usage;
}
return result;
}
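// Stamps the node with the current simulated time of the device it was
// placed on.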
void SchedulerState::SetNodeStateTimeScheduled(const NodeDef* node) {
auto& node_state = node_map_.at(node);
auto& device = device_[node_state.device_name];
node_state.time_scheduled = device.GetCurrTime();
}
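// Prefers the output size recorded in the node's costs for this port;
// otherwise falls back to computing it from the inferred output shape and
// dtype.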
int64_t SchedulerState::GetOrCalculateOutputSize(const NodeState& node_state,
int port_num) const {
auto& node_costs = node_state.node_costs;
auto it = node_costs.output_tensor_size_bytes.find(port_num);
if (it != node_costs.output_tensor_size_bytes.end()) {
return it->second;
}
return CalculateOutputSize(node_state.output_properties, port_num);
}
VirtualScheduler::~VirtualScheduler() {}
VirtualScheduler::VirtualScheduler(const bool use_static_shapes,
const bool use_aggressive_shape_inference,
Cluster* cluster,
ReadyNodeManager* ready_nodes,
std::unique_ptr<VirtualPlacer> placer)
: scheduler_state_(std::make_unique<SchedulerState>(
use_static_shapes, use_aggressive_shape_inference, cluster,
std::move(placer))),
ready_nodes_(ready_nodes) {}
VirtualScheduler::VirtualScheduler(
ReadyNodeManager* ready_nodes,
std::unique_ptr<SchedulerState> scheduler_state)
: scheduler_state_(std::move(scheduler_state)), ready_nodes_(ready_nodes) {}
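// Wires the ready-node manager to the scheduler state's node map and seeds it
// with the graph's initially ready nodes (those with no pending inputs).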
Status VirtualScheduler::Init(const GrapplerItem* item) {
TF_RETURN_IF_ERROR(ready_nodes_->Init(GetNodeStates()));
std::vector<const NodeDef*> initial_nodes;
auto status = scheduler_state_->Init(item, &initial_nodes);
if (status.ok()) {
for (auto node : initial_nodes) {
ready_nodes_->AddNode(node);
}
}
return status;
}
OpContext VirtualScheduler::GetCurrNode() {
const NodeDef* node = ready_nodes_->GetCurrNode();
return scheduler_state_->CreateOpContext(node);
}
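// Records the cost of the current node, enqueues any nodes that became ready
// as a result, and advances past it. Returns true while unscheduled nodes
// remain.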
bool VirtualScheduler::MarkCurrNodeExecuted(const Costs& node_costs) {
const NodeDef* node = ready_nodes_->GetCurrNode();
auto new_nodes = scheduler_state_->MarkNodeExecuted(
node, node_costs,
scheduler_state_->CreateOpContext(ready_nodes_->GetCurrNode()));
for (auto node : new_nodes) {
ready_nodes_->AddNode(node);
}
ready_nodes_->RemoveCurrNode();
return !ready_nodes_->Empty();
}
}
} | #include "tensorflow/core/grappler/costs/virtual_scheduler.h"
#include <algorithm>
#include <cstdint>
#include <string>
#include "absl/strings/match.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/framework/allocation_description.pb.h"
#include "tensorflow/core/framework/tensor_description.pb.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/grappler/clusters/virtual_cluster.h"
#include "tensorflow/core/grappler/costs/graph_properties.h"
#include "tensorflow/core/grappler/costs/utils.h"
#include "tensorflow/core/grappler/costs/virtual_placer.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace grappler {
namespace {
constexpr char kCPU0[] = "/job:localhost/replica:0/task:0/cpu:0";
constexpr char kCPU1[] = "/job:localhost/replica:0/task:0/cpu:1";
constexpr char kChannelFrom0To1[] = "Channel from CPU0 to CPU1";
constexpr char kChannelFrom1To0[] = "Channel from CPU1 to CPU0";
constexpr char kConv2D[] = "Conv2D";
constexpr char kSend[] = "_Send";
constexpr char kRecv[] = "_Recv";
class ReadyNodeManagerTest : public ::testing::Test {
protected:
ReadyNodeManagerTest() {
NodeSetUp("Node1", kConv2D, kCPU0, 6000, &node1_);
NodeSetUp("Node2", kConv2D, kCPU0, 5000, &node2_);
NodeSetUp("Node3", kConv2D, kCPU0, 4000, &node3_);
NodeSetUp("Node4", kConv2D, kCPU0, 3000, &node4_);
NodeSetUp("Node5", kConv2D, kCPU0, 2000, &node5_);
NodeSetUp("Node6", kConv2D, kCPU0, 1000, &node6_);
}
void NodeSetUp(const string& name, const string& op_name,
const string& device_name, const uint64 time_ready,
NodeDef* node) {
node->set_name(name);
node->set_op(op_name);
node->set_device(device_name);
node_states_[node] = NodeState();
node_states_[node].time_ready = time_ready;
node_states_[node].device_name = device_name;
}
NodeDef node1_, node2_, node3_, node4_, node5_, node6_;
std::unordered_map<const NodeDef*, NodeState> node_states_;
};
TEST_F(ReadyNodeManagerTest, GetSingleNodeFIFOManager) {
FIFOManager manager = FIFOManager();
manager.AddNode(&node1_);
EXPECT_EQ(manager.GetCurrNode()->name(), "Node1");
}
TEST_F(ReadyNodeManagerTest, RemoveSingleNodeFIFOManager) {
FIFOManager manager = FIFOManager();
manager.AddNode(&node1_);
manager.RemoveCurrNode();
EXPECT_TRUE(manager.Empty());
}
TEST_F(ReadyNodeManagerTest, GetAndRemoveMultipleFIFOManager) {
FIFOManager manager = FIFOManager();
manager.AddNode(&node1_);
manager.AddNode(&node2_);
manager.AddNode(&node3_);
manager.AddNode(&node4_);
EXPECT_EQ(manager.GetCurrNode()->name(), "Node1");
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Node2");
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Node3");
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Node4");
manager.RemoveCurrNode();
EXPECT_TRUE(manager.Empty());
}
TEST_F(ReadyNodeManagerTest, AddAndRemoveMultipleFIFOManager) {
FIFOManager manager = FIFOManager();
manager.AddNode(&node1_);
manager.AddNode(&node2_);
manager.AddNode(&node3_);
manager.AddNode(&node4_);
EXPECT_EQ(manager.GetCurrNode()->name(), "Node1");
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Node2");
manager.AddNode(&node5_);
EXPECT_EQ(manager.GetCurrNode()->name(), "Node2");
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Node3");
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Node4");
manager.AddNode(&node6_);
EXPECT_EQ(manager.GetCurrNode()->name(), "Node4");
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Node5");
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Node6");
manager.RemoveCurrNode();
EXPECT_TRUE(manager.Empty());
}
TEST_F(ReadyNodeManagerTest, GetSingleNodeLIFOManager) {
LIFOManager manager = LIFOManager();
manager.AddNode(&node1_);
EXPECT_EQ(manager.GetCurrNode()->name(), "Node1");
}
TEST_F(ReadyNodeManagerTest, RemoveSingleNodeLIFOManager) {
LIFOManager manager = LIFOManager();
manager.AddNode(&node1_);
manager.RemoveCurrNode();
EXPECT_TRUE(manager.Empty());
}
TEST_F(ReadyNodeManagerTest, GetAndRemoveMultipleLIFOManager) {
LIFOManager manager = LIFOManager();
manager.AddNode(&node1_);
manager.AddNode(&node2_);
manager.AddNode(&node3_);
manager.AddNode(&node4_);
EXPECT_EQ(manager.GetCurrNode()->name(), "Node4");
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Node3");
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Node2");
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Node1");
manager.RemoveCurrNode();
EXPECT_TRUE(manager.Empty());
}
TEST_F(ReadyNodeManagerTest, AddAndRemoveMultipleLIFOManager) {
LIFOManager manager = LIFOManager();
manager.AddNode(&node1_);
manager.AddNode(&node2_);
manager.AddNode(&node3_);
manager.AddNode(&node4_);
EXPECT_EQ(manager.GetCurrNode()->name(), "Node4");
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Node3");
manager.AddNode(&node5_);
EXPECT_EQ(manager.GetCurrNode()->name(), "Node3");
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Node5");
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Node2");
manager.AddNode(&node6_);
EXPECT_EQ(manager.GetCurrNode()->name(), "Node2");
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Node6");
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Node1");
manager.RemoveCurrNode();
EXPECT_TRUE(manager.Empty());
}
TEST_F(ReadyNodeManagerTest, MergeOrderInLIFOManager) {
LIFOManager manager = LIFOManager();
node3_.set_op("Merge");
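  // Node3 is a Merge op; LIFOManager defers it until every other ready node
  // has been drained, which is why it is scheduled last below.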
manager.AddNode(&node1_);
manager.AddNode(&node2_);
manager.AddNode(&node3_);
manager.AddNode(&node4_);
EXPECT_EQ(manager.GetCurrNode()->name(), "Node4");
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Node2");
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Node1");
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Node3");
manager.RemoveCurrNode();
}
TEST_F(ReadyNodeManagerTest, GetSingleNodeFirstReadyManager) {
FirstReadyManager manager;
TF_EXPECT_OK(manager.Init(&node_states_));
manager.AddNode(&node1_);
EXPECT_EQ(manager.GetCurrNode()->name(), "Node1");
}
TEST_F(ReadyNodeManagerTest, RemoveSingleNodeFirstReadyManager) {
FirstReadyManager manager;
TF_EXPECT_OK(manager.Init(&node_states_));
manager.AddNode(&node1_);
manager.RemoveCurrNode();
EXPECT_TRUE(manager.Empty());
}
TEST_F(ReadyNodeManagerTest, GetAndRemoveMultipleFirstReadyManager) {
FirstReadyManager manager;
TF_EXPECT_OK(manager.Init(&node_states_));
manager.AddNode(&node2_);
manager.AddNode(&node1_);
manager.AddNode(&node4_);
manager.AddNode(&node5_);
manager.AddNode(&node3_);
manager.AddNode(&node6_);
EXPECT_EQ(manager.GetCurrNode()->name(), "Node6");
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Node5");
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Node4");
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Node3");
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Node2");
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Node1");
manager.RemoveCurrNode();
EXPECT_TRUE(manager.Empty());
}
TEST_F(ReadyNodeManagerTest, GetCurrNodeFirstReadyManager) {
FirstReadyManager manager;
TF_EXPECT_OK(manager.Init(&node_states_));
manager.AddNode(&node2_);
manager.AddNode(&node1_);
manager.AddNode(&node4_);
manager.AddNode(&node5_);
manager.AddNode(&node3_);
manager.AddNode(&node6_);
EXPECT_EQ("Node6", manager.GetCurrNode()->name());
NodeDef node7;
NodeDef node8;
NodeDef node9;
NodeSetUp("Node7", kConv2D, kCPU0, 5, &node7);
NodeSetUp("Node8", kConv2D, kCPU0, 4, &node8);
NodeSetUp("Node9", kConv2D, kCPU0, 3, &node9);
manager.AddNode(&node7);
EXPECT_EQ(manager.GetCurrNode()->name(), "Node6");
manager.AddNode(&node8);
EXPECT_EQ(manager.GetCurrNode()->name(), "Node6");
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Node8");
manager.AddNode(&node9);
EXPECT_EQ(manager.GetCurrNode()->name(), "Node8");
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Node9");
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Node7");
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Node5");
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Node4");
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Node3");
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Node2");
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Node1");
manager.RemoveCurrNode();
EXPECT_TRUE(manager.Empty());
}
TEST_F(ReadyNodeManagerTest, DeterminismInFirstReadyManager) {
FirstReadyManager manager1;
TF_EXPECT_OK(manager1.Init(&node_states_));
FirstReadyManager manager2;
TF_EXPECT_OK(manager2.Init(&node_states_));
NodeDef node7;
NodeDef node8;
NodeDef node9;
NodeDef node10;
NodeDef node11;
NodeDef node12;
NodeSetUp("Node7", kConv2D, kCPU0, 1000, &node7);
NodeSetUp("Node8", kConv2D, kCPU0, 1000, &node8);
NodeSetUp("Node9", kConv2D, kCPU0, 1000, &node9);
NodeSetUp("Node10", kConv2D, kCPU0, 1000, &node10);
NodeSetUp("Node11", kConv2D, kCPU0, 1000, &node11);
NodeSetUp("Node12", kConv2D, kCPU0, 1000, &node12);
manager1.AddNode(&node7);
manager1.AddNode(&node8);
manager1.AddNode(&node9);
manager1.AddNode(&node10);
manager1.AddNode(&node11);
manager1.AddNode(&node12);
manager2.AddNode(&node8);
manager2.AddNode(&node11);
manager2.AddNode(&node9);
manager2.AddNode(&node10);
manager2.AddNode(&node7);
manager2.AddNode(&node12);
EXPECT_EQ(manager1.GetCurrNode()->name(), manager2.GetCurrNode()->name());
manager1.RemoveCurrNode();
manager2.RemoveCurrNode();
EXPECT_EQ(manager1.GetCurrNode()->name(), manager2.GetCurrNode()->name());
manager1.RemoveCurrNode();
manager2.RemoveCurrNode();
EXPECT_EQ(manager1.GetCurrNode()->name(), manager2.GetCurrNode()->name());
manager1.RemoveCurrNode();
manager2.RemoveCurrNode();
EXPECT_EQ(manager1.GetCurrNode()->name(), manager2.GetCurrNode()->name());
manager1.RemoveCurrNode();
manager2.RemoveCurrNode();
EXPECT_EQ(manager1.GetCurrNode()->name(), manager2.GetCurrNode()->name());
manager1.RemoveCurrNode();
manager2.RemoveCurrNode();
EXPECT_EQ(manager1.GetCurrNode()->name(), manager2.GetCurrNode()->name());
manager1.RemoveCurrNode();
manager2.RemoveCurrNode();
EXPECT_TRUE(manager1.Empty());
EXPECT_TRUE(manager2.Empty());
}
TEST_F(ReadyNodeManagerTest, GetAndRemoveMultiplePriorityReadyManager) {
PriorityReadyManager manager;
TF_EXPECT_OK(manager.Init(&node_states_));
std::unordered_map<string, int> node_priority = {
{"Node1", 1}, {"Node2", 2}, {"Node3", 2}, {"Node4", 4}, {"Node5", 5}};
TF_EXPECT_OK(manager.SetPriority(node_priority));
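  // Lower values mean higher priority, and Node6 (no entry) defaults to
  // priority 0, so it is scheduled first. Node3 precedes Node2 because equal
  // priorities fall back to the earlier time_ready.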
manager.AddNode(&node3_);
manager.AddNode(&node1_);
manager.AddNode(&node4_);
manager.AddNode(&node5_);
manager.AddNode(&node2_);
manager.AddNode(&node6_);
EXPECT_EQ(manager.GetCurrNode()->name(), "Node6");
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Node1");
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Node3");
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Node2");
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Node4");
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Node5");
manager.RemoveCurrNode();
EXPECT_TRUE(manager.Empty());
}
TEST_F(ReadyNodeManagerTest, RemoveSingleNodeCompositeNodeManager) {
CompositeNodeManager manager;
TF_EXPECT_OK(manager.Init(&node_states_));
manager.AddNode(&node1_);
manager.RemoveCurrNode();
EXPECT_TRUE(manager.Empty());
}
TEST_F(ReadyNodeManagerTest, GetAndRemoveMultipleCompositeNodeManager) {
CompositeNodeManager manager;
TF_EXPECT_OK(manager.Init(&node_states_));
manager.AddNode(&node1_);
manager.AddNode(&node2_);
manager.AddNode(&node3_);
manager.AddNode(&node4_);
EXPECT_EQ(manager.GetCurrNode()->name(), "Node4");
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Node3");
manager.AddNode(&node5_);
EXPECT_EQ(manager.GetCurrNode()->name(), "Node3");
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Node5");
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Node2");
manager.AddNode(&node6_);
EXPECT_EQ(manager.GetCurrNode()->name(), "Node2");
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Node6");
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Node1");
manager.RemoveCurrNode();
EXPECT_TRUE(manager.Empty());
}
TEST_F(ReadyNodeManagerTest, MultiDeviceSendRecvCompositeNodeManager) {
CompositeNodeManager manager;
TF_EXPECT_OK(manager.Init(&node_states_));
NodeDef node7;
NodeDef node8;
NodeDef node9;
NodeSetUp("Node7", kConv2D, kCPU1, 1001, &node7);
NodeSetUp("Node8", kConv2D, kCPU1, 2001, &node8);
NodeSetUp("Node9", kConv2D, kCPU1, 3001, &node9);
NodeDef send1;
NodeDef send2;
NodeDef recv1;
NodeDef recv2;
NodeSetUp("Send1", kSend, kChannelFrom0To1, 2002, &send1);
NodeSetUp("Send2", kSend, kChannelFrom1To0, 2005, &send2);
NodeSetUp("Recv1", kRecv, kCPU0, 2003, &recv1);
NodeSetUp("Recv2", kRecv, kCPU1, 2004, &recv2);
manager.AddNode(&node1_);
manager.AddNode(&node2_);
manager.AddNode(&node3_);
manager.AddNode(&node4_);
manager.AddNode(&node5_);
manager.AddNode(&node6_);
manager.AddNode(&node7);
manager.AddNode(&node8);
manager.AddNode(&node9);
manager.AddNode(&send1);
manager.AddNode(&send2);
manager.AddNode(&recv1);
manager.AddNode(&recv2);
EXPECT_EQ(manager.GetCurrNode()->name(), "Node6");
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Node5");
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Send1");
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Recv1");
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Recv2");
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Send2");
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Node4");
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Node9");
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Node8");
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Node7");
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Node3");
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Node2");
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Node1");
manager.RemoveCurrNode();
EXPECT_TRUE(manager.Empty());
}
TEST_F(ReadyNodeManagerTest, DeterminismInCompositeNodeManager) {
CompositeNodeManager manager;
TF_EXPECT_OK(manager.Init(&node_states_));
CompositeNodeManager manager2;
TF_EXPECT_OK(manager2.Init(&node_states_));
NodeDef node7;
NodeDef node8;
NodeDef node9;
NodeDef node10;
NodeDef node11;
NodeDef node12;
NodeSetUp("Node7", kConv2D, kCPU0, 1000, &node7);
NodeSetUp("Node8", kSend, kCPU0, 1000, &node8);
NodeSetUp("Node9", kRecv, kCPU0, 1000, &node9);
NodeSetUp("Node10", kConv2D, kCPU0, 999, &node10);
NodeSetUp("Node11", kRecv, kCPU0, 999, &node11);
NodeSetUp("Node12", kConv2D, kCPU1, 1000, &node12);
manager.AddNode(&node7);
manager.AddNode(&node8);
manager.AddNode(&node9);
EXPECT_EQ(manager.GetCurrNode()->name(), "Node8");
EXPECT_EQ(manager.GetCurrNode()->op(), kSend);
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Node9");
EXPECT_EQ(manager.GetCurrNode()->op(), kRecv);
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Node7");
EXPECT_EQ(manager.GetCurrNode()->op(), kConv2D);
manager.RemoveCurrNode();
EXPECT_TRUE(manager.Empty());
manager.AddNode(&node9);
manager.AddNode(&node8);
manager.AddNode(&node7);
EXPECT_EQ(manager.GetCurrNode()->name(), "Node8");
EXPECT_EQ(manager.GetCurrNode()->op(), kSend);
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Node9");
EXPECT_EQ(manager.GetCurrNode()->op(), kRecv);
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Node7");
EXPECT_EQ(manager.GetCurrNode()->op(), kConv2D);
manager.RemoveCurrNode();
EXPECT_TRUE(manager.Empty());
manager.AddNode(&node8);
manager.AddNode(&node10);
EXPECT_EQ(manager.GetCurrNode()->name(), "Node10");
EXPECT_EQ(manager.GetCurrNode()->op(), kConv2D);
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Node8");
EXPECT_EQ(manager.GetCurrNode()->op(), kSend);
manager.RemoveCurrNode();
EXPECT_TRUE(manager.Empty());
manager.AddNode(&node11);
manager.AddNode(&node8);
EXPECT_EQ(manager.GetCurrNode()->name(), "Node11");
EXPECT_EQ(manager.GetCurrNode()->op(), kRecv);
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Node8");
EXPECT_EQ(manager.GetCurrNode()->op(), kSend);
manager.RemoveCurrNode();
EXPECT_TRUE(manager.Empty());
manager.AddNode(&node7);
manager.AddNode(&node12);
manager2.AddNode(&node12);
manager2.AddNode(&node7);
EXPECT_EQ(manager.GetCurrNode()->name(), manager2.GetCurrNode()->name());
manager.RemoveCurrNode();
manager2.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), manager2.GetCurrNode()->name());
manager.RemoveCurrNode();
manager2.RemoveCurrNode();
EXPECT_TRUE(manager.Empty());
}
class TestVirtualScheduler : public VirtualScheduler {
public:
TestVirtualScheduler(const bool use_static_shapes,
const bool use_aggressive_shape_inference,
ReadyNodeManager* ready_node_manager, Cluster* cluster)
: VirtualScheduler(
use_static_shapes, use_aggressive_shape_inference, cluster,
ready_node_manager,
std::make_unique<VirtualPlacer>(cluster->GetDevices())) {
enable_mem_usage_tracking();
}
FRIEND_TEST(VirtualSchedulerTest, MemoryUsage);
FRIEND_TEST(VirtualSchedulerTest, ControlDependency);
FRIEND_TEST(VirtualSchedulerTest, ComplexDependency);
FRIEND_TEST(VirtualSchedulerTest, Variable);
FRIEND_TEST(VirtualSchedulerTest, InterDeviceTransfer);
};
class VirtualSchedulerTest : public ::testing::Test {
protected:
VirtualSchedulerTest() {
std::unordered_map<string, DeviceProperties> devices;
DeviceProperties cpu_device = GetDummyCPUDevice();
devices[kCPU0] = cpu_device;
devices[kCPU1] = cpu_device;
cluster_ = std::make_unique<VirtualCluster>(devices);
scheduler_ = std::make_unique<TestVirtualScheduler>(
        /*use_static_shapes=*/true,
        /*use_aggressive_shape_inference=*/true, &first_ready_manager_,
cluster_.get());
}
DeviceProperties GetDummyCPUDevice() {
DeviceProperties cpu_device;
cpu_device.set_type("CPU");
cpu_device.set_frequency(4000);
cpu_device.set_num_cores(2);
cpu_device.set_bandwidth(2000000);
return cpu_device;
}
void CreateGrapplerItemWithConv2Ds() {
Scope s = Scope::NewRootScope().WithDevice(kCPU0);
auto x = ops::RandomUniform(
s.WithOpName("x"), {batch_size_, width_, height_, depth_in_}, DT_FLOAT);
auto y = ops::RandomUniform(
s.WithOpName("y"), {batch_size_, width_, height_, depth_in_}, DT_FLOAT);
auto z = ops::RandomUniform(
s.WithOpName("z"), {batch_size_, width_, height_, depth_in_}, DT_FLOAT);
auto f = ops::RandomUniform(
s.WithOpName("f"), {kernel_, kernel_, depth_in_, depth_out_}, DT_FLOAT);
std::vector<int> strides = {1, 1, 1, 1};
auto c0 = ops::Conv2D(s.WithOpName("c0"), x, f, strides, "SAME");
auto c1 = ops::Conv2D(s.WithOpName("c1"), y, f, strides, "SAME");
auto c2 = ops::Conv2D(s.WithOpName("c2"), z, f, strides, "SAME");
grappler_item_ = std::make_unique<GrapplerItem>();
TF_CHECK_OK(s.ToGraphDef(&grappler_item_->graph));
grappler_item_->id = "test_conv2d_graph";
grappler_item_->fetch = {"c0", "c1"};
dependency_["c0"] = {"x", "f"};
dependency_["c1"] = {"y", "f"};
}
void CreateGrapplerItemWithConv2DAndVariable() {
Scope s = Scope::NewRootScope().WithDevice(kCPU0);
auto x = ops::RandomUniform(
s.WithOpName("x"), {batch_size_, width_, height_, depth_in_}, DT_FLOAT);
auto f = ops::Variable(s.WithOpName("f"),
{kernel_, kernel_, depth_in_, depth_out_}, DT_FLOAT);
std::vector<int> strides = {1, 1, 1, 1};
auto y = ops::Conv2D(s.WithOpName("y"), x, f, strides, "SAME");
grappler_item_ = std::make_unique<GrapplerItem>();
TF_CHECK_OK(s.ToGraphDef(&grappler_item_->graph));
grappler_item_->id = "test_conv2d_var_graph";
grappler_item_->fetch = {"y"};
dependency_["y"] = {"x", "f"};
}
void CreateGrapplerItemWithMatmulChain() {
Scope s = Scope::NewRootScope().WithDevice(kCPU0);
auto a = ops::RandomUniform(s.WithOpName("a"), {3200, 3200}, DT_FLOAT);
auto b = ops::RandomUniform(s.WithOpName("b").WithControlDependencies(a),
{3200, 3200}, DT_FLOAT);
auto c = ops::RandomUniform(s.WithOpName("c").WithControlDependencies(b),
{3200, 3200}, DT_FLOAT);
auto d = ops::RandomUniform(s.WithOpName("d").WithControlDependencies(c),
{3200, 3200}, DT_FLOAT);
auto e = ops::RandomUniform(s.WithOpName("e").WithControlDependencies(d),
{3200, 3200}, DT_FLOAT);
auto ab = ops::MatMul(s.WithOpName("ab").WithControlDependencies(e), a, b);
auto abc = ops::MatMul(s.WithOpName("abc"), ab, c);
auto abcd = ops::MatMul(s.WithOpName("abcd"), abc, d);
auto abcde = ops::MatMul(s.WithOpName("abcde"), abcd, e);
grappler_item_ = std::make_unique<GrapplerItem>();
TF_CHECK_OK(s.ToGraphDef(&grappler_item_->graph));
grappler_item_->id = "test_matmul_sequence_graph";
grappler_item_->fetch = {"abcde"};
dependency_["ab"] = {"a", "b"};
dependency_["abc"] = {"ab", "c"};
dependency_["abcd"] = {"abc", "d"};
dependency_["abcde"] = {"abcd", "e"};
}
void CreateGrapplerItemWithAddN() {
Scope s = Scope::NewRootScope().WithDevice(kCPU0);
auto x = ops::RandomUniform(s.WithOpName("x"), {10, 10, 10, 10}, DT_FLOAT);
auto y = ops::RandomUniform(s.WithOpName("y"), {10, 10, 10, 10}, DT_FLOAT);
auto z = ops::RandomUniform(s.WithOpName("z"), {10, 10, 10, 10}, DT_FLOAT);
auto w = ops::RandomUniform(s.WithOpName("w"), {10, 10, 10, 10}, DT_FLOAT);
OutputList input_tensors = {x, y, z, w};
auto add = ops::AddN(s.WithOpName("add"), input_tensors);
auto out = ops::Identity(s.WithOpName("out"), add);
grappler_item_ = std::make_unique<GrapplerItem>();
TF_CHECK_OK(s.ToGraphDef(&grappler_item_->graph));
grappler_item_->id = "test_addn_graph";
grappler_item_->fetch = {"out"};
dependency_["out"] = {"x", "y", "z", "w", "add"};
}
void CreateGrapplerItemWithUnnecessaryPlaceholderNodes() {
Scope s = Scope::NewRootScope().WithDevice(kCPU0);
auto unnecessary = ops::Placeholder(s.WithOpName("unnecessary"), DT_FLOAT);
auto x = ops::Placeholder(s.WithOpName("x"), DT_FLOAT);
grappler_item_ = std::make_unique<GrapplerItem>();
TF_CHECK_OK(s.ToGraphDef(&grappler_item_->graph));
grappler_item_->id = "test_extra_placeholders";
grappler_item_->fetch = {"x"};
grappler_item_->feed = {{"x", Tensor()}, {"unnecessary", Tensor()}};
}
void CreateGrapplerItemWithControlDependency() {
Scope s = Scope::NewRootScope().WithDevice(kCPU0);
std::vector<string> input_noop_names = {"x", "y", "z", "w", "u", "v", "t"};
std::vector<Operation> input_tensors;
for (const auto& input : input_noop_names) {
auto x = ops::NoOp(s.WithOpName(input));
input_tensors.push_back(x.operation);
}
auto out =
ops::NoOp(s.WithControlDependencies(input_tensors).WithOpName("out"));
grappler_item_ = std::make_unique<GrapplerItem>();
TF_CHECK_OK(s.ToGraphDef(&grappler_item_->graph));
grappler_item_->id = "test_control_dependency_graph";
grappler_item_->fetch = {"out"};
dependency_["out"] = input_noop_names;
}
void CreateGrapplerItemWithAddFromOneTensor() {
Scope s = Scope::NewRootScope().WithDevice(kCPU0);
auto x = tensorflow::ops::RandomUniform(
s.WithOpName("x"), {batch_size_, width_, height_, depth_in_}, DT_FLOAT);
auto y = tensorflow::ops::Add(s.WithOpName("y"), x, x);
Output fetch = ops::Identity(s.WithOpName("fetch"), y);
grappler_item_ = std::make_unique<GrapplerItem>();
TF_CHECK_OK(s.ToGraphDef(&grappler_item_->graph));
grappler_item_->id = "test_add_from_one_tensor";
grappler_item_->fetch = {"fetch"};
dependency_["fetch"] = {"y"};
dependency_["y"] = {"x"};
}
void CreateGrapplerItemWithSwitchMergeInput() {
Scope s = Scope::NewRootScope().WithDevice(kCPU0);
auto x = ops::RandomUniform(
s.WithOpName("x"), {batch_size_, width_, height_, depth_in_}, DT_FLOAT);
auto pred = ops::Const(s.WithOpName("pred"), false, {});
auto sw = ops::Switch(s.WithOpName("switch"), x, pred);
auto b = ops::RandomUniform(
s.WithOpName("b"), {batch_size_, width_, height_, depth_in_}, DT_FLOAT);
auto a = ops::Add(s.WithOpName("a"), sw.output_true, b);
auto m = ops::Merge(s.WithOpName("m"), {sw.output_false, a.z});
auto z = ops::RandomUniform(
s.WithOpName("z"), {batch_size_, width_, height_, depth_in_}, DT_FLOAT);
auto y = ops::Add(s.WithOpName("y"), m.output, z);
grappler_item_ = std::make_unique<GrapplerItem>();
TF_CHECK_OK(s.ToGraphDef(&grappler_item_->graph));
grappler_item_->id = "test_add_merge_switch";
grappler_item_->fetch = {"y"};
dependency_["y"] = {"m", "z"};
}
void CreateGrapplerItemWithBatchNorm() {
Scope s = Scope::NewRootScope().WithDevice(kCPU0);
auto x = ops::RandomUniform(
s.WithOpName("x"), {batch_size_, width_, height_, depth_in_}, DT_FLOAT);
auto scale =
ops::RandomUniform(s.WithOpName("scale"), {depth_in_}, DT_FLOAT);
auto offset =
ops::RandomUniform(s.WithOpName("offset"), {depth_in_}, DT_FLOAT);
auto mean = ops::RandomUniform(s.WithOpName("mean"), {0}, DT_FLOAT);
auto var = ops::RandomUniform(s.WithOpName("var"), {0}, DT_FLOAT);
auto batch_norm = ops::FusedBatchNorm(
s.WithOpName("bn"), x, scale, offset, mean, var,
ops::FusedBatchNorm::IsTraining(true).Epsilon(0.1f));
auto y = batch_norm.y;
auto batch_mean = batch_norm.batch_mean;
auto batch_var = batch_norm.batch_variance;
auto z1 = ops::Add(s.WithOpName("z1"), x, y);
auto z2 = ops::Add(s.WithOpName("z2"), batch_var, batch_var);
auto z3 = ops::Add(s.WithOpName("z3"), batch_var, batch_var);
std::vector<Operation> input_tensors = {
batch_mean.op(),
z1.z.op(),
z2.z.op(),
z3.z.op(),
};
auto z4 = ops::NoOp(s.WithControlDependencies(batch_var).WithOpName("z4"));
grappler_item_ = std::make_unique<GrapplerItem>();
TF_CHECK_OK(s.ToGraphDef(&grappler_item_->graph));
grappler_item_->id = "test_complex_dependency_graph";
grappler_item_->fetch = {"z1", "z2", "z3", "z4"};
dependency_["bn"] = {"x", "scale", "offset", "mean", "var"};
dependency_["z1"] = {"x", "bn"};
dependency_["z2"] = {"bn"};
dependency_["z3"] = {"bn"};
dependency_["z4"] = {"bn"};
}
void CreateGrapplerItemWithSendRecv() {
const string gdef_ascii = R"EOF(
node {
name: "Const"
op: "Const"
device: "/job:localhost/replica:0/task:0/device:CPU:0"
attr {
key: "dtype"
value {
type: DT_FLOAT
}
}
attr {
key: "_output_shapes"
value {
list { shape {
dim { size: 128 }
dim { size: 32 }
}}}
}
attr {
key: "shape"
value {
list { shape {
dim { size: 128 }
dim { size: 32 }
}}}
}
attr {
key: "value"
value {
tensor {
dtype: DT_FLOAT
tensor_shape {
dim { size: 128 }
dim { size: 32 }
}
float_val: 3.1415
}
}
}
}
node {
name: "Send"
op: "_Send"
input: "Const"
device: "/job:localhost/replica:0/task:0/device:CPU:0"
attr {
key: "T"
value {
type: DT_FLOAT
}
}
attr {
key: "_output_shapes"
value {
list { shape {
dim { size: 128 }
dim { size: 32 }
}}}
}
attr {
key: "shape"
value {
list { shape {
dim { size: 128 }
dim { size: 32 }
}}}
}
attr {
key: "client_terminated"
value {
b: false
}
}
attr {
key: "recv_device"
value {
s: "/job:localhost/replica:0/task:0/device:CPU:0"
}
}
attr {
key: "send_device"
value {
s: "/job:localhost/replica:0/task:0/device:CPU:0"
}
}
attr {
key: "send_device_incarnation"
value {
i: 0
}
}
attr {
key: "tensor_name"
value {
s: "test"
}
}
}
node {
name: "Recv"
op: "_Recv"
device: "/job:localhost/replica:0/task:0/device:CPU:0"
attr {
key: "client_terminated"
value {
b: false
}
}
attr {
key: "_output_shapes"
value {
list { shape {
dim { size: 128 }
dim { size: 32 }
}}}
}
attr {
key: "shape"
value {
list { shape {
dim { size: 128 }
dim { size: 32 }
}}}
}
attr {
key: "recv_device"
value {
s: "/job:localhost/replica:0/task:0/device:CPU:0"
}
}
attr {
key: "send_device"
value {
s: "/job:localhost/replica:0/task:0/device:CPU:0"
}
}
attr {
key: "send_device_incarnation"
value {
i: 0
}
}
attr {
key: "tensor_name"
value {
s: "test"
}
}
attr {
key: "tensor_type"
value {
type: DT_FLOAT
}
}
}
library {
}
versions {
producer: 24
}
)EOF";
grappler_item_ = std::make_unique<GrapplerItem>();
CHECK(protobuf::TextFormat::ParseFromString(gdef_ascii,
&grappler_item_->graph));
grappler_item_->id = "test_graph";
grappler_item_->fetch = {"Recv"};
}
void CreateGrapplerItemWithRecvWithoutSend() {
const string gdef_ascii = R"EOF(
node {
name: "Recv"
op: "_Recv"
device: "/job:localhost/replica:0/task:0/device:CPU:0"
attr {
key: "client_terminated"
value {
b: false
}
}
attr {
key: "recv_device"
value {
s: "/job:localhost/replica:0/task:0/device:CPU:0"
}
}
attr {
key: "send_device"
value {
s: "/job:localhost/replica:0/task:0/device:CPU:0"
}
}
attr {
key: "send_device_incarnation"
value {
i: 0
}
}
attr {
key: "tensor_name"
value {
s: "test"
}
}
attr {
key: "tensor_type"
value {
type: DT_FLOAT
}
}
}
library {
}
versions {
producer: 24
}
)EOF";
grappler_item_ = std::make_unique<GrapplerItem>();
CHECK(protobuf::TextFormat::ParseFromString(gdef_ascii,
&grappler_item_->graph));
grappler_item_->id = "test_graph";
grappler_item_->fetch = {"Recv"};
}
void CreateGrapplerItemWithLoop() {
const string gdef_ascii = R"EOF(
node {
name: "Const"
op: "Const"
attr {
key: "dtype"
value {
type: DT_INT32
}
}
attr {
key: "value"
value {
tensor {
dtype: DT_INT32
tensor_shape {
}
int_val: 0
}
}
}
}
node {
name: "ones"
op: "Const"
attr {
key: "dtype"
value {
type: DT_FLOAT
}
}
attr {
key: "value"
value {
tensor {
dtype: DT_FLOAT
tensor_shape {
dim {
size: 2
}
dim {
size: 2
}
}
float_val: 1.0
}
}
}
}
node {
name: "while/Enter"
op: "Enter"
input: "Const"
attr {
key: "T"
value {
type: DT_INT32
}
}
attr {
key: "frame_name"
value {
s: "while/while/"
}
}
attr {
key: "is_constant"
value {
b: false
}
}
attr {
key: "parallel_iterations"
value {
i: 10
}
}
}
node {
name: "while/Enter_1"
op: "Enter"
input: "ones"
attr {
key: "T"
value {
type: DT_FLOAT
}
}
attr {
key: "frame_name"
value {
s: "while/while/"
}
}
attr {
key: "is_constant"
value {
b: false
}
}
attr {
key: "parallel_iterations"
value {
i: 10
}
}
}
node {
name: "while/Merge"
op: "Merge"
input: "while/Enter"
input: "while/NextIteration"
attr {
key: "N"
value {
i: 2
}
}
attr {
key: "T"
value {
type: DT_INT32
}
}
}
node {
name: "while/Merge_1"
op: "Merge"
input: "while/Enter_1"
input: "while/NextIteration_1"
attr {
key: "N"
value {
i: 2
}
}
attr {
key: "T"
value {
type: DT_FLOAT
}
}
}
node {
name: "while/Less/y"
op: "Const"
input: "^while/Merge"
attr {
key: "dtype"
value {
type: DT_INT32
}
}
attr {
key: "value"
value {
tensor {
dtype: DT_INT32
tensor_shape {
}
int_val: 10
}
}
}
}
node {
name: "while/Less"
op: "Less"
input: "while/Merge"
input: "while/Less/y"
attr {
key: "T"
value {
type: DT_INT32
}
}
}
node {
name: "while/LoopCond"
op: "LoopCond"
input: "while/Less"
}
node {
name: "while/Switch"
op: "Switch"
input: "while/Merge"
input: "while/LoopCond"
attr {
key: "T"
value {
type: DT_INT32
}
}
attr {
key: "_class"
value {
list {
s: "loc:@while/Merge"
}
}
}
}
node {
name: "while/Switch_1"
op: "Switch"
input: "while/Merge_1"
input: "while/LoopCond"
attr {
key: "T"
value {
type: DT_FLOAT
}
}
attr {
key: "_class"
value {
list {
s: "loc:@while/Merge_1"
}
}
}
}
node {
name: "while/Identity"
op: "Identity"
input: "while/Switch:1"
attr {
key: "T"
value {
type: DT_INT32
}
}
}
node {
name: "while/Identity_1"
op: "Identity"
input: "while/Switch_1:1"
attr {
key: "T"
value {
type: DT_FLOAT
}
}
}
node {
name: "while/add/y"
op: "Const"
input: "^while/Identity"
attr {
key: "dtype"
value {
type: DT_INT32
}
}
attr {
key: "value"
value {
tensor {
dtype: DT_INT32
tensor_shape {
}
int_val: 1
}
}
}
}
node {
name: "while/add"
op: "Add"
input: "while/Identity"
input: "while/add/y"
attr {
key: "T"
value {
type: DT_INT32
}
}
}
node {
name: "while/concat/axis"
op: "Const"
input: "^while/Identity"
attr {
key: "dtype"
value {
type: DT_INT32
}
}
attr {
key: "value"
value {
tensor {
dtype: DT_INT32
tensor_shape {
}
int_val: 0
}
}
}
}
node {
name: "while/concat"
op: "ConcatV2"
input: "while/Identity_1"
input: "while/Identity_1"
input: "while/concat/axis"
attr {
key: "N"
value {
i: 2
}
}
attr {
key: "T"
value {
type: DT_FLOAT
}
}
attr {
key: "Tidx"
value {
type: DT_INT32
}
}
}
node {
name: "while/NextIteration"
op: "NextIteration"
input: "while/add"
attr {
key: "T"
value {
type: DT_INT32
}
}
}
node {
name: "while/NextIteration_1"
op: "NextIteration"
input: "while/concat"
attr {
key: "T"
value {
type: DT_FLOAT
}
}
}
node {
name: "while/Exit"
op: "Exit"
input: "while/Switch"
attr {
key: "T"
value {
type: DT_INT32
}
}
}
node {
name: "while/Exit_1"
op: "Exit"
input: "while/Switch_1"
attr {
key: "T"
value {
type: DT_FLOAT
}
}
}
versions {
producer: 21
}
)EOF";
grappler_item_ = std::make_unique<GrapplerItem>();
CHECK(protobuf::TextFormat::ParseFromString(gdef_ascii,
&grappler_item_->graph));
grappler_item_->id = "test_graph";
grappler_item_->fetch = {"while/Exit", "while/Exit_1"};
}
void CreateGrapplerItemWithLoopAnnotated() {
const string gdef_ascii = R"EOF(
node {
name: "Const"
op: "Const"
attr {
key: "dtype"
value {
type: DT_INT32
}
}
attr {
key: "value"
value {
tensor {
dtype: DT_INT32
tensor_shape {
}
int_val: 0
}
}
}
attr {
key: "_execution_count"
value {
i: 1
}
}
}
node {
name: "ones"
op: "Const"
attr {
key: "dtype"
value {
type: DT_FLOAT
}
}
attr {
key: "value"
value {
tensor {
dtype: DT_FLOAT
tensor_shape {
dim {
size: 2
}
dim {
size: 2
}
}
float_val: 1.0
}
}
}
attr {
key: "_execution_count"
value {
i: 1
}
}
}
node {
name: "while/Enter"
op: "Enter"
input: "Const"
attr {
key: "T"
value {
type: DT_INT32
}
}
attr {
key: "frame_name"
value {
s: "while/while/"
}
}
attr {
key: "is_constant"
value {
b: false
}
}
attr {
key: "parallel_iterations"
value {
i: 10
}
}
attr {
key: "_execution_count"
value {
i: 1
}
}
}
node {
name: "while/Enter_1"
op: "Enter"
input: "ones"
attr {
key: "T"
value {
type: DT_FLOAT
}
}
attr {
key: "frame_name"
value {
s: "while/while/"
}
}
attr {
key: "is_constant"
value {
b: false
}
}
attr {
key: "parallel_iterations"
value {
i: 10
}
}
attr {
key: "_execution_count"
value {
i: 1
}
}
}
node {
name: "while/Merge"
op: "Merge"
input: "while/Enter"
input: "while/NextIteration"
attr {
key: "N"
value {
i: 2
}
}
attr {
key: "T"
value {
type: DT_INT32
}
}
attr {
key: "_execution_count"
value {
i: 10
}
}
}
node {
name: "while/Merge_1"
op: "Merge"
input: "while/Enter_1"
input: "while/NextIteration_1"
attr {
key: "N"
value {
i: 2
}
}
attr {
key: "T"
value {
type: DT_FLOAT
}
}
attr {
key: "_execution_count"
value {
i: 10
}
}
}
node {
name: "while/Less/y"
op: "Const"
input: "^while/Merge"
attr {
key: "dtype"
value {
type: DT_INT32
}
}
attr {
key: "value"
value {
tensor {
dtype: DT_INT32
tensor_shape {
}
int_val: 10
}
}
}
attr {
key: "_execution_count"
value {
i: 10
}
}
}
node {
name: "while/Less"
op: "Less"
input: "while/Merge"
input: "while/Less/y"
attr {
key: "T"
value {
type: DT_INT32
}
}
attr {
key: "_execution_count"
value {
i: 10
}
}
}
node {
name: "while/LoopCond"
op: "LoopCond"
input: "while/Less"
attr {
key: "_execution_count"
value {
i: 10
}
}
}
node {
name: "while/Switch"
op: "Switch"
input: "while/Merge"
input: "while/LoopCond"
attr {
key: "T"
value {
type: DT_INT32
}
}
attr {
key: "_class"
value {
list {
s: "loc:@while/Merge"
}
}
}
attr {
key: "_execution_count"
value {
i: 11
}
}
attr {
key: "_output_slot_vector"
value {
list {
i: 1
i: 1
i: 1
i: 1
i: 1
i: 1
i: 1
i: 1
i: 1
i: 1
i: 0
}
}
}
}
node {
name: "while/Switch_1"
op: "Switch"
input: "while/Merge_1"
input: "while/LoopCond"
attr {
key: "T"
value {
type: DT_FLOAT
}
}
attr {
key: "_class"
value {
list {
s: "loc:@while/Merge_1"
}
}
}
attr {
key: "_execution_count"
value {
i: 11
}
}
attr {
key: "_output_slot_vector"
value {
list {
i: 1
i: 1
i: 1
i: 1
i: 1
i: 1
i: 1
i: 1
i: 1
i: 1
i: 0
}
}
}
}
node {
name: "while/Identity"
op: "Identity"
input: "while/Switch:1"
attr {
key: "T"
value {
type: DT_INT32
}
}
attr {
key: "_execution_count"
value {
i: 10
}
}
}
node {
name: "while/Identity_1"
op: "Identity"
input: "while/Switch_1:1"
attr {
key: "T"
value {
type: DT_FLOAT
}
}
attr {
key: "_execution_count"
value {
i: 10
}
}
}
node {
name: "while/add/y"
op: "Const"
input: "^while/Identity"
attr {
key: "dtype"
value {
type: DT_INT32
}
}
attr {
key: "value"
value {
tensor {
dtype: DT_INT32
tensor_shape {
}
int_val: 1
}
}
}
attr {
key: "_execution_count"
value {
i: 10
}
}
}
node {
name: "while/add"
op: "Add"
input: "while/Identity"
input: "while/add/y"
attr {
key: "T"
value {
type: DT_INT32
}
}
attr {
key: "_execution_count"
value {
i: 10
}
}
}
node {
name: "while/concat/axis"
op: "Const"
input: "^while/Identity"
attr {
key: "dtype"
value {
type: DT_INT32
}
}
attr {
key: "value"
value {
tensor {
dtype: DT_INT32
tensor_shape {
}
int_val: 0
}
}
}
attr {
key: "_execution_count"
value {
i: 10
}
}
}
node {
name: "while/concat"
op: "ConcatV2"
input: "while/Identity_1"
input: "while/Identity_1"
input: "while/concat/axis"
attr {
key: "N"
value {
i: 2
}
}
attr {
key: "T"
value {
type: DT_FLOAT
}
}
attr {
key: "Tidx"
value {
type: DT_INT32
}
}
attr {
key: "_execution_count"
value {
i: 10
}
}
}
node {
name: "while/NextIteration"
op: "NextIteration"
input: "while/add"
attr {
key: "T"
value {
type: DT_INT32
}
}
attr {
key: "_execution_count"
value {
i: 10
}
}
}
node {
name: "while/NextIteration_1"
op: "NextIteration"
input: "while/concat"
attr {
key: "T"
value {
type: DT_FLOAT
}
}
attr {
key: "_execution_count"
value {
i: 10
}
}
}
node {
name: "while/Exit"
op: "Exit"
input: "while/Switch"
attr {
key: "T"
value {
type: DT_INT32
}
}
attr {
key: "_execution_count"
value {
i: 1
}
}
}
node {
name: "while/Exit_1"
op: "Exit"
input: "while/Switch_1"
attr {
key: "T"
value {
type: DT_FLOAT
}
}
attr {
key: "_execution_count"
value {
i: 1
}
}
}
versions {
producer: 21
}
)EOF";
  grappler_item_ = std::make_unique<GrapplerItem>();
CHECK(protobuf::TextFormat::ParseFromString(gdef_ascii,
&grappler_item_->graph));
grappler_item_->id = "test_graph";
grappler_item_->fetch = {"while/Exit", "while/Exit_1"};
}
void CreateGrapplerItemWithCondition() {
const string gdef_ascii = R"EOF(
node {
name: "a"
op: "Const"
attr {
key: "dtype"
value {
type: DT_FLOAT
}
}
attr {
key: "value"
value {
tensor {
dtype: DT_FLOAT
tensor_shape {
}
float_val: 2.0
}
}
}
}
node {
name: "Less"
op: "Const"
attr {
key: "dtype"
value {
type: DT_BOOL
}
}
attr {
key: "value"
value {
tensor {
dtype: DT_BOOL
tensor_shape {
}
tensor_content: "\001"
}
}
}
}
node {
name: "Switch"
op: "Switch"
input: "a"
input: "Less"
attr {
key: "T"
value {
type: DT_FLOAT
}
}
}
node {
name: "First"
op: "Identity"
input: "Switch"
attr {
key: "T"
value {
type: DT_FLOAT
}
}
}
node {
name: "Second"
op: "Identity"
input: "Switch:1"
attr {
key: "T"
value {
type: DT_FLOAT
}
}
}
node {
name: "Merge"
op: "Merge"
input: "First"
input: "Second"
attr {
key: "N"
value {
i: 2
}
}
attr {
key: "T"
value {
type: DT_FLOAT
}
}
}
versions {
producer: 27
})EOF";
grappler_item_ = std::make_unique<GrapplerItem>();
CHECK(protobuf::TextFormat::ParseFromString(gdef_ascii,
&grappler_item_->graph));
grappler_item_->id = "test_graph";
grappler_item_->fetch = {"Merge"};
}
void CreateGrapplerItemWithInterDeviceTransfers() {
tensorflow::Scope s = tensorflow::Scope::NewRootScope().WithDevice(kCPU0);
auto x = ops::RandomUniform(
s.WithOpName("x"), {batch_size_, width_, height_, depth_in_}, DT_FLOAT);
auto scale =
ops::RandomUniform(s.WithOpName("scale"), {depth_in_}, DT_FLOAT);
auto offset =
ops::RandomUniform(s.WithOpName("offset"), {depth_in_}, DT_FLOAT);
auto mean = ops::RandomUniform(s.WithOpName("mean"), {0}, DT_FLOAT);
auto var = ops::RandomUniform(s.WithOpName("var"), {0}, DT_FLOAT);
auto batch_norm = ops::FusedBatchNorm(
s.WithOpName("bn"), x, scale, offset, mean, var,
ops::FusedBatchNorm::IsTraining(true).Epsilon(0.1f));
auto y = batch_norm.y;
auto batch_mean = batch_norm.batch_mean;
auto batch_var = batch_norm.batch_variance;
auto y1 = ops::Identity(s.WithOpName("y1").WithDevice(kCPU1), y);
auto y2 = ops::Identity(s.WithOpName("y2").WithDevice(kCPU1), y);
auto batch_mean1 = ops::Identity(
s.WithOpName("batch_mean1").WithDevice(kCPU1), batch_mean);
auto batch_var1 =
ops::Identity(s.WithOpName("batch_var1").WithDevice(kCPU1), batch_var);
auto control_dep = ops::NoOp(s.WithOpName("control_dep")
.WithControlDependencies(y)
.WithDevice(kCPU1));
grappler_item_ = std::make_unique<GrapplerItem>();
TF_CHECK_OK(s.ToGraphDef(&grappler_item_->graph));
grappler_item_->id = "test_conv2d_graph";
grappler_item_->fetch = {"y1", "y2", "batch_mean1", "batch_var1",
"control_dep"};
dependency_["bn"] = {"x", "mean", "var"};
dependency_["y1"] = {"bn"};
dependency_["y2"] = {"bn"};
dependency_["batch_mean1"] = {"bn"};
dependency_["batch_var1"] = {"bn"};
dependency_["control_dep"] = {"bn"};
}
void InitScheduler() { TF_ASSERT_OK(scheduler_->Init(grappler_item_.get())); }
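  // Deterministic stand-in for a cost estimator: MatMul costs 2s,
  // RandomUniform 1s, and everything else 1us of simulated execution time.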
Costs SimplePredictCosts(const OpContext& op_context) const {
Costs c;
int64_t exec_cost = 0;
if (op_context.op_info.op() == "MatMul") {
exec_cost = 2000000000;
} else if (op_context.op_info.op() == "RandomUniform") {
exec_cost = 1000000000;
} else {
exec_cost = 1000;
}
c.execution_time = Costs::NanoSeconds(exec_cost);
return c;
}
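  // Drains the scheduler until `target_node` has executed (or no nodes
  // remain), verifying along the way that every node listed in dependency_
  // runs only after its recorded predecessors.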
std::unordered_map<string, OpContext> RunScheduler(
const string& target_node) {
std::unordered_map<string, OpContext> ops_executed;
bool more_nodes = true;
do {
OpContext op_context = scheduler_->GetCurrNode();
ops_executed[op_context.name] = op_context;
std::cout << op_context.name << std::endl;
Costs node_costs = SimplePredictCosts(op_context);
auto it = dependency_.find(op_context.name);
if (it != dependency_.end()) {
for (const auto& preceding_node : it->second) {
EXPECT_GT(ops_executed.count(preceding_node), 0);
}
}
more_nodes = scheduler_->MarkCurrNodeExecuted(node_costs);
if (op_context.name == target_node) {
break;
}
} while (more_nodes);
return ops_executed;
}
template <typename T>
void ExpectVectorEq(const std::vector<T>& expected,
const std::vector<T>& test_elements) {
std::set<T> expected_set(expected.begin(), expected.end());
for (const auto& element : test_elements) {
EXPECT_GT(expected_set.count(element), 0);
}
EXPECT_EQ(expected.size(), test_elements.size());
}
void ValidateNodeDefs(const std::vector<string>& expected,
const std::vector<const NodeDef*>& node_defs) {
std::vector<string> node_names;
std::transform(node_defs.begin(), node_defs.end(),
std::back_inserter(node_names),
[](const NodeDef* node) { return node->name(); });
ExpectVectorEq(expected, node_names);
}
template <typename T>
void ExpectSetEq(const std::set<T>& expected,
const std::set<T>& test_elements) {
for (const auto& element : test_elements) {
EXPECT_GT(expected.count(element), 0);
}
EXPECT_EQ(expected.size(), test_elements.size());
}
template <typename T, typename U>
void ExpectUnorderedMapEq(const std::unordered_map<T, U>& expected,
const std::unordered_map<T, U>& test_map) {
EXPECT_EQ(expected.size(), test_map.size());
for (const auto& key_val : expected) {
EXPECT_GT(test_map.count(key_val.first), 0);
EXPECT_EQ(test_map.at(key_val.first), key_val.second);
}
}
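  // Checks that the set of (node, port) pairs live at the memory-usage
  // snapshot matches `expected_names`, all at the same output port.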
void ValidateMemoryUsageSnapshot(
const std::vector<string>& expected_names, const int port_num_expected,
const std::unordered_set<std::pair<const NodeDef*, int>,
DeviceState::NodePairHash>& mem_usage_snapshot) {
std::set<std::pair<string, int>> nodes_at_peak_mem_usage;
std::transform(
mem_usage_snapshot.begin(), mem_usage_snapshot.end(),
std::inserter(nodes_at_peak_mem_usage, nodes_at_peak_mem_usage.begin()),
[](const std::pair<const NodeDef*, int>& node_port) {
return std::make_pair(node_port.first->name(), node_port.second);
});
std::set<std::pair<string, int>> expected;
std::transform(expected_names.begin(), expected_names.end(),
std::inserter(expected, expected.begin()),
[port_num_expected](const string& name) {
return std::make_pair(name, port_num_expected);
});
ExpectSetEq(expected, nodes_at_peak_mem_usage);
}
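  // Verifies that start times are non-decreasing along the given dependency
  // chain.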
void ValidateDependencyChain(
const std::unordered_map<string, int64_t>& start_times,
const std::vector<string>& nodes_in_dependency_order) {
int64_t prev_node_time = -1;
for (const auto& node : nodes_in_dependency_order) {
int64_t curr_node_time = start_times.at(node);
EXPECT_GE(curr_node_time, prev_node_time);
prev_node_time = curr_node_time;
}
}
std::unique_ptr<VirtualCluster> cluster_;
std::unique_ptr<TestVirtualScheduler> scheduler_;
FirstReadyManager first_ready_manager_;
CompositeNodeManager composite_node_manager_;
std::unique_ptr<GrapplerItem> grappler_item_;
std::unordered_map<string, std::vector<string>> dependency_;
const int batch_size_ = 4;
const int width_ = 10;
const int height_ = 10;
const int depth_in_ = 8;
const int kernel_ = 3;
const int depth_out_ = 16;
};
TEST_F(VirtualSchedulerTest, SummaryCostTest) {
CreateGrapplerItemWithMatmulChain();
InitScheduler();
auto ops_executed = RunScheduler("");
Costs c = scheduler_->Summary();
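  // 5 RandomUniform ops at 1s each + 4 MatMul ops at 2s each + 5us of cheap
  // ops (see SimplePredictCosts).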
EXPECT_EQ(13000005, c.execution_time.asMicroSeconds().count());
EXPECT_EQ(grappler_item_->graph.node_size(), c.num_ops_total);
EXPECT_FALSE(c.inaccurate);
EXPECT_EQ(0, c.num_ops_with_unknown_shapes);
}
TEST_F(VirtualSchedulerTest, SummaryCostStepStatsTest) {
CreateGrapplerItemWithMatmulChain();
InitScheduler();
auto ops_executed = RunScheduler("");
RunMetadata metadata;
Costs c = scheduler_->Summary(&metadata);
StepStats stepstats = metadata.step_stats();
EXPECT_EQ(13000005, c.execution_time.asMicroSeconds().count());
EXPECT_EQ(grappler_item_->graph.node_size(), c.num_ops_total);
EXPECT_FALSE(c.inaccurate);
EXPECT_EQ(0, c.num_ops_with_unknown_shapes);
EXPECT_EQ(1, stepstats.dev_stats().size());
std::map<string, std::pair<int64_t, int64_t>> start_end_times;
for (const auto& device_step_stats : stepstats.dev_stats()) {
for (const auto& stats : device_step_stats.node_stats()) {
int64_t start = stats.all_start_micros();
int64_t end = start + stats.all_end_rel_micros();
start_end_times[stats.node_name()] =
std::pair<int64_t, int64_t>(start, end);
if (stats.timeline_label() == "MatMul" ||
stats.timeline_label() == "RandomUniform") {
EXPECT_EQ(1, stats.output().size());
for (const auto& output : stats.output()) {
EXPECT_EQ(DT_FLOAT, output.tensor_description().dtype());
EXPECT_EQ(2, output.tensor_description().shape().dim().size());
for (const auto& dim : output.tensor_description().shape().dim()) {
EXPECT_EQ(3200, dim.size());
}
}
}
}
}
int64_t cur_time = static_cast<int64_t>(5000005);
int64_t increment = static_cast<int64_t>(2000000);
auto op_names = {"ab", "abc", "abcd", "abcde"};
for (const auto& op_name : op_names) {
int64_t actual_start = start_end_times[op_name].first;
int64_t actual_end = start_end_times[op_name].second;
int64_t expected_start = cur_time;
int64_t expected_end = cur_time + increment;
EXPECT_EQ(expected_start, actual_start);
EXPECT_EQ(expected_end, actual_end);
cur_time += increment;
}
}
TEST_F(VirtualSchedulerTest, InitAndBasicScheduling) {
CreateGrapplerItemWithConv2Ds();
InitScheduler();
auto ops_executed = RunScheduler("");
EXPECT_EQ(8, ops_executed.size());
EXPECT_GT(ops_executed.count("x"), 0);
EXPECT_GT(ops_executed.count("y"), 0);
EXPECT_GT(ops_executed.count("f"), 0);
EXPECT_GT(ops_executed.count("c0"), 0);
EXPECT_GT(ops_executed.count("c1"), 0);
EXPECT_EQ(ops_executed.count("z"), 0);
EXPECT_EQ(ops_executed.count("c2"), 0);
EXPECT_EQ(1, ops_executed["x"].op_info.outputs_size());
EXPECT_EQ(1, ops_executed["y"].op_info.outputs_size());
EXPECT_EQ(1, ops_executed["f"].op_info.outputs_size());
EXPECT_EQ(2, ops_executed["c0"].op_info.inputs_size());
EXPECT_EQ(2, ops_executed["c1"].op_info.inputs_size());
}
TEST_F(VirtualSchedulerTest, MemoryUsage) {
CreateGrapplerItemWithAddN();
InitScheduler();
RunScheduler("");
const auto* device_states = scheduler_->GetDeviceStates();
const auto& cpu_state = device_states->at(kCPU0);
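  // Each RandomUniform output is 10*10*10*10 floats = 40000 bytes.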
int64_t one_input_node_size = 4 * 10 * 10 * 10 * 10;
const std::vector<string> expected_names = {"x", "y", "z", "w", "add"};
EXPECT_EQ(expected_names.size() * one_input_node_size,
cpu_state.max_memory_usage);
  ValidateMemoryUsageSnapshot(expected_names, /*port_num_expected=*/0,
cpu_state.mem_usage_snapshot_at_peak);
ASSERT_EQ(cpu_state.temporary_memory_usage_trace.size(), 10);
const std::pair<std::string, int64_t>& x_usage =
cpu_state.temporary_memory_usage_trace.at(4);
EXPECT_EQ(x_usage.first, "x");
EXPECT_EQ(x_usage.second, one_input_node_size);
const std::pair<std::string, int64_t>& add_usage =
cpu_state.temporary_memory_usage_trace.at(8);
EXPECT_EQ(add_usage.first, "add");
EXPECT_EQ(add_usage.second, 5 * one_input_node_size);
const std::pair<std::string, int64_t>& out_usage =
cpu_state.temporary_memory_usage_trace.at(9);
EXPECT_EQ(out_usage.first, "out");
EXPECT_EQ(out_usage.second, one_input_node_size);
ExpectUnorderedMapEq(
{std::make_pair("/job:localhost/replica:0/task:0/cpu:0", 64)},
scheduler_->GetPersistentMemoryUsage());
ExpectUnorderedMapEq(
{std::make_pair("/job:localhost/replica:0/task:0/cpu:0", 200000)},
scheduler_->GetPeakMemoryUsage());
}
TEST_F(VirtualSchedulerTest, MemoryUsageForStreamingOps) {
CreateGrapplerItemWithAddN();
auto& graph = grappler_item_->graph;
for (auto& node : *graph.mutable_node()) {
if (node.name() == "out" || node.name() == "add") {
node.set_device(kCPU1);
}
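    // z and w are marked as streaming outputs, so they must not be charged
    // against CPU0's peak memory below.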
if (node.name() == "z" || node.name() == "w")
(*node.mutable_attr())[kStreaming].mutable_list()->add_b(true);
}
InitScheduler();
auto ops_executed = RunScheduler("");
const auto* device_states = scheduler_->GetDeviceStates();
const auto& cpu_state_0 = device_states->at(kCPU0);
const auto& cpu_state_1 = device_states->at(kCPU1);
int64_t one_input_node_size = 4 * 10 * 10 * 10 * 10;
const std::vector<string> cpu_0_expected_tensors = {"x", "y"};
const std::vector<string> cpu_1_expected_tensors = {"x", "y", "add"};
EXPECT_EQ(cpu_0_expected_tensors.size() * one_input_node_size,
cpu_state_0.max_memory_usage);
EXPECT_EQ(cpu_1_expected_tensors.size() * one_input_node_size,
cpu_state_1.max_memory_usage);
EXPECT_EQ(cpu_state_0.memory_usage, 0);
EXPECT_EQ(cpu_state_1.memory_usage, 0);
}
TEST_F(VirtualSchedulerTest, MemoryUsageWithExecutionCount) {
CreateGrapplerItemWithAddN();
auto& graph = grappler_item_->graph;
for (auto& node : *graph.mutable_node()) {
(*node.mutable_attr())[kExecutionCount].set_i(10000);
}
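  // Even at 10000 simulated executions per node, peak memory reflects a
  // single iteration's live set.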
InitScheduler();
auto ops_executed = RunScheduler("");
const auto* device_states = scheduler_->GetDeviceStates();
const auto& cpu_state_0 = device_states->at(kCPU0);
int64_t one_input_node_size = 4 * 10 * 10 * 10 * 10;
const std::vector<string> expected_names = {"x", "y", "z", "w", "add"};
EXPECT_EQ(expected_names.size() * one_input_node_size,
cpu_state_0.max_memory_usage);
EXPECT_EQ(cpu_state_0.memory_usage, 0);
Costs c = scheduler_->Summary();
EXPECT_EQ(64, c.persistent_memory);
EXPECT_EQ(200000, c.temporary_memory);
EXPECT_EQ(200064, c.max_memory);
}
TEST_F(VirtualSchedulerTest, UnnecessaryFeedNodes) {
CreateGrapplerItemWithUnnecessaryPlaceholderNodes();
InitScheduler();
auto ops_executed = RunScheduler("");
ASSERT_EQ(1, ops_executed.size());
ASSERT_EQ(ops_executed.count("x"), 1);
}
TEST_F(VirtualSchedulerTest, ControlDependency) {
CreateGrapplerItemWithControlDependency();
InitScheduler();
RunScheduler("");
const auto* device_states = scheduler_->GetDeviceStates();
const auto& cpu_state = device_states->at(kCPU0);
int64_t one_input_node_size = 4;
const std::vector<string> expected_names = {"x", "y", "z", "w",
"u", "v", "t"};
EXPECT_EQ(expected_names.size() * one_input_node_size,
cpu_state.max_memory_usage);
  ValidateMemoryUsageSnapshot(expected_names, /*port_num_expected=*/-1,
cpu_state.mem_usage_snapshot_at_peak);
ExpectUnorderedMapEq(
{std::make_pair("/job:localhost/replica:0/task:0/cpu:0", 0)},
scheduler_->GetPersistentMemoryUsage());
ExpectUnorderedMapEq(
{std::make_pair("/job:localhost/replica:0/task:0/cpu:0", 28)},
scheduler_->GetPeakMemoryUsage());
}
TEST_F(VirtualSchedulerTest, ComplexDependency) {
CreateGrapplerItemWithBatchNorm();
InitScheduler();
RunScheduler("bn");
const auto& device_states = scheduler_->GetDeviceStates();
const auto& cpu_state = device_states->at(kCPU0);
const int x_size = batch_size_ * width_ * height_ * depth_in_;
  int64_t expected_size =
      4 * (2 * x_size /* x:0 and bn:0 (y) */ +
           depth_in_ /* bn:2 (batch_variance) */ +
           1 /* bn:-1 (control dependency) */);
EXPECT_EQ(expected_size, cpu_state.memory_usage);
std::set<std::pair<string, int>> nodes_in_memory;
std::transform(
cpu_state.nodes_in_memory.begin(), cpu_state.nodes_in_memory.end(),
std::inserter(nodes_in_memory, nodes_in_memory.begin()),
[](const std::pair<const NodeDef*, int>& node_port) {
return std::make_pair(node_port.first->name(), node_port.second);
});
std::set<std::pair<string, int>> expected = {
std::make_pair("bn", -1),
std::make_pair("bn", 0),
std::make_pair("bn", 2),
std::make_pair("x", 0),
};
ExpectSetEq(expected, nodes_in_memory);
const auto* node_states = scheduler_->GetNodeStates();
const NodeState* bn_node = nullptr;
const NodeState* x_node = nullptr;
for (const auto& nodedef_node_state : *node_states) {
const NodeDef* node = nodedef_node_state.first;
const NodeState& node_state = nodedef_node_state.second;
if (node->name() == "bn") {
bn_node = &node_state;
}
if (node->name() == "x") {
x_node = &node_state;
}
}
CHECK_NOTNULL(bn_node);
CHECK_NOTNULL(x_node);
ValidateNodeDefs({"bn", "z1"}, x_node->outputs.at(0));
ValidateNodeDefs({"z4"}, bn_node->outputs.at(-1));
ValidateNodeDefs({"z1"}, bn_node->outputs.at(0));
ValidateNodeDefs({"z2", "z3", "z2", "z3"}, bn_node->outputs.at(2));
}
TEST_F(VirtualSchedulerTest, Variable) {
CreateGrapplerItemWithConv2DAndVariable();
InitScheduler();
RunScheduler("");
const auto* device_states = scheduler_->GetDeviceStates();
const auto& cpu_state = device_states->at(kCPU0);
ValidateMemoryUsageSnapshot({"f", "Const/Const"}, 0,
cpu_state.persistent_nodes);
ValidateMemoryUsageSnapshot({"x"}, 0,
cpu_state.mem_usage_snapshot_at_peak);
ExpectUnorderedMapEq(
{std::make_pair("/job:localhost/replica:0/task:0/cpu:0", 4624)},
scheduler_->GetPersistentMemoryUsage());
ExpectUnorderedMapEq(
{std::make_pair("/job:localhost/replica:0/task:0/cpu:0", 12800)},
scheduler_->GetPeakMemoryUsage());
}
TEST_F(VirtualSchedulerTest, WhileLoop) {
CreateGrapplerItemWithLoop();
InitScheduler();
RunScheduler("");
RunMetadata metadata;
scheduler_->Summary(&metadata);
int num_next_iteration = 0;
int num_next_iteration_1 = 0;
int num_exit = 0;
int num_exit_1 = 0;
  int64_t next_iter_start_micro = 0;
  int64_t next_iter_1_start_micro = 0;
  int64_t exit_start_micro = 0;
  int64_t exit_1_start_micro = 0;
std::unordered_map<string, int64_t> start_times;
for (const auto& device_step_stats : metadata.step_stats().dev_stats()) {
for (const auto& stats : device_step_stats.node_stats()) {
start_times[stats.node_name()] = stats.all_start_micros();
if (stats.node_name() == "while/NextIteration") {
++num_next_iteration;
next_iter_start_micro = stats.all_start_micros();
} else if (stats.node_name() == "while/NextIteration_1") {
++num_next_iteration_1;
next_iter_1_start_micro = stats.all_start_micros();
} else if (stats.node_name() == "while/Exit") {
++num_exit;
exit_start_micro = stats.all_start_micros();
} else if (stats.node_name() == "while/Exit_1") {
++num_exit_1;
exit_1_start_micro = stats.all_start_micros();
}
}
}
EXPECT_EQ(1, num_next_iteration);
EXPECT_EQ(1, num_next_iteration_1);
EXPECT_EQ(1, num_exit);
EXPECT_EQ(1, num_exit_1);
EXPECT_NE(next_iter_start_micro, next_iter_1_start_micro);
EXPECT_NE(exit_start_micro, exit_1_start_micro);
ValidateDependencyChain(
start_times,
{"Const", "while/Enter",
"while/Less/y", "while/Less", "while/LoopCond", "while/Switch",
"while/Identity", "while/add/y", "while/add", "while/NextIteration"});
ValidateDependencyChain(start_times,
{"ones", "while/Enter_1",
"while/Switch_1", "while/Identity_1", "while/concat",
"while/NextIteration_1"});
ValidateDependencyChain(start_times, {"while/Switch", "while/Exit"});
ValidateDependencyChain(
start_times, {"while/Identity", "while/concat/axis", "while/concat"});
ValidateDependencyChain(start_times, {"while/Identity", "while/add"});
ValidateDependencyChain(start_times, {"while/Switch_1", "while/Exit_1"});
}
TEST_F(VirtualSchedulerTest, AnnotatedWhileLoop) {
{
CreateGrapplerItemWithLoop();
InitScheduler();
RunScheduler("");
Costs c = scheduler_->Summary();
EXPECT_EQ(23, c.execution_time.asMicroSeconds().count());
EXPECT_EQ(grappler_item_->graph.node_size() + 2, c.num_ops_total);
EXPECT_FALSE(c.inaccurate);
EXPECT_EQ(0, c.num_ops_with_unknown_shapes);
}
{
CreateGrapplerItemWithLoopAnnotated();
InitScheduler();
RunScheduler("");
Costs c = scheduler_->Summary();
EXPECT_EQ(178, c.execution_time.asMicroSeconds().count());
EXPECT_EQ(grappler_item_->graph.node_size() + 2, c.num_ops_total);
EXPECT_FALSE(c.inaccurate);
EXPECT_EQ(0, c.num_ops_with_unknown_shapes);
}
}
TEST_F(VirtualSchedulerTest, Condition) {
{
CreateGrapplerItemWithCondition();
InitScheduler();
RunScheduler("");
RunMetadata metadata;
Costs c = scheduler_->Summary(&metadata);
int num_a = 0;
int num_less = 0;
int num_switch = 0;
int num_first = 0;
int num_second = 0;
int num_merge = 0;
for (const auto& device_step_stats : metadata.step_stats().dev_stats()) {
for (const auto& stats : device_step_stats.node_stats()) {
if (stats.node_name() == "a") {
++num_a;
} else if (stats.node_name() == "Less") {
++num_less;
} else if (stats.node_name() == "Switch") {
++num_switch;
} else if (stats.node_name() == "First") {
++num_first;
} else if (stats.node_name() == "Second") {
++num_second;
} else if (stats.node_name() == "Merge") {
++num_merge;
}
}
}
EXPECT_EQ(1, num_a);
EXPECT_EQ(1, num_less);
EXPECT_EQ(1, num_switch);
EXPECT_EQ(1, num_first);
EXPECT_EQ(1, num_second);
EXPECT_EQ(2, num_merge);
EXPECT_EQ(7, c.execution_time.asMicroSeconds().count());
EXPECT_EQ(grappler_item_->graph.node_size() + 1, c.num_ops_total);
EXPECT_FALSE(c.inaccurate);
EXPECT_EQ(0, c.num_ops_with_unknown_shapes);
}
{
CreateGrapplerItemWithCondition();
for (auto& node : *grappler_item_->graph.mutable_node()) {
if (node.name() == "Switch") {
AttrValue attr_output_info;
(*attr_output_info.mutable_list()).add_i(0);
AddNodeAttr(kOutputSlots, attr_output_info, &node);
}
}
InitScheduler();
RunScheduler("");
RunMetadata metadata;
Costs c = scheduler_->Summary(&metadata);
int num_a = 0;
int num_less = 0;
int num_switch = 0;
int num_first = 0;
int num_second = 0;
int num_merge = 0;
for (const auto& device_step_stats : metadata.step_stats().dev_stats()) {
for (const auto& stats : device_step_stats.node_stats()) {
if (stats.node_name() == "a") {
++num_a;
} else if (stats.node_name() == "Less") {
++num_less;
} else if (stats.node_name() == "Switch") {
++num_switch;
} else if (stats.node_name() == "First") {
++num_first;
} else if (stats.node_name() == "Second") {
++num_second;
} else if (stats.node_name() == "Merge") {
++num_merge;
}
}
}
EXPECT_EQ(1, num_a);
EXPECT_EQ(1, num_less);
EXPECT_EQ(1, num_switch);
EXPECT_EQ(1, num_first);
EXPECT_EQ(0, num_second);
EXPECT_EQ(1, num_merge);
EXPECT_EQ(5, c.execution_time.asMicroSeconds().count());
EXPECT_EQ(grappler_item_->graph.node_size() - 1, c.num_ops_total);
EXPECT_FALSE(c.inaccurate);
EXPECT_EQ(0, c.num_ops_with_unknown_shapes);
}
}
TEST_F(VirtualSchedulerTest, InterDeviceTransfer) {
CreateGrapplerItemWithInterDeviceTransfers();
InitScheduler();
auto ops_executed = RunScheduler("");
auto get_port_num = [](const string& name) -> int {
if (absl::StrContains(name, "bn_0")) {
return 0;
} else if (absl::StrContains(name, "bn_1")) {
return 1;
} else if (absl::StrContains(name, "bn_2")) {
return 2;
} else if (absl::StrContains(name, "bn_minus1")) {
return -1;
}
return -999;
};
std::unordered_map<string, int> op_count;
std::unordered_map<int, string> recv_op_names;
std::unordered_map<int, string> send_op_names;
for (const auto& x : ops_executed) {
const auto& name = x.first;
const auto& node_info = x.second;
const auto& op = node_info.op_info.op();
if (op == kRecv) {
recv_op_names[get_port_num(name)] = name;
} else if (op == kSend) {
send_op_names[get_port_num(name)] = name;
}
op_count[op]++;
}
EXPECT_EQ(op_count.at(kSend), op_count.at(kRecv));
EXPECT_EQ(op_count.at(kRecv), 3);
EXPECT_EQ(op_count.at(kSend), 3);
auto get_output_size = [this, ops_executed](const string& name) -> int64 {
const auto& output_properties_ = ops_executed.at(name).op_info.outputs();
std::vector<OpInfo::TensorProperties> output_properties;
for (const auto& output_property : output_properties_) {
output_properties.push_back(output_property);
}
return CalculateOutputSize(output_properties, 0);
};
int input_size = 4 * batch_size_ * width_ * height_ * depth_in_;
EXPECT_EQ(get_output_size(recv_op_names[0]), input_size);
EXPECT_EQ(get_output_size(send_op_names[0]), input_size);
EXPECT_EQ(get_output_size(recv_op_names[1]), 4 * depth_in_);
EXPECT_EQ(get_output_size(send_op_names[1]), 4 * depth_in_);
EXPECT_EQ(get_output_size(recv_op_names[2]), 4 * depth_in_);
EXPECT_EQ(get_output_size(send_op_names[2]), 4 * depth_in_);
}
TEST_F(VirtualSchedulerTest, GraphWithSendRecv) {
CreateGrapplerItemWithSendRecv();
InitScheduler();
auto ops_executed = RunScheduler("");
EXPECT_GT(ops_executed.count("Const"), 0);
EXPECT_GT(ops_executed.count("Send"), 0);
EXPECT_GT(ops_executed.count("Recv"), 0);
}
TEST_F(VirtualSchedulerTest, GraphWithSendRecvDifferentDevice) {
CreateGrapplerItemWithSendRecv();
auto& graph = grappler_item_->graph;
const string recv_device = kCPU1;
for (int i = 0; i < graph.node_size(); i++) {
auto* node = graph.mutable_node(i);
if (node->name() == "Recv") {
node->set_device(recv_device);
auto* attr = node->mutable_attr();
(*attr)["recv_device"].set_s(recv_device);
} else if (node->name() == "Send") {
auto* attr = node->mutable_attr();
(*attr)["recv_device"].set_s(recv_device);
}
}
InitScheduler();
auto ops_executed = RunScheduler("");
EXPECT_GT(ops_executed.count("Const"), 0);
EXPECT_GT(ops_executed.count("Send"), 0);
EXPECT_GT(ops_executed.count("Send_Send_0_from_/job_localhost/replica_0/"
"task_0/cpu_0_to_/job_localhost"
"/replica_0/task_0/cpu_1"),
0);
EXPECT_GT(ops_executed.count(
"Recv_Send_0_on_/job_localhost/replica_0/task_0/cpu_1"),
0);
EXPECT_GT(ops_executed.count("Recv"), 0);
}
TEST_F(VirtualSchedulerTest, GraphWithOnlyRecv) {
CreateGrapplerItemWithRecvWithoutSend();
InitScheduler();
auto ops_executed = RunScheduler("");
EXPECT_GT(ops_executed.count("Recv"), 0);
}
TEST_F(VirtualSchedulerTest, AddMergeSwitch) {
  scheduler_ = std::make_unique<TestVirtualScheduler>(
      /*use_static_shapes=*/true,
      /*use_aggressive_shape_inference=*/true, &composite_node_manager_,
      cluster_.get());
CreateGrapplerItemWithSwitchMergeInput();
InitScheduler();
auto ops_executed = RunScheduler("");
EXPECT_GT(ops_executed.count("z"), 0);
}
TEST_F(VirtualSchedulerTest, AddFromOneTensor) {
CreateGrapplerItemWithAddFromOneTensor();
InitScheduler();
auto ops_executed = RunScheduler("");
EXPECT_GT(ops_executed.count("y"), 0);
EXPECT_GT(ops_executed.count("x"), 0);
}
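// Drives the scheduler by hand: RunScheduler("ab") stops partway, two
// MarkCurrNodeExecuted calls inject hand-built costs (persistent, then
// temporary), and the remaining nodes are drained with SimplePredictCosts
// before the summary and step stats are checked.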
TEST_F(VirtualSchedulerTest, TestNodeCostOutputTensorSize) {
CreateGrapplerItemWithMatmulChain();
InitScheduler();
RunScheduler("ab");
int32_t persistent_memory_before =
scheduler_->GetPersistentMemoryUsage().at(kCPU0);
auto* device_states = scheduler_->GetDeviceStates();
int32_t memory_usage = device_states->at(kCPU0).memory_usage;
Costs node_costs = Costs::ZeroCosts(false);
const int32_t node_one_cost = 12345;
const int32_t node_two_cost = 98765;
const int32_t input_size = 4 * 3200 * 3200;
node_costs.persistent_memory = node_one_cost;
node_costs.temporary_memory = 0;
node_costs.output_tensor_size_bytes = {{0, node_one_cost}};
node_costs.persistent_output_ports = {0};
scheduler_->MarkCurrNodeExecuted(node_costs);
device_states = scheduler_->GetDeviceStates();
const auto& cpu_state_0 = device_states->at(kCPU0);
memory_usage -= 2 * input_size;
EXPECT_EQ(cpu_state_0.memory_usage, memory_usage);
int64_t persistent_memory = node_one_cost + persistent_memory_before;
EXPECT_EQ(scheduler_->GetPersistentMemoryUsage().at(kCPU0),
persistent_memory);
node_costs = Costs::ZeroCosts(false);
node_costs.persistent_memory = 0;
node_costs.temporary_memory = node_two_cost;
node_costs.output_tensor_size_bytes = {{0, node_two_cost}};
scheduler_->MarkCurrNodeExecuted(node_costs);
device_states = scheduler_->GetDeviceStates();
const auto& cpu_state_1 = device_states->at(kCPU0);
memory_usage += node_two_cost - input_size;
EXPECT_EQ(cpu_state_1.memory_usage, memory_usage);
EXPECT_EQ(scheduler_->GetPersistentMemoryUsage().at(kCPU0),
persistent_memory);
bool more_nodes = true;
do {
OpContext op_context = scheduler_->GetCurrNode();
node_costs = SimplePredictCosts(op_context);
more_nodes = scheduler_->MarkCurrNodeExecuted(node_costs);
} while (more_nodes);
RunMetadata metadata;
Costs final_cost = scheduler_->Summary(&metadata);
EXPECT_EQ(final_cost.persistent_memory, persistent_memory);
StepStats stepstats = metadata.step_stats();
for (const auto& device_step_stats : stepstats.dev_stats()) {
for (const auto& stats : device_step_stats.node_stats()) {
const auto& allocation_description =
stats.output().at(0).tensor_description().allocation_description();
if (stats.node_name() == "abc") {
EXPECT_NE(allocation_description.allocated_bytes(),
allocation_description.requested_bytes());
const auto& mem_stats = stats.memory_stats();
EXPECT_EQ(mem_stats.persistent_memory_size(), node_one_cost);
} else if (stats.node_name() == "abcd") {
EXPECT_NE(allocation_description.allocated_bytes(),
allocation_description.requested_bytes());
} else {
EXPECT_EQ(allocation_description.allocated_bytes(),
allocation_description.requested_bytes());
}
}
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/costs/virtual_scheduler.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/costs/virtual_scheduler_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
6e37f9e9-a68a-4e08-8899-9c880461c192 | cpp | google/tensorstore | tsgrpc | tensorstore/kvstore/tsgrpc/tsgrpc.cc | tensorstore/kvstore/tsgrpc/tsgrpc_test.cc | #include <stdint.h>
#include <atomic>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include "absl/base/attributes.h"
#include "absl/log/absl_log.h"
#include "absl/status/status.h"
#include "absl/strings/cord.h"
#include "absl/time/clock.h"
#include "absl/time/time.h"
#include "grpcpp/channel.h"
#include "grpcpp/client_context.h"
#include "grpcpp/create_channel.h"
#include "grpcpp/support/status.h"
#include "grpcpp/support/sync_stream.h"
#include "tensorstore/context.h"
#include "tensorstore/internal/concurrency_resource.h"
#include "tensorstore/internal/context_binding.h"
#include "tensorstore/internal/data_copy_concurrency_resource.h"
#include "tensorstore/internal/grpc/client_credentials.h"
#include "tensorstore/internal/grpc/utils.h"
#include "tensorstore/internal/intrusive_ptr.h"
#include "tensorstore/internal/json_binding/bindable.h"
#include "tensorstore/internal/json_binding/json_binding.h"
#include "tensorstore/internal/log/verbose_flag.h"
#include "tensorstore/internal/metrics/counter.h"
#include "tensorstore/kvstore/byte_range.h"
#include "tensorstore/kvstore/common_metrics.h"
#include "tensorstore/kvstore/driver.h"
#include "tensorstore/kvstore/generation.h"
#include "tensorstore/kvstore/key_range.h"
#include "tensorstore/kvstore/operations.h"
#include "tensorstore/kvstore/read_result.h"
#include "tensorstore/kvstore/registry.h"
#include "tensorstore/kvstore/spec.h"
#include "tensorstore/kvstore/tsgrpc/common.h"
#include "tensorstore/proto/encode_time.h"
#include "tensorstore/proto/proto_util.h"
#include "tensorstore/util/execution/execution.h"
#include "tensorstore/util/executor.h"
#include "tensorstore/util/future.h"
#include "tensorstore/util/garbage_collection/fwd.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/status.h"
#include "tensorstore/internal/cache_key/absl_time.h"
#include "tensorstore/internal/cache_key/std_optional.h"
#include "tensorstore/internal/json_binding/absl_time.h"
#include "tensorstore/serialization/absl_time.h"
#include "tensorstore/kvstore/tsgrpc/common.pb.h"
#include "tensorstore/kvstore/tsgrpc/kvstore.grpc.pb.h"
#include "tensorstore/kvstore/tsgrpc/kvstore.pb.h"
using ::tensorstore::GrpcClientCredentials;
using ::tensorstore::internal::AbslTimeToProto;
using ::tensorstore::internal::DataCopyConcurrencyResource;
using ::tensorstore::internal::GrpcStatusToAbslStatus;
using ::tensorstore::kvstore::ListEntry;
using ::tensorstore::kvstore::ListReceiver;
using ::tensorstore_grpc::DecodeGenerationAndTimestamp;
using ::tensorstore_grpc::GetMessageStatus;
using ::tensorstore_grpc::kvstore::DeleteRequest;
using ::tensorstore_grpc::kvstore::DeleteResponse;
using ::tensorstore_grpc::kvstore::ListRequest;
using ::tensorstore_grpc::kvstore::ListResponse;
using ::tensorstore_grpc::kvstore::ReadRequest;
using ::tensorstore_grpc::kvstore::ReadResponse;
using ::tensorstore_grpc::kvstore::WriteRequest;
using ::tensorstore_grpc::kvstore::WriteResponse;
using ::tensorstore_grpc::kvstore::grpc_gen::KvStoreService;
namespace tensorstore {
namespace {
namespace jb = tensorstore::internal_json_binding;
struct TsGrpcMetrics : public internal_kvstore::CommonReadMetrics,
public internal_kvstore::CommonWriteMetrics {
internal_metrics::Counter<int64_t>& delete_calls;
};
auto tsgrpc_metrics = []() -> TsGrpcMetrics {
return {TENSORSTORE_KVSTORE_COMMON_READ_METRICS(tsgrpc),
TENSORSTORE_KVSTORE_COMMON_WRITE_METRICS(tsgrpc),
TENSORSTORE_KVSTORE_COUNTER_IMPL(
tsgrpc, delete_calls, "kvstore::Write calls deleting a key")};
}();
ABSL_CONST_INIT internal_log::VerboseFlag verbose_logging("tsgrpc_kvstore");
struct TsGrpcKeyValueStoreSpecData {
std::string address;
absl::Duration timeout;
Context::Resource<GrpcClientCredentials> credentials;
Context::Resource<DataCopyConcurrencyResource> data_copy_concurrency;
constexpr static auto ApplyMembers = [](auto&& x, auto f) {
return f(x.address, x.timeout, x.credentials, x.data_copy_concurrency);
};
constexpr static auto default_json_binder = jb::Object(
jb::Member(GrpcClientCredentials::id,
jb::Projection<&TsGrpcKeyValueStoreSpecData::credentials>()),
jb::Member("address",
jb::Projection<&TsGrpcKeyValueStoreSpecData::address>()),
jb::Member("timeout",
jb::Projection<&TsGrpcKeyValueStoreSpecData::timeout>(
jb::DefaultValue<jb::kNeverIncludeDefaults>(
[](auto* x) { *x = absl::Seconds(60); }))),
jb::Member(
DataCopyConcurrencyResource::id,
jb::Projection<
&TsGrpcKeyValueStoreSpecData::data_copy_concurrency>())
);
};
class TsGrpcKeyValueStoreSpec
: public internal_kvstore::RegisteredDriverSpec<
TsGrpcKeyValueStoreSpec, TsGrpcKeyValueStoreSpecData> {
public:
static constexpr char id[] = "tsgrpc_kvstore";
Future<kvstore::DriverPtr> DoOpen() const override;
};
class TsGrpcKeyValueStore
: public internal_kvstore::RegisteredDriver<TsGrpcKeyValueStore,
TsGrpcKeyValueStoreSpec> {
public:
void MaybeSetDeadline(grpc::ClientContext& context) {
if (spec_.timeout > absl::ZeroDuration() &&
spec_.timeout != absl::InfiniteDuration()) {
context.set_deadline(absl::ToChronoTime(absl::Now() + spec_.timeout));
}
}
const Executor& executor() const {
return spec_.data_copy_concurrency->executor;
}
KvStoreService::StubInterface* stub() { return stub_.get(); }
absl::Status GetBoundSpecData(SpecData& spec) const {
spec = spec_;
return absl::OkStatus();
}
Future<ReadResult> Read(Key key, ReadOptions options) override;
Future<TimestampedStorageGeneration> Write(Key key,
std::optional<Value> value,
WriteOptions options) override;
Future<const void> DeleteRange(KeyRange range) override;
void ListImpl(ListOptions options, ListReceiver receiver) override;
SpecData spec_;
std::shared_ptr<grpc::Channel> channel_;
std::unique_ptr<KvStoreService::StubInterface> stub_;
};
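// Each unary RPC below follows the same pattern: the task object owns the
// ClientContext, request, and response; an IntrusivePtr keeps it alive until
// the completion callback fires; and abandoning the returned future cancels
// the RPC (ExecuteWhenNotNeeded -> ClientContext::TryCancel).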
struct ReadTask : public internal::AtomicReferenceCount<ReadTask> {
internal::IntrusivePtr<TsGrpcKeyValueStore> driver;
grpc::ClientContext context;
ReadRequest request;
ReadResponse response;
Future<kvstore::ReadResult> Start(kvstore::Key key,
const kvstore::ReadOptions& options) {
request.set_key(std::move(key));
request.set_generation_if_equal(
options.generation_conditions.if_equal.value);
request.set_generation_if_not_equal(
options.generation_conditions.if_not_equal.value);
if (!options.byte_range.IsFull()) {
request.mutable_byte_range()->set_inclusive_min(
options.byte_range.inclusive_min);
request.mutable_byte_range()->set_exclusive_max(
options.byte_range.exclusive_max);
}
if (options.staleness_bound != absl::InfiniteFuture()) {
AbslTimeToProto(options.staleness_bound,
request.mutable_staleness_bound());
}
driver->MaybeSetDeadline(context);
internal::IntrusivePtr<ReadTask> self(this);
auto pair = tensorstore::PromiseFuturePair<kvstore::ReadResult>::Make();
pair.promise.ExecuteWhenNotNeeded([self] { self->context.TryCancel(); });
driver->stub()->async()->Read(
&context, &request, &response,
WithExecutor(driver->executor(), [self = std::move(self),
promise = std::move(pair.promise)](
::grpc::Status s) {
if (!promise.result_needed()) return;
promise.SetResult(self->Ready(GrpcStatusToAbslStatus(s)));
}));
return std::move(pair.future);
}
Result<kvstore::ReadResult> Ready(absl::Status status) {
ABSL_LOG_IF(INFO, verbose_logging)
<< "ReadTask::Ready " << ConciseDebugString(response) << " " << status;
TENSORSTORE_RETURN_IF_ERROR(status);
TENSORSTORE_RETURN_IF_ERROR(GetMessageStatus(response));
TENSORSTORE_ASSIGN_OR_RETURN(auto stamp,
DecodeGenerationAndTimestamp(response));
return kvstore::ReadResult{
static_cast<kvstore::ReadResult::State>(response.state()),
absl::Cord(response.value()),
std::move(stamp),
};
}
};
struct WriteTask : public internal::AtomicReferenceCount<WriteTask> {
internal::IntrusivePtr<TsGrpcKeyValueStore> driver;
grpc::ClientContext context;
WriteRequest request;
WriteResponse response;
Future<TimestampedStorageGeneration> Start(
kvstore::Key key, const absl::Cord value,
const kvstore::WriteOptions& options) {
request.set_key(std::move(key));
request.set_value(value);
request.set_generation_if_equal(
options.generation_conditions.if_equal.value);
driver->MaybeSetDeadline(context);
internal::IntrusivePtr<WriteTask> self(this);
auto pair =
tensorstore::PromiseFuturePair<TimestampedStorageGeneration>::Make();
pair.promise.ExecuteWhenNotNeeded([self] { self->context.TryCancel(); });
driver->stub()->async()->Write(
&context, &request, &response,
WithExecutor(driver->executor(), [self = std::move(self),
promise = std::move(pair.promise)](
::grpc::Status s) {
if (!promise.result_needed()) return;
promise.SetResult(self->Ready(GrpcStatusToAbslStatus(s)));
}));
return std::move(pair.future);
}
Result<TimestampedStorageGeneration> Ready(absl::Status status) {
ABSL_LOG_IF(INFO, verbose_logging)
<< "WriteTask::Ready " << ConciseDebugString(response) << " " << status;
TENSORSTORE_RETURN_IF_ERROR(status);
TENSORSTORE_RETURN_IF_ERROR(GetMessageStatus(response));
return DecodeGenerationAndTimestamp(response);
}
};
struct DeleteTask : public internal::AtomicReferenceCount<DeleteTask> {
internal::IntrusivePtr<TsGrpcKeyValueStore> driver;
grpc::ClientContext context;
DeleteRequest request;
DeleteResponse response;
Future<TimestampedStorageGeneration> Start(
kvstore::Key key, const kvstore::WriteOptions options) {
request.set_key(std::move(key));
request.set_generation_if_equal(
options.generation_conditions.if_equal.value);
return StartImpl();
}
Future<TimestampedStorageGeneration> StartRange(KeyRange range) {
request.mutable_range()->set_inclusive_min(range.inclusive_min);
request.mutable_range()->set_exclusive_max(range.exclusive_max);
return StartImpl();
}
Future<TimestampedStorageGeneration> StartImpl() {
driver->MaybeSetDeadline(context);
internal::IntrusivePtr<DeleteTask> self(this);
auto pair =
tensorstore::PromiseFuturePair<TimestampedStorageGeneration>::Make();
pair.promise.ExecuteWhenNotNeeded([self] { self->context.TryCancel(); });
driver->stub()->async()->Delete(
&context, &request, &response,
WithExecutor(driver->executor(), [self = std::move(self),
promise = std::move(pair.promise)](
::grpc::Status s) {
if (!promise.result_needed()) return;
promise.SetResult(self->Ready(GrpcStatusToAbslStatus(s)));
}));
return std::move(pair.future);
}
Result<TimestampedStorageGeneration> Ready(absl::Status status) {
ABSL_LOG_IF(INFO, verbose_logging)
<< "DeleteTask::Ready " << ConciseDebugString(response) << " "
<< status;
TENSORSTORE_RETURN_IF_ERROR(status);
TENSORSTORE_RETURN_IF_ERROR(GetMessageStatus(response));
return DecodeGenerationAndTimestamp(response);
}
};
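// Unlike the unary tasks above, List uses the blocking streaming API, so Run()
// is dispatched onto the driver's executor; cancellation is signalled through
// an atomic flag that is checked between entries.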
struct ListTask {
internal::IntrusivePtr<TsGrpcKeyValueStore> driver;
ListReceiver receiver;
grpc::ClientContext context;
std::atomic<bool> cancelled = false;
ListRequest request;
ListTask(internal::IntrusivePtr<TsGrpcKeyValueStore>&& driver,
ListReceiver&& receiver)
: driver(std::move(driver)), receiver(std::move(receiver)) {}
bool is_cancelled() { return cancelled.load(std::memory_order_relaxed); }
void try_cancel() {
if (!cancelled.load()) {
cancelled.store(true, std::memory_order_relaxed);
context.TryCancel();
}
}
void Run() {
driver->MaybeSetDeadline(context);
auto reader = driver->stub()->List(&context, request);
execution::set_starting(receiver, [this] { try_cancel(); });
absl::Status msg_status;
ListResponse response;
while (reader->Read(&response)) {
msg_status = GetMessageStatus(response);
if (!msg_status.ok()) {
try_cancel();
break;
}
for (const auto& entry : response.entry()) {
execution::set_value(receiver, ListEntry{entry.key(), entry.size()});
if (is_cancelled()) break;
}
if (is_cancelled()) break;
}
auto s = reader->Finish();
if (!msg_status.ok()) {
execution::set_error(receiver, msg_status);
} else if (s.ok() || is_cancelled()) {
execution::set_done(receiver);
} else {
execution::set_error(receiver, GrpcStatusToAbslStatus(s));
}
execution::set_stopping(receiver);
}
};
Future<kvstore::ReadResult> TsGrpcKeyValueStore::Read(Key key,
ReadOptions options) {
tsgrpc_metrics.read.Increment();
auto task = internal::MakeIntrusivePtr<ReadTask>();
task->driver = internal::IntrusivePtr<TsGrpcKeyValueStore>(this);
return task->Start(std::move(key), options);
}
Future<TimestampedStorageGeneration> TsGrpcKeyValueStore::Write(
Key key, std::optional<Value> value, WriteOptions options) {
if (value) {
tsgrpc_metrics.write.Increment();
auto task = internal::MakeIntrusivePtr<WriteTask>();
task->driver = internal::IntrusivePtr<TsGrpcKeyValueStore>(this);
return task->Start(std::move(key), value.value(), options);
} else {
tsgrpc_metrics.delete_calls.Increment();
auto task = internal::MakeIntrusivePtr<DeleteTask>();
task->driver = internal::IntrusivePtr<TsGrpcKeyValueStore>(this);
return task->Start(std::move(key), options);
}
}
Future<const void> TsGrpcKeyValueStore::DeleteRange(KeyRange range) {
if (range.empty()) return absl::OkStatus();
tsgrpc_metrics.delete_range.Increment();
auto task = internal::MakeIntrusivePtr<DeleteTask>();
task->driver = internal::IntrusivePtr<TsGrpcKeyValueStore>(this);
return MapFuture(
InlineExecutor{},
[](const Result<TimestampedStorageGeneration>& result) {
return MakeResult(result.status());
},
task->StartRange(std::move(range)));
}
void TsGrpcKeyValueStore::ListImpl(ListOptions options, ListReceiver receiver) {
if (options.range.empty()) {
execution::set_starting(receiver, [] {});
execution::set_done(receiver);
execution::set_stopping(receiver);
return;
}
tsgrpc_metrics.list.Increment();
auto task = std::make_unique<ListTask>(
internal::IntrusivePtr<TsGrpcKeyValueStore>(this), std::move(receiver));
task->request.mutable_range()->set_inclusive_min(options.range.inclusive_min);
task->request.mutable_range()->set_exclusive_max(options.range.exclusive_max);
task->request.set_strip_prefix_length(options.strip_prefix_length);
if (options.staleness_bound != absl::InfiniteFuture()) {
AbslTimeToProto(options.staleness_bound,
task->request.mutable_staleness_bound());
}
executor()([task = std::move(task)] { task->Run(); });
}
Future<kvstore::DriverPtr> TsGrpcKeyValueStoreSpec::DoOpen() const {
auto driver = internal::MakeIntrusivePtr<TsGrpcKeyValueStore>();
driver->spec_ = data_;
ABSL_LOG_IF(INFO, verbose_logging)
<< "tsgrpc_kvstore address=" << data_.address;
driver->channel_ =
grpc::CreateChannel(data_.address, data_.credentials->GetCredentials());
driver->stub_ = KvStoreService::NewStub(driver->channel_);
return driver;
}
}
}
TENSORSTORE_DECLARE_GARBAGE_COLLECTION_NOT_REQUIRED(
tensorstore::TsGrpcKeyValueStore)
namespace {
const tensorstore::internal_kvstore::DriverRegistration<
tensorstore::TsGrpcKeyValueStoreSpec>
registration;
} | #include <optional>
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/strings/cord.h"
#include "absl/synchronization/notification.h"
#include "absl/time/time.h"
#include "grpcpp/grpcpp.h"
#include "grpcpp/support/status.h"
#include "grpcpp/support/sync_stream.h"
#include "tensorstore/internal/grpc/grpc_mock.h"
#include "tensorstore/kvstore/byte_range.h"
#include "tensorstore/kvstore/generation.h"
#include "tensorstore/kvstore/key_range.h"
#include "tensorstore/kvstore/kvstore.h"
#include "tensorstore/kvstore/operations.h"
#include "tensorstore/kvstore/read_result.h"
#include "tensorstore/kvstore/tsgrpc/mock_kvstore_service.h"
#include "tensorstore/proto/parse_text_proto_or_die.h"
#include "tensorstore/proto/protobuf_matchers.h"
#include "tensorstore/util/execution/execution.h"
#include "tensorstore/util/execution/sender_testutil.h"
#include "tensorstore/util/status_testutil.h"
#include "tensorstore/kvstore/tsgrpc/kvstore.grpc.pb.h"
#include "tensorstore/kvstore/tsgrpc/kvstore.pb.h"
namespace {
namespace kvstore = ::tensorstore::kvstore;
using ::protobuf_matchers::EqualsProto;
using ::tensorstore::KeyRange;
using ::tensorstore::OptionalByteRangeRequest;
using ::tensorstore::ParseTextProtoOrDie;
using ::tensorstore::StorageGeneration;
using ::testing::_;
using ::testing::DoAll;
using ::testing::Return;
using ::testing::SetArgPointee;
using ::tensorstore_grpc::MockKvStoreService;
using ::tensorstore_grpc::kvstore::DeleteRequest;
using ::tensorstore_grpc::kvstore::DeleteResponse;
using ::tensorstore_grpc::kvstore::ListRequest;
using ::tensorstore_grpc::kvstore::ListResponse;
using ::tensorstore_grpc::kvstore::ReadRequest;
using ::tensorstore_grpc::kvstore::ReadResponse;
using ::tensorstore_grpc::kvstore::WriteRequest;
using ::tensorstore_grpc::kvstore::WriteResponse;
class TsGrpcMockTest : public testing::Test {
public:
~TsGrpcMockTest() override { mock_service_.Shutdown(); }
TsGrpcMockTest() {
ON_CALL(mock(), Read).WillByDefault(Return(grpc::Status::CANCELLED));
ON_CALL(mock(), Write).WillByDefault(Return(grpc::Status::CANCELLED));
ON_CALL(mock(), Delete).WillByDefault(Return(grpc::Status::CANCELLED));
ON_CALL(mock(), List).WillByDefault(Return(grpc::Status::CANCELLED));
}
tensorstore::KvStore OpenStore() {
return kvstore::Open({
{"driver", "tsgrpc_kvstore"},
{"address", mock_service_.server_address()},
})
.value();
}
MockKvStoreService& mock() { return *mock_service_.service(); }
tensorstore::grpc_mocker::MockGrpcServer<MockKvStoreService> mock_service_;
};
TEST_F(TsGrpcMockTest, Read) {
ReadRequest expected_request = ParseTextProtoOrDie(R"pb(
key: 'abc'
)pb");
ReadResponse response = ParseTextProtoOrDie(R"pb(
state: 2
value: '1234'
generation_and_timestamp {
generation: '1\001'
timestamp { seconds: 1634327736 nanos: 123456 }
}
)pb");
EXPECT_CALL(mock(), Read(_, EqualsProto(expected_request), _))
.WillOnce(DoAll(SetArgPointee<2>(response), Return(grpc::Status::OK)));
kvstore::ReadResult result;
{
auto store = OpenStore();
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
result, kvstore::Read(store, expected_request.key()).result());
}
EXPECT_TRUE(result.has_value());
EXPECT_EQ(result.value, "1234");
EXPECT_EQ(result.stamp.time,
absl::FromUnixSeconds(1634327736) + absl::Nanoseconds(123456));
EXPECT_EQ(result.stamp.generation, StorageGeneration::FromString("1"));
}
TEST_F(TsGrpcMockTest, ReadWithOptions) {
ReadRequest expected_request = ParseTextProtoOrDie(R"pb(
key: "abc"
generation_if_not_equal: "abc\001"
generation_if_equal: "xyz\001"
byte_range { inclusive_min: 1 exclusive_max: 10 }
)pb");
EXPECT_CALL(mock(), Read(_, EqualsProto(expected_request), _))
.WillOnce(Return(grpc::Status::OK));
kvstore::ReadResult result;
{
kvstore::ReadOptions options;
options.generation_conditions.if_not_equal =
StorageGeneration::FromString("abc");
options.generation_conditions.if_equal =
StorageGeneration::FromString("xyz");
options.staleness_bound = absl::InfiniteFuture();
options.byte_range = OptionalByteRangeRequest{1, 10};
auto store = OpenStore();
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
result, kvstore::Read(store, expected_request.key(), options).result());
}
EXPECT_EQ(result.stamp.generation, StorageGeneration::Unknown());
}
TEST_F(TsGrpcMockTest, Write) {
WriteRequest expected_request = ParseTextProtoOrDie(R"pb(
key: 'abc'
value: '1234'
)pb");
WriteResponse response = ParseTextProtoOrDie(R"pb(
generation_and_timestamp {
generation: '1\001'
timestamp { seconds: 1634327736 nanos: 123456 }
}
)pb");
EXPECT_CALL(mock(), Write(_, EqualsProto(expected_request), _))
.WillOnce(DoAll(SetArgPointee<2>(response), Return(grpc::Status::OK)));
tensorstore::TimestampedStorageGeneration result;
{
auto store = OpenStore();
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
result, kvstore::Write(store, expected_request.key(),
absl::Cord(expected_request.value()))
.result());
}
EXPECT_EQ(result.generation, StorageGeneration::FromString("1"));
}
TEST_F(TsGrpcMockTest, WriteEmpty) {
WriteRequest expected_request = ParseTextProtoOrDie(R"pb(
key: 'abc'
generation_if_equal: '\005'
)pb");
WriteResponse response = ParseTextProtoOrDie(R"pb(
generation_and_timestamp {
generation: '1\001'
timestamp { seconds: 1634327736 nanos: 123456 }
}
)pb");
EXPECT_CALL(mock(), Write(_, EqualsProto(expected_request), _))
.WillOnce(DoAll(SetArgPointee<2>(response), Return(grpc::Status::OK)));
tensorstore::TimestampedStorageGeneration result;
{
auto store = OpenStore();
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
result, kvstore::Write(store, expected_request.key(), absl::Cord(),
{StorageGeneration::NoValue()})
.result());
}
EXPECT_EQ(result.generation, StorageGeneration::FromString("1"));
}
TEST_F(TsGrpcMockTest, WriteWithOptions) {
WriteRequest expected_request = ParseTextProtoOrDie(R"pb(
key: 'abc'
value: '1234'
generation_if_equal: "abc\001"
)pb");
WriteResponse response = ParseTextProtoOrDie(R"pb(
generation_and_timestamp {
generation: '1\001'
timestamp { seconds: 1634327736 nanos: 123456 }
}
)pb");
EXPECT_CALL(mock(), Write(_, EqualsProto(expected_request), _))
.WillOnce(DoAll(SetArgPointee<2>(response), Return(grpc::Status::OK)));
tensorstore::TimestampedStorageGeneration result;
{
auto store = OpenStore();
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
result, kvstore::Write(store, expected_request.key(),
absl::Cord(expected_request.value()),
{StorageGeneration::FromString("abc")})
.result());
}
EXPECT_EQ(result.generation, StorageGeneration::FromString("1"));
}
TEST_F(TsGrpcMockTest, WriteNullopt) {
DeleteRequest expected_request = ParseTextProtoOrDie(R"pb(
key: 'abc'
generation_if_equal: '\005'
)pb");
DeleteResponse response = ParseTextProtoOrDie(R"pb(
generation_and_timestamp {
generation: '1\001'
timestamp { seconds: 1634327736 nanos: 123456 }
}
)pb");
EXPECT_CALL(mock(), Delete(_, EqualsProto(expected_request), _))
.WillOnce(DoAll(SetArgPointee<2>(response), Return(grpc::Status::OK)));
tensorstore::TimestampedStorageGeneration result;
{
auto store = OpenStore();
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
result, kvstore::Write(store, expected_request.key(), std::nullopt,
{StorageGeneration::NoValue()})
.result());
}
EXPECT_EQ(result.generation, StorageGeneration::FromString("1"));
}
TEST_F(TsGrpcMockTest, Delete) {
DeleteRequest expected_request = ParseTextProtoOrDie(R"pb(
key: 'abc'
)pb");
DeleteResponse response = ParseTextProtoOrDie(R"pb(
generation_and_timestamp {
generation: '1\001'
timestamp { seconds: 1634327736 nanos: 123456 }
}
)pb");
EXPECT_CALL(mock(), Delete(_, EqualsProto(expected_request), _))
.WillOnce(DoAll(SetArgPointee<2>(response), Return(grpc::Status::OK)));
tensorstore::TimestampedStorageGeneration result;
{
auto store = OpenStore();
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
result, kvstore::Delete(store, expected_request.key()).result());
}
EXPECT_EQ(result.generation, StorageGeneration::FromString("1"));
}
TEST_F(TsGrpcMockTest, DeleteWithOptions) {
DeleteRequest expected_request = ParseTextProtoOrDie(R"pb(
key: 'abc'
generation_if_equal: "abc\001"
)pb");
DeleteResponse response = ParseTextProtoOrDie(R"pb(
generation_and_timestamp {
generation: '1\001'
timestamp { seconds: 1634327736 nanos: 123456 }
}
)pb");
EXPECT_CALL(mock(), Delete(_, EqualsProto(expected_request), _))
.WillOnce(DoAll(SetArgPointee<2>(response), Return(grpc::Status::OK)));
tensorstore::TimestampedStorageGeneration result;
{
auto store = OpenStore();
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
result, kvstore::Delete(store, expected_request.key(),
{StorageGeneration::FromString("abc")})
.result());
}
EXPECT_EQ(result.generation, StorageGeneration::FromString("1"));
}
TEST_F(TsGrpcMockTest, DeleteRange) {
DeleteRequest expected_request = ParseTextProtoOrDie(R"pb(
range { inclusive_min: 'a/c' exclusive_max: 'a/d' }
)pb");
EXPECT_CALL(mock(), Delete(_, EqualsProto(expected_request), _))
.WillOnce(Return(grpc::Status::OK));
{
auto store = OpenStore();
TENSORSTORE_EXPECT_OK(
kvstore::DeleteRange(store, KeyRange::Prefix("a/c")).result());
}
}
TEST_F(TsGrpcMockTest, List) {
ListRequest expected_request = ParseTextProtoOrDie(R"pb(
range: {}
)pb");
ListResponse response = ParseTextProtoOrDie(R"pb(
entry { key: 'a' }
entry { key: 'b' }
entry { key: 'c' }
)pb");
EXPECT_CALL(mock(), List(_, EqualsProto(expected_request), _))
.WillOnce(testing::Invoke(
[=](auto*, auto*,
grpc::ServerWriter<ListResponse>* resp) -> ::grpc::Status {
resp->Write(response);
return grpc::Status::OK;
}));
std::vector<std::string> log;
{
auto store = OpenStore();
absl::Notification notification;
tensorstore::execution::submit(
tensorstore::kvstore::List(store, {}),
tensorstore::CompletionNotifyingReceiver{
            &notification, tensorstore::LoggingReceiver{&log}});
notification.WaitForNotification();
}
EXPECT_THAT(log, ::testing::UnorderedElementsAre(
"set_starting", "set_value: a", "set_value: b",
"set_value: c", "set_done", "set_stopping"));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/kvstore/tsgrpc/tsgrpc.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/kvstore/tsgrpc/tsgrpc_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
4006e144-3709-4d64-8719-d31365871b0e | cpp | google/tensorstore | thread | tensorstore/internal/thread/thread.cc | tensorstore/internal/thread/thread_test.cc | #if defined(__linux__) || defined(__APPLE__)
#include <pthread.h>
#endif
#include <thread>
#include <type_traits>
namespace tensorstore {
namespace internal {
// Best-effort: names the calling thread where the platform supports it.
void TrySetCurrentThreadName(const char* name) {
  if (name == nullptr) return;
#if defined(__linux__)
  // Linux rejects names longer than 15 characters (plus the terminating NUL).
  pthread_setname_np(pthread_self(), name);
#endif
#if defined(__APPLE__)
  // The macOS variant takes no thread argument; it names the calling thread.
  pthread_setname_np(name);
#endif
}
}
} | #include "tensorstore/internal/thread/thread.h"
#include <gtest/gtest.h>
namespace {
TEST(ThreadTest, Basic) {
tensorstore::internal::Thread my_thread;
int x = 0;
tensorstore::internal::Thread::Id id[2];
my_thread = tensorstore::internal::Thread({}, [&x, &id]() {
x = 1;
id[1] = tensorstore::internal::Thread::this_thread_id();
});
id[0] = my_thread.get_id();
my_thread.Join();
EXPECT_EQ(id[0], id[1]);
EXPECT_EQ(1, x);
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/thread/thread.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/thread/thread_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
76ea1194-3d44-40fa-8117-0f0b73c3424e | cpp | google/arolla | side_output | arolla/expr/eval/side_output.cc | arolla/expr/eval/side_output_test.cc | #include "arolla/expr/eval/side_output.h"
#include <string>
#include <utility>
#include "absl/container/flat_hash_map.h"
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "arolla/expr/annotation_utils.h"
#include "arolla/expr/expr.h"
#include "arolla/expr/expr_debug_string.h"
#include "arolla/expr/expr_node.h"
#include "arolla/expr/expr_visitor.h"
#include "arolla/expr/operators/bootstrap_operators.h"
#include "arolla/io/slot_listener.h"
#include "arolla/util/status_macros_backport.h"
namespace arolla::expr {
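// Strips export annotations from `expr`, returning the unwrapped expression
// together with a map from export tag to the annotated side-output
// expression. Duplicate export tags are reported as an error.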
absl::StatusOr<ExprWithSideOutputs> ExtractSideOutputs(ExprNodePtr expr) {
ExprWithSideOutputs result;
ASSIGN_OR_RETURN(
result.expr,
Transform(expr, [&](ExprNodePtr node) -> absl::StatusOr<ExprNodePtr> {
if (!IsExportAnnotation(node)) {
return node;
}
DCHECK_GE(node->node_deps().size(), 2);
auto unwrapped_node = node->node_deps()[0];
auto tag = ReadExportAnnotationTag(node);
auto value_expr = ReadExportAnnotationValue(node);
DCHECK_NE(unwrapped_node, nullptr);
DCHECK_NE(value_expr, nullptr);
if (auto [it, inserted] = result.side_outputs.emplace(tag, value_expr);
!inserted) {
return absl::FailedPreconditionError(absl::StrCat(
"duplicated export name ", tag, ": ", GetDebugSnippet(value_expr),
" vs ", GetDebugSnippet(it->second)));
}
return unwrapped_node;
}));
return result;
}
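// Casts each side output to the type the slot listener expects for that name,
// when the listener reports one; names unknown to the listener are passed
// through unchanged.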
absl::StatusOr<absl::flat_hash_map<std::string, ExprNodePtr>>
PrepareSideOutputsForListener(
const absl::flat_hash_map<std::string, ExprNodePtr>& side_outputs,
const SlotListenerBase& slot_listener) {
absl::flat_hash_map<std::string, ExprNodePtr> result;
for (auto [name, expr] : side_outputs) {
if (auto qtype = slot_listener.GetQTypeOf(name); qtype != nullptr) {
ASSIGN_OR_RETURN(expr, expr_operators::CoreCast(expr, Literal(qtype)));
}
result.emplace(name, std::move(expr));
}
return result;
}
} | #include "arolla/expr/eval/side_output.h"
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/status/status.h"
#include "absl/status/status_matchers.h"
#include "arolla/expr/expr.h"
#include "arolla/expr/testing/testing.h"
namespace arolla::expr {
namespace {
using ::absl_testing::IsOkAndHolds;
using ::absl_testing::StatusIs;
using ::arolla::testing::EqualsExpr;
using ::arolla::testing::WithExportAnnotation;
using ::arolla::testing::WithExportValueAnnotation;
using ::testing::Field;
using ::testing::MatchesRegex;
using ::testing::Pair;
using ::testing::UnorderedElementsAre;
TEST(SideOutputTest, ExtractSideOutputs) {
ASSERT_OK_AND_ASSIGN(
auto expr,
CallOp("math.add",
{WithExportAnnotation(
CallOp("math.add", {WithExportValueAnnotation(
Leaf("x"), "out_z", Leaf("z")),
Leaf("y")}),
"out_xpy"),
Leaf("y")}));
ASSERT_OK_AND_ASSIGN(
auto expected_expr,
CallOp("math.add",
{CallOp("math.add", {Leaf("x"), Leaf("y")}), Leaf("y")}));
auto expected_out_z = Leaf("z");
ASSERT_OK_AND_ASSIGN(auto expected_out_xpy,
CallOp("math.add", {Leaf("x"), Leaf("y")}));
EXPECT_THAT(ExtractSideOutputs(expr),
IsOkAndHolds(AllOf(
Field(&ExprWithSideOutputs::expr, EqualsExpr(expected_expr)),
Field(&ExprWithSideOutputs::side_outputs,
UnorderedElementsAre(
Pair("out_z", EqualsExpr(expected_out_z)),
Pair("out_xpy", EqualsExpr(expected_out_xpy)))))));
}
TEST(SideOutputTest, ExtractSideOutputsExportValueDuplicateNamesError) {
ASSERT_OK_AND_ASSIGN(
auto expr,
CallOp("math.add",
{WithExportValueAnnotation(Leaf("x"), "out_z", Leaf("z")),
WithExportValueAnnotation(Leaf("y"), "out_z", Leaf("x"))}));
EXPECT_THAT(ExtractSideOutputs(expr),
StatusIs(absl::StatusCode::kFailedPrecondition,
MatchesRegex("duplicated export name.*out_z.*")));
}
TEST(SideOutputTest, ExtractSideOutputsExportDuplicateNamesError) {
ASSERT_OK_AND_ASSIGN(
auto expr,
CallOp("math.add", {WithExportAnnotation(Leaf("x"), "out_z"),
WithExportAnnotation(Leaf("y"), "out_z")}));
EXPECT_THAT(ExtractSideOutputs(expr),
StatusIs(absl::StatusCode::kFailedPrecondition,
MatchesRegex("duplicated export name.*out_z.*")));
}
TEST(SideOutputTest, ExtractSideOutputsExportVsExportValueDuplicateNamesError) {
ASSERT_OK_AND_ASSIGN(
auto expr,
CallOp("math.add",
{WithExportValueAnnotation(Leaf("x"), "out_z", Leaf("z")),
WithExportAnnotation(Leaf("y"), "out_z")}));
EXPECT_THAT(ExtractSideOutputs(expr),
StatusIs(absl::StatusCode::kFailedPrecondition,
MatchesRegex("duplicated export name.*out_z.*")));
}
TEST(SideOutputTest,
ExtractSideOutputsExportVsExportValueDuplicateNamesSameExprError) {
ASSERT_OK_AND_ASSIGN(
auto expr,
CallOp("math.add",
{WithExportValueAnnotation(Leaf("x"), "out_z", Leaf("z")),
WithExportAnnotation(Leaf("z"), "out_z")}));
ASSERT_OK_AND_ASSIGN(auto expected_expr,
CallOp("math.add", {Leaf("x"), Leaf("z")}));
EXPECT_THAT(ExtractSideOutputs(expr),
StatusIs(absl::StatusCode::kFailedPrecondition,
MatchesRegex("duplicated export name.*out_z.*")));
}
}
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/expr/eval/side_output.cc | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/expr/eval/side_output_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
5fda23d4-48b0-4d51-81c8-7f86a164d7f4 | cpp | google/quiche | moqt_priority | quiche/quic/moqt/moqt_priority.cc | quiche/quic/moqt/moqt_priority_test.cc | #include "quiche/quic/moqt/moqt_priority.h"
#include <cstdint>
#include <limits>
#include "quiche/web_transport/web_transport.h"
namespace moqt {
namespace {
// Reverses `number` within an unsigned NumBits-wide range, so numerically
// smaller inputs map to larger outputs: MoQT priorities are
// smaller-is-more-important, while WebTransport treats a higher send order as
// more urgent.
template <uint64_t NumBits>
constexpr uint64_t Flip(uint64_t number) {
  static_assert(NumBits <= 63);
  return (1ull << NumBits) - 1 - number;
}
// Masks `value` down to its low-order bits so it fits the field layout used
// below.
template <uint64_t N>
constexpr uint64_t OnlyLowestNBits(uint64_t value) {
  static_assert(N <= 62);
  return value & ((1ull << (N + 1)) - 1);
}
}
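// Send-order layout: bits 54-61 hold the flipped subscriber priority, bits
// 46-53 the flipped publisher priority, and the remaining low bits the group
// id (flipped for ascending delivery so that earlier groups sort as more
// urgent).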
webtransport::SendOrder SendOrderForStream(MoqtPriority subscriber_priority,
MoqtPriority publisher_priority,
uint64_t group_id,
MoqtDeliveryOrder delivery_order) {
const int64_t track_bits = (Flip<8>(subscriber_priority) << 54) |
(Flip<8>(publisher_priority) << 46);
group_id = OnlyLowestNBits<46>(group_id);
if (delivery_order == MoqtDeliveryOrder::kAscending) {
group_id = Flip<46>(group_id);
}
return track_bits | group_id;
}
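// Subgroup variant: the low bits are split between the group id (shifted past
// the subgroup field) and a 20-bit subgroup id; subgroups always sort in
// ascending order regardless of the group delivery order.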
webtransport::SendOrder SendOrderForStream(MoqtPriority subscriber_priority,
MoqtPriority publisher_priority,
uint64_t group_id,
uint64_t subgroup_id,
MoqtDeliveryOrder delivery_order) {
const int64_t track_bits = (Flip<8>(subscriber_priority) << 54) |
(Flip<8>(publisher_priority) << 46);
group_id = OnlyLowestNBits<26>(group_id);
subgroup_id = OnlyLowestNBits<20>(subgroup_id);
if (delivery_order == MoqtDeliveryOrder::kAscending) {
group_id = Flip<26>(group_id);
}
subgroup_id = Flip<20>(subgroup_id);
return track_bits | (group_id << 20) | subgroup_id;
}
webtransport::SendOrder UpdateSendOrderForSubscriberPriority(
const webtransport::SendOrder send_order,
MoqtPriority subscriber_priority) {
webtransport::SendOrder new_send_order = OnlyLowestNBits<54>(send_order);
const int64_t sub_bits = Flip<8>(subscriber_priority) << 54;
new_send_order |= sub_bits;
return new_send_order;
}
const webtransport::SendOrder kMoqtControlStreamSendOrder =
std::numeric_limits<webtransport::SendOrder>::max();
} | #include "quiche/quic/moqt/moqt_priority.h"
#include "quiche/common/platform/api/quiche_test.h"
namespace moqt {
namespace {
TEST(MoqtPriorityTest, TrackPriorities) {
EXPECT_GT(SendOrderForStream(0x10, 0x80, 0, MoqtDeliveryOrder::kAscending),
SendOrderForStream(0x80, 0x80, 0, MoqtDeliveryOrder::kAscending));
EXPECT_GT(SendOrderForStream(0x80, 0x10, 0, MoqtDeliveryOrder::kAscending),
SendOrderForStream(0x80, 0x80, 0, MoqtDeliveryOrder::kAscending));
EXPECT_GT(SendOrderForStream(0x10, 0x80, 0, MoqtDeliveryOrder::kAscending),
SendOrderForStream(0x80, 0x10, 0, MoqtDeliveryOrder::kAscending));
EXPECT_GT(SendOrderForStream(0x00, 0x80, 0, MoqtDeliveryOrder::kAscending),
SendOrderForStream(0xff, 0x80, 0, MoqtDeliveryOrder::kAscending));
EXPECT_GT(SendOrderForStream(0x80, 0x00, 0, MoqtDeliveryOrder::kAscending),
SendOrderForStream(0x80, 0xff, 0, MoqtDeliveryOrder::kAscending));
}
TEST(MoqtPriorityTest, ControlStream) {
EXPECT_GT(kMoqtControlStreamSendOrder,
SendOrderForStream(0x00, 0x00, 0, MoqtDeliveryOrder::kAscending));
}
TEST(MoqtPriorityTest, StreamPerGroup) {
EXPECT_GT(SendOrderForStream(0x80, 0x80, 0, MoqtDeliveryOrder::kAscending),
SendOrderForStream(0x80, 0x80, 1, MoqtDeliveryOrder::kAscending));
EXPECT_GT(SendOrderForStream(0x80, 0x80, 1, MoqtDeliveryOrder::kDescending),
SendOrderForStream(0x80, 0x80, 0, MoqtDeliveryOrder::kDescending));
}
TEST(MoqtPriorityTest, StreamPerObject) {
EXPECT_GT(
SendOrderForStream(0x80, 0x80, 0, 0, MoqtDeliveryOrder::kAscending),
SendOrderForStream(0x80, 0x80, 0, 1, MoqtDeliveryOrder::kAscending));
EXPECT_GT(
SendOrderForStream(0x80, 0x80, 0, 0, MoqtDeliveryOrder::kDescending),
SendOrderForStream(0x80, 0x80, 0, 1, MoqtDeliveryOrder::kDescending));
EXPECT_GT(
SendOrderForStream(0x80, 0x80, 0, 1, MoqtDeliveryOrder::kAscending),
SendOrderForStream(0x80, 0x80, 1, 0, MoqtDeliveryOrder::kAscending));
EXPECT_GT(
SendOrderForStream(0x80, 0x80, 1, 1, MoqtDeliveryOrder::kDescending),
SendOrderForStream(0x80, 0x80, 0, 0, MoqtDeliveryOrder::kDescending));
}
TEST(MoqtPriorityTest, UpdateSendOrderForSubscriberPriority) {
EXPECT_EQ(
UpdateSendOrderForSubscriberPriority(
SendOrderForStream(0x80, 0x80, 0, MoqtDeliveryOrder::kAscending),
0x10),
SendOrderForStream(0x10, 0x80, 0, MoqtDeliveryOrder::kAscending));
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/moqt/moqt_priority.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/moqt/moqt_priority_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
9674d4bc-cfba-40fa-9ad2-05cea053501d | cpp | tensorflow/tensorflow | prepare_reference_module | third_party/xla/xla/tools/prepare_reference_module.cc | third_party/xla/xla/tools/prepare_reference_module_test.cc | #include "xla/tools/prepare_reference_module.h"
#include <functional>
#include <memory>
#include "absl/status/statusor.h"
#include "xla/debug_options_flags.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/despecializer.h"
#include "xla/service/hlo_module_config.h"
#include "xla/stream_executor/platform.h"
#include "xla/xla.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/status.h"
namespace xla {
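// Clones `test_module` into a "reference" module with fast-math options
// disabled so the reference backend produces comparable results, then either
// applies the caller-supplied module_modifier_hook or, unless skipped,
// despecializes the clone.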
absl::StatusOr<std::unique_ptr<HloModule>> PrepareReferenceModule(
const HloModule& test_module, HloRunnerInterface* test_runner,
const std::function<void(HloModuleConfig*)>& config_modifier_hook,
const std::function<absl::Status(const HloModule&, HloRunnerInterface*,
HloModule*)>& module_modifier_hook,
bool skip_despecialization) {
DebugOptions debug_options = GetDebugOptionsFromFlags();
debug_options.set_xla_cpu_enable_fast_math(false);
debug_options.set_xla_gpu_enable_fast_min_max(false);
HloModuleConfig reference_config = test_module.config();
reference_config.set_debug_options(debug_options);
if (config_modifier_hook) {
config_modifier_hook(&reference_config);
}
std::unique_ptr<HloModule> reference_module =
test_module.Clone(reference_config, "reference");
if (module_modifier_hook) {
TF_RETURN_IF_ERROR(
module_modifier_hook(test_module, test_runner, reference_module.get()));
} else if (!skip_despecialization) {
TF_RETURN_IF_ERROR(Despecializer().Run(reference_module.get()).status());
}
return std::move(reference_module);
}
}; | #include "xla/tools/prepare_reference_module.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/test.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
const char* const kModuleStr = R"(
HloModule jit_step
%fused_computation (param_0.2: f32[1,4]) -> f32[1,3] {
%param_0.2 = f32[1,4]{1,0} parameter(0)
ROOT %slice.11 = f32[1,3]{1,0} slice(f32[1,4]{1,0} %param_0.2),
slice={[0:1], [0:3]}
}
ENTRY %main.3491 (Arg_0.0: f32[1,4]) -> f32[1,3] {
%Arg_0.0 = f32[1,4]{1,0} parameter(0)
ROOT %fusion = f32[1,3]{1,0} fusion(f32[1,4]{1,0} %Arg_0.0), kind=kLoop,
calls=%fused_computation
}
)";
using PrepareReferenceModuleTest = HloTestBase;
TEST_F(PrepareReferenceModuleTest, PerformDespecialization) {
TF_ASSERT_OK_AND_ASSIGN(auto test_module,
ParseAndReturnVerifiedModule(kModuleStr));
TF_ASSERT_OK_AND_ASSIGN(
auto reference_module,
PrepareReferenceModule(*test_module, nullptr, {}, {},
                             /*skip_despecialization=*/false));
EXPECT_THAT(reference_module->ToString(),
Not(::testing::HasSubstr("fusion")));
}
TEST_F(PrepareReferenceModuleTest, SkipDespecialization) {
TF_ASSERT_OK_AND_ASSIGN(auto test_module,
ParseAndReturnVerifiedModule(kModuleStr));
TF_ASSERT_OK_AND_ASSIGN(
auto reference_module,
PrepareReferenceModule(*test_module, nullptr, {}, {},
                             /*skip_despecialization=*/true));
EXPECT_THAT(reference_module->ToString(), ::testing::HasSubstr("fusion"));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tools/prepare_reference_module.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tools/prepare_reference_module_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
3c493440-0107-46b2-90da-81f8f83f4b45 | cpp | google/cel-cpp | create_map_step | eval/eval/create_map_step.cc | eval/eval/create_map_step_test.cc | #include "eval/eval/create_map_step.h"
#include <cstddef>
#include <cstdint>
#include <memory>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "absl/types/optional.h"
#include "common/casting.h"
#include "common/memory.h"
#include "common/type.h"
#include "common/value.h"
#include "common/value_manager.h"
#include "eval/eval/attribute_trail.h"
#include "eval/eval/direct_expression_step.h"
#include "eval/eval/evaluator_core.h"
#include "eval/eval/expression_step_base.h"
#include "internal/status_macros.h"
namespace google::api::expr::runtime {
namespace {
using ::cel::Cast;
using ::cel::ErrorValue;
using ::cel::InstanceOf;
using ::cel::StructValueBuilderInterface;
using ::cel::UnknownValue;
using ::cel::Value;
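// Stack-machine step that pops 2 * entry_count values (alternating key,
// value) from the stack and pushes the assembled map. Entries whose index is
// in optional_indices_ must evaluate to optional values and are skipped when
// the optional is empty.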
class CreateStructStepForMap final : public ExpressionStepBase {
public:
CreateStructStepForMap(int64_t expr_id, size_t entry_count,
absl::flat_hash_set<int32_t> optional_indices)
: ExpressionStepBase(expr_id),
entry_count_(entry_count),
optional_indices_(std::move(optional_indices)) {}
absl::Status Evaluate(ExecutionFrame* frame) const override;
private:
absl::StatusOr<Value> DoEvaluate(ExecutionFrame* frame) const;
size_t entry_count_;
absl::flat_hash_set<int32_t> optional_indices_;
};
absl::StatusOr<Value> CreateStructStepForMap::DoEvaluate(
ExecutionFrame* frame) const {
auto args = frame->value_stack().GetSpan(2 * entry_count_);
if (frame->enable_unknowns()) {
absl::optional<UnknownValue> unknown_set =
frame->attribute_utility().IdentifyAndMergeUnknowns(
args, frame->value_stack().GetAttributeSpan(args.size()), true);
if (unknown_set.has_value()) {
return *unknown_set;
}
}
CEL_ASSIGN_OR_RETURN(
auto builder, frame->value_manager().NewMapValueBuilder(cel::MapType{}));
builder->Reserve(entry_count_);
for (size_t i = 0; i < entry_count_; i += 1) {
auto& map_key = args[2 * i];
CEL_RETURN_IF_ERROR(cel::CheckMapKey(map_key));
auto& map_value = args[(2 * i) + 1];
if (optional_indices_.contains(static_cast<int32_t>(i))) {
if (auto optional_map_value = cel::As<cel::OptionalValue>(map_value);
optional_map_value) {
if (!optional_map_value->HasValue()) {
continue;
}
auto key_status =
builder->Put(std::move(map_key), optional_map_value->Value());
if (!key_status.ok()) {
return frame->value_factory().CreateErrorValue(key_status);
}
} else {
return cel::TypeConversionError(map_value.DebugString(),
"optional_type")
.NativeValue();
}
} else {
auto key_status = builder->Put(std::move(map_key), std::move(map_value));
if (!key_status.ok()) {
return frame->value_factory().CreateErrorValue(key_status);
}
}
}
return std::move(*builder).Build();
}
absl::Status CreateStructStepForMap::Evaluate(ExecutionFrame* frame) const {
if (frame->value_stack().size() < 2 * entry_count_) {
return absl::InternalError("CreateStructStepForMap: stack underflow");
}
CEL_ASSIGN_OR_RETURN(auto result, DoEvaluate(frame));
frame->value_stack().PopAndPush(2 * entry_count_, std::move(result));
return absl::OkStatus();
}
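// Recursive-descent counterpart of the step above: evaluates each key/value
// dependency directly instead of reading the value stack, short-circuiting on
// the first error and accumulating unknowns when unknown processing is
// enabled.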
class DirectCreateMapStep : public DirectExpressionStep {
public:
DirectCreateMapStep(std::vector<std::unique_ptr<DirectExpressionStep>> deps,
absl::flat_hash_set<int32_t> optional_indices,
int64_t expr_id)
: DirectExpressionStep(expr_id),
deps_(std::move(deps)),
optional_indices_(std::move(optional_indices)),
entry_count_(deps_.size() / 2) {}
absl::Status Evaluate(ExecutionFrameBase& frame, Value& result,
AttributeTrail& attribute_trail) const override;
private:
std::vector<std::unique_ptr<DirectExpressionStep>> deps_;
absl::flat_hash_set<int32_t> optional_indices_;
size_t entry_count_;
};
absl::Status DirectCreateMapStep::Evaluate(
ExecutionFrameBase& frame, Value& result,
AttributeTrail& attribute_trail) const {
Value key;
Value value;
AttributeTrail tmp_attr;
auto unknowns = frame.attribute_utility().CreateAccumulator();
CEL_ASSIGN_OR_RETURN(auto builder,
frame.value_manager().NewMapValueBuilder(
frame.value_manager().GetDynDynMapType()));
builder->Reserve(entry_count_);
for (size_t i = 0; i < entry_count_; i += 1) {
int map_key_index = 2 * i;
int map_value_index = map_key_index + 1;
CEL_RETURN_IF_ERROR(deps_[map_key_index]->Evaluate(frame, key, tmp_attr));
if (InstanceOf<ErrorValue>(key)) {
result = key;
return absl::OkStatus();
}
if (frame.unknown_processing_enabled()) {
if (InstanceOf<UnknownValue>(key)) {
unknowns.Add(Cast<UnknownValue>(key));
} else if (frame.attribute_utility().CheckForUnknownPartial(tmp_attr)) {
unknowns.Add(tmp_attr);
}
}
CEL_RETURN_IF_ERROR(
deps_[map_value_index]->Evaluate(frame, value, tmp_attr));
if (InstanceOf<ErrorValue>(value)) {
result = value;
return absl::OkStatus();
}
if (frame.unknown_processing_enabled()) {
if (InstanceOf<UnknownValue>(value)) {
unknowns.Add(Cast<UnknownValue>(value));
} else if (frame.attribute_utility().CheckForUnknownPartial(tmp_attr)) {
unknowns.Add(tmp_attr);
}
}
if (!unknowns.IsEmpty()) {
continue;
}
if (optional_indices_.contains(static_cast<int32_t>(i))) {
if (auto optional_map_value =
cel::As<cel::OptionalValue>(static_cast<const Value&>(value));
optional_map_value) {
if (!optional_map_value->HasValue()) {
continue;
}
auto key_status =
builder->Put(std::move(key), optional_map_value->Value());
if (!key_status.ok()) {
result = frame.value_manager().CreateErrorValue(key_status);
return absl::OkStatus();
}
continue;
}
return cel::TypeConversionError(value.DebugString(), "optional_type")
.NativeValue();
}
CEL_RETURN_IF_ERROR(cel::CheckMapKey(key));
auto put_status = builder->Put(std::move(key), std::move(value));
if (!put_status.ok()) {
result = frame.value_manager().CreateErrorValue(put_status);
return absl::OkStatus();
}
}
if (!unknowns.IsEmpty()) {
result = std::move(unknowns).Build();
return absl::OkStatus();
}
result = std::move(*builder).Build();
return absl::OkStatus();
}
}
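// Factory for the recursive map-creation step. `deps` holds key and value
// steps interleaved: deps[2 * i] is the i-th key, deps[2 * i + 1] its value.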
std::unique_ptr<DirectExpressionStep> CreateDirectCreateMapStep(
std::vector<std::unique_ptr<DirectExpressionStep>> deps,
absl::flat_hash_set<int32_t> optional_indices, int64_t expr_id) {
return std::make_unique<DirectCreateMapStep>(
std::move(deps), std::move(optional_indices), expr_id);
}
absl::StatusOr<std::unique_ptr<ExpressionStep>> CreateCreateStructStepForMap(
size_t entry_count, absl::flat_hash_set<int32_t> optional_indices,
int64_t expr_id) {
return std::make_unique<CreateStructStepForMap>(expr_id, entry_count,
std::move(optional_indices));
}
} | #include "eval/eval/create_map_step.h"
#include <memory>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
#include "google/api/expr/v1alpha1/syntax.pb.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "base/ast_internal/expr.h"
#include "base/type_provider.h"
#include "eval/eval/cel_expression_flat_impl.h"
#include "eval/eval/direct_expression_step.h"
#include "eval/eval/evaluator_core.h"
#include "eval/eval/ident_step.h"
#include "eval/public/activation.h"
#include "eval/public/cel_value.h"
#include "eval/public/unknown_set.h"
#include "eval/testutil/test_message.pb.h"
#include "internal/status_macros.h"
#include "internal/testing.h"
#include "runtime/runtime_options.h"
#include "google/protobuf/arena.h"
namespace google::api::expr::runtime {
namespace {
using ::cel::TypeProvider;
using ::cel::ast_internal::Expr;
using ::google::protobuf::Arena;
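// Builds an ExecutionPath for the stack-machine evaluator: one ident step per
// key and per value (each bound in `activation`), followed by the
// map-creation step that consumes them.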
absl::StatusOr<ExecutionPath> CreateStackMachineProgram(
const std::vector<std::pair<CelValue, CelValue>>& values,
Activation& activation) {
ExecutionPath path;
Expr expr1;
Expr expr0;
std::vector<Expr> exprs;
exprs.reserve(values.size() * 2);
int index = 0;
auto& create_struct = expr1.mutable_struct_expr();
for (const auto& item : values) {
std::string key_name = absl::StrCat("key", index);
std::string value_name = absl::StrCat("value", index);
auto& key_expr = exprs.emplace_back();
auto& key_ident = key_expr.mutable_ident_expr();
key_ident.set_name(key_name);
CEL_ASSIGN_OR_RETURN(auto step_key,
CreateIdentStep(key_ident, exprs.back().id()));
auto& value_expr = exprs.emplace_back();
auto& value_ident = value_expr.mutable_ident_expr();
value_ident.set_name(value_name);
CEL_ASSIGN_OR_RETURN(auto step_value,
CreateIdentStep(value_ident, exprs.back().id()));
path.push_back(std::move(step_key));
path.push_back(std::move(step_value));
activation.InsertValue(key_name, item.first);
activation.InsertValue(value_name, item.second);
create_struct.mutable_fields().emplace_back();
index++;
}
CEL_ASSIGN_OR_RETURN(
auto step1, CreateCreateStructStepForMap(values.size(), {}, expr1.id()));
path.push_back(std::move(step1));
return path;
}
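// Equivalent program for the recursive evaluator: direct ident steps feed a
// DirectCreateMapStep, wrapped so it can run inside a stack-machine
// ExecutionPath.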
absl::StatusOr<ExecutionPath> CreateRecursiveProgram(
const std::vector<std::pair<CelValue, CelValue>>& values,
Activation& activation) {
ExecutionPath path;
int index = 0;
std::vector<std::unique_ptr<DirectExpressionStep>> deps;
for (const auto& item : values) {
std::string key_name = absl::StrCat("key", index);
std::string value_name = absl::StrCat("value", index);
deps.push_back(CreateDirectIdentStep(key_name, -1));
deps.push_back(CreateDirectIdentStep(value_name, -1));
activation.InsertValue(key_name, item.first);
activation.InsertValue(value_name, item.second);
index++;
}
path.push_back(std::make_unique<WrappedDirectStep>(
CreateDirectCreateMapStep(std::move(deps), {}, -1), -1));
return path;
}
absl::StatusOr<CelValue> RunCreateMapExpression(
const std::vector<std::pair<CelValue, CelValue>>& values,
google::protobuf::Arena* arena, bool enable_unknowns, bool enable_recursive_program) {
Activation activation;
ExecutionPath path;
if (enable_recursive_program) {
CEL_ASSIGN_OR_RETURN(path, CreateRecursiveProgram(values, activation));
} else {
CEL_ASSIGN_OR_RETURN(path, CreateStackMachineProgram(values, activation));
}
cel::RuntimeOptions options;
if (enable_unknowns) {
options.unknown_processing = cel::UnknownProcessingOptions::kAttributeOnly;
}
CelExpressionFlatImpl cel_expr(
FlatExpression(std::move(path), 0,
TypeProvider::Builtin(), options));
return cel_expr.Evaluate(activation, arena);
}
class CreateMapStepTest
: public testing::TestWithParam<std::tuple<bool, bool>> {
public:
bool enable_unknowns() { return std::get<0>(GetParam()); }
bool enable_recursive_program() { return std::get<1>(GetParam()); }
absl::StatusOr<CelValue> RunMapExpression(
const std::vector<std::pair<CelValue, CelValue>>& values,
google::protobuf::Arena* arena) {
return RunCreateMapExpression(values, arena, enable_unknowns(),
enable_recursive_program());
}
};
TEST_P(CreateMapStepTest, TestCreateEmptyMap) {
Arena arena;
ASSERT_OK_AND_ASSIGN(CelValue result, RunMapExpression({}, &arena));
ASSERT_TRUE(result.IsMap());
const CelMap* cel_map = result.MapOrDie();
ASSERT_EQ(cel_map->size(), 0);
}
TEST(CreateMapStepTest, TestMapCreateWithUnknown) {
Arena arena;
UnknownSet unknown_set;
std::vector<std::pair<CelValue, CelValue>> entries;
std::vector<std::string> kKeys = {"test2", "test1"};
entries.push_back(
{CelValue::CreateString(&kKeys[0]), CelValue::CreateInt64(2)});
entries.push_back({CelValue::CreateString(&kKeys[1]),
CelValue::CreateUnknownSet(&unknown_set)});
ASSERT_OK_AND_ASSIGN(CelValue result,
RunCreateMapExpression(entries, &arena, true, false));
ASSERT_TRUE(result.IsUnknownSet());
}
TEST(CreateMapStepTest, TestMapCreateWithUnknownRecursiveProgram) {
Arena arena;
UnknownSet unknown_set;
std::vector<std::pair<CelValue, CelValue>> entries;
std::vector<std::string> kKeys = {"test2", "test1"};
entries.push_back(
{CelValue::CreateString(&kKeys[0]), CelValue::CreateInt64(2)});
entries.push_back({CelValue::CreateString(&kKeys[1]),
CelValue::CreateUnknownSet(&unknown_set)});
ASSERT_OK_AND_ASSIGN(CelValue result,
RunCreateMapExpression(entries, &arena, true, true));
ASSERT_TRUE(result.IsUnknownSet());
}
TEST_P(CreateMapStepTest, TestCreateStringMap) {
Arena arena;
std::vector<std::pair<CelValue, CelValue>> entries;
std::vector<std::string> kKeys = {"test2", "test1"};
entries.push_back(
{CelValue::CreateString(&kKeys[0]), CelValue::CreateInt64(2)});
entries.push_back(
{CelValue::CreateString(&kKeys[1]), CelValue::CreateInt64(1)});
ASSERT_OK_AND_ASSIGN(CelValue result, RunMapExpression(entries, &arena));
ASSERT_TRUE(result.IsMap());
const CelMap* cel_map = result.MapOrDie();
ASSERT_EQ(cel_map->size(), 2);
auto lookup0 = cel_map->Get(&arena, CelValue::CreateString(&kKeys[0]));
ASSERT_TRUE(lookup0.has_value());
ASSERT_TRUE(lookup0->IsInt64()) << lookup0->DebugString();
EXPECT_EQ(lookup0->Int64OrDie(), 2);
auto lookup1 = cel_map->Get(&arena, CelValue::CreateString(&kKeys[1]));
ASSERT_TRUE(lookup1.has_value());
ASSERT_TRUE(lookup1->IsInt64());
EXPECT_EQ(lookup1->Int64OrDie(), 1);
}
INSTANTIATE_TEST_SUITE_P(CreateMapStep, CreateMapStepTest,
testing::Combine(testing::Bool(), testing::Bool()));
}
} | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/eval/eval/create_map_step.cc | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/eval/eval/create_map_step_test.cc | 4552db5798fb0853b131b783d8875794334fae7f |
cfe580f4-0120-4a4a-afd0-a78f383ab876 | cpp | google/arolla | any_qtype | arolla/qtype/any_qtype.cc | arolla/qtype/any_qtype_test.cc | #include "arolla/qtype/any_qtype.h"
#include <typeinfo>
#include "absl/status/status.h"
#include "absl/strings/str_format.h"
#include "arolla/qtype/simple_qtype.h"
#include "arolla/util/demangle.h"
namespace arolla {
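// Builds the error returned when Any::As<T>() is invoked with a T that does
// not match the stored value, or when the Any is empty. A usage sketch (see
// any_qtype.h for the full Any interface):
// Any boxed(42);
// auto ok = boxed.As<int>(); // holds 42
// auto bad = boxed.As<float>(); // FailedPreconditionError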
absl::Status Any::InvalidCast(const std::type_info& t) const {
if (value_.has_value()) {
return absl::FailedPreconditionError(absl::StrFormat(
"can not cast Any(%s) to %s", TypeName(value_.type()), TypeName(t)));
} else {
return absl::FailedPreconditionError("can not cast an empty ::arolla::Any");
}
}
AROLLA_DEFINE_SIMPLE_QTYPE(ANY, Any);
} | #include "arolla/qtype/any_qtype.h"
#include <string>
#include <utility>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/status/status.h"
#include "absl/status/status_matchers.h"
#include "arolla/qtype/typed_value.h"
namespace arolla {
namespace {
using ::absl_testing::IsOkAndHolds;
using ::absl_testing::StatusIs;
using ::testing::HasSubstr;
TEST(AnyQType, AnyConstructorRegression) {
Any any;
Any copy_1 = any;
Any copy_2(any);
Any copy_3 = std::move(any);
Any copy_4(std::move(copy_2));
}
TEST(AnyQType, Any) {
int v1 = 5;
std::string v2 = "string";
TypedValue tv1 = TypedValue::FromValue(Any(v1));
TypedValue tv2 = TypedValue::FromValue(Any(v2));
TypedValue tv3 = TypedValue::FromValue(Any());
ASSERT_OK_AND_ASSIGN(const Any& a1, tv1.As<Any>());
ASSERT_OK_AND_ASSIGN(const Any& a2, tv2.As<Any>());
ASSERT_OK_AND_ASSIGN(const Any& a3, tv3.As<Any>());
EXPECT_THAT(a1.As<int>(), IsOkAndHolds(v1));
EXPECT_THAT(a1.As<double>(), StatusIs(absl::StatusCode::kFailedPrecondition,
HasSubstr("can not cast Any")));
ASSERT_OK_AND_ASSIGN(const std::string& v2_res, a2.As<std::string>());
EXPECT_EQ(v2, v2_res);
EXPECT_THAT(a2.As<double>(), StatusIs(absl::StatusCode::kFailedPrecondition,
HasSubstr("can not cast Any")));
EXPECT_THAT(a3.As<double>(),
StatusIs(absl::StatusCode::kFailedPrecondition,
HasSubstr("can not cast an empty ::arolla::Any")));
}
TEST(AnyQType, Fingerprint) {
Any a = Any(1);
Any b = Any(1);
Any a_copy = a;
EXPECT_NE(TypedValue::FromValue(a).GetFingerprint(),
TypedValue::FromValue(b).GetFingerprint());
EXPECT_EQ(TypedValue::FromValue(a).GetFingerprint(),
TypedValue::FromValue(a_copy).GetFingerprint());
}
}
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/qtype/any_qtype.cc | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/qtype/any_qtype_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
17031416-18ad-44b1-bf16-d59ef841a76f | cpp | abseil/abseil-cpp | config | absl/base/config.h | absl/base/config_test.cc | #ifndef ABSL_BASE_CONFIG_H_
#define ABSL_BASE_CONFIG_H_
#include <limits.h>
#ifdef __cplusplus
#include <cstddef>
#endif
#if defined(_MSVC_LANG)
#define ABSL_INTERNAL_CPLUSPLUS_LANG _MSVC_LANG
#elif defined(__cplusplus)
#define ABSL_INTERNAL_CPLUSPLUS_LANG __cplusplus
#endif
#if defined(ABSL_INTERNAL_CPLUSPLUS_LANG) && \
ABSL_INTERNAL_CPLUSPLUS_LANG >= 202002L
#include <version>
#endif
#if defined(__APPLE__)
#include <Availability.h>
#include <TargetConditionals.h>
#endif
#include "absl/base/options.h"
#include "absl/base/policy_checks.h"
#undef ABSL_LTS_RELEASE_VERSION
#undef ABSL_LTS_RELEASE_PATCH_LEVEL
#define ABSL_INTERNAL_DO_TOKEN_STR(x) #x
#define ABSL_INTERNAL_TOKEN_STR(x) ABSL_INTERNAL_DO_TOKEN_STR(x)
#if !defined(ABSL_OPTION_USE_INLINE_NAMESPACE) || \
!defined(ABSL_OPTION_INLINE_NAMESPACE_NAME)
#error options.h is misconfigured.
#endif
#if defined(__cplusplus) && ABSL_OPTION_USE_INLINE_NAMESPACE == 1
#define ABSL_INTERNAL_INLINE_NAMESPACE_STR \
ABSL_INTERNAL_TOKEN_STR(ABSL_OPTION_INLINE_NAMESPACE_NAME)
static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != '\0',
"options.h misconfigured: ABSL_OPTION_INLINE_NAMESPACE_NAME must "
"not be empty.");
static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' ||
ABSL_INTERNAL_INLINE_NAMESPACE_STR[1] != 'e' ||
ABSL_INTERNAL_INLINE_NAMESPACE_STR[2] != 'a' ||
ABSL_INTERNAL_INLINE_NAMESPACE_STR[3] != 'd' ||
ABSL_INTERNAL_INLINE_NAMESPACE_STR[4] != '\0',
"options.h misconfigured: ABSL_OPTION_INLINE_NAMESPACE_NAME must "
"be changed to a new, unique identifier name.");
#endif
#if ABSL_OPTION_USE_INLINE_NAMESPACE == 0
#define ABSL_NAMESPACE_BEGIN
#define ABSL_NAMESPACE_END
#define ABSL_INTERNAL_C_SYMBOL(x) x
#elif ABSL_OPTION_USE_INLINE_NAMESPACE == 1
#define ABSL_NAMESPACE_BEGIN \
inline namespace ABSL_OPTION_INLINE_NAMESPACE_NAME {
#define ABSL_NAMESPACE_END }
#define ABSL_INTERNAL_C_SYMBOL_HELPER_2(x, v) x##_##v
#define ABSL_INTERNAL_C_SYMBOL_HELPER_1(x, v) \
ABSL_INTERNAL_C_SYMBOL_HELPER_2(x, v)
#define ABSL_INTERNAL_C_SYMBOL(x) \
ABSL_INTERNAL_C_SYMBOL_HELPER_1(x, ABSL_OPTION_INLINE_NAMESPACE_NAME)
#else
#error options.h is misconfigured.
#endif
#ifdef __has_builtin
#define ABSL_HAVE_BUILTIN(x) __has_builtin(x)
#else
#define ABSL_HAVE_BUILTIN(x) 0
#endif
#ifdef __has_feature
#define ABSL_HAVE_FEATURE(f) __has_feature(f)
#else
#define ABSL_HAVE_FEATURE(f) 0
#endif
#if defined(__GNUC__) && defined(__GNUC_MINOR__)
#define ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(x, y) \
(__GNUC__ > (x) || __GNUC__ == (x) && __GNUC_MINOR__ >= (y))
#else
#define ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(x, y) 0
#endif
#if defined(__clang__) && defined(__clang_major__) && defined(__clang_minor__)
#define ABSL_INTERNAL_HAVE_MIN_CLANG_VERSION(x, y) \
(__clang_major__ > (x) || __clang_major__ == (x) && __clang_minor__ >= (y))
#else
#define ABSL_INTERNAL_HAVE_MIN_CLANG_VERSION(x, y) 0
#endif
#ifdef ABSL_HAVE_TLS
#error ABSL_HAVE_TLS cannot be directly set
#elif (defined(__linux__)) && (defined(__clang__) || defined(_GLIBCXX_HAVE_TLS))
#define ABSL_HAVE_TLS 1
#endif
#ifdef ABSL_HAVE_STD_IS_TRIVIALLY_DESTRUCTIBLE
#error ABSL_HAVE_STD_IS_TRIVIALLY_DESTRUCTIBLE cannot be directly set
#else
#define ABSL_HAVE_STD_IS_TRIVIALLY_DESTRUCTIBLE 1
#endif
#ifdef ABSL_HAVE_STD_IS_TRIVIALLY_CONSTRUCTIBLE
#error ABSL_HAVE_STD_IS_TRIVIALLY_CONSTRUCTIBLE cannot be directly set
#else
#define ABSL_HAVE_STD_IS_TRIVIALLY_CONSTRUCTIBLE 1
#endif
#ifdef ABSL_HAVE_STD_IS_TRIVIALLY_ASSIGNABLE
#error ABSL_HAVE_STD_IS_TRIVIALLY_ASSIGNABLE cannot be directly set
#else
#define ABSL_HAVE_STD_IS_TRIVIALLY_ASSIGNABLE 1
#endif
#ifdef ABSL_HAVE_STD_IS_TRIVIALLY_COPYABLE
#error ABSL_HAVE_STD_IS_TRIVIALLY_COPYABLE cannot be directly set
#else
#define ABSL_HAVE_STD_IS_TRIVIALLY_COPYABLE 1
#endif
#ifdef ABSL_HAVE_THREAD_LOCAL
#error ABSL_HAVE_THREAD_LOCAL cannot be directly set
#else
#define ABSL_HAVE_THREAD_LOCAL 1
#endif
#ifdef ABSL_HAVE_INTRINSIC_INT128
#error ABSL_HAVE_INTRINSIC_INT128 cannot be directly set
#elif defined(__SIZEOF_INT128__)
#if (defined(__clang__) && !defined(_WIN32)) || \
(defined(__CUDACC__) && __CUDACC_VER_MAJOR__ >= 9) || \
(defined(__GNUC__) && !defined(__clang__) && !defined(__CUDACC__))
#define ABSL_HAVE_INTRINSIC_INT128 1
#elif defined(__CUDACC__)
#if __CUDACC_VER__ >= 70000
#define ABSL_HAVE_INTRINSIC_INT128 1
#endif
#endif
#endif
#ifdef ABSL_HAVE_EXCEPTIONS
#error ABSL_HAVE_EXCEPTIONS cannot be directly set.
#elif ABSL_INTERNAL_HAVE_MIN_CLANG_VERSION(3, 6)
#if ABSL_HAVE_FEATURE(cxx_exceptions)
#define ABSL_HAVE_EXCEPTIONS 1
#endif
#elif defined(__clang__)
#if defined(__EXCEPTIONS) && ABSL_HAVE_FEATURE(cxx_exceptions)
#define ABSL_HAVE_EXCEPTIONS 1
#endif
#elif !(defined(__GNUC__) && !defined(__cpp_exceptions)) && \
!(defined(_MSC_VER) && !defined(_CPPUNWIND))
#define ABSL_HAVE_EXCEPTIONS 1
#endif
#ifdef ABSL_HAVE_MMAP
#error ABSL_HAVE_MMAP cannot be directly set
#elif defined(__linux__) || defined(__APPLE__) || defined(__FreeBSD__) || \
defined(_AIX) || defined(__ros__) || defined(__native_client__) || \
defined(__asmjs__) || defined(__EMSCRIPTEN__) || defined(__Fuchsia__) || \
defined(__sun) || defined(__myriad2__) || defined(__HAIKU__) || \
defined(__OpenBSD__) || defined(__NetBSD__) || defined(__QNX__) || \
defined(__VXWORKS__) || defined(__hexagon__)
#define ABSL_HAVE_MMAP 1
#endif
#ifdef ABSL_HAVE_PTHREAD_GETSCHEDPARAM
#error ABSL_HAVE_PTHREAD_GETSCHEDPARAM cannot be directly set
#elif defined(__linux__) || defined(__APPLE__) || defined(__FreeBSD__) || \
defined(_AIX) || defined(__ros__) || defined(__OpenBSD__) || \
defined(__NetBSD__) || defined(__VXWORKS__)
#define ABSL_HAVE_PTHREAD_GETSCHEDPARAM 1
#endif
#ifdef ABSL_HAVE_SCHED_GETCPU
#error ABSL_HAVE_SCHED_GETCPU cannot be directly set
#elif defined(__linux__)
#define ABSL_HAVE_SCHED_GETCPU 1
#endif
#ifdef ABSL_HAVE_SCHED_YIELD
#error ABSL_HAVE_SCHED_YIELD cannot be directly set
#elif defined(__linux__) || defined(__ros__) || defined(__native_client__) || \
defined(__VXWORKS__)
#define ABSL_HAVE_SCHED_YIELD 1
#endif
#ifdef ABSL_HAVE_SEMAPHORE_H
#error ABSL_HAVE_SEMAPHORE_H cannot be directly set
#elif defined(__linux__) || defined(__ros__) || defined(__VXWORKS__)
#define ABSL_HAVE_SEMAPHORE_H 1
#endif
#ifdef ABSL_HAVE_ALARM
#error ABSL_HAVE_ALARM cannot be directly set
#elif defined(__GOOGLE_GRTE_VERSION__)
#define ABSL_HAVE_ALARM 1
#elif defined(__GLIBC__)
#define ABSL_HAVE_ALARM 1
#elif defined(_MSC_VER)
#elif defined(__MINGW32__)
#elif defined(__EMSCRIPTEN__)
#elif defined(__wasi__)
#elif defined(__Fuchsia__)
#elif defined(__native_client__)
#elif defined(__hexagon__)
#else
#define ABSL_HAVE_ALARM 1
#endif
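// Endianness detection: exactly one of ABSL_IS_LITTLE_ENDIAN or
// ABSL_IS_BIG_ENDIAN is defined below, or compilation fails.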
#if defined(ABSL_IS_BIG_ENDIAN)
#error "ABSL_IS_BIG_ENDIAN cannot be directly set."
#endif
#if defined(ABSL_IS_LITTLE_ENDIAN)
#error "ABSL_IS_LITTLE_ENDIAN cannot be directly set."
#endif
#if (defined(__BYTE_ORDER__) && defined(__ORDER_LITTLE_ENDIAN__) && \
__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__)
#define ABSL_IS_LITTLE_ENDIAN 1
#elif defined(__BYTE_ORDER__) && defined(__ORDER_BIG_ENDIAN__) && \
__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
#define ABSL_IS_BIG_ENDIAN 1
#elif defined(_WIN32)
#define ABSL_IS_LITTLE_ENDIAN 1
#else
#error "absl endian detection needs to be set up for your compiler"
#endif
#if defined(__APPLE__) && \
((defined(__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__) && \
__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ < 101300) || \
(defined(__ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__) && \
__ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__ < 120000) || \
(defined(__ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__) && \
__ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__ < 50000) || \
(defined(__ENVIRONMENT_TV_OS_VERSION_MIN_REQUIRED__) && \
__ENVIRONMENT_TV_OS_VERSION_MIN_REQUIRED__ < 120000))
#define ABSL_INTERNAL_APPLE_CXX17_TYPES_UNAVAILABLE 1
#else
#define ABSL_INTERNAL_APPLE_CXX17_TYPES_UNAVAILABLE 0
#endif
#ifdef ABSL_HAVE_STD_ANY
#error "ABSL_HAVE_STD_ANY cannot be directly set."
#elif defined(__cpp_lib_any) && __cpp_lib_any >= 201606L
#define ABSL_HAVE_STD_ANY 1
#elif defined(ABSL_INTERNAL_CPLUSPLUS_LANG) && \
ABSL_INTERNAL_CPLUSPLUS_LANG >= 201703L && \
!ABSL_INTERNAL_APPLE_CXX17_TYPES_UNAVAILABLE
#define ABSL_HAVE_STD_ANY 1
#endif
#ifdef ABSL_HAVE_STD_OPTIONAL
#error "ABSL_HAVE_STD_OPTIONAL cannot be directly set."
#elif defined(__cpp_lib_optional) && __cpp_lib_optional >= 202106L
#define ABSL_HAVE_STD_OPTIONAL 1
#elif defined(ABSL_INTERNAL_CPLUSPLUS_LANG) && \
ABSL_INTERNAL_CPLUSPLUS_LANG >= 201703L && \
!ABSL_INTERNAL_APPLE_CXX17_TYPES_UNAVAILABLE
#define ABSL_HAVE_STD_OPTIONAL 1
#endif
#ifdef ABSL_HAVE_STD_VARIANT
#error "ABSL_HAVE_STD_VARIANT cannot be directly set."
#elif defined(__cpp_lib_variant) && __cpp_lib_variant >= 201606L
#define ABSL_HAVE_STD_VARIANT 1
#elif defined(ABSL_INTERNAL_CPLUSPLUS_LANG) && \
ABSL_INTERNAL_CPLUSPLUS_LANG >= 201703L && \
!ABSL_INTERNAL_APPLE_CXX17_TYPES_UNAVAILABLE
#define ABSL_HAVE_STD_VARIANT 1
#endif
#ifdef ABSL_HAVE_STD_STRING_VIEW
#error "ABSL_HAVE_STD_STRING_VIEW cannot be directly set."
#elif defined(__cpp_lib_string_view) && __cpp_lib_string_view >= 201606L
#define ABSL_HAVE_STD_STRING_VIEW 1
#elif defined(ABSL_INTERNAL_CPLUSPLUS_LANG) && \
ABSL_INTERNAL_CPLUSPLUS_LANG >= 201703L
#define ABSL_HAVE_STD_STRING_VIEW 1
#endif
#ifdef ABSL_HAVE_STD_ORDERING
#error "ABSL_HAVE_STD_ORDERING cannot be directly set."
#elif (defined(__cpp_lib_three_way_comparison) && \
__cpp_lib_three_way_comparison >= 201907L) || \
(defined(ABSL_INTERNAL_CPLUSPLUS_LANG) && \
ABSL_INTERNAL_CPLUSPLUS_LANG >= 202002L)
#define ABSL_HAVE_STD_ORDERING 1
#endif
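// The ABSL_USES_STD_* macros below resolve the ABSL_OPTION_USE_STD_* settings
// from options.h against the ABSL_HAVE_STD_* feature probes above (0 = never
// use the std type, 1 = always use it, 2 = use it when available).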
#if !defined(ABSL_OPTION_USE_STD_ANY)
#error options.h is misconfigured.
#elif ABSL_OPTION_USE_STD_ANY == 0 || \
(ABSL_OPTION_USE_STD_ANY == 2 && !defined(ABSL_HAVE_STD_ANY))
#undef ABSL_USES_STD_ANY
#elif ABSL_OPTION_USE_STD_ANY == 1 || \
(ABSL_OPTION_USE_STD_ANY == 2 && defined(ABSL_HAVE_STD_ANY))
#define ABSL_USES_STD_ANY 1
#else
#error options.h is misconfigured.
#endif
#if !defined(ABSL_OPTION_USE_STD_OPTIONAL)
#error options.h is misconfigured.
#elif ABSL_OPTION_USE_STD_OPTIONAL == 0 || \
(ABSL_OPTION_USE_STD_OPTIONAL == 2 && !defined(ABSL_HAVE_STD_OPTIONAL))
#undef ABSL_USES_STD_OPTIONAL
#elif ABSL_OPTION_USE_STD_OPTIONAL == 1 || \
(ABSL_OPTION_USE_STD_OPTIONAL == 2 && defined(ABSL_HAVE_STD_OPTIONAL))
#define ABSL_USES_STD_OPTIONAL 1
#else
#error options.h is misconfigured.
#endif
#if !defined(ABSL_OPTION_USE_STD_VARIANT)
#error options.h is misconfigured.
#elif ABSL_OPTION_USE_STD_VARIANT == 0 || \
(ABSL_OPTION_USE_STD_VARIANT == 2 && !defined(ABSL_HAVE_STD_VARIANT))
#undef ABSL_USES_STD_VARIANT
#elif ABSL_OPTION_USE_STD_VARIANT == 1 || \
(ABSL_OPTION_USE_STD_VARIANT == 2 && defined(ABSL_HAVE_STD_VARIANT))
#define ABSL_USES_STD_VARIANT 1
#else
#error options.h is misconfigured.
#endif
#if !defined(ABSL_OPTION_USE_STD_STRING_VIEW)
#error options.h is misconfigured.
#elif ABSL_OPTION_USE_STD_STRING_VIEW == 0 || \
(ABSL_OPTION_USE_STD_STRING_VIEW == 2 && \
!defined(ABSL_HAVE_STD_STRING_VIEW))
#undef ABSL_USES_STD_STRING_VIEW
#elif ABSL_OPTION_USE_STD_STRING_VIEW == 1 || \
(ABSL_OPTION_USE_STD_STRING_VIEW == 2 && \
defined(ABSL_HAVE_STD_STRING_VIEW))
#define ABSL_USES_STD_STRING_VIEW 1
#else
#error options.h is misconfigured.
#endif
#if !defined(ABSL_OPTION_USE_STD_ORDERING)
#error options.h is misconfigured.
#elif ABSL_OPTION_USE_STD_ORDERING == 0 || \
(ABSL_OPTION_USE_STD_ORDERING == 2 && !defined(ABSL_HAVE_STD_ORDERING))
#undef ABSL_USES_STD_ORDERING
#elif ABSL_OPTION_USE_STD_ORDERING == 1 || \
(ABSL_OPTION_USE_STD_ORDERING == 2 && defined(ABSL_HAVE_STD_ORDERING))
#define ABSL_USES_STD_ORDERING 1
#else
#error options.h is misconfigured.
#endif
#if defined(_MSC_VER) && _MSC_VER >= 1700 && defined(_DEBUG)
#define ABSL_INTERNAL_MSVC_2017_DBG_MODE
#endif
#if defined(_MSC_VER)
#if ABSL_OPTION_USE_INLINE_NAMESPACE == 0
#define ABSL_INTERNAL_MANGLED_NS "absl"
#define ABSL_INTERNAL_MANGLED_BACKREFERENCE "5"
#else
#define ABSL_INTERNAL_MANGLED_NS \
ABSL_INTERNAL_TOKEN_STR(ABSL_OPTION_INLINE_NAMESPACE_NAME) "@absl"
#define ABSL_INTERNAL_MANGLED_BACKREFERENCE "6"
#endif
#endif
#if defined(_MSC_VER)
#if defined(ABSL_BUILD_DLL)
#define ABSL_DLL __declspec(dllexport)
#elif defined(ABSL_CONSUME_DLL)
#define ABSL_DLL __declspec(dllimport)
#else
#define ABSL_DLL
#endif
#else
#define ABSL_DLL
#endif
#if defined(_MSC_VER)
#if defined(ABSL_BUILD_TEST_DLL)
#define ABSL_TEST_DLL __declspec(dllexport)
#elif defined(ABSL_CONSUME_TEST_DLL)
#define ABSL_TEST_DLL __declspec(dllimport)
#else
#define ABSL_TEST_DLL
#endif
#else
#define ABSL_TEST_DLL
#endif
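// Sanitizer detection: each ABSL_HAVE_*_SANITIZER macro is derived from
// compiler-provided defines or __has_feature probes and must not be set
// manually.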
#ifdef ABSL_HAVE_MEMORY_SANITIZER
#error "ABSL_HAVE_MEMORY_SANITIZER cannot be directly set."
#elif !defined(__native_client__) && ABSL_HAVE_FEATURE(memory_sanitizer)
#define ABSL_HAVE_MEMORY_SANITIZER 1
#endif
#ifdef ABSL_HAVE_THREAD_SANITIZER
#error "ABSL_HAVE_THREAD_SANITIZER cannot be directly set."
#elif defined(__SANITIZE_THREAD__)
#define ABSL_HAVE_THREAD_SANITIZER 1
#elif ABSL_HAVE_FEATURE(thread_sanitizer)
#define ABSL_HAVE_THREAD_SANITIZER 1
#endif
#ifdef ABSL_HAVE_ADDRESS_SANITIZER
#error "ABSL_HAVE_ADDRESS_SANITIZER cannot be directly set."
#elif defined(__SANITIZE_ADDRESS__)
#define ABSL_HAVE_ADDRESS_SANITIZER 1
#elif ABSL_HAVE_FEATURE(address_sanitizer)
#define ABSL_HAVE_ADDRESS_SANITIZER 1
#endif
#ifdef ABSL_HAVE_HWADDRESS_SANITIZER
#error "ABSL_HAVE_HWADDRESS_SANITIZER cannot be directly set."
#elif defined(__SANITIZE_HWADDRESS__)
#define ABSL_HAVE_HWADDRESS_SANITIZER 1
#elif ABSL_HAVE_FEATURE(hwaddress_sanitizer)
#define ABSL_HAVE_HWADDRESS_SANITIZER 1
#endif
#ifdef ABSL_HAVE_DATAFLOW_SANITIZER
#error "ABSL_HAVE_DATAFLOW_SANITIZER cannot be directly set."
#elif defined(DATAFLOW_SANITIZER)
#define ABSL_HAVE_DATAFLOW_SANITIZER 1
#elif ABSL_HAVE_FEATURE(dataflow_sanitizer)
#define ABSL_HAVE_DATAFLOW_SANITIZER 1
#endif
#ifdef ABSL_HAVE_LEAK_SANITIZER
#error "ABSL_HAVE_LEAK_SANITIZER cannot be directly set."
#elif defined(LEAK_SANITIZER)
#define ABSL_HAVE_LEAK_SANITIZER 1
#elif ABSL_HAVE_FEATURE(leak_sanitizer)
#define ABSL_HAVE_LEAK_SANITIZER 1
#elif defined(ABSL_HAVE_ADDRESS_SANITIZER)
#define ABSL_HAVE_LEAK_SANITIZER 1
#endif
#ifdef ABSL_HAVE_CLASS_TEMPLATE_ARGUMENT_DEDUCTION
#error "ABSL_HAVE_CLASS_TEMPLATE_ARGUMENT_DEDUCTION cannot be directly set."
#elif defined(__cpp_deduction_guides)
#define ABSL_HAVE_CLASS_TEMPLATE_ARGUMENT_DEDUCTION 1
#endif
#if defined(ABSL_INTERNAL_CPLUSPLUS_LANG) && \
ABSL_INTERNAL_CPLUSPLUS_LANG < 201703L
#define ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL 1
#endif
#ifdef ABSL_INTERNAL_HAS_RTTI
#error ABSL_INTERNAL_HAS_RTTI cannot be directly set
#elif ABSL_HAVE_FEATURE(cxx_rtti)
#define ABSL_INTERNAL_HAS_RTTI 1
#elif defined(__GNUC__) && defined(__GXX_RTTI)
#define ABSL_INTERNAL_HAS_RTTI 1
#elif defined(_MSC_VER) && defined(_CPPRTTI)
#define ABSL_INTERNAL_HAS_RTTI 1
#elif !defined(__GNUC__) && !defined(_MSC_VER)
#define ABSL_INTERNAL_HAS_RTTI 1
#endif
#ifdef ABSL_INTERNAL_HAS_CXA_DEMANGLE
#error ABSL_INTERNAL_HAS_CXA_DEMANGLE cannot be directly set
#elif defined(OS_ANDROID) && (defined(__i386__) || defined(__x86_64__))
#define ABSL_INTERNAL_HAS_CXA_DEMANGLE 0
#elif defined(__GNUC__)
#define ABSL_INTERNAL_HAS_CXA_DEMANGLE 1
#elif defined(__clang__) && !defined(_MSC_VER)
#define ABSL_INTERNAL_HAS_CXA_DEMANGLE 1
#endif
#ifdef ABSL_INTERNAL_HAVE_SSE
#error ABSL_INTERNAL_HAVE_SSE cannot be directly set
#elif defined(__SSE__)
#define ABSL_INTERNAL_HAVE_SSE 1
#elif (defined(_M_X64) || (defined(_M_IX86_FP) && _M_IX86_FP >= 1)) && \
!defined(_M_ARM64EC)
#define ABSL_INTERNAL_HAVE_SSE 1
#endif
#ifdef ABSL_INTERNAL_HAVE_SSE2
#error ABSL_INTERNAL_HAVE_SSE2 cannot be directly set
#elif defined(__SSE2__)
#define ABSL_INTERNAL_HAVE_SSE2 1
#elif (defined(_M_X64) || (defined(_M_IX86_FP) && _M_IX86_FP >= 2)) && \
!defined(_M_ARM64EC)
#define ABSL_INTERNAL_HAVE_SSE2 1
#endif
#ifdef ABSL_INTERNAL_HAVE_SSSE3
#error ABSL_INTERNAL_HAVE_SSSE3 cannot be directly set
#elif defined(__SSSE3__)
#define ABSL_INTERNAL_HAVE_SSSE3 1
#endif
#ifdef ABSL_INTERNAL_HAVE_ARM_NEON
#error ABSL_INTERNAL_HAVE_ARM_NEON cannot be directly set
#elif defined(__ARM_NEON) && !(defined(__NVCC__) && defined(__CUDACC__))
#define ABSL_INTERNAL_HAVE_ARM_NEON 1
#endif
#ifdef ABSL_HAVE_CONSTANT_EVALUATED
#error ABSL_HAVE_CONSTANT_EVALUATED cannot be directly set
#endif
#ifdef __cpp_lib_is_constant_evaluated
#define ABSL_HAVE_CONSTANT_EVALUATED 1
#elif ABSL_HAVE_BUILTIN(__builtin_is_constant_evaluated)
#define ABSL_HAVE_CONSTANT_EVALUATED 1
#endif
#if defined(ABSL_INTERNAL_CPLUSPLUS_LANG) && \
ABSL_INTERNAL_CPLUSPLUS_LANG >= 201703L
#define ABSL_INTERNAL_CONSTEXPR_SINCE_CXX17 constexpr
#else
#define ABSL_INTERNAL_CONSTEXPR_SINCE_CXX17
#endif
#if defined(ABSL_INTERNAL_CPLUSPLUS_LANG) && \
ABSL_INTERNAL_CPLUSPLUS_LANG >= 202002L
#define ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 constexpr
#else
#define ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20
#endif
#ifdef ABSL_INTERNAL_EMSCRIPTEN_VERSION
#error ABSL_INTERNAL_EMSCRIPTEN_VERSION cannot be directly set
#endif
#ifdef __EMSCRIPTEN__
#include <emscripten/version.h>
#ifdef __EMSCRIPTEN_major__
#if __EMSCRIPTEN_minor__ >= 1000
#error __EMSCRIPTEN_minor__ is too big to fit in ABSL_INTERNAL_EMSCRIPTEN_VERSION
#endif
#if __EMSCRIPTEN_tiny__ >= 1000
#error __EMSCRIPTEN_tiny__ is too big to fit in ABSL_INTERNAL_EMSCRIPTEN_VERSION
#endif
#define ABSL_INTERNAL_EMSCRIPTEN_VERSION \
((__EMSCRIPTEN_major__) * 1000000 + (__EMSCRIPTEN_minor__) * 1000 + \
(__EMSCRIPTEN_tiny__))
#endif
#endif
#endif | #include "absl/base/config.h"
#include <cstdint>
#include "gtest/gtest.h"
#include "absl/synchronization/internal/thread_pool.h"
namespace {
TEST(ConfigTest, Endianness) {
union {
uint32_t value;
uint8_t data[sizeof(uint32_t)];
} number;
number.data[0] = 0x00;
number.data[1] = 0x01;
number.data[2] = 0x02;
number.data[3] = 0x03;
#if defined(ABSL_IS_LITTLE_ENDIAN) && defined(ABSL_IS_BIG_ENDIAN)
#error Both ABSL_IS_LITTLE_ENDIAN and ABSL_IS_BIG_ENDIAN are defined
#elif defined(ABSL_IS_LITTLE_ENDIAN)
EXPECT_EQ(UINT32_C(0x03020100), number.value);
#elif defined(ABSL_IS_BIG_ENDIAN)
EXPECT_EQ(UINT32_C(0x00010203), number.value);
#else
#error Unknown endianness
#endif
}
#if defined(ABSL_HAVE_THREAD_LOCAL)
TEST(ConfigTest, ThreadLocal) {
static thread_local int mine_mine_mine = 16;
EXPECT_EQ(16, mine_mine_mine);
{
absl::synchronization_internal::ThreadPool pool(1);
pool.Schedule([&] {
EXPECT_EQ(16, mine_mine_mine);
mine_mine_mine = 32;
EXPECT_EQ(32, mine_mine_mine);
});
}
EXPECT_EQ(16, mine_mine_mine);
}
#endif
} | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/base/config.h | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/base/config_test.cc | 03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4 |
a631fdfb-c701-42a3-84f9-0c1b71de9f2c | cpp | tensorflow/tensorflow | count_ops | tensorflow/core/ops/count_ops.cc | tensorflow/core/kernels/count_ops_test.cc | #include "tensorflow/core/framework/common_shape_fns.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/shape_inference.h"
namespace tensorflow {
using shape_inference::InferenceContext;
using shape_inference::ShapeHandle;
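// All three *CountSparseOutput ops return SparseTensor components:
// output_indices is [N, rank], output_values is [N], and output_dense_shape
// is [rank], where N (the number of distinct keys counted) is unknown until
// runtime.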
Status DenseCountSparseOutputShapeFn(InferenceContext *c) {
auto values = c->input(0);
auto weights = c->input(1);
ShapeHandle output;
auto num_weights = c->NumElements(weights);
if (c->ValueKnown(num_weights) && c->Value(num_weights) == 0) {
output = values;
} else {
TF_RETURN_IF_ERROR(c->Merge(weights, values, &output));
}
auto rank = c->Rank(output);
auto nvals = c->UnknownDim();
c->set_output(0, c->Matrix(nvals, rank));
c->set_output(1, c->Vector(nvals));
c->set_output(2, c->Vector(rank));
return absl::OkStatus();
}
Status SparseCountSparseOutputShapeFn(InferenceContext *c) {
ShapeHandle unused;
TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 2, &unused));
auto rank = c->Dim(c->input(0), 1);
auto nvals = c->UnknownDim();
c->set_output(0, c->Matrix(nvals, rank));
c->set_output(1, c->Vector(nvals));
c->set_output(2, c->Vector(rank));
return absl::OkStatus();
}
Status RaggedCountSparseOutputShapeFn(InferenceContext *c) {
int32_t rank = c->Rank(c->input(1));
if (rank != c->kUnknownRank) {
++rank;
}
auto nvals = c->UnknownDim();
c->set_output(0, c->Matrix(nvals, rank));
c->set_output(1, c->Vector(nvals));
c->set_output(2, c->Vector(rank));
return absl::OkStatus();
}
REGISTER_OP("DenseCountSparseOutput")
.Input("values: T")
.Input("weights: output_type")
.Attr("T: {int32, int64}")
.Attr("minlength: int >= -1 = -1")
.Attr("maxlength: int >= -1 = -1")
.Attr("binary_output: bool")
.Attr("output_type: {int32, int64, float, double}")
.SetShapeFn(DenseCountSparseOutputShapeFn)
.Output("output_indices: int64")
.Output("output_values: output_type")
.Output("output_dense_shape: int64");
REGISTER_OP("SparseCountSparseOutput")
.Input("indices: int64")
.Input("values: T")
.Input("dense_shape: int64")
.Input("weights: output_type")
.Attr("T: {int32, int64}")
.Attr("minlength: int >= -1 = -1")
.Attr("maxlength: int >= -1 = -1")
.Attr("binary_output: bool")
.Attr("output_type: {int32, int64, float, double}")
.SetShapeFn(SparseCountSparseOutputShapeFn)
.Output("output_indices: int64")
.Output("output_values: output_type")
.Output("output_dense_shape: int64");
REGISTER_OP("RaggedCountSparseOutput")
.Input("splits: int64")
.Input("values: T")
.Input("weights: output_type")
.Attr("T: {int32, int64}")
.Attr("minlength: int >= -1 = -1")
.Attr("maxlength: int >= -1 = -1")
.Attr("binary_output: bool")
.Attr("output_type: {int32, int64, float, double}")
.SetShapeFn(RaggedCountSparseOutputShapeFn)
.Output("output_indices: int64")
.Output("output_values: output_type")
.Output("output_dense_shape: int64");
} | #include "tensorflow/core/framework/fake_input.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/shape_inference.h"
#include "tensorflow/core/framework/shape_inference_testutil.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/kernels/ops_testutil.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
TEST_F(OpsTestBase, DenseCountSparseOutputShapeFn) {
ShapeInferenceTestOp op("DenseCountSparseOutput");
INFER_OK(op, "[?];?", "[?,1];[?];[1]");
INFER_OK(op, "[?,?];?", "[?,2];[?];[2]");
}
TEST_F(OpsTestBase, SparseCountSparseOutputShapeFn) {
ShapeInferenceTestOp op("SparseCountSparseOutput");
INFER_OK(op, "[?,1];?;?;?", "[?,d0_1];[?];[d0_1]");
INFER_OK(op, "[?,2];?;?;?", "[?,d0_1];[?];[d0_1]");
}
TEST_F(OpsTestBase, RaggedCountSparseOutputShapeFn) {
ShapeInferenceTestOp op("RaggedCountSparseOutput");
INFER_OK(op, "?;[?];?", "[?,2];[?];[2]");
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/ops/count_ops.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/count_ops_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
6bf951a5-3117-4d7c-af58-2d619f8af77e | cpp | tensorflow/tensorflow | bit_cast | third_party/xla/xla/bit_cast.h | third_party/xla/xla/bit_cast_test.cc | #ifndef XLA_BIT_CAST_H_
#define XLA_BIT_CAST_H_
#include <cstdint>
#include "absl/base/casts.h"
#include "Eigen/Core"
#include "xla/types.h"
#include "tsl/platform/bfloat16.h"
namespace xla {
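// Same-size bit reinterpretation. The specializations below route Eigen::half
// and bfloat16 <-> uint16_t through Eigen::numext::bit_cast, e.g.:
// uint16_t bits = BitCast<uint16_t>(Eigen::half(1.0f));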
template <typename T, typename U>
T BitCast(U src) {
static_assert(sizeof(T) == sizeof(U), "sizes don't match");
return absl::bit_cast<T>(src);
}
template <>
inline tsl::bfloat16 BitCast<tsl::bfloat16, uint16_t>(uint16_t src) {
return Eigen::numext::bit_cast<tsl::bfloat16>(src);
}
template <>
inline uint16_t BitCast<uint16_t, tsl::bfloat16>(tsl::bfloat16 src) {
return Eigen::numext::bit_cast<uint16_t>(src);
}
template <>
inline Eigen::half BitCast<Eigen::half, uint16_t>(uint16_t src) {
return Eigen::numext::bit_cast<Eigen::half>(src);
}
template <>
inline uint16_t BitCast<uint16_t, Eigen::half>(Eigen::half src) {
return Eigen::numext::bit_cast<uint16_t>(src);
}
}
#endif | #include "xla/bit_cast.h"
#include <cstdint>
#include "Eigen/Core"
#include "xla/test.h"
#include "tsl/platform/bfloat16.h"
namespace xla {
namespace {
using ::Eigen::half;
using ::tsl::bfloat16;
TEST(BitCastTest, BackAndForth) {
for (uint32_t n = 0; n < 0x10000; ++n) {
uint16_t initial_rep = n;
bfloat16 float_val = BitCast<bfloat16>(initial_rep);
uint16_t final_rep = BitCast<uint16_t>(float_val);
EXPECT_EQ(initial_rep, final_rep);
}
for (uint32_t n = 0; n < 0x10000; ++n) {
uint16_t initial_rep = n;
half float_val = BitCast<half>(initial_rep);
uint16_t final_rep = BitCast<uint16_t>(float_val);
EXPECT_EQ(initial_rep, final_rep);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/bit_cast.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/bit_cast_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
053b8ccf-9c89-4d6d-8a38-c5c1b6fa7759 | cpp | google/quiche | netlink | quiche/quic/qbone/platform/netlink.cc | quiche/quic/qbone/platform/netlink_test.cc | #include "quiche/quic/qbone/platform/netlink.h"
#include <linux/fib_rules.h>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/base/attributes.h"
#include "absl/strings/str_cat.h"
#include "quiche/quic/core/crypto/quic_random.h"
#include "quiche/quic/platform/api/quic_ip_address.h"
#include "quiche/quic/platform/api/quic_logging.h"
#include "quiche/quic/qbone/platform/rtnetlink_message.h"
#include "quiche/quic/qbone/qbone_constants.h"
namespace quic {
Netlink::Netlink(KernelInterface* kernel) : kernel_(kernel) {
seq_ = QuicRandom::GetInstance()->RandUint64();
}
Netlink::~Netlink() { CloseSocket(); }
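// Resizes the reply buffer used by Recv(); a size of 0 releases it entirely.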
void Netlink::ResetRecvBuf(size_t size) {
if (size != 0) {
recvbuf_ = std::make_unique<char[]>(size);
} else {
recvbuf_ = nullptr;
}
recvbuf_length_ = size;
}
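// Lazily opens and binds an AF_NETLINK / NETLINK_ROUTE socket. Returns true
// immediately if a socket is already open.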
bool Netlink::OpenSocket() {
if (socket_fd_ >= 0) {
return true;
}
socket_fd_ = kernel_->socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
if (socket_fd_ < 0) {
QUIC_PLOG(ERROR) << "can't open netlink socket";
return false;
}
QUIC_LOG(INFO) << "Opened a new netlink socket fd = " << socket_fd_;
sockaddr_nl myaddr;
memset(&myaddr, 0, sizeof(myaddr));
myaddr.nl_family = AF_NETLINK;
if (kernel_->bind(socket_fd_, reinterpret_cast<struct sockaddr*>(&myaddr),
sizeof(myaddr)) < 0) {
QUIC_LOG(INFO) << "can't bind address to socket";
CloseSocket();
return false;
}
return true;
}
void Netlink::CloseSocket() {
if (socket_fd_ >= 0) {
QUIC_LOG(INFO) << "Closing netlink socket fd = " << socket_fd_;
kernel_->close(socket_fd_);
}
ResetRecvBuf(0);
socket_fd_ = -1;
}
namespace {
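// Parses RTM_NEWLINK replies, capturing the index, type, and hardware and
// broadcast addresses of the interface whose IFLA_IFNAME matches
// interface_name_.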
class LinkInfoParser : public NetlinkParserInterface {
public:
LinkInfoParser(std::string interface_name, Netlink::LinkInfo* link_info)
: interface_name_(std::move(interface_name)), link_info_(link_info) {}
void Run(struct nlmsghdr* netlink_message) override {
if (netlink_message->nlmsg_type != RTM_NEWLINK) {
QUIC_LOG(INFO) << absl::StrCat(
"Unexpected nlmsg_type: ", netlink_message->nlmsg_type,
" expected: ", RTM_NEWLINK);
return;
}
struct ifinfomsg* interface_info =
reinterpret_cast<struct ifinfomsg*>(NLMSG_DATA(netlink_message));
if (interface_info->ifi_family != AF_UNSPEC) {
QUIC_LOG(INFO) << absl::StrCat(
"Unexpected ifi_family: ", interface_info->ifi_family,
" expected: ", AF_UNSPEC);
return;
}
char hardware_address[kHwAddrSize];
size_t hardware_address_length = 0;
char broadcast_address[kHwAddrSize];
size_t broadcast_address_length = 0;
std::string name;
struct rtattr* rta;
int payload_length = IFLA_PAYLOAD(netlink_message);
for (rta = IFLA_RTA(interface_info); RTA_OK(rta, payload_length);
rta = RTA_NEXT(rta, payload_length)) {
int attribute_length;
switch (rta->rta_type) {
case IFLA_ADDRESS: {
attribute_length = RTA_PAYLOAD(rta);
if (attribute_length > kHwAddrSize) {
QUIC_VLOG(2) << "IFLA_ADDRESS too long: " << attribute_length;
break;
}
memmove(hardware_address, RTA_DATA(rta), attribute_length);
hardware_address_length = attribute_length;
break;
}
case IFLA_BROADCAST: {
attribute_length = RTA_PAYLOAD(rta);
if (attribute_length > kHwAddrSize) {
QUIC_VLOG(2) << "IFLA_BROADCAST too long: " << attribute_length;
break;
}
memmove(broadcast_address, RTA_DATA(rta), attribute_length);
broadcast_address_length = attribute_length;
break;
}
case IFLA_IFNAME: {
name = std::string(reinterpret_cast<char*>(RTA_DATA(rta)),
RTA_PAYLOAD(rta));
name = name.substr(0, name.find('\0'));
break;
}
}
}
QUIC_VLOG(2) << "interface name: " << name
<< ", index: " << interface_info->ifi_index;
if (name == interface_name_) {
link_info_->index = interface_info->ifi_index;
link_info_->type = interface_info->ifi_type;
link_info_->hardware_address_length = hardware_address_length;
if (hardware_address_length > 0) {
memmove(&link_info_->hardware_address, hardware_address,
hardware_address_length);
}
link_info_->broadcast_address_length = broadcast_address_length;
if (broadcast_address_length > 0) {
memmove(&link_info_->broadcast_address, broadcast_address,
broadcast_address_length);
}
found_link_ = true;
}
}
bool found_link() { return found_link_; }
private:
const std::string interface_name_;
Netlink::LinkInfo* const link_info_;
bool found_link_ = false;
};
}
bool Netlink::GetLinkInfo(const std::string& interface_name,
LinkInfo* link_info) {
auto message = LinkMessage::New(RtnetlinkMessage::Operation::GET,
NLM_F_ROOT | NLM_F_MATCH | NLM_F_REQUEST,
seq_, getpid(), nullptr);
if (!Send(message.BuildIoVec().get(), message.IoVecSize())) {
QUIC_LOG(ERROR) << "send failed.";
return false;
}
LinkInfoParser parser(interface_name, link_info);
if (!Recv(seq_++, &parser)) {
QUIC_LOG(ERROR) << "recv failed.";
return false;
}
return parser.found_link();
}
namespace {
class LocalAddressParser : public NetlinkParserInterface {
public:
LocalAddressParser(int interface_index, uint8_t unwanted_flags,
std::vector<Netlink::AddressInfo>* local_addresses,
int* num_ipv6_nodad_dadfailed_addresses)
: interface_index_(interface_index),
unwanted_flags_(unwanted_flags),
local_addresses_(local_addresses),
num_ipv6_nodad_dadfailed_addresses_(
num_ipv6_nodad_dadfailed_addresses) {}
void Run(struct nlmsghdr* netlink_message) override {
if (netlink_message->nlmsg_type != RTM_NEWADDR) {
QUIC_LOG(INFO) << "Unexpected nlmsg_type: " << netlink_message->nlmsg_type
<< " expected: " << RTM_NEWADDR;
return;
}
struct ifaddrmsg* interface_address =
reinterpret_cast<struct ifaddrmsg*>(NLMSG_DATA(netlink_message));
if (interface_address->ifa_family != AF_INET &&
interface_address->ifa_family != AF_INET6) {
QUIC_VLOG(2) << absl::StrCat("uninteresting ifa family: ",
interface_address->ifa_family);
return;
}
if (num_ipv6_nodad_dadfailed_addresses_ != nullptr &&
(interface_address->ifa_flags & IFA_F_NODAD) &&
(interface_address->ifa_flags & IFA_F_DADFAILED)) {
++(*num_ipv6_nodad_dadfailed_addresses_);
}
uint8_t unwanted_flags = interface_address->ifa_flags & unwanted_flags_;
if (unwanted_flags != 0) {
QUIC_VLOG(2) << absl::StrCat("unwanted ifa flags: ", unwanted_flags);
return;
}
struct rtattr* rta;
int payload_length = IFA_PAYLOAD(netlink_message);
Netlink::AddressInfo address_info;
for (rta = IFA_RTA(interface_address); RTA_OK(rta, payload_length);
rta = RTA_NEXT(rta, payload_length)) {
if (rta->rta_type != IFA_LOCAL && rta->rta_type != IFA_ADDRESS) {
QUIC_VLOG(2) << "Ignoring uninteresting rta_type: " << rta->rta_type;
continue;
}
switch (interface_address->ifa_family) {
case AF_INET:
ABSL_FALLTHROUGH_INTENDED;
case AF_INET6:
if (RTA_PAYLOAD(rta) == sizeof(struct in_addr) ||
RTA_PAYLOAD(rta) == sizeof(struct in6_addr)) {
auto* raw_ip = reinterpret_cast<char*>(RTA_DATA(rta));
if (rta->rta_type == IFA_LOCAL) {
address_info.local_address.FromPackedString(raw_ip,
RTA_PAYLOAD(rta));
} else {
address_info.interface_address.FromPackedString(raw_ip,
RTA_PAYLOAD(rta));
}
}
break;
default:
QUIC_LOG(ERROR) << absl::StrCat("Unknown address family: ",
interface_address->ifa_family);
}
}
QUIC_VLOG(2) << "local_address: " << address_info.local_address.ToString()
<< " interface_address: "
<< address_info.interface_address.ToString()
<< " index: " << interface_address->ifa_index;
if (interface_address->ifa_index != interface_index_) {
return;
}
address_info.prefix_length = interface_address->ifa_prefixlen;
address_info.scope = interface_address->ifa_scope;
if (address_info.local_address.IsInitialized() ||
address_info.interface_address.IsInitialized()) {
local_addresses_->push_back(address_info);
}
}
private:
const int interface_index_;
const uint8_t unwanted_flags_;
std::vector<Netlink::AddressInfo>* const local_addresses_;
int* const num_ipv6_nodad_dadfailed_addresses_;
};
}
bool Netlink::GetAddresses(int interface_index, uint8_t unwanted_flags,
std::vector<AddressInfo>* addresses,
int* num_ipv6_nodad_dadfailed_addresses) {
auto message = AddressMessage::New(RtnetlinkMessage::Operation::GET,
NLM_F_ROOT | NLM_F_MATCH | NLM_F_REQUEST,
seq_, getpid(), nullptr);
if (!Send(message.BuildIoVec().get(), message.IoVecSize())) {
QUIC_LOG(ERROR) << "send failed.";
return false;
}
addresses->clear();
if (num_ipv6_nodad_dadfailed_addresses != nullptr) {
*num_ipv6_nodad_dadfailed_addresses = 0;
}
LocalAddressParser parser(interface_index, unwanted_flags, addresses,
num_ipv6_nodad_dadfailed_addresses);
if (!Recv(seq_++, &parser)) {
QUIC_LOG(ERROR) << "recv failed";
return false;
}
return true;
}
namespace {
class UnknownParser : public NetlinkParserInterface {
public:
void Run(struct nlmsghdr* netlink_message) override {
QUIC_LOG(INFO) << "nlmsg reply type: " << netlink_message->nlmsg_type;
}
};
}
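// Adds or removes a local address on the given interface. kReplace is not
// supported, and any IFA_LOCAL entry in additional_attributes is ignored in
// favor of `address`.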
bool Netlink::ChangeLocalAddress(
uint32_t interface_index, Verb verb, const QuicIpAddress& address,
uint8_t prefix_length, uint8_t ifa_flags, uint8_t ifa_scope,
const std::vector<struct rtattr*>& additional_attributes) {
if (verb == Verb::kReplace) {
return false;
}
auto operation = verb == Verb::kAdd ? RtnetlinkMessage::Operation::NEW
: RtnetlinkMessage::Operation::DEL;
uint8_t address_family;
if (address.address_family() == IpAddressFamily::IP_V4) {
address_family = AF_INET;
} else if (address.address_family() == IpAddressFamily::IP_V6) {
address_family = AF_INET6;
} else {
return false;
}
struct ifaddrmsg address_header = {address_family, prefix_length, ifa_flags,
ifa_scope, interface_index};
auto message = AddressMessage::New(operation, NLM_F_REQUEST | NLM_F_ACK, seq_,
getpid(), &address_header);
for (const auto& attribute : additional_attributes) {
if (attribute->rta_type == IFA_LOCAL) {
continue;
}
message.AppendAttribute(attribute->rta_type, RTA_DATA(attribute),
RTA_PAYLOAD(attribute));
}
message.AppendAttribute(IFA_LOCAL, address.ToPackedString().c_str(),
address.ToPackedString().size());
if (!Send(message.BuildIoVec().get(), message.IoVecSize())) {
QUIC_LOG(ERROR) << "send failed";
return false;
}
UnknownParser parser;
if (!Recv(seq_++, &parser)) {
QUIC_LOG(ERROR) << "receive failed.";
return false;
}
return true;
}
namespace {
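// Parses RTM_NEWROUTE replies into RoutingRule entries, reading the routing
// table, destination subnet, preferred source, and output interface
// attributes.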
class RoutingRuleParser : public NetlinkParserInterface {
public:
explicit RoutingRuleParser(std::vector<Netlink::RoutingRule>* routing_rules)
: routing_rules_(routing_rules) {}
void Run(struct nlmsghdr* netlink_message) override {
if (netlink_message->nlmsg_type != RTM_NEWROUTE) {
QUIC_LOG(WARNING) << absl::StrCat(
"Unexpected nlmsg_type: ", netlink_message->nlmsg_type,
" expected: ", RTM_NEWROUTE);
return;
}
auto* route = reinterpret_cast<struct rtmsg*>(NLMSG_DATA(netlink_message));
int payload_length = RTM_PAYLOAD(netlink_message);
if (route->rtm_family != AF_INET && route->rtm_family != AF_INET6) {
QUIC_VLOG(2) << absl::StrCat("Uninteresting family: ", route->rtm_family);
return;
}
Netlink::RoutingRule rule;
rule.scope = route->rtm_scope;
rule.table = route->rtm_table;
struct rtattr* rta;
for (rta = RTM_RTA(route); RTA_OK(rta, payload_length);
rta = RTA_NEXT(rta, payload_length)) {
switch (rta->rta_type) {
case RTA_TABLE: {
rule.table = *reinterpret_cast<uint32_t*>(RTA_DATA(rta));
break;
}
case RTA_DST: {
QuicIpAddress destination;
destination.FromPackedString(reinterpret_cast<char*>(RTA_DATA(rta)),
                             RTA_PAYLOAD(rta));
rule.destination_subnet = IpRange(destination, route->rtm_dst_len);
break;
}
case RTA_PREFSRC: {
rule.preferred_source.FromPackedString(
    reinterpret_cast<char*>(RTA_DATA(rta)), RTA_PAYLOAD(rta));
break;
}
case RTA_OIF: {
rule.out_interface = *reinterpret_cast<int*>(RTA_DATA(rta));
break;
}
default: {
QUIC_VLOG(2) << absl::StrCat("Uninteresting attribute: ",
rta->rta_type);
}
}
}
routing_rules_->push_back(rule);
}
private:
std::vector<Netlink::RoutingRule>* routing_rules_;
};
}
bool Netlink::GetRouteInfo(std::vector<Netlink::RoutingRule>* routing_rules) {
rtmsg route_message{};
route_message.rtm_table = RT_TABLE_MAIN;
auto message = RouteMessage::New(RtnetlinkMessage::Operation::GET,
NLM_F_REQUEST | NLM_F_ROOT | NLM_F_MATCH,
seq_, getpid(), &route_message);
if (!Send(message.BuildIoVec().get(), message.IoVecSize())) {
QUIC_LOG(ERROR) << "send failed";
return false;
}
RoutingRuleParser parser(routing_rules);
if (!Recv(seq_++, &parser)) {
QUIC_LOG(ERROR) << "recv failed";
return false;
}
return true;
}
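// Adds, removes, or replaces a main-table route through the QBONE gateway.
// Removal requests use RTPROT_UNSPEC, which acts as a wildcard when the
// kernel matches routes to delete.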
bool Netlink::ChangeRoute(Netlink::Verb verb, uint32_t table,
const IpRange& destination_subnet, uint8_t scope,
QuicIpAddress preferred_source,
int32_t interface_index) {
if (!destination_subnet.prefix().IsInitialized()) {
return false;
}
if (destination_subnet.address_family() != IpAddressFamily::IP_V4 &&
destination_subnet.address_family() != IpAddressFamily::IP_V6) {
return false;
}
if (preferred_source.IsInitialized() &&
preferred_source.address_family() !=
destination_subnet.address_family()) {
return false;
}
RtnetlinkMessage::Operation operation;
uint16_t flags = NLM_F_REQUEST | NLM_F_ACK;
switch (verb) {
case Verb::kAdd:
operation = RtnetlinkMessage::Operation::NEW;
flags |= NLM_F_EXCL | NLM_F_CREATE;
break;
case Verb::kRemove:
operation = RtnetlinkMessage::Operation::DEL;
break;
case Verb::kReplace:
operation = RtnetlinkMessage::Operation::NEW;
flags |= NLM_F_REPLACE | NLM_F_CREATE;
break;
}
struct rtmsg route_message;
memset(&route_message, 0, sizeof(route_message));
route_message.rtm_family =
destination_subnet.address_family() == IpAddressFamily::IP_V4 ? AF_INET
: AF_INET6;
route_message.rtm_dst_len = destination_subnet.prefix_length();
route_message.rtm_src_len = 0;
route_message.rtm_table = RT_TABLE_MAIN;
route_message.rtm_protocol =
verb == Verb::kRemove ? RTPROT_UNSPEC : RTPROT_STATIC;
route_message.rtm_scope = scope;
route_message.rtm_type = RTN_UNICAST;
auto message =
RouteMessage::New(operation, flags, seq_, getpid(), &route_message);
message.AppendAttribute(RTA_TABLE, &table, sizeof(table));
message.AppendAttribute(RTA_OIF, &interface_index, sizeof(interface_index));
message.AppendAttribute(
RTA_DST,
reinterpret_cast<const void*>(
destination_subnet.prefix().ToPackedString().c_str()),
destination_subnet.prefix().ToPackedString().size());
if (preferred_source.IsInitialized()) {
auto src_str = preferred_source.ToPackedString();
message.AppendAttribute(RTA_PREFSRC,
reinterpret_cast<const void*>(src_str.c_str()),
src_str.size());
}
if (verb != Verb::kRemove) {
auto gateway_str = QboneConstants::GatewayAddress()->ToPackedString();
message.AppendAttribute(RTA_GATEWAY,
reinterpret_cast<const void*>(gateway_str.c_str()),
gateway_str.size());
}
if (!Send(message.BuildIoVec().get(), message.IoVecSize())) {
QUIC_LOG(ERROR) << "send failed";
return false;
}
UnknownParser parser;
if (!Recv(seq_++, &parser)) {
QUIC_LOG(ERROR) << "receive failed.";
return false;
}
return true;
}
namespace {
class IpRuleParser : public NetlinkParserInterface {
public:
explicit IpRuleParser(std::vector<Netlink::IpRule>* ip_rules)
: ip_rules_(ip_rules) {}
void Run(struct nlmsghdr* netlink_message) override {
if (netlink_message->nlmsg_type != RTM_NEWRULE) {
QUIC_LOG(WARNING) << absl::StrCat(
"Unexpected nlmsg_type: ", netlink_message->nlmsg_type,
" expected: ", RTM_NEWRULE);
return;
}
auto* rule = reinterpret_cast<rtmsg*>(NLMSG_DATA(netlink_message));
int payload_length = RTM_PAYLOAD(netlink_message);
if (rule->rtm_family != AF_INET6) {
QUIC_LOG(ERROR) << absl::StrCat("Unexpected family: ", rule->rtm_family);
return;
}
Netlink::IpRule ip_rule;
ip_rule.table = rule->rtm_table;
struct rtattr* rta;
for (rta = RTM_RTA(rule); RTA_OK(rta, payload_length);
rta = RTA_NEXT(rta, payload_length)) {
switch (rta->rta_type) {
case RTA_TABLE: {
ip_rule.table = *reinterpret_cast<uint32_t*>(RTA_DATA(rta));
break;
}
case RTA_SRC: {
QuicIpAddress src_addr;
src_addr.FromPackedString(reinterpret_cast<char*>(RTA_DATA(rta)),
RTA_PAYLOAD(rta));
IpRange src_range(src_addr, rule->rtm_src_len);
ip_rule.source_range = src_range;
break;
}
default: {
QUIC_VLOG(2) << absl::StrCat("Uninteresting attribute: ",
rta->rta_type);
}
}
}
ip_rules_->emplace_back(ip_rule);
}
private:
std::vector<Netlink::IpRule>* ip_rules_;
};
}
bool Netlink::GetRuleInfo(std::vector<Netlink::IpRule>* ip_rules) {
rtmsg rule_message{};
rule_message.rtm_family = AF_INET6;
auto message = RuleMessage::New(RtnetlinkMessage::Operation::GET,
NLM_F_REQUEST | NLM_F_DUMP, seq_, getpid(),
&rule_message);
if (!Send(message.BuildIoVec().get(), message.IoVecSize())) {
QUIC_LOG(ERROR) << "send failed";
return false;
}
IpRuleParser parser(ip_rules);
if (!Recv(seq_++, &parser)) {
QUIC_LOG(ERROR) << "receive failed.";
return false;
}
return true;
}
bool Netlink::ChangeRule(Verb verb, uint32_t table, IpRange source_range) {
RtnetlinkMessage::Operation operation;
uint16_t flags = NLM_F_REQUEST | NLM_F_ACK;
rtmsg rule_message{};
rule_message.rtm_family = AF_INET6;
rule_message.rtm_protocol = RTPROT_STATIC;
rule_message.rtm_scope = RT_SCOPE_UNIVERSE;
rule_message.rtm_table = RT_TABLE_UNSPEC;
rule_message.rtm_flags |= FIB_RULE_FIND_SADDR;
switch (verb) {
case Verb::kAdd:
if (!source_range.IsInitialized()) {
QUIC_LOG(ERROR) << "Source range must be initialized.";
return false;
}
operation = RtnetlinkMessage::Operation::NEW;
flags |= NLM_F_EXCL | NLM_F_CREATE;
rule_message.rtm_type = FRA_DST;
rule_message.rtm_src_len = source_range.prefix_length();
break;
case Verb::kRemove:
operation = RtnetlinkMessage::Operation::DEL;
break;
case Verb::kReplace:
QUIC_LOG(ERROR) << "Unsupported verb: kReplace";
return false;
}
auto message =
RuleMessage::New(operation, flags, seq_, getpid(), &rule_message);
message.AppendAttribute(RTA_TABLE, &table, sizeof(table));
if (source_range.IsInitialized()) {
std::string packed_src = source_range.prefix().ToPackedString();
message.AppendAttribute(RTA_SRC,
reinterpret_cast<const void*>(packed_src.c_str()),
packed_src.size());
}
if (!Send(message.BuildIoVec().get(), message.IoVecSize())) {
QUIC_LOG(ERROR) << "send failed";
return false;
}
UnknownParser parser;
if (!Recv(seq_++, &parser)) {
QUIC_LOG(ERROR) << "receive failed.";
return false;
}
return true;
}
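// Writes a fully assembled netlink request to the kernel; the destination
// sockaddr_nl with nl_pid == 0 addresses the kernel itself.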
bool Netlink::Send(struct iovec* iov, size_t iovlen) {
if (!OpenSocket()) {
QUIC_LOG(ERROR) << "can't open socket";
return false;
}
sockaddr_nl netlink_address;
memset(&netlink_address, 0, sizeof(netlink_address));
netlink_address.nl_family = AF_NETLINK;
netlink_address.nl_pid = 0;
netlink_address.nl_groups = 0;
struct msghdr msg = {
&netlink_address, sizeof(netlink_address), iov, iovlen, nullptr, 0, 0};
if (kernel_->sendmsg(socket_fd_, &msg, 0) < 0) {
QUIC_LOG(ERROR) << "sendmsg failed";
CloseSocket();
return false;
}
return true;
}
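// Reads replies until NLMSG_DONE, an ACK, or an error for `seq` arrives. A
// MSG_PEEK | MSG_TRUNC recvfrom first measures the pending packet so the
// buffer can be grown and multipart replies are never truncated.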
bool Netlink::Recv(uint32_t seq, NetlinkParserInterface* parser) {
sockaddr_nl netlink_address;
for (;;) {
socklen_t address_length = sizeof(netlink_address);
int next_packet_size = kernel_->recvfrom(
socket_fd_, recvbuf_.get(), 0, MSG_PEEK | MSG_TRUNC,
reinterpret_cast<struct sockaddr*>(&netlink_address), &address_length);
if (next_packet_size < 0) {
QUIC_LOG(ERROR)
<< "error recvfrom with MSG_PEEK | MSG_TRUNC to get packet length.";
CloseSocket();
return false;
}
QUIC_VLOG(3) << "netlink packet size: " << next_packet_size;
if (next_packet_size > recvbuf_length_) {
QUIC_VLOG(2) << "resizing recvbuf to " << next_packet_size;
ResetRecvBuf(next_packet_size);
}
memset(recvbuf_.get(), 0, recvbuf_length_);
int len = kernel_->recvfrom(
socket_fd_, recvbuf_.get(), recvbuf_length_, 0,
reinterpret_cast<struct sockaddr*>(&netlink_address), &address_length);
QUIC_VLOG(3) << "recvfrom returned: " << len;
if (len < 0) {
QUIC_LOG(INFO) << "can't receive netlink packet";
CloseSocket();
return false;
}
struct nlmsghdr* netlink_message;
for (netlink_message = reinterpret_cast<struct nlmsghdr*>(recvbuf_.get());
NLMSG_OK(netlink_message, len);
netlink_message = NLMSG_NEXT(netlink_message, len)) {
QUIC_VLOG(3) << "netlink_message->nlmsg_type = "
<< netlink_message->nlmsg_type;
if (netlink_message->nlmsg_seq != seq) {
QUIC_LOG(INFO) << "netlink_message not meant for us."
<< " seq: " << seq
<< " nlmsg_seq: " << netlink_message->nlmsg_seq;
continue;
}
if (netlink_message->nlmsg_type == NLMSG_DONE) {
return true;
}
if (netlink_message->nlmsg_type == NLMSG_ERROR) {
struct nlmsgerr* err =
reinterpret_cast<struct nlmsgerr*>(NLMSG_DATA(netlink_message));
if (netlink_message->nlmsg_len <
NLMSG_LENGTH(sizeof(struct nlmsgerr))) {
QUIC_LOG(INFO) << "netlink_message ERROR truncated";
} else {
if (err->error == 0) {
QUIC_VLOG(3) << "Netlink sent an ACK";
return true;
}
QUIC_LOG(INFO) << "netlink_message ERROR: " << err->error;
}
return false;
}
parser->Run(netlink_message);
}
}
}
} | #include "quiche/quic/qbone/platform/netlink.h"
#include <functional>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/container/node_hash_set.h"
#include "quiche/quic/platform/api/quic_bug_tracker.h"
#include "quiche/quic/platform/api/quic_test.h"
#include "quiche/quic/qbone/platform/mock_kernel.h"
#include "quiche/quic/qbone/qbone_constants.h"
namespace quic::test {
namespace {
using ::testing::_;
using ::testing::Contains;
using ::testing::InSequence;
using ::testing::Invoke;
using ::testing::Return;
using ::testing::Unused;
const int kSocketFd = 101;
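// Fixture that fakes the netlink socket. ExpectNetlinkPacket matches one
// outgoing request via sendmsg and serves a canned reply through two recvfrom
// expectations: a MSG_PEEK | MSG_TRUNC sizing call, then the real read.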
class NetlinkTest : public QuicTest {
protected:
NetlinkTest() {
ON_CALL(mock_kernel_, socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE))
.WillByDefault(Invoke([this](Unused, Unused, Unused) {
EXPECT_CALL(mock_kernel_, close(kSocketFd)).WillOnce(Return(0));
return kSocketFd;
}));
}
void ExpectNetlinkPacket(
uint16_t type, uint16_t flags,
const std::function<ssize_t(void* buf, size_t len, int seq)>&
recv_callback,
const std::function<void(const void* buf, size_t len)>& send_callback =
nullptr) {
static int seq = -1;
InSequence s;
EXPECT_CALL(mock_kernel_, sendmsg(kSocketFd, _, _))
.WillOnce(Invoke([type, flags, send_callback](
Unused, const struct msghdr* msg, int) {
EXPECT_EQ(sizeof(struct sockaddr_nl), msg->msg_namelen);
auto* nl_addr =
reinterpret_cast<const struct sockaddr_nl*>(msg->msg_name);
EXPECT_EQ(AF_NETLINK, nl_addr->nl_family);
EXPECT_EQ(0, nl_addr->nl_pid);
EXPECT_EQ(0, nl_addr->nl_groups);
EXPECT_GE(msg->msg_iovlen, 1);
EXPECT_GE(msg->msg_iov[0].iov_len, sizeof(struct nlmsghdr));
std::string buf;
for (int i = 0; i < msg->msg_iovlen; i++) {
buf.append(
std::string(reinterpret_cast<char*>(msg->msg_iov[i].iov_base),
msg->msg_iov[i].iov_len));
}
auto* netlink_message =
reinterpret_cast<const struct nlmsghdr*>(buf.c_str());
EXPECT_EQ(type, netlink_message->nlmsg_type);
EXPECT_EQ(flags, netlink_message->nlmsg_flags);
EXPECT_GE(buf.size(), netlink_message->nlmsg_len);
if (send_callback != nullptr) {
send_callback(buf.c_str(), buf.size());
}
QUICHE_CHECK_EQ(seq, -1);
seq = netlink_message->nlmsg_seq;
return buf.size();
}));
EXPECT_CALL(mock_kernel_,
recvfrom(kSocketFd, _, 0, MSG_PEEK | MSG_TRUNC, _, _))
.WillOnce(Invoke([this, recv_callback](Unused, Unused, Unused, Unused,
struct sockaddr* src_addr,
socklen_t* addrlen) {
auto* nl_addr = reinterpret_cast<struct sockaddr_nl*>(src_addr);
nl_addr->nl_family = AF_NETLINK;
nl_addr->nl_pid = 0;
nl_addr->nl_groups = 0;
int ret = recv_callback(reply_packet_, sizeof(reply_packet_), seq);
QUICHE_CHECK_LE(ret, sizeof(reply_packet_));
return ret;
}));
EXPECT_CALL(mock_kernel_, recvfrom(kSocketFd, _, _, _, _, _))
.WillOnce(Invoke([recv_callback](Unused, void* buf, size_t len, Unused,
struct sockaddr* src_addr,
socklen_t* addrlen) {
auto* nl_addr = reinterpret_cast<struct sockaddr_nl*>(src_addr);
nl_addr->nl_family = AF_NETLINK;
nl_addr->nl_pid = 0;
nl_addr->nl_groups = 0;
int ret = recv_callback(buf, len, seq);
EXPECT_GE(len, ret);
seq = -1;
return ret;
}));
}
char reply_packet_[4096];
MockKernel mock_kernel_;
};
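// Helpers for assembling fake kernel replies in place. CreateNetlinkMessage
// appends an nlmsghdr at the NLMSG_ALIGN'ed end of the previous message (or
// at |buf| for the first one); AddRTA appends one routing attribute to a
// message and extends its nlmsg_len; the Create*msg helpers fill in the
// typed payload (ifinfomsg / ifaddrmsg / rtmsg) and its attributes.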
void AddRTA(struct nlmsghdr* netlink_message, uint16_t type, const void* data,
size_t len) {
auto* next_header_ptr = reinterpret_cast<char*>(netlink_message) +
NLMSG_ALIGN(netlink_message->nlmsg_len);
auto* rta = reinterpret_cast<struct rtattr*>(next_header_ptr);
rta->rta_type = type;
rta->rta_len = RTA_LENGTH(len);
memcpy(RTA_DATA(rta), data, len);
netlink_message->nlmsg_len =
NLMSG_ALIGN(netlink_message->nlmsg_len) + RTA_LENGTH(len);
}
void CreateIfinfomsg(struct nlmsghdr* netlink_message,
const std::string& interface_name, uint16_t type,
int index, unsigned int flags, unsigned int change,
uint8_t address[], int address_len, uint8_t broadcast[],
int broadcast_len) {
auto* interface_info =
reinterpret_cast<struct ifinfomsg*>(NLMSG_DATA(netlink_message));
interface_info->ifi_family = AF_UNSPEC;
interface_info->ifi_type = type;
interface_info->ifi_index = index;
interface_info->ifi_flags = flags;
interface_info->ifi_change = change;
netlink_message->nlmsg_len = NLMSG_LENGTH(sizeof(struct ifinfomsg));
AddRTA(netlink_message, IFLA_ADDRESS, address, address_len);
AddRTA(netlink_message, IFLA_BROADCAST, broadcast, broadcast_len);
AddRTA(netlink_message, IFLA_IFNAME, interface_name.c_str(),
interface_name.size());
}
struct nlmsghdr* CreateNetlinkMessage(void* buf,
struct nlmsghdr* previous_netlink_message,
uint16_t type, int seq) {
auto* next_header_ptr = reinterpret_cast<char*>(buf);
if (previous_netlink_message != nullptr) {
next_header_ptr = reinterpret_cast<char*>(previous_netlink_message) +
NLMSG_ALIGN(previous_netlink_message->nlmsg_len);
}
auto* netlink_message = reinterpret_cast<nlmsghdr*>(next_header_ptr);
netlink_message->nlmsg_len = NLMSG_LENGTH(0);
netlink_message->nlmsg_type = type;
netlink_message->nlmsg_flags = NLM_F_MULTI;
netlink_message->nlmsg_pid = 0;
netlink_message->nlmsg_seq = seq;
return netlink_message;
}
void CreateIfaddrmsg(struct nlmsghdr* nlm, int interface_index,
unsigned char prefixlen, unsigned char flags,
unsigned char scope, QuicIpAddress ip) {
QUICHE_CHECK(ip.IsInitialized());
unsigned char family;
switch (ip.address_family()) {
case IpAddressFamily::IP_V4:
family = AF_INET;
break;
case IpAddressFamily::IP_V6:
family = AF_INET6;
break;
default:
QUIC_BUG(quic_bug_11034_1)
<< absl::StrCat("unexpected address family: ", ip.address_family());
family = AF_UNSPEC;
}
auto* msg = reinterpret_cast<struct ifaddrmsg*>(NLMSG_DATA(nlm));
msg->ifa_family = family;
msg->ifa_prefixlen = prefixlen;
msg->ifa_flags = flags;
msg->ifa_scope = scope;
msg->ifa_index = interface_index;
nlm->nlmsg_len = NLMSG_LENGTH(sizeof(struct ifaddrmsg));
AddRTA(nlm, IFA_LOCAL, ip.ToPackedString().c_str(),
ip.ToPackedString().size());
}
void CreateRtmsg(struct nlmsghdr* nlm, unsigned char family,
unsigned char destination_length, unsigned char source_length,
unsigned char tos, unsigned char table, unsigned char protocol,
unsigned char scope, unsigned char type, unsigned int flags,
QuicIpAddress destination, int interface_index) {
auto* msg = reinterpret_cast<struct rtmsg*>(NLMSG_DATA(nlm));
msg->rtm_family = family;
msg->rtm_dst_len = destination_length;
msg->rtm_src_len = source_length;
msg->rtm_tos = tos;
msg->rtm_table = table;
msg->rtm_protocol = protocol;
msg->rtm_scope = scope;
msg->rtm_type = type;
msg->rtm_flags = flags;
nlm->nlmsg_len = NLMSG_LENGTH(sizeof(struct rtmsg));
AddRTA(nlm, RTA_DST, destination.ToPackedString().c_str(),
destination.ToPackedString().size());
AddRTA(nlm, RTA_OIF, &interface_index, sizeof(interface_index));
}
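// Each test below builds a canned kernel reply with the helpers above.
// Dump-style requests (RTM_GETLINK / RTM_GETADDR / RTM_GETROUTE) are
// answered with one or more RTM_NEW* messages followed by NLMSG_DONE;
// mutating requests are answered with an NLMSG_ERROR whose error field is 0,
// which Netlink::Recv treats as an ACK.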
TEST_F(NetlinkTest, GetLinkInfoWorks) {
auto netlink = std::make_unique<Netlink>(&mock_kernel_);
uint8_t hwaddr[] = {'a', 'b', 'c', 'd', 'e', 'f'};
uint8_t bcaddr[] = {'c', 'b', 'a', 'f', 'e', 'd'};
ExpectNetlinkPacket(
RTM_GETLINK, NLM_F_ROOT | NLM_F_MATCH | NLM_F_REQUEST,
[&hwaddr, &bcaddr](void* buf, size_t len, int seq) {
int ret = 0;
struct nlmsghdr* netlink_message =
CreateNetlinkMessage(buf, nullptr, RTM_NEWLINK, seq);
CreateIfinfomsg(netlink_message, "tun0", 1,
7,
0,
0xFFFFFFFF, hwaddr, 6, bcaddr, 6);
ret += NLMSG_ALIGN(netlink_message->nlmsg_len);
netlink_message =
CreateNetlinkMessage(buf, netlink_message, NLMSG_DONE, seq);
ret += NLMSG_ALIGN(netlink_message->nlmsg_len);
return ret;
});
Netlink::LinkInfo link_info;
EXPECT_TRUE(netlink->GetLinkInfo("tun0", &link_info));
EXPECT_EQ(7, link_info.index);
EXPECT_EQ(1, link_info.type);
for (int i = 0; i < link_info.hardware_address_length; ++i) {
EXPECT_EQ(hwaddr[i], link_info.hardware_address[i]);
}
for (int i = 0; i < link_info.broadcast_address_length; ++i) {
EXPECT_EQ(bcaddr[i], link_info.broadcast_address[i]);
}
}
TEST_F(NetlinkTest, GetAddressesWorks) {
auto netlink = std::make_unique<Netlink>(&mock_kernel_);
absl::node_hash_set<std::string> addresses = {
QuicIpAddress::Any4().ToString(), QuicIpAddress::Any6().ToString()};
ExpectNetlinkPacket(
RTM_GETADDR, NLM_F_ROOT | NLM_F_MATCH | NLM_F_REQUEST,
[&addresses](void* buf, size_t len, int seq) {
int ret = 0;
struct nlmsghdr* nlm = nullptr;
for (const auto& address : addresses) {
QuicIpAddress ip;
ip.FromString(address);
nlm = CreateNetlinkMessage(buf, nlm, RTM_NEWADDR, seq);
CreateIfaddrmsg(nlm, 7, 24,
0, RT_SCOPE_UNIVERSE, ip);
ret += NLMSG_ALIGN(nlm->nlmsg_len);
}
{
QuicIpAddress ip;
ip.FromString("10.0.0.1");
nlm = CreateNetlinkMessage(buf, nlm, RTM_NEWADDR, seq);
CreateIfaddrmsg(nlm, 7, 16,
IFA_F_OPTIMISTIC,
RT_SCOPE_UNIVERSE, ip);
ret += NLMSG_ALIGN(nlm->nlmsg_len);
ip.FromString("10.0.0.2");
nlm = CreateNetlinkMessage(buf, nlm, RTM_NEWADDR, seq);
CreateIfaddrmsg(nlm, 7, 16,
IFA_F_TENTATIVE,
RT_SCOPE_UNIVERSE, ip);
ret += NLMSG_ALIGN(nlm->nlmsg_len);
}
nlm = CreateNetlinkMessage(buf, nlm, NLMSG_DONE, seq);
ret += NLMSG_ALIGN(nlm->nlmsg_len);
return ret;
});
std::vector<Netlink::AddressInfo> reported_addresses;
int num_ipv6_nodad_dadfailed_addresses = 0;
EXPECT_TRUE(netlink->GetAddresses(7, IFA_F_TENTATIVE | IFA_F_OPTIMISTIC,
&reported_addresses,
&num_ipv6_nodad_dadfailed_addresses));
for (const auto& reported_address : reported_addresses) {
EXPECT_TRUE(reported_address.local_address.IsInitialized());
EXPECT_FALSE(reported_address.interface_address.IsInitialized());
EXPECT_THAT(addresses, Contains(reported_address.local_address.ToString()));
addresses.erase(reported_address.local_address.ToString());
EXPECT_EQ(24, reported_address.prefix_length);
}
EXPECT_TRUE(addresses.empty());
}
TEST_F(NetlinkTest, ChangeLocalAddressAdd) {
auto netlink = std::make_unique<Netlink>(&mock_kernel_);
QuicIpAddress ip = QuicIpAddress::Any6();
ExpectNetlinkPacket(
RTM_NEWADDR, NLM_F_ACK | NLM_F_REQUEST,
[](void* buf, size_t len, int seq) {
struct nlmsghdr* netlink_message =
CreateNetlinkMessage(buf, nullptr, NLMSG_ERROR, seq);
auto* err =
reinterpret_cast<struct nlmsgerr*>(NLMSG_DATA(netlink_message));
err->error = 0;
netlink_message->nlmsg_len = NLMSG_LENGTH(sizeof(struct nlmsgerr));
return netlink_message->nlmsg_len;
},
[ip](const void* buf, size_t len) {
auto* netlink_message = reinterpret_cast<const struct nlmsghdr*>(buf);
auto* ifa = reinterpret_cast<const struct ifaddrmsg*>(
NLMSG_DATA(netlink_message));
EXPECT_EQ(19, ifa->ifa_prefixlen);
EXPECT_EQ(RT_SCOPE_UNIVERSE, ifa->ifa_scope);
EXPECT_EQ(IFA_F_PERMANENT, ifa->ifa_flags);
EXPECT_EQ(7, ifa->ifa_index);
EXPECT_EQ(AF_INET6, ifa->ifa_family);
const struct rtattr* rta;
int payload_length = IFA_PAYLOAD(netlink_message);
int num_rta = 0;
for (rta = IFA_RTA(ifa); RTA_OK(rta, payload_length);
rta = RTA_NEXT(rta, payload_length)) {
switch (rta->rta_type) {
case IFA_LOCAL: {
EXPECT_EQ(ip.ToPackedString().size(), RTA_PAYLOAD(rta));
const auto* raw_address =
reinterpret_cast<const char*>(RTA_DATA(rta));
ASSERT_EQ(sizeof(in6_addr), RTA_PAYLOAD(rta));
QuicIpAddress address;
address.FromPackedString(raw_address, RTA_PAYLOAD(rta));
EXPECT_EQ(ip, address);
break;
}
case IFA_CACHEINFO: {
EXPECT_EQ(sizeof(struct ifa_cacheinfo), RTA_PAYLOAD(rta));
const auto* cache_info =
reinterpret_cast<const struct ifa_cacheinfo*>(RTA_DATA(rta));
EXPECT_EQ(8, cache_info->ifa_prefered);
EXPECT_EQ(6, cache_info->ifa_valid);
EXPECT_EQ(4, cache_info->cstamp);
EXPECT_EQ(2, cache_info->tstamp);
break;
}
default:
EXPECT_TRUE(false) << "Seeing rtattr that should not exist";
}
++num_rta;
}
EXPECT_EQ(2, num_rta);
});
struct {
struct rtattr rta;
struct ifa_cacheinfo cache_info;
} additional_rta;
additional_rta.rta.rta_type = IFA_CACHEINFO;
additional_rta.rta.rta_len = RTA_LENGTH(sizeof(struct ifa_cacheinfo));
additional_rta.cache_info.ifa_prefered = 8;
additional_rta.cache_info.ifa_valid = 6;
additional_rta.cache_info.cstamp = 4;
additional_rta.cache_info.tstamp = 2;
EXPECT_TRUE(netlink->ChangeLocalAddress(7, Netlink::Verb::kAdd, ip, 19,
IFA_F_PERMANENT, RT_SCOPE_UNIVERSE,
{&additional_rta.rta}));
}
TEST_F(NetlinkTest, ChangeLocalAddressRemove) {
auto netlink = std::make_unique<Netlink>(&mock_kernel_);
QuicIpAddress ip = QuicIpAddress::Any4();
ExpectNetlinkPacket(
RTM_DELADDR, NLM_F_ACK | NLM_F_REQUEST,
[](void* buf, size_t len, int seq) {
struct nlmsghdr* netlink_message =
CreateNetlinkMessage(buf, nullptr, NLMSG_ERROR, seq);
auto* err =
reinterpret_cast<struct nlmsgerr*>(NLMSG_DATA(netlink_message));
err->error = 0;
netlink_message->nlmsg_len = NLMSG_LENGTH(sizeof(struct nlmsgerr));
return netlink_message->nlmsg_len;
},
[ip](const void* buf, size_t len) {
auto* netlink_message = reinterpret_cast<const struct nlmsghdr*>(buf);
auto* ifa = reinterpret_cast<const struct ifaddrmsg*>(
NLMSG_DATA(netlink_message));
EXPECT_EQ(32, ifa->ifa_prefixlen);
EXPECT_EQ(RT_SCOPE_UNIVERSE, ifa->ifa_scope);
EXPECT_EQ(0, ifa->ifa_flags);
EXPECT_EQ(7, ifa->ifa_index);
EXPECT_EQ(AF_INET, ifa->ifa_family);
const struct rtattr* rta;
int payload_length = IFA_PAYLOAD(netlink_message);
int num_rta = 0;
for (rta = IFA_RTA(ifa); RTA_OK(rta, payload_length);
rta = RTA_NEXT(rta, payload_length)) {
switch (rta->rta_type) {
case IFA_LOCAL: {
const auto* raw_address =
reinterpret_cast<const char*>(RTA_DATA(rta));
ASSERT_EQ(sizeof(in_addr), RTA_PAYLOAD(rta));
QuicIpAddress address;
address.FromPackedString(raw_address, RTA_PAYLOAD(rta));
EXPECT_EQ(ip, address);
break;
}
default:
EXPECT_TRUE(false) << "Seeing rtattr that should not exist";
}
++num_rta;
}
EXPECT_EQ(1, num_rta);
});
EXPECT_TRUE(netlink->ChangeLocalAddress(7, Netlink::Verb::kRemove, ip, 32, 0,
RT_SCOPE_UNIVERSE, {}));
}
TEST_F(NetlinkTest, GetRouteInfoWorks) {
auto netlink = std::make_unique<Netlink>(&mock_kernel_);
QuicIpAddress destination;
ASSERT_TRUE(destination.FromString("f800::2"));
ExpectNetlinkPacket(RTM_GETROUTE, NLM_F_ROOT | NLM_F_MATCH | NLM_F_REQUEST,
[destination](void* buf, size_t len, int seq) {
int ret = 0;
struct nlmsghdr* netlink_message = CreateNetlinkMessage(
buf, nullptr, RTM_NEWROUTE, seq);
CreateRtmsg(netlink_message, AF_INET6, 48, 0, 0,
RT_TABLE_MAIN, RTPROT_STATIC, RT_SCOPE_LINK,
RTN_UNICAST, 0, destination, 7);
ret += NLMSG_ALIGN(netlink_message->nlmsg_len);
netlink_message = CreateNetlinkMessage(
buf, netlink_message, NLMSG_DONE, seq);
ret += NLMSG_ALIGN(netlink_message->nlmsg_len);
QUIC_LOG(INFO) << "ret: " << ret;
return ret;
});
std::vector<Netlink::RoutingRule> routing_rules;
EXPECT_TRUE(netlink->GetRouteInfo(&routing_rules));
ASSERT_EQ(1, routing_rules.size());
EXPECT_EQ(RT_SCOPE_LINK, routing_rules[0].scope);
EXPECT_EQ(IpRange(destination, 48).ToString(),
routing_rules[0].destination_subnet.ToString());
EXPECT_FALSE(routing_rules[0].preferred_source.IsInitialized());
EXPECT_EQ(7, routing_rules[0].out_interface);
}
TEST_F(NetlinkTest, ChangeRouteAdd) {
auto netlink = std::make_unique<Netlink>(&mock_kernel_);
QuicIpAddress preferred_ip;
preferred_ip.FromString("ff80:dead:beef::1");
IpRange subnet;
subnet.FromString("ff80:dead:beef::/48");
int egress_interface_index = 7;
ExpectNetlinkPacket(
RTM_NEWROUTE, NLM_F_ACK | NLM_F_REQUEST | NLM_F_CREATE | NLM_F_EXCL,
[](void* buf, size_t len, int seq) {
struct nlmsghdr* netlink_message =
CreateNetlinkMessage(buf, nullptr, NLMSG_ERROR, seq);
auto* err =
reinterpret_cast<struct nlmsgerr*>(NLMSG_DATA(netlink_message));
err->error = 0;
netlink_message->nlmsg_len = NLMSG_LENGTH(sizeof(struct nlmsgerr));
return netlink_message->nlmsg_len;
},
[preferred_ip, subnet, egress_interface_index](const void* buf,
size_t len) {
auto* netlink_message = reinterpret_cast<const struct nlmsghdr*>(buf);
auto* rtm =
reinterpret_cast<const struct rtmsg*>(NLMSG_DATA(netlink_message));
EXPECT_EQ(AF_INET6, rtm->rtm_family);
EXPECT_EQ(48, rtm->rtm_dst_len);
EXPECT_EQ(0, rtm->rtm_src_len);
EXPECT_EQ(RT_TABLE_MAIN, rtm->rtm_table);
EXPECT_EQ(RTPROT_STATIC, rtm->rtm_protocol);
EXPECT_EQ(RT_SCOPE_LINK, rtm->rtm_scope);
EXPECT_EQ(RTN_UNICAST, rtm->rtm_type);
const struct rtattr* rta;
int payload_length = RTM_PAYLOAD(netlink_message);
int num_rta = 0;
for (rta = RTM_RTA(rtm); RTA_OK(rta, payload_length);
rta = RTA_NEXT(rta, payload_length)) {
switch (rta->rta_type) {
case RTA_PREFSRC: {
const auto* raw_address =
reinterpret_cast<const char*>(RTA_DATA(rta));
ASSERT_EQ(sizeof(struct in6_addr), RTA_PAYLOAD(rta));
QuicIpAddress address;
address.FromPackedString(raw_address, RTA_PAYLOAD(rta));
EXPECT_EQ(preferred_ip, address);
break;
}
case RTA_GATEWAY: {
const auto* raw_address =
reinterpret_cast<const char*>(RTA_DATA(rta));
ASSERT_EQ(sizeof(struct in6_addr), RTA_PAYLOAD(rta));
QuicIpAddress address;
address.FromPackedString(raw_address, RTA_PAYLOAD(rta));
EXPECT_EQ(*QboneConstants::GatewayAddress(), address);
break;
}
case RTA_OIF: {
ASSERT_EQ(sizeof(int), RTA_PAYLOAD(rta));
const auto* interface_index =
reinterpret_cast<const int*>(RTA_DATA(rta));
EXPECT_EQ(egress_interface_index, *interface_index);
break;
}
case RTA_DST: {
const auto* raw_address =
reinterpret_cast<const char*>(RTA_DATA(rta));
ASSERT_EQ(sizeof(struct in6_addr), RTA_PAYLOAD(rta));
QuicIpAddress address;
address.FromPackedString(raw_address, RTA_PAYLOAD(rta));
EXPECT_EQ(subnet.ToString(),
IpRange(address, rtm->rtm_dst_len).ToString());
break;
}
case RTA_TABLE: {
ASSERT_EQ(*reinterpret_cast<uint32_t*>(RTA_DATA(rta)),
QboneConstants::kQboneRouteTableId);
break;
}
default:
EXPECT_TRUE(false) << "Seeing rtattr that should not be sent";
}
++num_rta;
}
EXPECT_EQ(5, num_rta);
});
EXPECT_TRUE(netlink->ChangeRoute(
Netlink::Verb::kAdd, QboneConstants::kQboneRouteTableId, subnet,
RT_SCOPE_LINK, preferred_ip, egress_interface_index));
}
TEST_F(NetlinkTest, ChangeRouteRemove) {
auto netlink = std::make_unique<Netlink>(&mock_kernel_);
QuicIpAddress preferred_ip;
preferred_ip.FromString("ff80:dead:beef::1");
IpRange subnet;
subnet.FromString("ff80:dead:beef::/48");
int egress_interface_index = 7;
ExpectNetlinkPacket(
RTM_DELROUTE, NLM_F_ACK | NLM_F_REQUEST,
[](void* buf, size_t len, int seq) {
struct nlmsghdr* netlink_message =
CreateNetlinkMessage(buf, nullptr, NLMSG_ERROR, seq);
auto* err =
reinterpret_cast<struct nlmsgerr*>(NLMSG_DATA(netlink_message));
err->error = 0;
netlink_message->nlmsg_len = NLMSG_LENGTH(sizeof(struct nlmsgerr));
return netlink_message->nlmsg_len;
},
[preferred_ip, subnet, egress_interface_index](const void* buf,
size_t len) {
auto* netlink_message = reinterpret_cast<const struct nlmsghdr*>(buf);
auto* rtm =
reinterpret_cast<const struct rtmsg*>(NLMSG_DATA(netlink_message));
EXPECT_EQ(AF_INET6, rtm->rtm_family);
EXPECT_EQ(48, rtm->rtm_dst_len);
EXPECT_EQ(0, rtm->rtm_src_len);
EXPECT_EQ(RT_TABLE_MAIN, rtm->rtm_table);
EXPECT_EQ(RTPROT_UNSPEC, rtm->rtm_protocol);
EXPECT_EQ(RT_SCOPE_LINK, rtm->rtm_scope);
EXPECT_EQ(RTN_UNICAST, rtm->rtm_type);
const struct rtattr* rta;
int payload_length = RTM_PAYLOAD(netlink_message);
int num_rta = 0;
for (rta = RTM_RTA(rtm); RTA_OK(rta, payload_length);
rta = RTA_NEXT(rta, payload_length)) {
switch (rta->rta_type) {
case RTA_PREFSRC: {
const auto* raw_address =
reinterpret_cast<const char*>(RTA_DATA(rta));
ASSERT_EQ(sizeof(struct in6_addr), RTA_PAYLOAD(rta));
QuicIpAddress address;
address.FromPackedString(raw_address, RTA_PAYLOAD(rta));
EXPECT_EQ(preferred_ip, address);
break;
}
case RTA_OIF: {
ASSERT_EQ(sizeof(int), RTA_PAYLOAD(rta));
const auto* interface_index =
reinterpret_cast<const int*>(RTA_DATA(rta));
EXPECT_EQ(egress_interface_index, *interface_index);
break;
}
case RTA_DST: {
const auto* raw_address =
reinterpret_cast<const char*>(RTA_DATA(rta));
ASSERT_EQ(sizeof(struct in6_addr), RTA_PAYLOAD(rta));
QuicIpAddress address;
address.FromPackedString(raw_address, RTA_PAYLOAD(rta));
EXPECT_EQ(subnet.ToString(),
IpRange(address, rtm->rtm_dst_len).ToString());
break;
}
case RTA_TABLE: {
ASSERT_EQ(*reinterpret_cast<uint32_t*>(RTA_DATA(rta)),
QboneConstants::kQboneRouteTableId);
break;
}
default:
EXPECT_TRUE(false) << "Seeing rtattr that should not be sent";
}
++num_rta;
}
EXPECT_EQ(4, num_rta);
});
EXPECT_TRUE(netlink->ChangeRoute(
Netlink::Verb::kRemove, QboneConstants::kQboneRouteTableId, subnet,
RT_SCOPE_LINK, preferred_ip, egress_interface_index));
}
TEST_F(NetlinkTest, ChangeRouteReplace) {
auto netlink = std::make_unique<Netlink>(&mock_kernel_);
QuicIpAddress preferred_ip;
preferred_ip.FromString("ff80:dead:beef::1");
IpRange subnet;
subnet.FromString("ff80:dead:beef::/48");
int egress_interface_index = 7;
ExpectNetlinkPacket(
RTM_NEWROUTE, NLM_F_ACK | NLM_F_REQUEST | NLM_F_CREATE | NLM_F_REPLACE,
[](void* buf, size_t len, int seq) {
struct nlmsghdr* netlink_message =
CreateNetlinkMessage(buf, nullptr, NLMSG_ERROR, seq);
auto* err =
reinterpret_cast<struct nlmsgerr*>(NLMSG_DATA(netlink_message));
err->error = 0;
netlink_message->nlmsg_len = NLMSG_LENGTH(sizeof(struct nlmsgerr));
return netlink_message->nlmsg_len;
},
[preferred_ip, subnet, egress_interface_index](const void* buf,
size_t len) {
auto* netlink_message = reinterpret_cast<const struct nlmsghdr*>(buf);
auto* rtm =
reinterpret_cast<const struct rtmsg*>(NLMSG_DATA(netlink_message));
EXPECT_EQ(AF_INET6, rtm->rtm_family);
EXPECT_EQ(48, rtm->rtm_dst_len);
EXPECT_EQ(0, rtm->rtm_src_len);
EXPECT_EQ(RT_TABLE_MAIN, rtm->rtm_table);
EXPECT_EQ(RTPROT_STATIC, rtm->rtm_protocol);
EXPECT_EQ(RT_SCOPE_LINK, rtm->rtm_scope);
EXPECT_EQ(RTN_UNICAST, rtm->rtm_type);
const struct rtattr* rta;
int payload_length = RTM_PAYLOAD(netlink_message);
int num_rta = 0;
for (rta = RTM_RTA(rtm); RTA_OK(rta, payload_length);
rta = RTA_NEXT(rta, payload_length)) {
switch (rta->rta_type) {
case RTA_PREFSRC: {
const auto* raw_address =
reinterpret_cast<const char*>(RTA_DATA(rta));
ASSERT_EQ(sizeof(struct in6_addr), RTA_PAYLOAD(rta));
QuicIpAddress address;
address.FromPackedString(raw_address, RTA_PAYLOAD(rta));
EXPECT_EQ(preferred_ip, address);
break;
}
case RTA_GATEWAY: {
const auto* raw_address =
reinterpret_cast<const char*>(RTA_DATA(rta));
ASSERT_EQ(sizeof(struct in6_addr), RTA_PAYLOAD(rta));
QuicIpAddress address;
address.FromPackedString(raw_address, RTA_PAYLOAD(rta));
EXPECT_EQ(*QboneConstants::GatewayAddress(), address);
break;
}
case RTA_OIF: {
ASSERT_EQ(sizeof(int), RTA_PAYLOAD(rta));
const auto* interface_index =
reinterpret_cast<const int*>(RTA_DATA(rta));
EXPECT_EQ(egress_interface_index, *interface_index);
break;
}
case RTA_DST: {
const auto* raw_address =
reinterpret_cast<const char*>(RTA_DATA(rta));
ASSERT_EQ(sizeof(struct in6_addr), RTA_PAYLOAD(rta));
QuicIpAddress address;
address.FromPackedString(raw_address, RTA_PAYLOAD(rta));
EXPECT_EQ(subnet.ToString(),
IpRange(address, rtm->rtm_dst_len).ToString());
break;
}
case RTA_TABLE: {
ASSERT_EQ(*reinterpret_cast<uint32_t*>(RTA_DATA(rta)),
QboneConstants::kQboneRouteTableId);
break;
}
default:
EXPECT_TRUE(false) << "Seeing rtattr that should not be sent";
}
++num_rta;
}
EXPECT_EQ(5, num_rta);
});
EXPECT_TRUE(netlink->ChangeRoute(
Netlink::Verb::kReplace, QboneConstants::kQboneRouteTableId, subnet,
RT_SCOPE_LINK, preferred_ip, egress_interface_index));
}
}  // namespace
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/qbone/platform/netlink.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/qbone/platform/netlink_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
3e76bcbc-e1c4-4513-974e-a2ea6cb94dd3 | cpp | tensorflow/tensorflow | floor_mod | tensorflow/lite/kernels/floor_mod.cc | tensorflow/lite/kernels/floor_mod_test.cc | #include <stddef.h>
#include <stdint.h>
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/reference/binary_function.h"
#include "tensorflow/lite/kernels/internal/reference/reference_ops.h"
#include "tensorflow/lite/kernels/internal/tensor.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace floor_mod {
namespace {
constexpr int kInputTensor1 = 0;
constexpr int kInputTensor2 = 1;
constexpr int kOutputTensor = 0;
struct OpData {
bool requires_broadcast;
};
void* Init(TfLiteContext* context, const char* buffer, size_t length) {
auto* data = new OpData;
data->requires_broadcast = false;
return data;
}
void Free(TfLiteContext* context, void* buffer) {
delete reinterpret_cast<OpData*>(buffer);
}
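// Validates the node (two inputs of one shared type, one output), restricts
// the element type to the supported set, and sizes the output tensor: the
// input shape when both input shapes match, otherwise the shape computed by
// CalculateShapeForBroadcast. Whether broadcasting is needed is cached in
// OpData for Eval.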
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
OpData* data = reinterpret_cast<OpData*>(node->user_data);
const TfLiteTensor* input1;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputTensor1, &input1));
const TfLiteTensor* input2;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputTensor2, &input2));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
TF_LITE_ENSURE_TYPES_EQ(context, input1->type, input2->type);
const TfLiteType type = input1->type;
if (type != kTfLiteInt8 && type != kTfLiteInt16 && type != kTfLiteInt32 &&
type != kTfLiteFloat32 && type != kTfLiteInt64) {
TF_LITE_KERNEL_LOG(context, "Type '%s' is not supported by floor_mod.",
TfLiteTypeGetName(type));
return kTfLiteError;
}
output->type = type;
data->requires_broadcast = !HaveSameShapes(input1, input2);
TfLiteIntArray* output_size = nullptr;
if (data->requires_broadcast) {
TF_LITE_ENSURE_OK(context, CalculateShapeForBroadcast(
context, input1, input2, &output_size));
} else {
output_size = TfLiteIntArrayCopy(input1->dims);
}
return context->ResizeTensor(context, output, output_size);
}
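// Applies reference_ops::FloorMod elementwise, using 4-D slow broadcasting
// when the shapes differ. Integer denominators are scanned up front so a
// zero divisor fails cleanly instead of trapping. FloorMod implements
// floored division, so the result takes the sign of the divisor; e.g.
// floor_mod(-9, 2) == 1 and floor_mod(7, -4) == -1, as the NegativeValue
// cases in the accompanying unit tests check.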
template <typename T>
TfLiteStatus EvalImpl(TfLiteContext* context, bool requires_broadcast,
const TfLiteTensor* input1, const TfLiteTensor* input2,
TfLiteTensor* output) {
const T* denominator_data = GetTensorData<T>(input2);
if (input2->type == kTfLiteInt8 || input2->type == kTfLiteInt16 ||
input2->type == kTfLiteInt32 || input2->type == kTfLiteInt64) {
const int num_elements = NumElements(input2);
for (int i = 0; i < num_elements; ++i) {
if (denominator_data[i] == 0) {
TF_LITE_KERNEL_LOG(context, "Division by 0");
return kTfLiteError;
}
}
}
if (requires_broadcast) {
reference_ops::BroadcastBinaryFunction4DSlow<T, T, T>(
GetTensorShape(input1), GetTensorData<T>(input1),
GetTensorShape(input2), denominator_data, GetTensorShape(output),
GetTensorData<T>(output), reference_ops::FloorMod<T>);
} else {
reference_ops::BinaryFunction<T, T, T>(
GetTensorShape(input1), GetTensorData<T>(input1),
GetTensorShape(input2), GetTensorData<T>(input2),
GetTensorShape(output), GetTensorData<T>(output),
reference_ops::FloorMod<T>);
}
return kTfLiteOk;
}
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
OpData* data = reinterpret_cast<OpData*>(node->user_data);
const TfLiteTensor* input1;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputTensor1, &input1));
const TfLiteTensor* input2;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputTensor2, &input2));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
switch (input1->type) {
case kTfLiteInt8: {
return EvalImpl<int8_t>(context, data->requires_broadcast, input1, input2,
output);
}
case kTfLiteInt16: {
return EvalImpl<int16_t>(context, data->requires_broadcast, input1,
input2, output);
}
case kTfLiteInt32: {
return EvalImpl<int32_t>(context, data->requires_broadcast, input1,
input2, output);
}
case kTfLiteInt64: {
return EvalImpl<int64_t>(context, data->requires_broadcast, input1,
input2, output);
}
case kTfLiteFloat32: {
return EvalImpl<float>(context, data->requires_broadcast, input1, input2,
output);
}
default: {
TF_LITE_KERNEL_LOG(context, "Type '%s' is not supported by floor_mod.",
TfLiteTypeGetName(input1->type));
return kTfLiteError;
}
}
}
}  // namespace
}  // namespace floor_mod
TfLiteRegistration* Register_FLOOR_MOD() {
static TfLiteRegistration r = {floor_mod::Init, floor_mod::Free,
floor_mod::Prepare, floor_mod::Eval};
return &r;
}
}  // namespace builtin
}  // namespace ops
} | #include <stdint.h>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/kernels/floor_mod_test_common.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace {
using ::testing::ElementsAre;
TEST(FloorModModel, Simple) {
FloorModModel<int32_t> model({TensorType_INT32, {1, 2, 2, 1}},
{TensorType_INT32, {1, 2, 2, 1}},
{TensorType_INT32, {}});
model.PopulateTensor<int32_t>(model.input1(), {10, 9, 11, 3});
model.PopulateTensor<int32_t>(model.input2(), {2, 2, 3, 4});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 2, 2, 1));
EXPECT_THAT(model.GetOutput(), ElementsAre(0, 1, 2, 3));
}
TEST(FloorModModel, NegativeValue) {
FloorModModel<int32_t> model({TensorType_INT32, {1, 2, 2, 1}},
{TensorType_INT32, {1, 2, 2, 1}},
{TensorType_INT32, {}});
model.PopulateTensor<int32_t>(model.input1(), {10, -9, -11, 7});
model.PopulateTensor<int32_t>(model.input2(), {2, 2, -3, -4});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 2, 2, 1));
EXPECT_THAT(model.GetOutput(), ElementsAre(0, 1, -2, -1));
}
TEST(FloorModModel, BroadcastFloorMod) {
FloorModModel<int32_t> model({TensorType_INT32, {1, 2, 2, 1}},
{TensorType_INT32, {1}}, {TensorType_INT32, {}});
model.PopulateTensor<int32_t>(model.input1(), {10, -9, -11, 7});
model.PopulateTensor<int32_t>(model.input2(), {-3});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 2, 2, 1));
EXPECT_THAT(model.GetOutput(), ElementsAre(-2, 0, -2, -2));
}
TEST(FloorModModel, Int64WithBroadcast) {
FloorModModel<int64_t> model({TensorType_INT64, {1, 2, 2, 1}},
{TensorType_INT64, {1}}, {TensorType_INT64, {}});
model.PopulateTensor<int64_t>(model.input1(), {10, -9, -11, (1LL << 34) + 9});
model.PopulateTensor<int64_t>(model.input2(), {-(1LL << 33)});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 2, 2, 1));
EXPECT_THAT(model.GetOutput(),
ElementsAre(-8589934582, -9, -11, -8589934583));
}
TEST(FloorModModel, FloatSimple) {
FloorModModel<float> model({TensorType_FLOAT32, {1, 2, 2, 1}},
{TensorType_FLOAT32, {1, 2, 2, 1}},
{TensorType_FLOAT32, {}});
model.PopulateTensor<float>(model.input1(), {10, 9, 11, 3});
model.PopulateTensor<float>(model.input2(), {2, 2, 3, 4});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 2, 2, 1));
EXPECT_THAT(model.GetOutput(), ElementsAre(0, 1, 2, 3));
}
TEST(FloorModModel, FloatNegativeValue) {
FloorModModel<float> model({TensorType_FLOAT32, {1, 2, 2, 1}},
{TensorType_FLOAT32, {1, 2, 2, 1}},
{TensorType_FLOAT32, {}});
model.PopulateTensor<float>(model.input1(), {10, -9, -11, 7});
model.PopulateTensor<float>(model.input2(), {2, 2, -3, -4});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 2, 2, 1));
EXPECT_THAT(model.GetOutput(), ElementsAre(0, 1, -2, -1));
}
TEST(FloorModModel, FloatBroadcastFloorMod) {
FloorModModel<float> model({TensorType_FLOAT32, {1, 2, 2, 1}},
{TensorType_FLOAT32, {1}},
{TensorType_FLOAT32, {}});
model.PopulateTensor<float>(model.input1(), {10, -9, -11, 7});
model.PopulateTensor<float>(model.input2(), {-3});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 2, 2, 1));
EXPECT_THAT(model.GetOutput(), ElementsAre(-2, 0, -2, -2));
}
TEST(FloorModModel, SimpleInt16) {
FloorModModel<int16_t> model({TensorType_INT16, {1, 2, 2, 1}},
{TensorType_INT16, {1, 2, 2, 1}},
{TensorType_INT16, {}});
model.PopulateTensor<int16_t>(model.input1(), {10, 9, 11, 3});
model.PopulateTensor<int16_t>(model.input2(), {2, 2, 3, 4});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 2, 2, 1));
EXPECT_THAT(model.GetOutput(), ElementsAre(0, 1, 2, 3));
}
TEST(FloorModModel, NegativeValueInt16) {
FloorModModel<int16_t> model({TensorType_INT16, {1, 2, 2, 1}},
{TensorType_INT16, {1, 2, 2, 1}},
{TensorType_INT16, {}});
model.PopulateTensor<int16_t>(model.input1(), {10, -9, -11, 7});
model.PopulateTensor<int16_t>(model.input2(), {2, 2, -3, -4});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 2, 2, 1));
EXPECT_THAT(model.GetOutput(), ElementsAre(0, 1, -2, -1));
}
TEST(FloorModModel, BroadcastFloorModInt16) {
FloorModModel<int16_t> model({TensorType_INT16, {1, 2, 2, 1}},
{TensorType_INT16, {1}}, {TensorType_INT16, {}});
model.PopulateTensor<int16_t>(model.input1(), {10, -9, -11, 7});
model.PopulateTensor<int16_t>(model.input2(), {-3});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 2, 2, 1));
EXPECT_THAT(model.GetOutput(), ElementsAre(-2, 0, -2, -2));
}
}  // namespace
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/floor_mod.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/floor_mod_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
d01b82b0-bed4-4e1d-aee4-3f6f5100cdea | cpp | google/tensorstore | xz_compressor | tensorstore/internal/compression/xz_compressor.cc | tensorstore/internal/compression/xz_compressor_test.cc | #include "tensorstore/internal/compression/xz_compressor.h"
#include <cstddef>
#include <memory>
#include <utility>
#include "riegeli/bytes/cord_reader.h"
#include "riegeli/bytes/cord_writer.h"
#include "riegeli/bytes/reader.h"
#include "riegeli/bytes/writer.h"
#include "riegeli/xz/xz_reader.h"
#include "riegeli/xz/xz_writer.h"
namespace tensorstore {
namespace internal {
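// GetWriter compresses into the .xz container with the configured integrity
// check, compression level, and extreme flag. GetReader accepts both .xz and
// legacy .lzma input, and set_concatenate(true) makes a sequence of
// back-to-back frames decode as one logical stream. The element_bytes
// argument is part of the compressor interface but is not consulted by the
// xz coders.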
std::unique_ptr<riegeli::Writer> XzCompressor::GetWriter(
std::unique_ptr<riegeli::Writer> base_writer, size_t element_bytes) const {
using Writer = riegeli::XzWriter<std::unique_ptr<riegeli::Writer>>;
Writer::Options options;
options.set_container(Writer::Container::kXz);
options.set_check(static_cast<Writer::Check>(check));
options.set_compression_level(level);
options.set_extreme(extreme);
return std::make_unique<Writer>(std::move(base_writer), options);
}
std::unique_ptr<riegeli::Reader> XzCompressor::GetReader(
std::unique_ptr<riegeli::Reader> base_reader, size_t element_bytes) const {
using Reader = riegeli::XzReader<std::unique_ptr<riegeli::Reader>>;
Reader::Options options;
options.set_container(Reader::Container::kXzOrLzma);
options.set_concatenate(true);
return std::make_unique<Reader>(std::move(base_reader), options);
}
}  // namespace internal
} | #include "tensorstore/internal/compression/xz_compressor.h"
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/strings/cord.h"
#include "absl/strings/cord_test_helpers.h"
#include <lzma.h>
#include "tensorstore/util/status_testutil.h"
namespace {
using ::tensorstore::MatchesStatus;
using ::tensorstore::internal::XzCompressor;
TEST(XzCompressorTest, SmallRoundtrip) {
XzCompressor compressor;
const absl::Cord input("The quick brown fox jumped over the lazy dog.");
absl::Cord encode_result("abc"), decode_result("def");
TENSORSTORE_ASSERT_OK(compressor.Encode(input, &encode_result, 0));
ASSERT_GE(encode_result.size(), 3);
EXPECT_EQ("abc", encode_result.Subcord(0, 3));
TENSORSTORE_ASSERT_OK(compressor.Decode(
encode_result.Subcord(3, encode_result.size() - 3), &decode_result, 0));
EXPECT_EQ("def" + std::string(input), decode_result);
}
TEST(XzCompressorTest, SmallRoundtripFragmented) {
XzCompressor compressor;
const absl::Cord input = absl::MakeFragmentedCord(
{"The quick", " brown fox", " jumped over", " ", "the lazy dog."});
absl::Cord encode_result("abc"), decode_result("def");
TENSORSTORE_ASSERT_OK(compressor.Encode(input, &encode_result, 0));
ASSERT_GE(encode_result.size(), 3);
EXPECT_EQ("abc", encode_result.Subcord(0, 3));
std::vector<std::string> encode_result_fragments;
for (size_t i = 3; i < encode_result.size(); ++i) {
encode_result_fragments.push_back(std::string(encode_result.Subcord(i, 1)));
}
TENSORSTORE_ASSERT_OK(compressor.Decode(
absl::MakeFragmentedCord(encode_result_fragments), &decode_result, 0));
EXPECT_EQ("def" + std::string(input), decode_result);
}
TEST(XzCompressorTest, LargeRoundtrip) {
std::string input(100000, '\0');
unsigned char x = 0;
for (auto& v : input) {
v = x;
x += 7;
}
XzCompressor compressor;
absl::Cord encode_result, decode_result;
TENSORSTORE_ASSERT_OK(
compressor.Encode(absl::Cord(input), &encode_result, 0));
TENSORSTORE_ASSERT_OK(compressor.Decode(encode_result, &decode_result, 0));
EXPECT_EQ(input, decode_result);
}
TEST(XzCompressorTest, NonDefaultLevel) {
XzCompressor compressor;
XzCompressor compressor2;
compressor2.level = 9;
const absl::Cord input("The quick brown fox jumped over the lazy dog.");
absl::Cord encode_result1, encode_result2;
TENSORSTORE_ASSERT_OK(compressor.Encode(input, &encode_result1, 0));
TENSORSTORE_ASSERT_OK(compressor2.Encode(input, &encode_result2, 0));
EXPECT_NE(encode_result1, encode_result2);
absl::Cord decode_result;
TENSORSTORE_ASSERT_OK(compressor.Decode(encode_result2, &decode_result, 0));
EXPECT_EQ(input, decode_result);
}
TEST(XzCompressorTest, NonDefaultCheck) {
XzCompressor compressor;
XzCompressor compressor2;
compressor2.check = LZMA_CHECK_CRC32;
const absl::Cord input("The quick brown fox jumped over the lazy dog.");
absl::Cord encode_result1, encode_result2;
TENSORSTORE_ASSERT_OK(compressor.Encode(input, &encode_result1, 0));
TENSORSTORE_ASSERT_OK(compressor2.Encode(input, &encode_result2, 0));
EXPECT_NE(encode_result1, encode_result2);
absl::Cord decode_result;
TENSORSTORE_ASSERT_OK(compressor.Decode(encode_result2, &decode_result, 0));
EXPECT_EQ(input, decode_result);
}
TEST(XzCompressorTest, DecodeCorruptData) {
XzCompressor compressor;
const absl::Cord input("The quick brown fox jumped over the lazy dog.");
{
absl::Cord encode_result, decode_result;
TENSORSTORE_ASSERT_OK(compressor.Encode(input, &encode_result, 0));
ASSERT_GE(encode_result.size(), 1);
std::string corrupted(encode_result);
corrupted[0] = 0;
EXPECT_THAT(compressor.Decode(absl::Cord(corrupted), &decode_result, 0),
MatchesStatus(absl::StatusCode::kInvalidArgument));
}
{
absl::Cord encode_result, decode_result;
TENSORSTORE_ASSERT_OK(compressor.Encode(input, &encode_result, 0));
ASSERT_GE(encode_result.size(), 1);
EXPECT_THAT(
compressor.Decode(encode_result.Subcord(0, encode_result.size() - 1),
&decode_result, 0),
MatchesStatus(absl::StatusCode::kInvalidArgument));
}
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/compression/xz_compressor.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/compression/xz_compressor_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
160aad35-3fdd-4e79-a85d-a8b2a9e081e4 | cpp | tensorflow/tensorflow | version | third_party/xla/xla/python/ifrt_proxy/server/version.cc | third_party/xla/xla/python/ifrt_proxy/server/version_test.cc | #include "xla/python/ifrt_proxy/server/version.h"
#include <algorithm>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
namespace xla {
namespace ifrt {
namespace proxy {
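// Picks the newest protocol version supported by both sides:
// min(client_max, server_max), rejected if that falls below either side's
// minimum. For example, mirroring the test parameters for this function:
// ChooseVersion(1, 3, 3, 4) returns 3, while ChooseVersion(1, 2, 3, 3)
// returns an InvalidArgument error.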
absl::StatusOr<int> ChooseVersion(int client_min_version,
int client_max_version,
int server_min_version,
int server_max_version) {
const int version = std::min(server_max_version, client_max_version);
if (version < server_min_version || version < client_min_version) {
return absl::InvalidArgumentError(absl::StrCat(
"IFRT Proxy client and server failed to agree on the "
"protocol version; supported versions: client = [",
client_min_version, ", ", client_max_version, "], server = [",
server_min_version, ", ", server_max_version, "]"));
}
return version;
}
}  // namespace proxy
}  // namespace ifrt
} | #include "xla/python/ifrt_proxy/server/version.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "tsl/platform/status_matchers.h"
namespace xla {
namespace ifrt {
namespace proxy {
namespace {
using ::tsl::testing::IsOk;
using ::tsl::testing::StatusIs;
struct Param {
int client_min_version;
int client_max_version;
int server_min_version;
int server_max_version;
};
class CompatibleVersionTest : public ::testing::TestWithParam<Param> {};
TEST_P(CompatibleVersionTest, Verify) {
const Param& param = GetParam();
EXPECT_THAT(ChooseVersion(param.client_min_version, param.client_max_version,
param.server_min_version, param.server_max_version),
IsOk());
}
INSTANTIATE_TEST_SUITE_P(CompatibleVersionTest, CompatibleVersionTest,
::testing::Values(Param{1, 1, 1, 1}, Param{1, 2, 2, 2},
Param{2, 2, 1, 2},
Param{1, 3, 3, 4}));
class IncompatibleVersionTest : public ::testing::TestWithParam<Param> {};
TEST_P(IncompatibleVersionTest, Verify) {
const Param& param = GetParam();
EXPECT_THAT(ChooseVersion(param.client_min_version, param.client_max_version,
param.server_min_version, param.server_max_version),
StatusIs(absl::StatusCode::kInvalidArgument));
}
INSTANTIATE_TEST_SUITE_P(IncompatibleVersionTest, IncompatibleVersionTest,
::testing::Values(Param{1, 2, 3, 3}, Param{1, 3, 4, 6},
Param{1, 1, 2, 2}));
}  // namespace
}  // namespace proxy
}  // namespace ifrt
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/python/ifrt_proxy/server/version.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/python/ifrt_proxy/server/version_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
2afc9150-0c31-4ec4-b238-3aee0943c1dd | cpp | tensorflow/tensorflow | cudnn_simplify_padding | third_party/xla/xla/service/gpu/transforms/cudnn_simplify_padding.cc | third_party/xla/xla/service/gpu/transforms/cudnn_simplify_padding_test.cc | #include "xla/service/gpu/transforms/cudnn_simplify_padding.h"
#include <algorithm>
#include <cstdint>
#include <iterator>
#include <optional>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_set.h"
#include "absl/container/inlined_vector.h"
#include "absl/strings/str_join.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/literal.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/service/gpu/cublas_cudnn.h"
#include "xla/service/hlo_creation_utils.h"
#include "xla/service/pattern_matcher.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/statusor.h"
namespace xla::gpu {
namespace {
namespace m = ::xla::match;
std::optional<int64_t> FindFalseIndex(absl::Span<const bool> vals) {
std::optional<int64_t> missing_dim;
for (int i = 0; i < vals.size(); i++) {
if (vals[i]) {
continue;
}
if (missing_dim.has_value()) {
VLOG(2) << "Multiple dimensions are missing from conv dnums; can't "
"determine which is vect_c dimension";
return std::nullopt;
}
missing_dim = i;
}
return missing_dim;
}
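// The vect_c ("vectorized channels") dimension of a vectorized conv (e.g.
// int8x32) is the one dimension that the convolution dimension numbers do
// not name. Both finders below mark every dimension the dnums mention and
// return the single unmarked index, or nullopt when zero or several
// dimensions are unaccounted for.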
std::optional<int64_t> FindOutputVectCDim(HloInstruction* conv) {
const ConvolutionDimensionNumbers& dnums =
conv->convolution_dimension_numbers();
int64_t num_dims = conv->shape().tuple_shapes(0).dimensions_size();
absl::InlinedVector<bool, 5> seen_dims(num_dims);
seen_dims[dnums.output_batch_dimension()] = true;
seen_dims[dnums.output_feature_dimension()] = true;
for (int64_t d : dnums.output_spatial_dimensions()) {
seen_dims[d] = true;
}
return FindFalseIndex(seen_dims);
}
std::optional<int64_t> FindKernelVectCDim(HloInstruction* conv) {
const ConvolutionDimensionNumbers& dnums =
conv->convolution_dimension_numbers();
int64_t num_dims = conv->operand(1)->shape().dimensions_size();
absl::InlinedVector<bool, 5> seen_dims(num_dims);
seen_dims[dnums.kernel_input_feature_dimension()] = true;
seen_dims[dnums.kernel_output_feature_dimension()] = true;
for (int64_t d : dnums.kernel_spatial_dimensions()) {
seen_dims[d] = true;
}
return FindFalseIndex(seen_dims);
}
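// Returns how many trailing output features of the conv's filter are known
// to be zero, or nullopt if that cannot be proven. Three weight patterns are
// recognized: a pad-with-zero of the filter; a reshape of such a pad that
// merges the vect_c dimension into the input-feature dimension; and a
// literal constant, which is scanned backwards along the output-feature
// dimension for its first nonzero element. For int8x32 convs with cuDNN
// filter reordering, the output-feature dimension is first recomputed
// through the reshape/transpose/reshape reordering sequence.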
std::optional<int64_t> NumTrailingZeroOutputFeatures(HloInstruction* conv) {
const ConvolutionDimensionNumbers& dnums =
conv->convolution_dimension_numbers();
int64_t feature_dim = dnums.kernel_output_feature_dimension();
const HloInstruction* weights = conv->operand(1);
auto backend_config = conv->backend_config<GpuBackendConfig>();
if (backend_config.ok() &&
backend_config->cudnn_conv_backend_config().reordered_int8_nchw_vect()) {
VLOG(2) << "Matched int8x32 convolution with filter reordering";
const HloInstruction *reshape, *transpose;
bool matched =
Match(weights, m::Reshape(m::Transpose(
&transpose, m::Reshape(&reshape, m::Op(&weights)))));
if (!matched || feature_dim != 0 || transpose->shape().rank() != 8) {
VLOG(2) << "The filter output feature dimension cannot be determined, as "
"the reordering sequence is modified";
return std::nullopt;
}
const auto& transpose_dimensions =
Cast<HloTransposeInstruction>(transpose)->dimensions();
int64_t preceding_size = 1;
for (int64_t i = transpose_dimensions.at(3) - 1; i >= 0; --i) {
preceding_size *= reshape->shape().dimensions(i);
}
int64_t accumulated_size = 1;
for (int64_t size : weights->shape().dimensions()) {
if (accumulated_size < preceding_size) {
accumulated_size *= size;
++feature_dim;
} else {
break;
}
}
if (accumulated_size != preceding_size) {
VLOG(2) << "Something is really wrong here, I give up";
return std::nullopt;
}
VLOG(2) << "Computed output feature dimension: " << feature_dim;
}
VLOG(2) << "Computing NumTrailingZeroOutputFeatures of " << conv->ToString()
<< "\nwith weights " << weights->ToString();
if (Match(weights, m::Pad(m::Op(), m::ConstantEffectiveScalar(0)))) {
const PaddingConfig::PaddingConfigDimension& padding_config =
weights->padding_config().dimensions(feature_dim);
VLOG(2) << "Success: Weights is a pad; padding on output feature dim is "
<< padding_config.edge_padding_high();
return padding_config.edge_padding_high();
} else if (const HloInstruction * pad; Match(
weights, m::Reshape(m::Pad(&pad, m::Op(),
m::ConstantEffectiveScalar(0))))) {
std::optional<int64_t> vect_c_dim = FindKernelVectCDim(conv);
if (!vect_c_dim.has_value()) {
VLOG(2) << "fail: Can't find vect_c dimension in conv.";
return std::nullopt;
}
if (*vect_c_dim != dnums.kernel_input_feature_dimension() + 1) {
VLOG(2) << "fail: vect_c dim is in the wrong place; should be right "
"after kernel input feature dims in conv.";
return std::nullopt;
}
absl::InlinedVector<int64_t, 5> expected_pad_dim_sizes(
weights->shape().dimensions().begin(),
weights->shape().dimensions().end());
expected_pad_dim_sizes[dnums.kernel_input_feature_dimension()] *=
weights->shape().dimensions(*vect_c_dim);
expected_pad_dim_sizes.erase(expected_pad_dim_sizes.begin() + *vect_c_dim);
if (pad->shape().dimensions() != expected_pad_dim_sizes) {
VLOG(2) << "fail: Reshape doesn't simply merge vect_c dimension into "
"input features dim "
<< weights->ToString() << " but expected dims "
<< absl::StrJoin(expected_pad_dim_sizes, ",");
return std::nullopt;
}
int64_t feature_dim_before_reshape = feature_dim;
if (dnums.kernel_output_feature_dimension() >
dnums.kernel_input_feature_dimension()) {
feature_dim_before_reshape--;
}
const PaddingConfig::PaddingConfigDimension& padding_config =
pad->padding_config().dimensions(feature_dim_before_reshape);
VLOG(2) << "Success: Weights is a reshape of a pad; padding on output "
"feature dim is "
<< padding_config.edge_padding_high();
return padding_config.edge_padding_high();
} else if (Match(weights, m::Constant())) {
const Literal& lit = weights->literal();
const auto& dims = weights->shape().dimensions();
absl::InlinedVector<int64_t, 5> multi_index;
for (int64_t dim : dims) {
multi_index.push_back(dim - 1);
}
auto decrement_multi_index = [&] {
for (int i = 0; i < multi_index.size(); ++i) {
if (i != feature_dim) {
int64_t& idx = multi_index[i];
--idx;
if (idx == -1) {
idx = dims[i] - 1;
} else {
return true;
}
}
}
int64_t& idx = multi_index[feature_dim];
--idx;
return idx != -1;
};
do {
if (!lit.IsZero(multi_index)) {
break;
}
} while (decrement_multi_index());
int64_t first_trailing_zero_feature = multi_index[feature_dim] + 1;
if (first_trailing_zero_feature == 0) {
VLOG(2) << "Weights constant is entirely zero.";
} else {
VLOG(2) << "First nonzero index in weights constant is "
<< absl::StrJoin(multi_index, ",");
}
int64_t ret =
std::max<int64_t>(0, weights->shape().dimensions(feature_dim) -
first_trailing_zero_feature);
VLOG(2) << "Success: weights is a constant; num zero trailing output "
"features is "
<< ret;
return ret;
}
return std::nullopt;
}
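// Attempts the rewrite
//   pad(slice(gte(conv, 0)))  ->  pad'(gte(conv, 0))
// optionally with a reshape, or a transpose plus reshape, between the conv
// and the slice for vectorized layouts. The rewrite is sound only when the
// slice trims nothing but output features that NumTrailingZeroOutputFeatures
// proved to be zero; the trimmed amount is then folded into the pad by
// shrinking its high edge padding on the output-feature dimension.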
absl::StatusOr<bool> TrySimplifyPadding(HloInstruction* instr) {
HloInstruction* conv;
HloInstruction* transpose = nullptr;
HloInstruction* reshape = nullptr;
HloInstruction* slice;
HloInstruction* pad;
auto conv_matcher = m::GetTupleElement(
m::CustomCall(&conv).WithPredicate([](const HloInstruction* instr) {
return instr->custom_call_target() == kCudnnConvForwardCallTarget ||
instr->custom_call_target() ==
kCudnnConvBiasActivationForwardCallTarget;
}),
0);
auto pad_matcher = m::Pad(m::Op(), m::ConstantEffectiveScalar(0));
if (!MatchAndLogIfFailed(instr, "conv-slice-pad",
m::Pad(&pad, m::Slice(&slice, conv_matcher),
m::ConstantEffectiveScalar(0)),
VLOG_IS_ON(3), pad_matcher) &&
!MatchAndLogIfFailed(
instr, "conv-reshape-slice-pad",
m::Pad(&pad, m::Slice(&slice, m::Reshape(&reshape, conv_matcher)),
m::ConstantEffectiveScalar(0)),
VLOG_IS_ON(3), pad_matcher) &&
!MatchAndLogIfFailed(
instr, "conv-transpose-reshape-slice-pad",
m::Pad(&pad,
m::Slice(&slice,
m::Reshape(&reshape,
m::Transpose(&transpose, conv_matcher))),
m::ConstantEffectiveScalar(0)),
VLOG_IS_ON(3), pad_matcher)) {
return false;
}
VLOG(2) << "Found pattern to attempt to simplify:\n"
<< "conv: " << conv->ToString()
<< "\ntranspose: "
<< (transpose != nullptr ? transpose->ToString() : "(null)")
<< "\nreshape: "
<< (reshape != nullptr ? reshape->ToString() : "(null)")
<< "\nslice: " << slice->ToString()
<< "\npad: " << pad->ToString();
std::optional<int64_t> num_known_zero_output_features =
NumTrailingZeroOutputFeatures(conv);
if (!num_known_zero_output_features.has_value() ||
*num_known_zero_output_features == 0) {
VLOG(2) << "fail: Didn't find any known-zero output features";
return false;
}
const auto& dnums = conv->convolution_dimension_numbers();
int64_t output_feature_dim;
if (reshape == nullptr) {
CHECK_EQ(transpose, nullptr);
output_feature_dim = dnums.output_feature_dimension();
} else {
std::optional<int64_t> vect_c_dim_before_transpose =
FindOutputVectCDim(conv);
if (!vect_c_dim_before_transpose.has_value()) {
VLOG(2) << "Couldn't find vect_c output dim in conv.";
return false;
}
int64_t feature_dim_after_transpose;
int64_t vect_c_dim_after_transpose;
if (transpose == nullptr) {
feature_dim_after_transpose = dnums.output_feature_dimension();
vect_c_dim_after_transpose = *vect_c_dim_before_transpose;
} else {
const auto& transpose_dims = transpose->dimensions();
feature_dim_after_transpose = std::distance(
transpose->dimensions().begin(),
absl::c_find(transpose_dims, dnums.output_feature_dimension()));
vect_c_dim_after_transpose = std::distance(
transpose->dimensions().begin(),
absl::c_find(transpose_dims, *vect_c_dim_before_transpose));
}
if (vect_c_dim_after_transpose != feature_dim_after_transpose + 1) {
VLOG(2) << "fail: after transpose (if present), vect_c dim must appear "
"immediately after output feature dim: Computed "
"vect_d_dim_after_transpose to be "
<< vect_c_dim_after_transpose;
return false;
}
absl::InlinedVector<int64_t, 5> expected_reshape_dim_sizes(
reshape->operand(0)->shape().dimensions().begin(),
reshape->operand(0)->shape().dimensions().end());
expected_reshape_dim_sizes[feature_dim_after_transpose] *=
expected_reshape_dim_sizes[vect_c_dim_after_transpose];
expected_reshape_dim_sizes.erase(expected_reshape_dim_sizes.begin() +
vect_c_dim_after_transpose);
if (reshape->shape().dimensions() != expected_reshape_dim_sizes) {
VLOG(2) << "fail: Reshape doesn't merge vect_c with feature dimension.";
return false;
}
output_feature_dim = feature_dim_after_transpose;
}
if (!absl::c_all_of(slice->slice_starts(), [](auto v) { return v == 0; }) ||
!absl::c_all_of(slice->slice_strides(), [](auto v) { return v == 1; })) {
VLOG(2) << "fail: Slice doesn't start at the front or has stride != 1.";
return false;
}
for (int64_t dim = 0; dim < slice->slice_limits().size(); dim++) {
if (slice->slice_starts(dim) != 0 || slice->slice_strides(dim) != 1 ||
(dim != output_feature_dim &&
slice->slice_limits(dim) !=
slice->operand(0)->shape().dimensions(dim))) {
VLOG(2) << "fail: Slice removes something other than the features dim.";
return false;
}
}
int64_t num_sliced_from_feature_dim =
slice->operand(0)->shape().dimensions(output_feature_dim) -
slice->slice_limits(output_feature_dim);
if (num_sliced_from_feature_dim > *num_known_zero_output_features) {
VLOG(2) << "fail: Slice removes " << num_sliced_from_feature_dim
<< " features from the conv, but only "
<< *num_known_zero_output_features
<< " features in the conv are known to be zero.";
return false;
}
if (pad->padding_config().dimensions(output_feature_dim).interior_padding() !=
0) {
VLOG(2)
<< "fail: Can't merge slice into pad because pad adds interior padding "
"in feature dimension.";
return false;
}
VLOG(1) << "Eliminating " << num_sliced_from_feature_dim
<< " elements of padding from conv " << conv->name();
PaddingConfig new_padding_config = pad->padding_config();
PaddingConfig::PaddingConfigDimension* new_pad_feature_dim =
new_padding_config.mutable_dimensions(output_feature_dim);
new_pad_feature_dim->set_edge_padding_high(
new_pad_feature_dim->edge_padding_high() - num_sliced_from_feature_dim);
TF_ASSIGN_OR_RETURN(HloInstruction * new_pad,
MakePadHlo(slice->mutable_operand(0),
pad->mutable_operand(1), new_padding_config));
TF_RETURN_IF_ERROR(pad->parent()->ReplaceInstruction(pad, new_pad));
return true;
}
}  // namespace
absl::StatusOr<bool> CudnnSimplifyPadding::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
bool changed = false;
for (HloComputation* comp :
module->MakeNonfusionComputations(execution_threads)) {
for (HloInstruction* instr : comp->MakeInstructionPostOrder()) {
TF_ASSIGN_OR_RETURN(bool c, TrySimplifyPadding(instr));
changed |= c;
}
}
return changed;
}
} | #include "xla/service/gpu/transforms/cudnn_simplify_padding.h"
#include <cstdint>
#include <memory>
#include <utility>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/functional/function_ref.h"
#include "absl/strings/str_cat.h"
#include "absl/types/span.h"
#include "xla/hlo/pass/hlo_pass_fix.h"
#include "xla/hlo/pass/hlo_pass_pipeline.h"
#include "xla/literal.h"
#include "xla/service/algebraic_simplifier.h"
#include "xla/service/call_inliner.h"
#include "xla/service/gpu/transforms/cudnn_pad_for_convolutions.h"
#include "xla/service/gpu/transforms/cudnn_vectorize_convolutions.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/service/reshape_mover.h"
#include "xla/service/tuple_simplifier.h"
#include "xla/stream_executor/device_description.h"
#include "xla/stream_executor/dnn.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/statusor.h"
namespace xla::gpu {
namespace {
namespace m = ::xla::match;
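// RunEndToEnd reproduces the pipeline that surrounds this pass for a given
// compute capability: pad-for-convolutions, vectorize-convolutions, call
// inlining, and tuple simplification before CudnnSimplifyPadding, then
// reshape mover plus algebraic simplifier afterwards. RunJustThisPass runs
// only the pass followed by an algebraic-simplifier fixed point.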
class CudnnSimplifyPaddingTest : public HloTestBase {
protected:
absl::StatusOr<bool> RunEndToEnd(std::pair<int, int> compute_capability,
HloModule* module) {
se::CudaComputeCapability cc{compute_capability.first,
compute_capability.second};
TF_RETURN_IF_ERROR(
RunHloPass(CudnnPadForConvolutions(cc), module).status());
TF_RETURN_IF_ERROR(
RunHloPass(CudnnVectorizeConvolutions(
cc, se::dnn::VersionInfo{8, 3, 0}),
module)
.status());
VLOG(1) << "after vectorizing convs:\n" << module->ToString();
TF_RETURN_IF_ERROR(RunHloPass(CallInliner(), module).status());
VLOG(1) << "after inliner:\n" << module->ToString();
TF_RETURN_IF_ERROR(RunHloPass(TupleSimplifier(), module).status());
VLOG(1) << "after tuple simplifier:\n" << module->ToString();
TF_ASSIGN_OR_RETURN(bool changed,
RunHloPass(CudnnSimplifyPadding(), module));
VLOG(1) << "after simplify_padding:\n" << module->ToString();
{
HloPassFix<HloPassPipeline> pipeline("reshape-mover and algsimp");
pipeline.AddPass<ReshapeMover>();
pipeline.AddPass<AlgebraicSimplifier>(AlgebraicSimplifierOptions());
TF_RETURN_IF_ERROR(RunHloPass(pipeline, module).status());
}
VLOG(1) << "after reshape mover + algsimp:\n" << module->ToString();
return changed;
}
absl::StatusOr<bool> RunJustThisPass(HloModule* module) {
TF_ASSIGN_OR_RETURN(bool changed,
RunHloPass(CudnnSimplifyPadding(), module));
VLOG(1) << "after simplify_padding:\n" << module->ToString();
TF_RETURN_IF_ERROR(RunHloPass(HloPassFix<AlgebraicSimplifier>(
AlgebraicSimplifierOptions()),
module)
.status());
return changed;
}
};
void ExpectOnlyPadsOneDim(int64_t dim, int64_t padding_high,
const PaddingConfig& p) {
SCOPED_TRACE(p.DebugString());
for (int i = 0; i < p.dimensions_size(); ++i) {
SCOPED_TRACE(absl::StrCat("dimension ", i));
EXPECT_EQ(p.dimensions(i).edge_padding_low(), 0);
if (i == dim) {
EXPECT_EQ(p.dimensions(i).edge_padding_high(), padding_high);
} else {
EXPECT_EQ(p.dimensions(i).edge_padding_high(), 0);
}
}
}
template <typename NativeT>
void SetConstantValue(
HloInstruction* instr,
absl::FunctionRef<NativeT(absl::Span<const int64_t>, NativeT)> value_fn) {
Literal new_literal = instr->literal().Clone();
new_literal.MutableEachCell<NativeT>(value_fn);
TF_EXPECT_OK(instr->parent()->ReplaceWithNewInstruction(
instr, HloInstruction::CreateConstant(std::move(new_literal))));
}
TEST_F(CudnnSimplifyPaddingTest, EndToEnd) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
conv1 = (s8[10,20,30,190], u8[0]) custom-call(
s8[10,20,30,63] parameter(0), s8[3,5,63,190] parameter(1),
f32[10] parameter(2), s8[10,20,30,190] parameter(3)),
window={size=3x5}, dim_labels=b01f_01io->b01f,
custom_call_target="__cudnn$convBiasActivationForward"
conv1_result = get-tuple-element(conv1), index=0
ROOT conv2 = (s8[10,20,30,29], u8[0]) custom-call(
conv1_result, s8[3,5,190,29] parameter(4),
f32[10] parameter(5), s8[10,20,30,29] parameter(6)),
window={size=3x5}, dim_labels=b01f_01io->b01f,
custom_call_target="__cudnn$convBiasActivationForward"
})")
.value();
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunEndToEnd({7, 5}, module.get()));
EXPECT_TRUE(changed);
SCOPED_TRACE(module->ToString());
auto* root = module->entry_computation()->root_instruction();
EXPECT_THAT(
root,
GmockMatch(m::Tuple(
m::Slice(m::Reshape(m::GetTupleElement(m::CustomCall(
{"__cudnn$convBiasActivationForward"},
m::GetTupleElement(
m::CustomCall({"__cudnn$convBiasActivationForward"}), 0),
m::Op(), m::Op(), m::Op())))),
m::Op())));
}
TEST_F(CudnnSimplifyPaddingTest, EndToEndNCHW) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
conv1 = (s8[1,64,480,400], u8[0]) custom-call(
s8[1,112,480,400] parameter(0), s8[3,3,112,64] parameter(1),
f32[64] parameter(2)),
window={size=3x3}, dim_labels=bf01_01io->bf01,
custom_call_target="__cudnn$convBiasActivationForward"
conv1_result = get-tuple-element(conv1), index=0
convert = f32[1,64,480,400] convert(conv1_result)
constant = f32[] constant(0.349002093)
broadcast = f32[1,64,480,400] broadcast(constant)
ROOT multiply = f32[1,64,480,400] multiply(convert, broadcast)
})")
.value();
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunEndToEnd({7, 5}, module.get()));
EXPECT_FALSE(changed);
SCOPED_TRACE(module->ToString());
auto* root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, GmockMatch(m::Reshape(m::Multiply())));
}
TEST_F(CudnnSimplifyPaddingTest, PaddedWeights) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
weights = pad(s8[3,3,10,10] parameter(0), s8[] constant(0)), padding=0_0x0_0x0_0x0_4
conv = (s8[10,10,10,10], u8[0]) custom-call(
s8[10,10,10,10] parameter(1),
weights
), window={size=3x3}, dim_labels=b01f_01io->b01f,
custom_call_target="__cudnn$convForward"
conv_result = get-tuple-element(conv), index=0
slice = s8[10,10,10,6] slice(conv_result), slice={[0:10], [0:10], [0:10], [0:6]}
ROOT pad = pad(slice, s8[] constant(0)), padding=0_0x0_0x0_0x0_5
}
)")
.value();
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunJustThisPass(module.get()));
EXPECT_TRUE(changed);
SCOPED_TRACE(module->ToString());
auto* root = module->entry_computation()->root_instruction();
const HloInstruction* pad = nullptr;
ASSERT_THAT(root,
GmockMatch(m::Pad(&pad, m::GetTupleElement(m::CustomCall(), 0),
m::ConstantScalar(0))));
ExpectOnlyPadsOneDim(3, 1, pad->padding_config());
}
TEST_F(CudnnSimplifyPaddingTest, PaddedWeightsNotPaddedEnough) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
weights = pad(s8[3,3,10,10] parameter(0), s8[] constant(0)), padding=0_0x0_0x0_0x0_3
conv = (s8[10,10,10,10], u8[0]) custom-call(
s8[10,10,10,10] parameter(1),
weights
), window={size=3x3}, dim_labels=b01f_01io->b01f,
custom_call_target="__cudnn$convForward"
conv_result = get-tuple-element(conv), index=0
slice = s8[10,10,10,6] slice(conv_result), slice={[0:10], [0:10], [0:10], [0:6]}
ROOT pad = pad(slice, s8[] constant(0)), padding=0_0x0_0x0_0x0_5
}
)")
.value();
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunJustThisPass(module.get()));
EXPECT_FALSE(changed);
}
TEST_F(CudnnSimplifyPaddingTest, PaddedAndReshapedWeightsNCHW) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
weights_p = pad(s8[64,60,3,3] parameter(0), s8[] constant(0)), padding=0_0x0_4x0_0x0_0
weights = s8[2,32,64,3,3] reshape(weights_p)
conv = (s8[10,2,32,10,10], u8[0]) custom-call(
s8[10,2,32,10,10] parameter(1),
weights
), window={size=3x3}, dim_labels=bf?01_i?o01->bf?01,
custom_call_target="__cudnn$convForward"
conv_result = get-tuple-element(conv), index=0
slice = s8[10,60,10,10] slice(s8[10,64,10,10] reshape(conv_result)), slice={[0:10], [0:60], [0:10], [0:10]}
ROOT pad = pad(slice, s8[] constant(0)), padding=0_0x0_5x0_0x0_0
}
)")
.value();
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunJustThisPass(module.get()));
EXPECT_TRUE(changed);
SCOPED_TRACE(module->ToString());
auto* root = module->entry_computation()->root_instruction();
const HloInstruction* pad = nullptr;
ASSERT_THAT(
root, GmockMatch(
m::Pad(&pad, m::Reshape(m::GetTupleElement(m::CustomCall(), 0)),
m::ConstantScalar(0))));
ExpectOnlyPadsOneDim(1, 1, pad->padding_config());
}
TEST_F(CudnnSimplifyPaddingTest, PaddedAndReshapedWeightsNHWC) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
weights_p = pad(s8[3,3,64,60] parameter(0), s8[] constant(0)), padding=0_0x0_0x0_0x0_4
weights = s8[3,3,2,32,64] reshape(weights_p)
conv = (s8[10,10,10,2,32], u8[0]) custom-call(
s8[10,10,10,2,32] parameter(1),
weights
), window={size=3x3}, dim_labels=b01f?_01i?o->b01f?,
custom_call_target="__cudnn$convForward"
conv_result = get-tuple-element(conv), index=0
slice = s8[10,10,10,60] slice(s8[10,10,10,64] reshape(conv_result)), slice={[0:10], [0:10], [0:10], [0:60]}
ROOT pad = pad(slice, s8[] constant(0)), padding=0_0x0_0x0_0x0_5
}
)")
.value();
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunJustThisPass(module.get()));
EXPECT_TRUE(changed);
SCOPED_TRACE(module->ToString());
auto* root = module->entry_computation()->root_instruction();
const HloInstruction* pad = nullptr;
ASSERT_THAT(
root, GmockMatch(
m::Pad(&pad, m::Reshape(m::GetTupleElement(m::CustomCall(), 0)),
m::ConstantScalar(0))));
ExpectOnlyPadsOneDim(3, 1, pad->padding_config());
}
TEST_F(CudnnSimplifyPaddingTest, PaddedTransposedAndReshapedOutput) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
weights_p = pad(s8[64,60,3,3] parameter(0), s8[] constant(0)), padding=0_0x0_4x0_0x0_0
weights = s8[2,32,64,3,3] reshape(weights_p)
conv = (s8[10,2,10,10,32], u8[0]) custom-call(
s8[10,2,10,10,32] parameter(1),
weights
), window={size=3x3}, dim_labels=bf01?_i?o01->bf01?,
custom_call_target="__cudnn$convForward"
conv_result = get-tuple-element(conv), index=0
conv_transposed = s8[10,2,32,10,10] transpose(conv_result), dimensions={0,1,4,2,3}
slice = s8[10,60,10,10] slice(s8[10,64,10,10] reshape(conv_transposed)), slice={[0:10], [0:60], [0:10], [0:10]}
ROOT pad = pad(slice, s8[] constant(0)), padding=0_0x0_6x0_0x0_0
}
)")
.value();
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunJustThisPass(module.get()));
EXPECT_TRUE(changed);
SCOPED_TRACE(module->ToString());
auto* root = module->entry_computation()->root_instruction();
const HloInstruction* pad = nullptr;
ASSERT_THAT(
root,
GmockMatch(m::Pad(
&pad,
m::Reshape(m::Transpose(m::GetTupleElement(m::CustomCall(), 0))),
m::ConstantScalar(0))));
ExpectOnlyPadsOneDim(1, 2, pad->padding_config());
}
TEST_F(CudnnSimplifyPaddingTest, PaddedConstantWeight) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
conv = (s8[10,10,10,10], u8[0]) custom-call(
s8[10,10,10,10] parameter(0),
s8[3,3,10,10] constant({...})
), window={size=3x3}, dim_labels=b01f_01io->b01f,
custom_call_target="__cudnn$convForward"
conv_result = get-tuple-element(conv), index=0
slice = s8[10,10,10,6] slice(conv_result), slice={[0:10], [0:10], [0:10], [0:6]}
ROOT pad = pad(slice, s8[] constant(0)), padding=0_0x0_0x0_0x0_5
}
)")
.value();
{
HloInstruction* weights = nullptr;
ASSERT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Pad(m::Slice(m::GetTupleElement(m::CustomCall(
m::Op(), m::Constant(&weights)))),
m::Op())));
SetConstantValue<int8_t>(
weights, [](absl::Span<const int64_t> dims, int8_t old_val) -> int8_t {
if (dims[3] < 6) return 1;
return 0;
});
}
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunJustThisPass(module.get()));
EXPECT_TRUE(changed);
SCOPED_TRACE(module->ToString());
auto* root = module->entry_computation()->root_instruction();
const HloInstruction* pad = nullptr;
ASSERT_THAT(root,
GmockMatch(m::Pad(&pad, m::GetTupleElement(m::CustomCall(), 0),
m::ConstantScalar(0))));
ExpectOnlyPadsOneDim(3, 1, pad->padding_config());
}
TEST_F(CudnnSimplifyPaddingTest, PaddedConstantWeightIsNotLargeEnough) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
conv = (s8[10,10,10,10], u8[0]) custom-call(
s8[10,10,10,10] parameter(0),
s8[3,3,10,10] constant({...})
), window={size=3x3}, dim_labels=b01f_01io->b01f,
custom_call_target="__cudnn$convForward"
conv_result = get-tuple-element(conv), index=0
slice = s8[10,10,10,6] slice(conv_result), slice={[0:10], [0:10], [0:10], [0:6]}
ROOT pad = pad(slice, s8[] constant(0)), padding=0_0x0_0x0_0x0_5
}
)")
.value();
{
HloInstruction* weights = nullptr;
ASSERT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Pad(m::Slice(m::GetTupleElement(m::CustomCall(
m::Op(), m::Constant(&weights)))),
m::Op())));
SetConstantValue<int8_t>(
weights, [](absl::Span<const int64_t> dims, int8_t old_val) -> int8_t {
          if (dims[3] < 5) return 0;
return 1;
});
}
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunJustThisPass(module.get()));
EXPECT_FALSE(changed);
}
TEST_F(CudnnSimplifyPaddingTest, ReshapeDoesntMergeVectCDim) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
weights_p = pad(s8[64,60,3,3] parameter(0), s8[] constant(0)), padding=0_0x0_4x0_0x0_0
weights = s8[2,64,3,3,32] reshape(weights_p)
conv = (s8[10,2,10,10,32], u8[0]) custom-call(
s8[10,2,10,10,32] parameter(1),
weights_p
), window={size=3x3}, dim_labels=bf01?_io01?->bf01?,
custom_call_target="__cudnn$convForward"
conv_result = get-tuple-element(conv), index=0
slice = s8[10,60,10,10] slice(s8[10,64,10,10] reshape(conv_result)), slice={[0:10], [0:60], [0:10], [0:10]}
ROOT pad = pad(slice, s8[] constant(0)), padding=0_0x0_6x0_0x0_0
}
)")
.value();
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunJustThisPass(module.get()));
EXPECT_FALSE(changed);
}
TEST_F(CudnnSimplifyPaddingTest, TwoVectCDimsInOutput) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
weights_p = pad(s8[64,60,3,3] parameter(0), s8[] constant(0)), padding=0_0x0_4x0_0x0_0
weights = s8[2,64,3,3,32] reshape(weights_p)
conv = (s8[10,2,10,10,4,8], u8[0]) custom-call(
s8[10,2,10,10,32] parameter(1),
weights
), window={size=3x3}, dim_labels=bf01?_io01?->bf01??,
custom_call_target="__cudnn$convForward"
conv_result = get-tuple-element(conv), index=0
conv_transposed = s8[10,2,4,8,10,10] transpose(conv_result), dimensions={0,1,4,5,2,3}
slice = s8[10,60,10,10] slice(s8[10,64,10,10] reshape(conv_transposed)), slice={[0:10], [0:60], [0:10], [0:10]}
ROOT pad = pad(slice, s8[] constant(0)), padding=0_0x0_6x0_0x0_0
}
)")
.value();
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunJustThisPass(module.get()));
EXPECT_FALSE(changed);
}
TEST_F(CudnnSimplifyPaddingTest, TwoVectCDimsInKernel) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
weights_p = pad(s8[64,60,3,3] parameter(0), s8[] constant(0)), padding=0_0x0_4x0_0x0_0
weights = s8[2,64,3,3,4,8] reshape(weights_p)
conv = (s8[10,2,10,10,32], u8[0]) custom-call(
s8[10,2,10,10,32] parameter(1),
weights
), window={size=3x3}, dim_labels=bf01?_io01??->bf01?,
custom_call_target="__cudnn$convForward"
conv_result = get-tuple-element(conv), index=0
conv_transposed = s8[10,2,32,10,10] transpose(conv_result), dimensions={0,1,4,2,3}
slice = s8[10,60,10,10] slice(s8[10,64,10,10] reshape(conv_transposed)), slice={[0:10], [0:60], [0:10], [0:10]}
ROOT pad = pad(slice, s8[] constant(0)), padding=0_0x0_6x0_0x0_0
}
)")
.value();
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunJustThisPass(module.get()));
EXPECT_FALSE(changed);
}
TEST_F(CudnnSimplifyPaddingTest, SliceDoesntStartAtBeginning) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
weights = pad(s8[3,3,10,10] parameter(0), s8[] constant(0)), padding=0_0x0_0x0_0x0_4
conv = (s8[10,10,10,10], u8[0]) custom-call(
s8[10,10,10,10] parameter(1),
weights
), window={size=3x3}, dim_labels=b01f_01io->b01f,
custom_call_target="__cudnn$convForward"
conv_result = get-tuple-element(conv), index=0
slice = s8[10,9,10,6] slice(conv_result), slice={[0:10], [1:10], [0:10], [0:6]}
ROOT pad = pad(slice, s8[] constant(0)), padding=0_0x0_0x0_0x0_5
}
)")
.value();
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunJustThisPass(module.get()));
EXPECT_FALSE(changed);
}
TEST_F(CudnnSimplifyPaddingTest, SliceDoesntStartAtBeginningOfFeatureDim) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
weights = pad(s8[3,3,10,10] parameter(0), s8[] constant(0)), padding=0_0x0_0x0_0x0_4
conv = (s8[10,10,10,10], u8[0]) custom-call(
s8[10,10,10,10] parameter(1),
weights
), window={size=3x3}, dim_labels=b01f_01io->b01f,
custom_call_target="__cudnn$convForward"
conv_result = get-tuple-element(conv), index=0
slice = s8[10,10,10,5] slice(conv_result), slice={[0:10], [0:10], [0:10], [1:6]}
ROOT pad = pad(slice, s8[] constant(0)), padding=0_0x0_0x0_0x0_5
}
)")
.value();
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunJustThisPass(module.get()));
EXPECT_FALSE(changed);
}
TEST_F(CudnnSimplifyPaddingTest, SliceHasStride) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
weights = pad(s8[3,3,10,10] parameter(0), s8[] constant(0)), padding=0_0x0_0x0_0x0_4
conv = (s8[10,10,10,10], u8[0]) custom-call(
s8[10,10,10,10] parameter(1),
weights
), window={size=3x3}, dim_labels=b01f_01io->b01f,
custom_call_target="__cudnn$convForward"
conv_result = get-tuple-element(conv), index=0
slice = s8[10,10,10,3] slice(conv_result), slice={[0:10], [0:10], [0:10], [0:6:2]}
ROOT pad = pad(slice, s8[] constant(0)), padding=0_0x0_0x0_0x0_5
}
)")
.value();
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunJustThisPass(module.get()));
EXPECT_FALSE(changed);
}
TEST_F(CudnnSimplifyPaddingTest, PadAddsInteriorPadding) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
weights = pad(s8[3,3,10,10] parameter(0), s8[] constant(0)), padding=0_0x0_0x0_0x0_4
conv = (s8[10,10,10,10], u8[0]) custom-call(
s8[10,10,10,10] parameter(1),
weights
), window={size=3x3}, dim_labels=b01f_01io->b01f,
custom_call_target="__cudnn$convForward"
conv_result = get-tuple-element(conv), index=0
slice = s8[10,10,10,6] slice(conv_result), slice={[0:10], [0:10], [0:10], [0:6]}
ROOT pad = pad(slice, s8[] constant(0)), padding=0_0x0_0x0_0x0_5_1
}
)")
.value();
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunJustThisPass(module.get()));
EXPECT_FALSE(changed);
}
TEST_F(CudnnSimplifyPaddingTest, SliceMoreElementsThanPad) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
weights = pad(s8[3,3,10,10] parameter(0), s8[] constant(0)), padding=0_0x0_0x0_0x0_4
conv = (s8[10,10,10,10], u8[0]) custom-call(
s8[10,10,10,10] parameter(1),
weights
), window={size=3x3}, dim_labels=b01f_01io->b01f,
custom_call_target="__cudnn$convForward"
conv_result = get-tuple-element(conv), index=0
slice = s8[10,10,10,6] slice(conv_result), slice={[0:10], [0:10], [0:10], [0:6]}
ROOT pad = pad(slice, s8[] constant(0)), padding=0_0x0_0x0_0x0_2
}
)")
.value();
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunJustThisPass(module.get()));
EXPECT_TRUE(changed);
SCOPED_TRACE(module->ToString());
auto* root = module->entry_computation()->root_instruction();
const HloInstruction* slice = nullptr;
ASSERT_THAT(root, GmockMatch(m::Slice(
&slice, m::GetTupleElement(m::CustomCall(), 0))));
for (int64_t i = 0; i < slice->shape().dimensions_size(); ++i) {
SCOPED_TRACE(i);
EXPECT_EQ(slice->slice_starts(i), 0);
EXPECT_EQ(slice->slice_strides(i), 1);
if (i != 3) {
EXPECT_EQ(slice->slice_limits(i), 10);
} else {
EXPECT_EQ(slice->slice_limits(i), 8);
}
}
}
TEST_F(CudnnSimplifyPaddingTest, NoChangeOnNonTrivialConstants) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule jit_outer
ENTRY main.26 {
reshape.2 = f32[1,3,3,12]{3,2,1,0} parameter(0)
constant.1 = f32[3,3,1,12]{3,2,1,0} constant({ {
{ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }
}, {
{ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
    { { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }
}, {
{ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } } } })
cudnn-conv = (f32[1,5,5,12]{3,2,1,0}, u8[0]{0}) custom-call(reshape.2, constant.1), window={size=3x3 pad=2_2x2_2}, dim_labels=b01f_01io->b01f, feature_group_count=12, custom_call_target="__cudnn$convForward"
get-tuple-element = f32[1,5,5,12]{3,2,1,0} get-tuple-element(cudnn-conv), index=0
slice.2 = f32[1,5,1,12]{3,2,1,0} slice(get-tuple-element), slice={[0:1], [0:5], [0:1], [0:12]}
constant.0 = f32[] constant(0)
ROOT pad.1 = f32[1,5,3,12]{3,2,1,0} pad(slice.2, constant.0), padding=0_0x0_0x2_0x0_0
}
)")
.value();
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunJustThisPass(module.get()));
EXPECT_FALSE(changed);
}
TEST_F(CudnnSimplifyPaddingTest, NoChangeOnComplexSlices) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule jit_outer
ENTRY main.26 {
reshape.2 = f32[1,3,3,12]{3,2,1,0} parameter(0)
constant.1 = f32[3,3,1,12]{3,2,1,0} constant({ {
{ { 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ { 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ { 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }
}, {
{ { 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
    { { 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ { 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }
}, {
{ { 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ { 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ { 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } } } })
cudnn-conv = (f32[1,5,5,12]{3,2,1,0}, u8[0]{0}) custom-call(reshape.2, constant.1), window={size=3x3 pad=2_2x2_2}, dim_labels=b01f_01io->b01f, feature_group_count=12, custom_call_target="__cudnn$convForward"
get-tuple-element = f32[1,5,5,12]{3,2,1,0} get-tuple-element(cudnn-conv), index=0
slice.2 = f32[1,5,5,4]{3,2,1,0} slice(get-tuple-element), slice={[0:1], [0:5], [0:5], [2:6]}
constant.0 = f32[] constant(0)
ROOT pad.1 = f32[1,5,5,12]{3,2,1,0} pad(slice.2, constant.0), padding=0_0x0_0x0_0x0_8
}
)")
.value();
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunJustThisPass(module.get()));
EXPECT_FALSE(changed);
}
TEST_F(CudnnSimplifyPaddingTest, ScanOrderFeatureDimLast) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule jit_outer
ENTRY main.26 {
reshape.2 = f32[1,3,3,12]{3,2,1,0} parameter(0)
constant.1 = f32[3,3,1,12]{3,2,1,0} constant({ {
{ { 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ { 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ { 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }
}, {
{ { 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0 } },
    { { 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ { 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }
}, {
{ { 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ { 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ { 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } } } })
cudnn-conv = (f32[1,5,5,12]{3,2,1,0}, u8[0]{0}) custom-call(reshape.2, constant.1), window={size=3x3 pad=2_2x2_2}, dim_labels=b01f_01io->b01f, feature_group_count=12, custom_call_target="__cudnn$convForward"
get-tuple-element = f32[1,5,5,12]{3,2,1,0} get-tuple-element(cudnn-conv), index=0
slice.2 = f32[1,5,5,6]{3,2,1,0} slice(get-tuple-element), slice={[0:1], [0:5], [0:5], [0:6]}
constant.0 = f32[] constant(0)
ROOT pad.1 = f32[1,5,5,12]{3,2,1,0} pad(slice.2, constant.0), padding=0_0x0_0x0_0x0_6
}
)")
.value();
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunJustThisPass(module.get()));
EXPECT_FALSE(changed);
}
TEST_F(CudnnSimplifyPaddingTest, Int8FilterReorderedOutputFirst) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
conv.1 = (s8[1,63,80,80], u8[0]) custom-call(
s8[1,112,80,80] parameter(0), s8[63,112,3,3] parameter(1)),
window={size=3x3}, dim_labels=bf01_oi01->bf01,
custom_call_target="__cudnn$convForward"
gte.1 = s8[1,63,80,80] get-tuple-element(conv.1), index=0
const.0 = s8[] constant(0)
ROOT pad.1 = s8[1,64,80,80] pad(gte.1, const.0), padding=0_0x0_1x0_0x0_0
})")
.value();
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunEndToEnd({7, 5}, module.get()));
EXPECT_TRUE(changed);
}
TEST_F(CudnnSimplifyPaddingTest, Int8FilterReorderedOutputLast) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
conv.1 = (s8[1,63,80,80], u8[0]) custom-call(
s8[1,112,80,80] parameter(0), s8[3,3,112,63] parameter(1)),
window={size=3x3}, dim_labels=bf01_01io->bf01,
custom_call_target="__cudnn$convForward"
gte.1 = s8[1,63,80,80] get-tuple-element(conv.1), index=0
const.0 = s8[] constant(0)
ROOT pad.1 = s8[1,64,80,80] pad(gte.1, const.0), padding=0_0x0_1x0_0x0_0
})")
.value();
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunEndToEnd({7, 5}, module.get()));
EXPECT_TRUE(changed);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/cudnn_simplify_padding.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/cudnn_simplify_padding_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
37e3ea92-5e0b-42f0-a7ac-7eaa650395c4 | cpp | tensorflow/tensorflow | scoped_module_handle | third_party/xla/xla/stream_executor/scoped_module_handle.h | third_party/xla/xla/stream_executor/scoped_module_handle_test.cc | #ifndef XLA_STREAM_EXECUTOR_SCOPED_MODULE_HANDLE_H_
#define XLA_STREAM_EXECUTOR_SCOPED_MODULE_HANDLE_H_
#include "absl/log/check.h"
#include "xla/stream_executor/module_spec.h"
#include "xla/stream_executor/stream_executor.h"
namespace stream_executor {
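// RAII wrapper that owns a loaded module handle and unloads it from the
// associated StreamExecutor on destruction. Move-only.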
class ScopedModuleHandle {
public:
ScopedModuleHandle(StreamExecutor* executor, ModuleHandle module_handle)
: executor_(executor), module_handle_(module_handle) {}
ScopedModuleHandle(ScopedModuleHandle&& other) {
executor_ = other.executor_;
module_handle_ = other.module_handle_;
other.executor_ = nullptr;
other.module_handle_ = ModuleHandle();
}
ScopedModuleHandle& operator=(ScopedModuleHandle&& other) {
executor_ = other.executor_;
module_handle_ = other.module_handle_;
other.executor_ = nullptr;
other.module_handle_ = ModuleHandle();
return *this;
}
~ScopedModuleHandle() {
if (static_cast<bool>(module_handle_)) {
CHECK(executor_->UnloadModule(module_handle_));
}
}
private:
StreamExecutor* executor_;
ModuleHandle module_handle_;
ScopedModuleHandle(const ScopedModuleHandle&) = delete;
void operator=(const ScopedModuleHandle&) = delete;
};
}
#endif | #include "xla/stream_executor/scoped_module_handle.h"
#include <utility>
#include "xla/stream_executor/mock_stream_executor.h"
#include "xla/stream_executor/module_spec.h"
#include "tsl/platform/test.h"
using testing::Return;
namespace stream_executor {
namespace {
TEST(ScopedModuleHandleTest, NoUnloadForNullHandle) {
ModuleHandle foo;
MockStreamExecutor executor;
EXPECT_CALL(executor, UnloadModule).Times(0);
{
ScopedModuleHandle first(&executor, foo);
ScopedModuleHandle second = std::move(first);
ScopedModuleHandle third(&executor, foo);
third = std::move(second);
}
}
TEST(ScopedModuleHandleTest, NonNullHandleUnloadsOnceAfterMoves) {
ModuleHandle foo(reinterpret_cast<void*>(1));
MockStreamExecutor executor;
EXPECT_CALL(executor, UnloadModule).WillOnce(Return(true));
{
ScopedModuleHandle first(&executor, foo);
ScopedModuleHandle second = std::move(first);
ScopedModuleHandle third(&executor, ModuleHandle{});
third = std::move(second);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/stream_executor/scoped_module_handle.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/stream_executor/scoped_module_handle_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
773e8676-b15b-43ea-bce2-eabce4530c4d | cpp | abseil/abseil-cpp | scoped_mock_log | absl/log/scoped_mock_log.cc | absl/log/scoped_mock_log_test.cc | #include "absl/log/scoped_mock_log.h"
#include <atomic>
#include <string>
#include "gmock/gmock.h"
#include "absl/base/config.h"
#include "absl/base/internal/raw_logging.h"
#include "absl/log/log_entry.h"
#include "absl/log/log_sink.h"
#include "absl/log/log_sink_registry.h"
#include "absl/strings/string_view.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
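// Installs default expectations: depending on `default_exp`, unexpected Log
// calls are either ignored or fail the test, and every Send is forwarded to
// the Log mock with the entry's severity, filename, and text message.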
ScopedMockLog::ScopedMockLog(MockLogDefault default_exp)
: sink_(this), is_capturing_logs_(false), is_triggered_(false) {
if (default_exp == MockLogDefault::kIgnoreUnexpected) {
EXPECT_CALL(*this, Log).Times(::testing::AnyNumber());
} else {
EXPECT_CALL(*this, Log).Times(0);
}
EXPECT_CALL(*this, Send)
.Times(::testing::AnyNumber())
.WillRepeatedly([this](const absl::LogEntry& entry) {
is_triggered_.store(true, std::memory_order_relaxed);
Log(entry.log_severity(), std::string(entry.source_filename()),
std::string(entry.text_message()));
});
EXPECT_CALL(*this, Flush).Times(::testing::AnyNumber());
}
ScopedMockLog::~ScopedMockLog() {
ABSL_RAW_CHECK(is_triggered_.load(std::memory_order_relaxed),
"Did you forget to call StartCapturingLogs()?");
if (is_capturing_logs_) StopCapturingLogs();
}
void ScopedMockLog::StartCapturingLogs() {
ABSL_RAW_CHECK(!is_capturing_logs_,
"StartCapturingLogs() can be called only when the "
"absl::ScopedMockLog object is not capturing logs.");
is_capturing_logs_ = true;
is_triggered_.store(true, std::memory_order_relaxed);
absl::AddLogSink(&sink_);
}
void ScopedMockLog::StopCapturingLogs() {
ABSL_RAW_CHECK(is_capturing_logs_,
"StopCapturingLogs() can be called only when the "
"absl::ScopedMockLog object is capturing logs.");
is_capturing_logs_ = false;
absl::RemoveLogSink(&sink_);
}
absl::LogSink& ScopedMockLog::UseAsLocalSink() {
is_triggered_.store(true, std::memory_order_relaxed);
return sink_;
}
ABSL_NAMESPACE_END
} | #include "absl/log/scoped_mock_log.h"
#include <memory>
#include <thread>
#include "gmock/gmock.h"
#include "gtest/gtest-spi.h"
#include "gtest/gtest.h"
#include "absl/base/attributes.h"
#include "absl/base/log_severity.h"
#include "absl/log/globals.h"
#include "absl/log/internal/test_helpers.h"
#include "absl/log/internal/test_matchers.h"
#include "absl/log/log.h"
#include "absl/memory/memory.h"
#include "absl/strings/match.h"
#include "absl/strings/string_view.h"
#include "absl/synchronization/barrier.h"
#include "absl/synchronization/notification.h"
namespace {
using ::testing::_;
using ::testing::AnyNumber;
using ::testing::Eq;
using ::testing::HasSubstr;
using ::testing::InSequence;
using ::testing::Lt;
using ::testing::Truly;
using absl::log_internal::SourceBasename;
using absl::log_internal::SourceFilename;
using absl::log_internal::SourceLine;
using absl::log_internal::TextMessageWithPrefix;
using absl::log_internal::ThreadID;
auto* test_env ABSL_ATTRIBUTE_UNUSED = ::testing::AddGlobalTestEnvironment(
new absl::log_internal::LogTestEnvironment);
#if GTEST_HAS_DEATH_TEST
TEST(ScopedMockLogDeathTest,
StartCapturingLogsCannotBeCalledWhenAlreadyCapturing) {
EXPECT_DEATH(
{
absl::ScopedMockLog log;
log.StartCapturingLogs();
log.StartCapturingLogs();
},
"StartCapturingLogs");
}
TEST(ScopedMockLogDeathTest, StopCapturingLogsCannotBeCalledWhenNotCapturing) {
EXPECT_DEATH(
{
absl::ScopedMockLog log;
log.StopCapturingLogs();
},
"StopCapturingLogs");
}
TEST(ScopedMockLogDeathTest, FailsCheckIfStartCapturingLogsIsNeverCalled) {
EXPECT_DEATH({ absl::ScopedMockLog log; },
"Did you forget to call StartCapturingLogs");
}
#endif
TEST(ScopedMockLogTest, LogMockCatchAndMatchStrictExpectations) {
absl::ScopedMockLog log;
InSequence s;
EXPECT_CALL(log,
Log(absl::LogSeverity::kWarning, HasSubstr(__FILE__), "Danger."));
EXPECT_CALL(log, Log(absl::LogSeverity::kInfo, _, "Working...")).Times(2);
EXPECT_CALL(log, Log(absl::LogSeverity::kError, _, "Bad!!"));
log.StartCapturingLogs();
LOG(WARNING) << "Danger.";
LOG(INFO) << "Working...";
LOG(INFO) << "Working...";
LOG(ERROR) << "Bad!!";
}
TEST(ScopedMockLogTest, LogMockCatchAndMatchSendExpectations) {
absl::ScopedMockLog log;
EXPECT_CALL(
log,
Send(AllOf(SourceFilename(Eq("/my/very/very/very_long_source_file.cc")),
SourceBasename(Eq("very_long_source_file.cc")),
SourceLine(Eq(777)), ThreadID(Eq(absl::LogEntry::tid_t{1234})),
TextMessageWithPrefix(Truly([](absl::string_view msg) {
return absl::EndsWith(
msg, " very_long_source_file.cc:777] Info message");
})))));
log.StartCapturingLogs();
LOG(INFO)
.AtLocation("/my/very/very/very_long_source_file.cc", 777)
.WithThreadID(1234)
<< "Info message";
}
TEST(ScopedMockLogTest, ScopedMockLogCanBeNice) {
absl::ScopedMockLog log;
InSequence s;
EXPECT_CALL(log,
Log(absl::LogSeverity::kWarning, HasSubstr(__FILE__), "Danger."));
EXPECT_CALL(log, Log(absl::LogSeverity::kInfo, _, "Working...")).Times(2);
EXPECT_CALL(log, Log(absl::LogSeverity::kError, _, "Bad!!"));
log.StartCapturingLogs();
LOG(INFO) << "Info message.";
LOG(WARNING).AtLocation("SomeOtherFile.cc", 100) << "Danger ";
LOG(WARNING) << "Danger.";
LOG(INFO) << "Info message.";
LOG(WARNING).AtLocation("SomeOtherFile.cc", 100) << "Danger ";
LOG(INFO) << "Working...";
LOG(INFO) << "Info message.";
LOG(WARNING).AtLocation("SomeOtherFile.cc", 100) << "Danger ";
LOG(INFO) << "Working...";
LOG(INFO) << "Info message.";
LOG(WARNING).AtLocation("SomeOtherFile.cc", 100) << "Danger ";
LOG(ERROR) << "Bad!!";
LOG(INFO) << "Info message.";
LOG(WARNING).AtLocation("SomeOtherFile.cc", 100) << "Danger ";
}
TEST(ScopedMockLogTest, RejectsUnexpectedLogs) {
EXPECT_NONFATAL_FAILURE(
{
absl::ScopedMockLog log(absl::MockLogDefault::kDisallowUnexpected);
EXPECT_CALL(log, Log(Lt(absl::LogSeverity::kError), _, _))
.Times(AnyNumber());
log.StartCapturingLogs();
LOG(INFO) << "Ignored";
LOG(WARNING) << "Ignored";
LOG(ERROR) << "Should not be ignored";
},
"Should not be ignored");
}
TEST(ScopedMockLogTest, CapturesLogsAfterStartCapturingLogs) {
absl::SetStderrThreshold(absl::LogSeverityAtLeast::kInfinity);
absl::ScopedMockLog log;
LOG(INFO) << "Ignored info";
LOG(WARNING) << "Ignored warning";
LOG(ERROR) << "Ignored error";
EXPECT_CALL(log, Log(absl::LogSeverity::kInfo, _, "Expected info"));
log.StartCapturingLogs();
LOG(INFO) << "Expected info";
}
TEST(ScopedMockLogTest, DoesNotCaptureLogsAfterStopCapturingLogs) {
absl::ScopedMockLog log;
EXPECT_CALL(log, Log(absl::LogSeverity::kInfo, _, "Expected info"));
log.StartCapturingLogs();
LOG(INFO) << "Expected info";
log.StopCapturingLogs();
LOG(INFO) << "Ignored info";
LOG(WARNING) << "Ignored warning";
LOG(ERROR) << "Ignored error";
}
TEST(ScopedMockLogTest, LogFromMultipleThreads) {
absl::ScopedMockLog log;
EXPECT_CALL(log, Log(absl::LogSeverity::kInfo, __FILE__, "Thread 1"));
EXPECT_CALL(log, Log(absl::LogSeverity::kInfo, __FILE__, "Thread 2"));
log.StartCapturingLogs();
absl::Barrier barrier(2);
std::thread thread1([&barrier]() {
barrier.Block();
LOG(INFO) << "Thread 1";
});
std::thread thread2([&barrier]() {
barrier.Block();
LOG(INFO) << "Thread 2";
});
thread1.join();
thread2.join();
}
TEST(ScopedMockLogTest, NoSequenceWithMultipleThreads) {
absl::ScopedMockLog log;
absl::Barrier barrier(2);
EXPECT_CALL(log, Log(absl::LogSeverity::kInfo, _, _))
.Times(2)
.WillRepeatedly([&barrier]() { barrier.Block(); });
log.StartCapturingLogs();
std::thread thread1([]() { LOG(INFO) << "Thread 1"; });
std::thread thread2([]() { LOG(INFO) << "Thread 2"; });
thread1.join();
thread2.join();
}
TEST(ScopedMockLogTsanTest,
ScopedMockLogCanBeDeletedWhenAnotherThreadIsLogging) {
auto log = absl::make_unique<absl::ScopedMockLog>();
EXPECT_CALL(*log, Log(absl::LogSeverity::kInfo, __FILE__, "Thread log"))
.Times(AnyNumber());
log->StartCapturingLogs();
absl::Notification logging_started;
std::thread thread([&logging_started]() {
for (int i = 0; i < 100; ++i) {
if (i == 50) logging_started.Notify();
LOG(INFO) << "Thread log";
}
});
logging_started.WaitForNotification();
log.reset();
thread.join();
}
TEST(ScopedMockLogTest, AsLocalSink) {
absl::ScopedMockLog log(absl::MockLogDefault::kDisallowUnexpected);
EXPECT_CALL(log, Log(_, _, "two"));
EXPECT_CALL(log, Log(_, _, "three"));
LOG(INFO) << "one";
LOG(INFO).ToSinkOnly(&log.UseAsLocalSink()) << "two";
LOG(INFO).ToSinkAlso(&log.UseAsLocalSink()) << "three";
}
} | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/log/scoped_mock_log.cc | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/log/scoped_mock_log_test.cc | 03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4 |
c08d64b7-ea07-462b-8780-bba018bd7092 | cpp | google/tensorstore | extents | tensorstore/util/extents.h | tensorstore/util/extents_test.cc | #ifndef TENSORSTORE_UTIL_EXTENTS_H_
#define TENSORSTORE_UTIL_EXTENTS_H_
#include <cassert>
#include <cstddef>
#include <limits>
#include <type_traits>
#include "absl/base/optimization.h"
#include "tensorstore/index.h"
#include "tensorstore/internal/integer_overflow.h"
#include "tensorstore/internal/type_traits.h"
#include "tensorstore/rank.h"
#include "tensorstore/util/span.h"
namespace tensorstore {
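// Returns the product of the extents in `s`, saturating at the maximum value
// of the element type if an intermediate multiplication overflows.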
template <typename T, ptrdiff_t Extent>
T ProductOfExtents(tensorstore::span<T, Extent> s) {
using value_type = std::remove_const_t<T>;
value_type result = 1;
for (const auto& x : s) {
assert(x >= 0);
if (ABSL_PREDICT_FALSE(internal::MulOverflow(result, x, &result))) {
result = std::numeric_limits<value_type>::max();
}
}
return result;
}
template <DimensionIndex Rank, typename Indices, typename = void>
constexpr inline bool IsCompatibleFullIndexVector = false;
template <DimensionIndex Rank, typename Indices>
constexpr inline bool IsCompatibleFullIndexVector<
Rank, Indices, std::void_t<internal::ConstSpanType<Indices>>> =
RankConstraint::EqualOrUnspecified(
Rank, internal::ConstSpanType<Indices>::extent) &&
internal::IsIndexPack<
typename internal::ConstSpanType<Indices>::value_type>;
template <DimensionIndex Rank, typename Indices, typename = void>
constexpr inline bool IsImplicitlyCompatibleFullIndexVector = false;
template <DimensionIndex Rank, typename Indices>
constexpr inline bool IsImplicitlyCompatibleFullIndexVector<
Rank, Indices, std::void_t<internal::ConstSpanType<Indices>>> =
RankConstraint::Implies(internal::ConstSpanType<Indices>::extent, Rank) &&
internal::IsIndexPack<
typename internal::ConstSpanType<Indices>::value_type>;
template <DimensionIndex Rank, typename Indices, typename = void>
constexpr inline bool IsCompatiblePartialIndexVector = false;
template <DimensionIndex Rank, typename Indices>
constexpr inline bool IsCompatiblePartialIndexVector<
Rank, Indices, std::void_t<internal::ConstSpanType<Indices>>> =
RankConstraint::GreaterEqualOrUnspecified(
Rank, internal::ConstSpanType<Indices>::extent) &&
internal::IsIndexPack<
typename internal::ConstSpanType<Indices>::value_type>;
template <DimensionIndex Rank, typename... IndexType>
constexpr inline bool IsCompatibleFullIndexPack =
RankConstraint::EqualOrUnspecified(Rank, sizeof...(IndexType)) &&
internal::IsIndexPack<IndexType...>;
template <typename Indices, typename = void>
constexpr inline bool IsIndexConvertibleVector = false;
template <typename Indices>
constexpr inline bool IsIndexConvertibleVector<
Indices, std::void_t<internal::ConstSpanType<Indices>>> =
internal::IsIndexPack<
typename internal::ConstSpanType<Indices>::value_type>;
template <typename Indices, typename = Index>
constexpr inline bool IsIndexVector = false;
template <typename Indices>
constexpr inline bool IsIndexVector<
Indices, typename internal::ConstSpanType<Indices>::value_type> = true;
template <typename Indices, typename = Index>
constexpr inline bool IsMutableIndexVector = false;
template <typename Indices>
constexpr inline bool IsMutableIndexVector<
Indices, typename internal::SpanType<Indices>::element_type> = true;
namespace internal_extents {
template <typename... Xs>
struct SpanStaticExtentHelper {};
template <typename... Ts, ptrdiff_t Extent>
struct SpanStaticExtentHelper<tensorstore::span<Ts, Extent>...>
: public std::integral_constant<ptrdiff_t, Extent> {};
}
template <typename X0, typename... Xs>
using SpanStaticExtent =
internal_extents::SpanStaticExtentHelper<internal::ConstSpanType<X0>,
internal::ConstSpanType<Xs>...>;
}
#endif | #include "tensorstore/util/extents.h"
#include <cstdint>
#include <limits>
#include <type_traits>
#include <vector>
#include <gtest/gtest.h>
#include "tensorstore/index.h"
#include "tensorstore/rank.h"
#include "tensorstore/util/span.h"
namespace {
using ::tensorstore::dynamic_extent;
using ::tensorstore::Index;
using ::tensorstore::IsCompatibleFullIndexVector;
using ::tensorstore::IsCompatiblePartialIndexVector;
using ::tensorstore::IsImplicitlyCompatibleFullIndexVector;
using ::tensorstore::IsIndexConvertibleVector;
using ::tensorstore::IsIndexVector;
using ::tensorstore::IsMutableIndexVector;
using ::tensorstore::ProductOfExtents;
using ::tensorstore::span;
using ::tensorstore::SpanStaticExtent;
static_assert(IsCompatibleFullIndexVector<3, int (&)[3]>);
static_assert(IsCompatibleFullIndexVector<dynamic_extent, int (&)[3]>);
static_assert(IsCompatibleFullIndexVector<3, span<int, 3>>);
static_assert(IsCompatibleFullIndexVector<3, span<int>>);
static_assert(IsCompatibleFullIndexVector<dynamic_extent, span<int>>);
static_assert(IsCompatibleFullIndexVector<dynamic_extent, span<int, 3>>);
static_assert(!IsCompatibleFullIndexVector<3, span<int, 2>>);
static_assert(!IsCompatibleFullIndexVector<3, span<float, 3>>);
static_assert(!IsCompatibleFullIndexVector<3, span<float, 2>>);
static_assert(IsCompatiblePartialIndexVector<3, int (&)[3]>);
static_assert(IsCompatiblePartialIndexVector<4, int (&)[3]>);
static_assert(IsCompatiblePartialIndexVector<dynamic_extent, int (&)[3]>);
static_assert(IsCompatiblePartialIndexVector<3, span<int, 3>>);
static_assert(IsCompatiblePartialIndexVector<4, span<int, 3>>);
static_assert(IsCompatiblePartialIndexVector<3, span<int>>);
static_assert(IsCompatiblePartialIndexVector<dynamic_extent, span<int>>);
static_assert(IsCompatiblePartialIndexVector<dynamic_extent, span<int, 3>>);
static_assert(!IsCompatiblePartialIndexVector<3, span<int, 4>>);
static_assert(!IsCompatiblePartialIndexVector<3, span<float, 3>>);
static_assert(!IsCompatiblePartialIndexVector<3, span<float, 2>>);
static_assert(IsImplicitlyCompatibleFullIndexVector<3, int (&)[3]>);
static_assert(
IsImplicitlyCompatibleFullIndexVector<dynamic_extent, int (&)[3]>);
static_assert(IsImplicitlyCompatibleFullIndexVector<3, span<int, 3>>);
static_assert(IsImplicitlyCompatibleFullIndexVector<dynamic_extent, span<int>>);
static_assert(!IsImplicitlyCompatibleFullIndexVector<3, span<int>>);
static_assert(!IsImplicitlyCompatibleFullIndexVector<3, span<float, 3>>);
static_assert(!IsImplicitlyCompatibleFullIndexVector<3, span<float, 2>>);
static_assert(IsIndexConvertibleVector<span<int>>);
static_assert(IsIndexConvertibleVector<span<int, 3>>);
static_assert(IsIndexConvertibleVector<std::vector<int>>);
static_assert(!IsIndexConvertibleVector<span<float, 3>>);
static_assert(IsIndexVector<span<Index>>);
static_assert(IsIndexVector<span<Index, 3>>);
static_assert(IsIndexVector<span<const Index>>);
static_assert(IsIndexVector<span<const Index, 3>>);
static_assert(IsIndexVector<span<const Index>>);
static_assert(IsIndexVector<std::vector<Index>>);
static_assert(IsIndexVector<const std::vector<Index>>);
static_assert(!IsIndexVector<span<int, 3>>);
static_assert(!IsIndexVector<span<float>>);
static_assert(IsMutableIndexVector<span<Index>>);
static_assert(IsMutableIndexVector<span<Index, 3>>);
static_assert(!IsMutableIndexVector<span<const Index>>);
static_assert(!IsMutableIndexVector<span<const Index, 3>>);
static_assert(IsMutableIndexVector<std::vector<Index>&>);
static_assert(!IsMutableIndexVector<const std::vector<Index>>);
static_assert(!IsMutableIndexVector<span<int, 3>>);
static_assert(!IsMutableIndexVector<span<float>>);
static_assert(SpanStaticExtent<std::vector<int>>() == dynamic_extent);
static_assert(SpanStaticExtent<span<int, 3>>() == 3);
static_assert(SpanStaticExtent<span<int>>() == dynamic_extent);
static_assert(SpanStaticExtent<std::vector<int>, span<int>>() ==
              dynamic_extent);
static_assert(SpanStaticExtent<span<int, 3>, span<float, 3>>() == 3);
TEST(ProductOfExtentsTest, Basic) {
EXPECT_EQ(1, ProductOfExtents(span<int, 0>()));
EXPECT_EQ(20, ProductOfExtents(span({4, 5})));
}
TEST(ProductOfExtentsTest, Overflow) {
EXPECT_EQ(0, ProductOfExtents(span<const int>(
{5, std::numeric_limits<int>::max() - 1, 0})));
EXPECT_EQ(std::numeric_limits<int>::max(),
ProductOfExtents(
span<const int>({5, std::numeric_limits<int>::max() - 1})));
EXPECT_EQ(std::numeric_limits<std::int64_t>::max(),
ProductOfExtents(
span<const std::int64_t>({32768, 32768, 32768, 32768, 32768})));
EXPECT_EQ(0, ProductOfExtents(span<const int>(
{5, std::numeric_limits<int>::max() - 1, 0})));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/util/extents.h | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/util/extents_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
6f511ca0-49b7-4aac-b89e-b29692c7d1e9 | cpp | tensorflow/tensorflow | xplane_to_tf_functions | tensorflow/core/profiler/convert/xplane_to_tf_functions.cc | tensorflow/core/profiler/convert/xplane_to_tf_functions_test.cc | #include "tensorflow/core/profiler/convert/xplane_to_tf_functions.h"
#include <algorithm>
#include <ostream>
#include <stack>
#include <string>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/types/optional.h"
#include "xla/tsl/profiler/utils/tf_xplane_visitor.h"
#include "xla/tsl/profiler/utils/timespan.h"
#include "tensorflow/core/lib/gtl/map_util.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/profiler/protobuf/xplane.pb.h"
#include "tensorflow/core/profiler/utils/math_utils.h"
#include "tensorflow/core/profiler/utils/xplane_schema.h"
#include "tensorflow/core/profiler/utils/xplane_visitor.h"
namespace tensorflow {
namespace profiler {
namespace {
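// Decodes the execution mode string recorded for a tf-function call into an
// (execution mode, compiler) pair, logging an error for unknown modes.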
std::pair<TfFunctionExecutionMode, TfFunctionCompiler> Decode(
absl::string_view function_name, absl::string_view mode) {
if (mode == "eager") return {EAGER_MODE, INVALID_COMPILER};
if (mode == "concrete") return {CONCRETE_MODE, INVALID_COMPILER};
if (mode == "traced-xla") return {TRACED_MODE, XLA_COMPILER};
if (mode == "traced-nonXla") return {TRACED_MODE, OTHER_COMPILER};
if (mode == "notTraced-xla") return {NOT_TRACED_MODE, XLA_COMPILER};
if (mode == "notTraced-nonXla") return {NOT_TRACED_MODE, OTHER_COMPILER};
LOG(ERROR) << absl::StrCat("tf-function '", function_name,
"' has an unexpected execution mode '", mode, "'")
<< std::endl;
  DCHECK(false);
  return {INVALID_MODE, INVALID_COMPILER};
}
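// Returns the percentage of a tf-function's total self time that was spent in
// expensive calls, i.e. in TRACED_MODE or EAGER_MODE.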
double ComputeExpensiveCallPercent(const TfFunction& tf_function) {
uint64 total_call_time_ps = 0;
uint64 expensive_call_time_ps = 0;
for (const auto& mode_metrics : tf_function.metrics()) {
const auto mode = mode_metrics.first;
const auto& metrics = mode_metrics.second;
total_call_time_ps += metrics.self_time_ps();
if (mode == TRACED_MODE || mode == EAGER_MODE) {
expensive_call_time_ps += metrics.self_time_ps();
}
}
return tsl::profiler::SafeDivide(100.0 * expensive_call_time_ps,
total_call_time_ps);
}
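// Records a single invocation of a tf-function: its name, timespan, execution
// mode, compiler, tracing count, and the combined duration of its children.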
struct ActivationRecord {
std::string function_name;
tsl::profiler::Timespan timespan;
TfFunctionExecutionMode execution_mode;
TfFunctionCompiler compiler;
int64_t tracing_count;
uint64 children_duration_ps;
ActivationRecord()
: function_name(""),
execution_mode(INVALID_MODE),
compiler(INVALID_COMPILER),
tracing_count(0),
children_duration_ps(0) {}
ActivationRecord(absl::string_view name,
const tsl::profiler::Timespan& timespan,
TfFunctionExecutionMode exe_mode,
TfFunctionCompiler compiler, int64_t tracing_cnt)
: function_name(std::string(name)),
timespan(timespan),
execution_mode(exe_mode),
compiler(compiler),
tracing_count(tracing_cnt),
children_duration_ps(0) {}
std::string DebugString() const {
return absl::StrCat("{", function_name, ", ",
TfFunctionExecutionMode_Name(execution_mode), ", ",
TfFunctionCompiler_Name(compiler),
", tracing_count:", tracing_count,
", children_duration:", children_duration_ps,
" ps, timespan:", timespan.DebugString(), "}");
}
};
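// Marks the entry or exit of the activation at `index`; sorting these points
// by timestamp reconstructs the tf-function call stack.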
struct EntryOrExit {
bool is_entry;
int64_t index;
uint64 timestamp_ps;
EntryOrExit() : is_entry(false), index(-1), timestamp_ps(0) {}
EntryOrExit(bool is_entry, int64_t index, uint64 timestamp_ps)
: is_entry(is_entry), index(index), timestamp_ps(timestamp_ps) {}
std::string DebugString() const {
std::string entry_or_exit = is_entry ? "entry, " : "exit, ";
return absl::StrCat("{", entry_or_exit, "idx:", index,
", timestamp:", timestamp_ps, "}");
}
};
TfFunctionCompiler CombineCompilers(TfFunctionCompiler a,
TfFunctionCompiler b) {
if (a == INVALID_COMPILER) return b;
if (b == INVALID_COMPILER) return a;
if (a == b) return a;
return MIXED_COMPILER;
}
void CombineTfFunctionMetrics(const TfFunctionMetrics& src,
TfFunctionMetrics* dst) {
dst->set_count(src.count() + dst->count());
dst->set_self_time_ps(src.self_time_ps() + dst->self_time_ps());
}
void CombineTfFunction(const TfFunction& src, TfFunction* dst) {
dst->set_total_tracing_count(
std::max(src.total_tracing_count(), dst->total_tracing_count()));
dst->set_compiler(CombineCompilers(src.compiler(), dst->compiler()));
for (const auto& mode_metrics : src.metrics()) {
int32_t execution_mode = mode_metrics.first;
const TfFunctionMetrics& src_metrics = mode_metrics.second;
TfFunctionMetrics* dst_metrics =
gtl::FindOrNull(*dst->mutable_metrics(), execution_mode);
if (dst_metrics == nullptr) {
(*dst->mutable_metrics())[execution_mode] = src_metrics;
} else {
CombineTfFunctionMetrics(src_metrics, dst_metrics);
}
}
dst->set_expensive_call_percent(ComputeExpensiveCallPercent(*dst));
}
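// Collects all tf-function activations on an XLine and converts them into a
// TfFunctionDb.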
class TfFunctionExecutions {
public:
explicit TfFunctionExecutions(const XLineVisitor& line) {
line.ForEachEvent([&](const XEventVisitor& event) {
absl::string_view mode;
int64_t tracing_count = 0;
event.ForEachStat([&mode, &tracing_count](const XStatVisitor& stat) {
if (!stat.Type().has_value()) return;
switch (stat.Type().value()) {
case StatType::kTfFunctionCall:
mode = stat.StrOrRefValue();
break;
case StatType::kTfFunctionTracingCount:
tracing_count = stat.IntValue();
break;
}
});
if (mode.empty()) return;
int64_t index = activations_.size();
auto timespan = event.GetTimespan();
auto mode_compiler = Decode(event.Name(), mode);
ActivationRecord activation_record =
ActivationRecord(event.Name(), timespan, mode_compiler.first,
mode_compiler.second, tracing_count);
activations_.push_back(activation_record);
EntryOrExit entry_point =
EntryOrExit(true, index, timespan.begin_ps());
EntryOrExit exit_point =
EntryOrExit(false, index, timespan.end_ps());
points_.push_back(entry_point);
points_.push_back(exit_point);
});
auto ascending_in_timestamp = [](const EntryOrExit& a,
const EntryOrExit& b) {
return a.timestamp_ps < b.timestamp_ps;
};
absl::c_sort(points_, ascending_in_timestamp);
CalculateChildrenDurations();
}
std::string DebugString() const {
std::string result = "\nActivations:\n";
for (int i = 0, end = activations_.size(); i < end; i++) {
absl::StrAppend(&result, "[", i, "] ", activations_[i].DebugString(),
"\n");
}
absl::StrAppend(&result, "tf-function Entry/Exit Points:\n");
for (const auto& pt : points_) {
absl::StrAppend(&result, pt.DebugString(), "\n");
}
return result;
}
TfFunctionDb ConvertToTfFunctionDb() {
TfFunctionDb result;
for (const auto& record : activations_) {
TfFunction* fun = &(*result.mutable_tf_functions())[record.function_name];
fun->set_total_tracing_count(
std::max(static_cast<int64_t>(fun->total_tracing_count()),
record.tracing_count));
fun->set_compiler(CombineCompilers(fun->compiler(), record.compiler));
uint64 self_time_ps =
record.timespan.duration_ps() - record.children_duration_ps;
TfFunctionMetrics* metrics =
&(*fun->mutable_metrics())[record.execution_mode];
metrics->set_count(metrics->count() + 1);
metrics->set_self_time_ps(metrics->self_time_ps() + self_time_ps);
}
for (auto& name_fun : *result.mutable_tf_functions()) {
TfFunction& fun = name_fun.second;
fun.set_expensive_call_percent(ComputeExpensiveCallPercent(fun));
}
return result;
}
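  // Replays the sorted entry/exit points with a stack to compute, for each
  // activation, the total duration of its direct children.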
void CalculateChildrenDurations() {
std::stack<int64_t> call_stack;
for (const auto& pt : points_) {
if (pt.is_entry) {
call_stack.push(pt.index);
} else {
        DCHECK(!call_stack.empty());
        DCHECK_EQ(call_stack.top(), pt.index);
uint64 call_duration = activations_[pt.index].timespan.duration_ps();
call_stack.pop();
if (!call_stack.empty()) {
activations_[call_stack.top()].children_duration_ps += call_duration;
}
}
}
}
private:
std::vector<ActivationRecord> activations_;
std::vector<EntryOrExit> points_;
};
}
std::string DebugString(const TfFunctionDb& tf_function_db) {
std::string str;
protobuf::TextFormat::PrintToString(tf_function_db, &str);
return str;
}
void CombineTfFunctionDb(const TfFunctionDb& src, TfFunctionDb* dst) {
for (const auto& name_function : src.tf_functions()) {
const auto& name = name_function.first;
const auto& src_fun = name_function.second;
TfFunction* dst_fun = gtl::FindOrNull(*dst->mutable_tf_functions(), name);
if (dst_fun == nullptr) {
(*dst->mutable_tf_functions())[name] = src_fun;
} else {
CombineTfFunction(src_fun, dst_fun);
}
}
}
TfFunctionDb ConvertHostThreadsXLineToTfFunctionDb(const XLineVisitor& line) {
TfFunctionExecutions tf_function_executions = TfFunctionExecutions(line);
return tf_function_executions.ConvertToTfFunctionDb();
}
}
} | #include "tensorflow/core/profiler/convert/xplane_to_tf_functions.h"
#include <string>
#include "absl/strings/string_view.h"
#include "xla/tsl/profiler/utils/tf_xplane_visitor.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/profiler/protobuf/tf_function.pb.h"
#include "tensorflow/core/profiler/protobuf/xplane.pb.h"
#include "tensorflow/core/profiler/utils/xplane_builder.h"
#include "tensorflow/core/profiler/utils/xplane_schema.h"
#include "tensorflow/core/profiler/utils/xplane_test_utils.h"
#include "tensorflow/core/profiler/utils/xplane_utils.h"
#include "tensorflow/core/profiler/utils/xplane_visitor.h"
namespace tensorflow {
namespace profiler {
namespace {
const absl::string_view kEager = "eager";
const absl::string_view kConcrete = "concrete";
const absl::string_view kTracedNonXla = "traced-nonXla";
const absl::string_view kTracedXla = "traced-xla";
const absl::string_view kNotTracedNonXla = "notTraced-nonXla";
const absl::string_view kNotTracedXla = "notTraced-xla";
constexpr double kMaxError = 0.001;
TfFunctionDb ConvertXSpaceToTfFunctionDb(const XSpace& space) {
TfFunctionDb result;
const XPlane* host_plane = FindPlaneWithName(space, kHostThreadsPlaneName);
if (host_plane) {
XPlaneVisitor plane = tsl::profiler::CreateTfXPlaneVisitor(host_plane);
plane.ForEachLine([&result](const XLineVisitor& line) {
TfFunctionDb tf_function_db = ConvertHostThreadsXLineToTfFunctionDb(line);
CombineTfFunctionDb(tf_function_db, &result);
});
}
return result;
}
TEST(ConvertXPlaneToTfFunctions, CombineTwoThreads) {
XSpace space;
XPlaneBuilder host_plane_builder(space.add_planes());
host_plane_builder.SetName(kHostThreadsPlaneName);
host_plane_builder.ReserveLines(2);
std::string kFunctionName = "decrement";
auto main_thread = host_plane_builder.GetOrCreateLine(0);
CreateTfFunctionCallEvent(&host_plane_builder, &main_thread, kFunctionName,
10, 100, kTracedNonXla, 1);
CreateTfFunctionCallEvent(&host_plane_builder, &main_thread, kFunctionName,
150, 20, kNotTracedNonXla, 2);
CreateTfFunctionCallEvent(&host_plane_builder, &main_thread, kFunctionName,
200, 80, kTracedNonXla, 3);
auto other_thread = host_plane_builder.GetOrCreateLine(1);
CreateTfFunctionCallEvent(&host_plane_builder, &other_thread, kFunctionName,
20, 100, kTracedNonXla, 2);
CreateTfFunctionCallEvent(&host_plane_builder, &other_thread, kFunctionName,
160, 20, kNotTracedNonXla, 2);
CreateTfFunctionCallEvent(&host_plane_builder, &other_thread, kFunctionName,
210, 80, kTracedXla, 4);
TfFunctionDb tf_function_db = ConvertXSpaceToTfFunctionDb(space);
EXPECT_EQ(tf_function_db.tf_functions().size(), 1);
EXPECT_EQ(tf_function_db.tf_functions().count(kFunctionName), 1);
const TfFunction& tf_function =
tf_function_db.tf_functions().at(kFunctionName);
EXPECT_EQ(tf_function.total_tracing_count(), 4);
EXPECT_EQ(tf_function.compiler(), MIXED_COMPILER);
EXPECT_NEAR(tf_function.expensive_call_percent(), 90, kMaxError);
const auto& metrics = tf_function.metrics();
EXPECT_EQ(metrics.size(), 2);
EXPECT_EQ(metrics.count(TRACED_MODE), 1);
EXPECT_EQ(metrics.count(NOT_TRACED_MODE), 1);
const auto& traced_mode = metrics.at(TRACED_MODE);
EXPECT_EQ(traced_mode.count(), 4);
EXPECT_EQ(traced_mode.self_time_ps(), 360);
const auto& not_traced_mode = metrics.at(NOT_TRACED_MODE);
EXPECT_EQ(not_traced_mode.count(), 2);
EXPECT_EQ(not_traced_mode.self_time_ps(), 40);
}
TEST(ConvertXPlaneToTfFunctions, NestedFunctions) {
XSpace space;
XPlaneBuilder host_plane_builder(space.add_planes());
host_plane_builder.SetName(kHostThreadsPlaneName);
host_plane_builder.ReserveLines(1);
std::string kOuterFunctionName = "outer";
std::string kInnerFunctionName = "inner";
auto main_thread = host_plane_builder.GetOrCreateLine(0);
CreateTfFunctionCallEvent(&host_plane_builder, &main_thread,
kOuterFunctionName, 10, 100, kTracedNonXla, 1);
CreateTfFunctionCallEvent(&host_plane_builder, &main_thread,
kInnerFunctionName, 30, 40, kNotTracedXla, 0);
TfFunctionDb tf_function_db = ConvertXSpaceToTfFunctionDb(space);
EXPECT_EQ(tf_function_db.tf_functions().size(), 2);
EXPECT_EQ(tf_function_db.tf_functions().count(kOuterFunctionName), 1);
EXPECT_EQ(tf_function_db.tf_functions().count(kInnerFunctionName), 1);
const TfFunction& outer =
tf_function_db.tf_functions().at(kOuterFunctionName);
EXPECT_EQ(outer.total_tracing_count(), 1);
EXPECT_EQ(outer.compiler(), OTHER_COMPILER);
EXPECT_NEAR(outer.expensive_call_percent(), 100, kMaxError);
const auto& outer_metrics = outer.metrics();
EXPECT_EQ(outer_metrics.size(), 1);
EXPECT_EQ(outer_metrics.count(TRACED_MODE), 1);
const auto& traced_mode = outer_metrics.at(TRACED_MODE);
EXPECT_EQ(traced_mode.count(), 1);
EXPECT_EQ(traced_mode.self_time_ps(), 60);
const TfFunction& inner =
tf_function_db.tf_functions().at(kInnerFunctionName);
EXPECT_EQ(inner.total_tracing_count(), 0);
EXPECT_EQ(inner.compiler(), XLA_COMPILER);
EXPECT_NEAR(inner.expensive_call_percent(), 0, kMaxError);
const auto& inner_metrics = inner.metrics();
EXPECT_EQ(inner_metrics.size(), 1);
EXPECT_EQ(inner_metrics.count(NOT_TRACED_MODE), 1);
const auto& not_traced_mode = inner_metrics.at(NOT_TRACED_MODE);
EXPECT_EQ(not_traced_mode.count(), 1);
EXPECT_EQ(not_traced_mode.self_time_ps(), 40);
}
TEST(ConvertXPlaneToTfFunctions, EagerPlusConcrete) {
XSpace space;
XPlaneBuilder host_plane_builder(GetOrCreateHostXPlane(&space));
host_plane_builder.ReserveLines(2);
std::string kEagerFunctionName = "i_am_eager";
std::string kConcreteFunctionName = "i_am_concrete";
auto main_thread = host_plane_builder.GetOrCreateLine(0);
CreateTfFunctionCallEvent(&host_plane_builder, &main_thread,
kEagerFunctionName, 10, 200, kEager);
auto other_thread = host_plane_builder.GetOrCreateLine(1);
CreateTfFunctionCallEvent(&host_plane_builder, &other_thread,
kConcreteFunctionName, 20, 40, kConcrete);
TfFunctionDb tf_function_db = ConvertXSpaceToTfFunctionDb(space);
EXPECT_EQ(tf_function_db.tf_functions().size(), 2);
EXPECT_EQ(tf_function_db.tf_functions().count(kEagerFunctionName), 1);
EXPECT_EQ(tf_function_db.tf_functions().count(kConcreteFunctionName), 1);
const TfFunction& eager =
tf_function_db.tf_functions().at(kEagerFunctionName);
EXPECT_EQ(eager.total_tracing_count(), 0);
EXPECT_EQ(eager.compiler(), INVALID_COMPILER);
EXPECT_NEAR(eager.expensive_call_percent(), 100, kMaxError);
const auto& eager_metrics = eager.metrics();
EXPECT_EQ(eager_metrics.size(), 1);
EXPECT_EQ(eager_metrics.count(EAGER_MODE), 1);
const auto& eager_mode = eager_metrics.at(EAGER_MODE);
EXPECT_EQ(eager_mode.count(), 1);
EXPECT_EQ(eager_mode.self_time_ps(), 200);
const TfFunction& concrete =
tf_function_db.tf_functions().at(kConcreteFunctionName);
EXPECT_EQ(concrete.total_tracing_count(), 0);
EXPECT_EQ(concrete.compiler(), INVALID_COMPILER);
EXPECT_NEAR(concrete.expensive_call_percent(), 0, kMaxError);
const auto& concrete_metrics = concrete.metrics();
EXPECT_EQ(concrete_metrics.size(), 1);
EXPECT_EQ(concrete_metrics.count(CONCRETE_MODE), 1);
const auto& concrete_mode = concrete_metrics.at(CONCRETE_MODE);
EXPECT_EQ(concrete_mode.count(), 1);
EXPECT_EQ(concrete_mode.self_time_ps(), 40);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/profiler/convert/xplane_to_tf_functions.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/profiler/convert/xplane_to_tf_functions_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
4273af3f-f205-4dcb-946a-0956d06dc284 | cpp | google/tsl | ram_file_block_cache | tsl/platform/cloud/ram_file_block_cache.cc | tsl/platform/cloud/ram_file_block_cache_test.cc | #include "tsl/platform/cloud/ram_file_block_cache.h"
#include <cstring>
#include <memory>
#include "absl/cleanup/cleanup.h"
#include "tsl/platform/env.h"
namespace tsl {
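// Returns true unless the block finished downloading more than max_staleness_
// seconds ago; a max_staleness_ of 0 disables staleness checks.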
bool RamFileBlockCache::BlockNotStale(const std::shared_ptr<Block>& block) {
mutex_lock l(block->mu);
if (block->state != FetchState::FINISHED) {
return true;
}
if (max_staleness_ == 0) return true;
return env_->NowSeconds() - block->timestamp <= max_staleness_;
}
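// Returns the cached block for `key`, or inserts a fresh placeholder block
// (linked into both the LRU and LRA lists) when none exists. A stale hit
// first evicts every block belonging to the same file.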
std::shared_ptr<RamFileBlockCache::Block> RamFileBlockCache::Lookup(
const Key& key) {
mutex_lock lock(mu_);
auto entry = block_map_.find(key);
if (entry != block_map_.end()) {
if (BlockNotStale(entry->second)) {
if (cache_stats_ != nullptr) {
cache_stats_->RecordCacheHitBlockSize(entry->second->data.size());
}
return entry->second;
} else {
RemoveFile_Locked(key.first);
}
}
auto new_entry = std::make_shared<Block>();
lru_list_.push_front(key);
lra_list_.push_front(key);
new_entry->lru_iterator = lru_list_.begin();
new_entry->lra_iterator = lra_list_.begin();
new_entry->timestamp = env_->NowSeconds();
block_map_.emplace(std::make_pair(key, new_entry));
return new_entry;
}
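// Evicts least-recently-used blocks until the cache fits in max_bytes_.
// Caller must hold mu_.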
void RamFileBlockCache::Trim() {
while (!lru_list_.empty() && cache_size_ > max_bytes_) {
RemoveBlock(block_map_.find(lru_list_.back()));
}
}
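// Moves `block` to the front of the LRU list and trims the cache. A zero
// timestamp means the block was removed concurrently, so it is left alone;
// a short (partial) block is only legal as the last block of its file.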
absl::Status RamFileBlockCache::UpdateLRU(const Key& key,
const std::shared_ptr<Block>& block) {
mutex_lock lock(mu_);
if (block->timestamp == 0) {
return absl::OkStatus();
}
if (block->lru_iterator != lru_list_.begin()) {
lru_list_.erase(block->lru_iterator);
lru_list_.push_front(key);
block->lru_iterator = lru_list_.begin();
}
if (block->data.size() < block_size_) {
Key fmax = std::make_pair(key.first, std::numeric_limits<size_t>::max());
auto fcmp = block_map_.upper_bound(fmax);
if (fcmp != block_map_.begin() && key < (--fcmp)->first) {
return errors::Internal("Block cache contents are inconsistent.");
}
}
Trim();
return absl::OkStatus();
}
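// Downloads the block's contents via block_fetcher_ unless another thread
// is already fetching it (then this thread waits on the condition variable)
// or the fetch has already finished. The cleanup handler charges a freshly
// downloaded block against cache_size_ and refreshes its LRA position.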
absl::Status RamFileBlockCache::MaybeFetch(
const Key& key, const std::shared_ptr<Block>& block) {
bool downloaded_block = false;
auto reconcile_state =
absl::MakeCleanup([this, &downloaded_block, &key, &block] {
if (downloaded_block) {
mutex_lock l(mu_);
if (block->timestamp != 0) {
cache_size_ += block->data.capacity();
lra_list_.erase(block->lra_iterator);
lra_list_.push_front(key);
block->lra_iterator = lra_list_.begin();
block->timestamp = env_->NowSeconds();
}
}
});
mutex_lock l(block->mu);
absl::Status status = absl::OkStatus();
while (true) {
switch (block->state) {
case FetchState::ERROR:
TF_FALLTHROUGH_INTENDED;
case FetchState::CREATED:
block->state = FetchState::FETCHING;
block->mu.unlock();
block->data.clear();
block->data.resize(block_size_, 0);
size_t bytes_transferred;
status.Update(block_fetcher_(key.first, key.second, block_size_,
block->data.data(), &bytes_transferred));
if (cache_stats_ != nullptr) {
cache_stats_->RecordCacheMissBlockSize(bytes_transferred);
}
block->mu.lock();
if (status.ok()) {
block->data.resize(bytes_transferred, 0);
std::vector<char>(block->data).swap(block->data);
downloaded_block = true;
block->state = FetchState::FINISHED;
} else {
block->state = FetchState::ERROR;
}
block->cond_var.notify_all();
return status;
case FetchState::FETCHING:
block->cond_var.wait_for(l, std::chrono::seconds(60));
if (block->state == FetchState::FINISHED) {
return absl::OkStatus();
}
break;
case FetchState::FINISHED:
return absl::OkStatus();
}
}
return errors::Internal(
"Control flow should never reach the end of RamFileBlockCache::Fetch.");
}
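// Serves a read of `n` bytes at `offset` from block-aligned cache entries,
// fetching each covered block on demand. Reads larger than max_bytes_, or
// any read while the cache is disabled, bypass the cache entirely.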
absl::Status RamFileBlockCache::Read(const string& filename, size_t offset,
size_t n, char* buffer,
size_t* bytes_transferred) {
*bytes_transferred = 0;
if (n == 0) {
return absl::OkStatus();
}
if (!IsCacheEnabled() || (n > max_bytes_)) {
return block_fetcher_(filename, offset, n, buffer, bytes_transferred);
}
size_t start = block_size_ * (offset / block_size_);
size_t finish = block_size_ * ((offset + n) / block_size_);
if (finish < offset + n) {
finish += block_size_;
}
size_t total_bytes_transferred = 0;
for (size_t pos = start; pos < finish; pos += block_size_) {
Key key = std::make_pair(filename, pos);
std::shared_ptr<Block> block = Lookup(key);
DCHECK(block) << "No block for key " << key.first << "@" << key.second;
TF_RETURN_IF_ERROR(MaybeFetch(key, block));
TF_RETURN_IF_ERROR(UpdateLRU(key, block));
const auto& data = block->data;
if (offset >= pos + data.size()) {
*bytes_transferred = total_bytes_transferred;
return errors::OutOfRange("EOF at offset ", offset, " in file ", filename,
" at position ", pos, "with data size ",
data.size());
}
auto begin = data.begin();
if (offset > pos) {
begin += offset - pos;
}
auto end = data.end();
if (pos + data.size() > offset + n) {
end -= (pos + data.size()) - (offset + n);
}
if (begin < end) {
size_t bytes_to_copy = end - begin;
memcpy(&buffer[total_bytes_transferred], &*begin, bytes_to_copy);
total_bytes_transferred += bytes_to_copy;
}
if (data.size() < block_size_) {
break;
}
}
*bytes_transferred = total_bytes_transferred;
return absl::OkStatus();
}
bool RamFileBlockCache::ValidateAndUpdateFileSignature(const string& filename,
int64_t file_signature) {
mutex_lock lock(mu_);
auto it = file_signature_map_.find(filename);
if (it != file_signature_map_.end()) {
if (it->second == file_signature) {
return true;
}
RemoveFile_Locked(filename);
it->second = file_signature;
return false;
}
file_signature_map_[filename] = file_signature;
return true;
}
size_t RamFileBlockCache::CacheSize() const {
mutex_lock lock(mu_);
return cache_size_;
}
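// Background pruning loop: roughly once a second, evicts every file whose
// least-recently-added block has been resident longer than max_staleness_.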
void RamFileBlockCache::Prune() {
while (!WaitForNotificationWithTimeout(&stop_pruning_thread_, 1000000)) {
mutex_lock lock(mu_);
uint64 now = env_->NowSeconds();
while (!lra_list_.empty()) {
auto it = block_map_.find(lra_list_.back());
if (now - it->second->timestamp <= max_staleness_) {
break;
}
RemoveFile_Locked(std::string(it->first.first));
}
}
}
void RamFileBlockCache::Flush() {
mutex_lock lock(mu_);
block_map_.clear();
lru_list_.clear();
lra_list_.clear();
cache_size_ = 0;
}
void RamFileBlockCache::RemoveFile(const string& filename) {
mutex_lock lock(mu_);
RemoveFile_Locked(filename);
}
void RamFileBlockCache::RemoveFile_Locked(const string& filename) {
Key begin = std::make_pair(filename, 0);
auto it = block_map_.lower_bound(begin);
while (it != block_map_.end() && it->first.first == filename) {
auto next = std::next(it);
RemoveBlock(it);
it = next;
}
}
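// Unlinks one block from both eviction lists and the block map, zeroing its
// timestamp so a racing fetch can tell the block has been removed.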
void RamFileBlockCache::RemoveBlock(BlockMap::iterator entry) {
entry->second->timestamp = 0;
lru_list_.erase(entry->second->lru_iterator);
lra_list_.erase(entry->second->lra_iterator);
cache_size_ -= entry->second->data.capacity();
block_map_.erase(entry);
}
} | #include "tsl/platform/cloud/ram_file_block_cache.h"
#include <cstring>
#include "xla/tsl/lib/core/status_test_util.h"
#include "tsl/platform/blocking_counter.h"
#include "tsl/platform/cloud/now_seconds_env.h"
#include "tsl/platform/env.h"
#include "tsl/platform/notification.h"
#include "tsl/platform/test.h"
namespace tsl {
namespace {
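// Test helper: reads `n` bytes at `offset` through the cache and shrinks
// `out` to the number of bytes actually transferred.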
absl::Status ReadCache(RamFileBlockCache* cache, const string& filename,
size_t offset, size_t n, std::vector<char>* out) {
out->clear();
out->resize(n, 0);
size_t bytes_transferred = 0;
absl::Status status =
cache->Read(filename, offset, n, out->data(), &bytes_transferred);
EXPECT_LE(bytes_transferred, n);
  out->resize(bytes_transferred);
return status;
}
TEST(RamFileBlockCacheTest, IsCacheEnabled) {
auto fetcher = [](const string& filename, size_t offset, size_t n,
char* buffer, size_t* bytes_transferred) {
return absl::OkStatus();
};
RamFileBlockCache cache1(0, 0, 0, fetcher);
RamFileBlockCache cache2(16, 0, 0, fetcher);
RamFileBlockCache cache3(0, 32, 0, fetcher);
RamFileBlockCache cache4(16, 32, 0, fetcher);
EXPECT_FALSE(cache1.IsCacheEnabled());
EXPECT_FALSE(cache2.IsCacheEnabled());
EXPECT_FALSE(cache3.IsCacheEnabled());
EXPECT_TRUE(cache4.IsCacheEnabled());
}
TEST(RamFileBlockCacheTest, ValidateAndUpdateFileSignature) {
int calls = 0;
auto fetcher = [&calls](const string& filename, size_t offset, size_t n,
char* buffer, size_t* bytes_transferred) {
calls++;
memset(buffer, 'x', n);
*bytes_transferred = n;
return absl::OkStatus();
};
string filename = "file";
RamFileBlockCache cache(16, 32, 0, fetcher);
std::vector<char> out;
EXPECT_TRUE(cache.ValidateAndUpdateFileSignature(filename, 123));
TF_EXPECT_OK(ReadCache(&cache, filename, 0, 16, &out));
EXPECT_EQ(calls, 1);
EXPECT_TRUE(cache.ValidateAndUpdateFileSignature(filename, 123));
TF_EXPECT_OK(ReadCache(&cache, filename, 0, 16, &out));
EXPECT_EQ(calls, 1);
EXPECT_FALSE(cache.ValidateAndUpdateFileSignature(filename, 321));
TF_EXPECT_OK(ReadCache(&cache, filename, 0, 16, &out));
EXPECT_EQ(calls, 2);
}
TEST(RamFileBlockCacheTest, PassThrough) {
const string want_filename = "foo/bar";
const size_t want_offset = 42;
const size_t want_n = 1024;
int calls = 0;
auto fetcher = [&calls, want_filename, want_offset, want_n](
const string& got_filename, size_t got_offset,
size_t got_n, char* buffer, size_t* bytes_transferred) {
EXPECT_EQ(got_filename, want_filename);
EXPECT_EQ(got_offset, want_offset);
EXPECT_EQ(got_n, want_n);
calls++;
memset(buffer, 'x', got_n);
*bytes_transferred = got_n;
return absl::OkStatus();
};
RamFileBlockCache cache1(1, 0, 0, fetcher);
RamFileBlockCache cache2(0, 1, 0, fetcher);
RamFileBlockCache cache3(0, 0, 0, fetcher);
RamFileBlockCache cache4(1000, 1000, 0, fetcher);
std::vector<char> out;
TF_EXPECT_OK(ReadCache(&cache1, want_filename, want_offset, want_n, &out));
EXPECT_EQ(calls, 1);
TF_EXPECT_OK(ReadCache(&cache2, want_filename, want_offset, want_n, &out));
EXPECT_EQ(calls, 2);
TF_EXPECT_OK(ReadCache(&cache3, want_filename, want_offset, want_n, &out));
EXPECT_EQ(calls, 3);
TF_EXPECT_OK(ReadCache(&cache4, want_filename, want_offset, want_n, &out));
EXPECT_EQ(calls, 4);
}
TEST(RamFileBlockCacheTest, BlockAlignment) {
const size_t size = 256;
std::vector<char> buf;
for (int i = 0; i < size; i++) {
buf.push_back(i);
}
auto fetcher = [&buf](const string& filename, size_t offset, size_t n,
char* buffer, size_t* bytes_transferred) {
if (offset < buf.size()) {
size_t bytes_to_copy = std::min<size_t>(buf.size() - offset, n);
memcpy(buffer, buf.data() + offset, bytes_to_copy);
*bytes_transferred = bytes_to_copy;
} else {
*bytes_transferred = 0;
}
return absl::OkStatus();
};
for (size_t block_size = 2; block_size <= 4; block_size++) {
RamFileBlockCache cache(block_size, block_size, 0, fetcher);
for (size_t offset = 0; offset < 10; offset++) {
for (size_t n = block_size - 2; n <= block_size + 2; n++) {
std::vector<char> got;
TF_EXPECT_OK(ReadCache(&cache, "", offset, n, &got));
if (offset + n <= size) {
EXPECT_EQ(got.size(), n) << "block size = " << block_size
<< ", offset = " << offset << ", n = " << n;
} else {
EXPECT_EQ(got.size(), size - offset)
<< "block size = " << block_size << ", offset = " << offset
<< ", n = " << n;
}
std::vector<char>::const_iterator begin = buf.begin() + offset;
std::vector<char>::const_iterator end =
offset + n > buf.size() ? buf.end() : begin + n;
std::vector<char> want(begin, end);
EXPECT_EQ(got, want) << "block size = " << block_size
<< ", offset = " << offset << ", n = " << n;
}
}
}
}
TEST(RamFileBlockCacheTest, CacheHits) {
const size_t block_size = 16;
std::set<size_t> calls;
auto fetcher = [&calls, block_size](const string& filename, size_t offset,
size_t n, char* buffer,
size_t* bytes_transferred) {
EXPECT_EQ(n, block_size);
EXPECT_EQ(offset % block_size, 0);
EXPECT_EQ(calls.find(offset), calls.end()) << "at offset " << offset;
calls.insert(offset);
memset(buffer, 'x', n);
*bytes_transferred = n;
return absl::OkStatus();
};
const uint32 block_count = 256;
RamFileBlockCache cache(block_size, block_count * block_size, 0, fetcher);
std::vector<char> out;
out.resize(block_count, 0);
for (int i = 0; i < 2; i++) {
for (int j = 0; j < block_count; j++) {
TF_EXPECT_OK(ReadCache(&cache, "", block_size * j, block_size, &out));
}
}
}
TEST(RamFileBlockCacheTest, OutOfRange) {
const size_t block_size = 16;
const size_t file_size = 24;
bool first_block = false;
bool second_block = false;
auto fetcher = [block_size, file_size, &first_block, &second_block](
const string& filename, size_t offset, size_t n,
char* buffer, size_t* bytes_transferred) {
EXPECT_EQ(n, block_size);
EXPECT_EQ(offset % block_size, 0);
size_t bytes_to_copy = 0;
if (offset == 0) {
memset(buffer, 'x', n);
bytes_to_copy = n;
first_block = true;
} else if (offset == block_size) {
bytes_to_copy = file_size - block_size;
memset(buffer, 'x', bytes_to_copy);
second_block = true;
}
*bytes_transferred = bytes_to_copy;
return absl::OkStatus();
};
RamFileBlockCache cache(block_size, block_size, 0, fetcher);
std::vector<char> out;
TF_EXPECT_OK(ReadCache(&cache, "", 0, block_size, &out));
EXPECT_TRUE(first_block);
EXPECT_EQ(out.size(), block_size);
absl::Status status = ReadCache(&cache, "", file_size + 4, 4, &out);
EXPECT_EQ(status.code(), error::OUT_OF_RANGE);
EXPECT_TRUE(second_block);
second_block = false;
TF_EXPECT_OK(ReadCache(&cache, "", block_size, block_size, &out));
EXPECT_FALSE(second_block);
EXPECT_EQ(out.size(), file_size - block_size);
}
TEST(RamFileBlockCacheTest, Inconsistent) {
const size_t block_size = 16;
auto fetcher = [block_size](const string& filename, size_t offset, size_t n,
char* buffer, size_t* bytes_transferred) {
EXPECT_EQ(n, block_size);
EXPECT_EQ(offset % block_size, 0);
EXPECT_GE(n, 1);
memset(buffer, 'x', 1);
*bytes_transferred = 1;
return absl::OkStatus();
};
RamFileBlockCache cache(block_size, 2 * block_size, 0, fetcher);
std::vector<char> out;
TF_EXPECT_OK(ReadCache(&cache, "", block_size, block_size, &out));
EXPECT_EQ(out.size(), 1);
absl::Status status = ReadCache(&cache, "", 0, block_size, &out);
EXPECT_EQ(status.code(), error::INTERNAL);
}
TEST(RamFileBlockCacheTest, LRU) {
const size_t block_size = 16;
std::list<size_t> calls;
auto fetcher = [&calls, block_size](const string& filename, size_t offset,
size_t n, char* buffer,
size_t* bytes_transferred) {
EXPECT_EQ(n, block_size);
EXPECT_FALSE(calls.empty()) << "at offset = " << offset;
if (!calls.empty()) {
EXPECT_EQ(offset, calls.front());
calls.pop_front();
}
memset(buffer, 'x', n);
*bytes_transferred = n;
return absl::OkStatus();
};
const uint32 block_count = 2;
RamFileBlockCache cache(block_size, block_count * block_size, 0, fetcher);
std::vector<char> out;
calls.push_back(0);
TF_EXPECT_OK(ReadCache(&cache, "", 0, 1, &out));
TF_EXPECT_OK(ReadCache(&cache, "", 0, 1, &out));
calls.push_back(block_size);
TF_EXPECT_OK(ReadCache(&cache, "", block_size, 1, &out));
TF_EXPECT_OK(ReadCache(&cache, "", block_size, 1, &out));
calls.push_back(2 * block_size);
TF_EXPECT_OK(ReadCache(&cache, "", 2 * block_size, 1, &out));
TF_EXPECT_OK(ReadCache(&cache, "", 2 * block_size, 1, &out));
calls.push_back(0);
TF_EXPECT_OK(ReadCache(&cache, "", 0, 1, &out));
TF_EXPECT_OK(ReadCache(&cache, "", 2 * block_size, 1, &out));
calls.push_back(block_size);
TF_EXPECT_OK(ReadCache(&cache, "", block_size, 1, &out));
calls.push_back(0);
TF_EXPECT_OK(ReadCache(&cache, "", 0, 1, &out));
}
TEST(RamFileBlockCacheTest, MaxStaleness) {
int calls = 0;
auto fetcher = [&calls](const string& filename, size_t offset, size_t n,
char* buffer, size_t* bytes_transferred) {
calls++;
memset(buffer, 'x', n);
*bytes_transferred = n;
return absl::OkStatus();
};
std::vector<char> out;
std::unique_ptr<NowSecondsEnv> env(new NowSecondsEnv);
  RamFileBlockCache cache1(8, 16, /*max_staleness=*/2, fetcher, env.get());
TF_EXPECT_OK(ReadCache(&cache1, "", 0, 1, &out));
EXPECT_EQ(calls, 1);
for (int i = 1; i <= 10; i++) {
env->SetNowSeconds(i + 1);
TF_EXPECT_OK(ReadCache(&cache1, "", 0, 1, &out));
EXPECT_EQ(calls, 1 + i / 3);
}
calls = 0;
env->SetNowSeconds(0);
  RamFileBlockCache cache2(8, 16, /*max_staleness=*/0, fetcher, env.get());
TF_EXPECT_OK(ReadCache(&cache2, "", 0, 1, &out));
EXPECT_EQ(calls, 1);
env->SetNowSeconds(365 * 24 * 60 * 60);
TF_EXPECT_OK(ReadCache(&cache2, "", 0, 1, &out));
EXPECT_EQ(calls, 1);
}
TEST(RamFileBlockCacheTest, RemoveFile) {
int calls = 0;
auto fetcher = [&calls](const string& filename, size_t offset, size_t n,
char* buffer, size_t* bytes_transferred) {
calls++;
char c = (filename == "a") ? 'a' : (filename == "b") ? 'b' : 'x';
if (offset > 0) {
c = toupper(c);
}
memset(buffer, c, n);
*bytes_transferred = n;
return absl::OkStatus();
};
const size_t n = 3;
RamFileBlockCache cache(8, 32, 0, fetcher);
std::vector<char> out;
std::vector<char> a(n, 'a');
std::vector<char> b(n, 'b');
std::vector<char> A(n, 'A');
std::vector<char> B(n, 'B');
TF_EXPECT_OK(ReadCache(&cache, "a", 0, n, &out));
EXPECT_EQ(out, a);
EXPECT_EQ(calls, 1);
TF_EXPECT_OK(ReadCache(&cache, "a", 8, n, &out));
EXPECT_EQ(out, A);
EXPECT_EQ(calls, 2);
TF_EXPECT_OK(ReadCache(&cache, "b", 0, n, &out));
EXPECT_EQ(out, b);
EXPECT_EQ(calls, 3);
TF_EXPECT_OK(ReadCache(&cache, "b", 8, n, &out));
EXPECT_EQ(out, B);
EXPECT_EQ(calls, 4);
TF_EXPECT_OK(ReadCache(&cache, "a", 0, n, &out));
EXPECT_EQ(out, a);
TF_EXPECT_OK(ReadCache(&cache, "a", 8, n, &out));
EXPECT_EQ(out, A);
TF_EXPECT_OK(ReadCache(&cache, "b", 0, n, &out));
EXPECT_EQ(out, b);
TF_EXPECT_OK(ReadCache(&cache, "b", 8, n, &out));
EXPECT_EQ(out, B);
EXPECT_EQ(calls, 4);
cache.RemoveFile("a");
TF_EXPECT_OK(ReadCache(&cache, "b", 0, n, &out));
EXPECT_EQ(out, b);
TF_EXPECT_OK(ReadCache(&cache, "b", 8, n, &out));
EXPECT_EQ(out, B);
EXPECT_EQ(calls, 4);
TF_EXPECT_OK(ReadCache(&cache, "a", 0, n, &out));
EXPECT_EQ(out, a);
EXPECT_EQ(calls, 5);
TF_EXPECT_OK(ReadCache(&cache, "a", 8, n, &out));
EXPECT_EQ(out, A);
EXPECT_EQ(calls, 6);
}
TEST(RamFileBlockCacheTest, Prune) {
int calls = 0;
auto fetcher = [&calls](const string& filename, size_t offset, size_t n,
char* buffer, size_t* bytes_transferred) {
calls++;
memset(buffer, 'x', n);
*bytes_transferred = n;
return absl::OkStatus();
};
std::vector<char> out;
std::unique_ptr<NowSecondsEnv> env(new NowSecondsEnv);
uint64 now = Env::Default()->NowSeconds();
env->SetNowSeconds(now);
  RamFileBlockCache cache(8, 32, /*max_staleness=*/1, fetcher, env.get());
TF_EXPECT_OK(ReadCache(&cache, "a", 0, 1, &out));
env->SetNowSeconds(now + 1);
TF_EXPECT_OK(ReadCache(&cache, "b", 0, 1, &out));
TF_EXPECT_OK(ReadCache(&cache, "a", 8, 1, &out));
EXPECT_EQ(cache.CacheSize(), 24);
EXPECT_EQ(calls, 3);
TF_EXPECT_OK(ReadCache(&cache, "a", 0, 1, &out));
TF_EXPECT_OK(ReadCache(&cache, "b", 0, 1, &out));
TF_EXPECT_OK(ReadCache(&cache, "a", 8, 1, &out));
EXPECT_EQ(calls, 3);
env->SetNowSeconds(now + 2);
uint64 start = Env::Default()->NowSeconds();
do {
Env::Default()->SleepForMicroseconds(100000);
} while (cache.CacheSize() == 24 && Env::Default()->NowSeconds() - start < 3);
EXPECT_EQ(cache.CacheSize(), 8);
TF_EXPECT_OK(ReadCache(&cache, "b", 0, 1, &out));
EXPECT_EQ(calls, 3);
env->SetNowSeconds(now + 3);
start = Env::Default()->NowSeconds();
do {
Env::Default()->SleepForMicroseconds(100000);
} while (cache.CacheSize() == 8 && Env::Default()->NowSeconds() - start < 3);
EXPECT_EQ(cache.CacheSize(), 0);
}
TEST(RamFileBlockCacheTest, ParallelReads) {
const int callers = 4;
BlockingCounter counter(callers);
auto fetcher = [&counter](const string& filename, size_t offset, size_t n,
char* buffer, size_t* bytes_transferred) {
counter.DecrementCount();
if (!counter.WaitFor(std::chrono::seconds(10))) {
return errors::FailedPrecondition("desired concurrency not reached");
}
memset(buffer, 'x', n);
*bytes_transferred = n;
return absl::OkStatus();
};
const int block_size = 8;
RamFileBlockCache cache(block_size, 2 * callers * block_size, 0, fetcher);
std::vector<std::unique_ptr<Thread>> threads;
for (int i = 0; i < callers; i++) {
threads.emplace_back(
Env::Default()->StartThread({}, "caller", [&cache, i, block_size]() {
std::vector<char> out;
TF_EXPECT_OK(
ReadCache(&cache, "a", i * block_size, block_size, &out));
std::vector<char> x(block_size, 'x');
EXPECT_EQ(out, x);
}));
}
}
TEST(RamFileBlockCacheTest, CoalesceConcurrentReads) {
const size_t block_size = 16;
int num_requests = 0;
Notification notification;
auto fetcher = [&num_requests, ¬ification, block_size](
const string& filename, size_t offset, size_t n,
char* buffer, size_t* bytes_transferred) {
EXPECT_EQ(n, block_size);
EXPECT_EQ(offset, 0);
num_requests++;
memset(buffer, 'x', n);
*bytes_transferred = n;
notification.Notify();
Env::Default()->SleepForMicroseconds(100000);
return absl::OkStatus();
};
RamFileBlockCache cache(block_size, block_size, 0, fetcher);
std::unique_ptr<Thread> concurrent(
Env::Default()->StartThread({}, "concurrent", [&cache, block_size] {
std::vector<char> out;
TF_EXPECT_OK(ReadCache(&cache, "", 0, block_size / 2, &out));
EXPECT_EQ(out.size(), block_size / 2);
}));
notification.WaitForNotification();
std::vector<char> out;
TF_EXPECT_OK(ReadCache(&cache, "", block_size / 2, block_size / 2, &out));
EXPECT_EQ(out.size(), block_size / 2);
EXPECT_EQ(1, num_requests);
}
TEST(RamFileBlockCacheTest, Flush) {
int calls = 0;
auto fetcher = [&calls](const string& filename, size_t offset, size_t n,
char* buffer, size_t* bytes_transferred) {
calls++;
memset(buffer, 'x', n);
*bytes_transferred = n;
return absl::OkStatus();
};
RamFileBlockCache cache(16, 32, 0, fetcher);
std::vector<char> out;
TF_EXPECT_OK(ReadCache(&cache, "", 0, 16, &out));
TF_EXPECT_OK(ReadCache(&cache, "", 0, 16, &out));
EXPECT_EQ(calls, 1);
cache.Flush();
TF_EXPECT_OK(ReadCache(&cache, "", 0, 16, &out));
EXPECT_EQ(calls, 2);
}
}
} | https://github.com/google/tsl/blob/6d708fdcdd4f40537b7fa273371215a6fa3d4423/tsl/platform/cloud/ram_file_block_cache.cc | https://github.com/google/tsl/blob/6d708fdcdd4f40537b7fa273371215a6fa3d4423/tsl/platform/cloud/ram_file_block_cache_test.cc | 6d708fdcdd4f40537b7fa273371215a6fa3d4423 |
4854e98f-6b0a-450f-b9b6-840922cf00e1 | cpp | google/tensorstore | codec_spec | tensorstore/driver/zarr3/codec/codec_spec.cc | tensorstore/codec_spec_test.cc | #include "tensorstore/driver/zarr3/codec/codec_spec.h"
#include <stddef.h>
#include "absl/base/no_destructor.h"
#include "tensorstore/driver/zarr3/codec/registry.h"
namespace tensorstore {
namespace internal_zarr3 {
ZarrCodecSpec::~ZarrCodecSpec() = default;
ZarrCodecKind ZarrArrayToArrayCodecSpec::kind() const {
return ZarrCodecKind::kArrayToArray;
}
ZarrCodecKind ZarrArrayToBytesCodecSpec::kind() const {
return ZarrCodecKind::kArrayToBytes;
}
size_t ZarrArrayToBytesCodecSpec::sharding_height() const { return 0; }
ZarrCodecKind ZarrBytesToBytesCodecSpec::kind() const {
return ZarrCodecKind::kBytesToBytes;
}
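// Process-wide codec registry; constructed on first use and intentionally
// never destroyed so it remains usable during static teardown.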
CodecRegistry& GetCodecRegistry() {
static absl::NoDestructor<CodecRegistry> registry;
return *registry;
}
}
} | #include "tensorstore/codec_spec.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorstore/serialization/serialization.h"
#include "tensorstore/serialization/test_util.h"
namespace {
using ::tensorstore::serialization::TestSerializationRoundTrip;
TEST(CodecSpecSerializationTest, SerializationRoundTrip) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto codec,
tensorstore::CodecSpec::FromJson({
{"driver", "zarr"},
{"compressor", nullptr},
{"filters", nullptr},
}));
TestSerializationRoundTrip(codec);
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/driver/zarr3/codec/codec_spec.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/codec_spec_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
6558b08c-70ef-430e-a29e-211a324b5e75 | cpp | tensorflow/tensorflow | convolution_transposed_4x4 | tensorflow/lite/delegates/gpu/common/tasks/convolution_transposed_4x4.cc | tensorflow/lite/delegates/gpu/cl/kernels/convolution_transposed_4x4_test.cc | #include "tensorflow/lite/delegates/gpu/common/tasks/convolution_transposed_4x4.h"
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "tensorflow/lite/delegates/gpu/common/task/work_group_picking.h"
namespace tflite {
namespace gpu {
namespace {
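// Picks how the transposed-convolution weights are staged for this GPU:
// constant memory on AMD, async local-memory copies on PowerVR, per-thread
// local-memory loads on Nvidia, Intel, and pre-Bionic Apple GPUs, and plain
// global memory everywhere else.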
ConvolutionTransposed4x4::WeightsUploadType GetBestWeightsUploadType(
const GpuInfo& gpu_info) {
ConvolutionTransposed4x4::WeightsUploadType weights_upload_type =
ConvolutionTransposed4x4::WeightsUploadType::GLOBAL_MEM;
if (gpu_info.IsApple()) {
if (gpu_info.apple_info.IsBionic()) {
weights_upload_type =
ConvolutionTransposed4x4::WeightsUploadType::GLOBAL_MEM;
} else {
weights_upload_type =
ConvolutionTransposed4x4::WeightsUploadType::LOCAL_MEM_BY_THREADS;
}
} else if (gpu_info.IsPowerVR()) {
weights_upload_type =
ConvolutionTransposed4x4::WeightsUploadType::LOCAL_MEM_ASYNC;
} else if (gpu_info.IsNvidia() || gpu_info.IsIntel()) {
weights_upload_type =
ConvolutionTransposed4x4::WeightsUploadType::LOCAL_MEM_BY_THREADS;
} else if (gpu_info.IsAMD()) {
weights_upload_type =
ConvolutionTransposed4x4::WeightsUploadType::CONSTANT_MEM;
} else {
weights_upload_type =
ConvolutionTransposed4x4::WeightsUploadType::GLOBAL_MEM;
}
return weights_upload_type;
}
}
ConvolutionTransposed4x4::ConvolutionTransposed4x4(
const OperationDef& definition, const GpuInfo& gpu_info)
: GPUOperation(definition) {
work_group_size_ = int3(8, 4, 1);
if (gpu_info.IsApple()) {
work_group_launch_order_ = int3(2, 0, 1);
}
if (gpu_info.IsApple()) {
weights_layout_ = WeightsLayout::kOICustomSpatialO4I4;
} else {
weights_layout_ = WeightsLayout::kOICustomSpatialI4O4;
}
code_ = GenerateConvolutionTransposedCode(gpu_info, definition_,
GetBestWeightsUploadType(gpu_info));
if (definition_.precision == CalculationsPrecision::F16 &&
gpu_info.IsPowerVR()) {
compiler_options_.push_back(CompilerOptions::kClFastRelaxedMath);
}
}
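// Emits the kernel source for the 4x4, stride-2 transposed convolution:
// each work item accumulates a 2x2 tile of outputs (r0..r3) from a 2x2
// neighborhood of source pixels, staging weights according to
// `weights_upload_type`.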
std::string ConvolutionTransposed4x4::GenerateConvolutionTransposedCode(
const GpuInfo& gpu_info, const OperationDef& op_def,
WeightsUploadType weights_upload_type) {
auto src_desc = op_def.src_tensors[0];
AddSrcTensor("src_tensor", src_desc);
AddDstTensor("dst_tensor", op_def.dst_tensors[0]);
if (op_def.src_tensors.size() == 2) {
BufferDescriptor desc;
desc.element_type = op_def.src_tensors[1].GetDataType();
desc.element_size = 4;
desc.memory_type =
weights_upload_type ==
ConvolutionTransposed4x4::WeightsUploadType::CONSTANT_MEM
? MemoryType::CONSTANT
: MemoryType::GLOBAL;
AddSrcBuffer("weights", desc);
}
args_.AddInt("filter_offset");
const bool need_local_mem =
weights_upload_type ==
ConvolutionTransposed4x4::WeightsUploadType::LOCAL_MEM_BY_THREADS ||
weights_upload_type ==
ConvolutionTransposed4x4::WeightsUploadType::LOCAL_MEM_ASYNC;
const int wg_total_size =
work_group_size_.x * work_group_size_.y * work_group_size_.z;
const std::string barrier =
wg_total_size == 32 && gpu_info.IsWaveSizeEqualTo32()
? "SIMD_LOCAL_MEM_BARRIER"
: "LOCAL_MEM_BARRIER";
std::string c;
if (GetWeightsDescription().IsI4O4()) {
switch (op_def.precision) {
case CalculationsPrecision::F32:
case CalculationsPrecision::F16:
c += "#define CONV(R, SRC, F) \\\n";
c += " R += SRC.x * weights_cache[F]; \\\n";
c += " R += SRC.y * weights_cache[F + 1]; \\\n";
c += " R += SRC.z * weights_cache[F + 2]; \\\n";
c += " R += SRC.w * weights_cache[F + 3]; \n";
break;
case CalculationsPrecision::F32_F16:
c += "#define CONV(R, SRC, F) \\\n";
c += " R += TO_ACCUM_TYPE(SRC.x * weights_cache[F] + SRC.y * "
"weights_cache[F + 1] + SRC.z * weights_cache[F + 2] + SRC.w * "
"weights_cache[F + 3]);\n";
break;
}
} else {
c += "#define CONV(R, SRC, F) \\\n";
c += " R.x += dot(SRC, weights_cache[F]); \\\n";
c += " R.y += dot(SRC, weights_cache[F + 1]); \\\n";
c += " R.z += dot(SRC, weights_cache[F + 2]); \\\n";
c += " R.w += dot(SRC, weights_cache[F + 3]); \n";
}
const std::string weights_space =
weights_upload_type ==
ConvolutionTransposed4x4::WeightsUploadType::CONSTANT_MEM
? "__constant"
: "__global";
if (gpu_info.IsApiOpenCl()) {
c += "__attribute__((reqd_work_group_size(8, 4, 1)))\n";
}
c += "MAIN_FUNCTION($0) {\n";
std::string grid_coords[3];
int3 launch_remap;
launch_remap[work_group_launch_order_.x] = 0;
launch_remap[work_group_launch_order_.y] = 1;
launch_remap[work_group_launch_order_.z] = 2;
if (work_group_launch_order_[0] == 0) {
grid_coords[0] = "GLOBAL_ID_0";
} else {
grid_coords[0] = "(GROUP_ID_" + std::to_string(launch_remap[0]) +
" * GROUP_SIZE_0 + LOCAL_ID_0);\n";
}
if (work_group_launch_order_[1] == 1) {
grid_coords[1] = "GLOBAL_ID_1";
} else {
grid_coords[1] = "(GROUP_ID_" + std::to_string(launch_remap[1]) +
" * GROUP_SIZE_1 + LOCAL_ID_1);\n";
}
if (work_group_launch_order_[2] == 2) {
grid_coords[2] = "GLOBAL_ID_2";
} else {
grid_coords[2] = "(GROUP_ID_" + std::to_string(launch_remap[2]) +
" * GROUP_SIZE_2 + LOCAL_ID_2);\n";
}
if (op_def.dst_tensors[0].HasAxis(Axis::BATCH)) {
c += " int linear_id = " + grid_coords[0] + ";\n";
c += " int X = linear_id / args.dst_tensor.Batch();\n";
c += " int B = linear_id % args.dst_tensor.Batch();\n";
c += " args.src_tensor.SetBatchRef(B);\n";
c += " args.dst_tensor.SetBatchRef(B);\n";
} else {
c += " int X = " + grid_coords[0] + ";\n";
}
c += " int Y = " + grid_coords[1] + ";\n";
c += " int Z = " + grid_coords[2] + ";\n";
if (!need_local_mem) {
c += " if (X * 2 > args.dst_tensor.Width() || Y * 2 > "
"args.dst_tensor.Height() || Z >= args.dst_tensor.Slices()) "
"return;\n";
}
c += " ACCUM_FLT4 r0 = INIT_ACCUM_FLT4(0.0f);\n";
c += " ACCUM_FLT4 r1 = INIT_ACCUM_FLT4(0.0f);\n";
c += " ACCUM_FLT4 r2 = INIT_ACCUM_FLT4(0.0f);\n";
c += " ACCUM_FLT4 r3 = INIT_ACCUM_FLT4(0.0f);\n";
c += " int f_offset = Z * args.filter_offset;\n";
if (need_local_mem) {
c += " __local FLT4 weights_cache[64];\n";
}
if (weights_upload_type ==
ConvolutionTransposed4x4::WeightsUploadType::LOCAL_MEM_BY_THREADS) {
c += " int local_id = LOCAL_ID_1 * 8 + LOCAL_ID_0;\n";
}
if (!src_desc.SupportsZeroClamp(Axis::WIDTH, gpu_info)) {
c += " bool in_x0 = X - 1 >= 0 && X - 1 < args.src_tensor.Width();\n";
c += " bool in_x1 = X >= 0 && X < args.src_tensor.Width();\n";
}
if (!src_desc.SupportsZeroClamp(Axis::HEIGHT, gpu_info)) {
c += " bool in_y0 = Y - 1 >= 0 && Y - 1 < args.src_tensor.Height();\n";
c += " bool in_y1 = Y >= 0 && Y < args.src_tensor.Height();\n";
}
auto generate_check = [&](int x, int y) {
std::string check;
const std::vector<Axis> axes{Axis::WIDTH, Axis::HEIGHT};
const std::vector<std::string> names{"in_x" + std::to_string(x),
"in_y" + std::to_string(y)};
for (int i = 0; i < axes.size(); ++i) {
const auto& axis = axes[i];
if (src_desc.HasAxis(axis) &&
!src_desc.SupportsZeroClamp(axis, gpu_info)) {
if (!check.empty()) {
check += " && ";
}
check += names[i];
}
}
return check;
};
if (src_desc.IsLinear()) {
if (src_desc.ReturnsZeroForNegOneRead(gpu_info)) {
c += " int addr_0 = args.src_tensor.GetAddress(X - 1, Y - 1, 0);\n";
c += " int addr_1 = args.src_tensor.GetAddress(X, Y - 1, 0);\n";
c += " int addr_2 = args.src_tensor.GetAddress(X - 1, Y, 0);\n";
c += " int addr_3 = args.src_tensor.GetAddress(X, Y, 0);\n";
c += " addr_0 = select(-1, addr_0, (in_x0 && in_y0));\n";
c += " addr_1 = select(-1, addr_1, (in_x1 && in_y0));\n";
c += " addr_2 = select(-1, addr_2, (in_x0 && in_y1));\n";
c += " addr_3 = select(-1, addr_3, (in_x1 && in_y1));\n";
c += " int dz_0 = select(0, args.src_tensor.SliceStride(), (in_x0 && "
"in_y0));\n";
c += " int dz_1 = select(0, args.src_tensor.SliceStride(), (in_x1 && "
"in_y0));\n";
c += " int dz_2 = select(0, args.src_tensor.SliceStride(), (in_x0 && "
"in_y1));\n";
c += " int dz_3 = select(0, args.src_tensor.SliceStride(), (in_x1 && "
"in_y1));\n";
} else {
c += " int xc0 = clamp(X - 1, 0, args.src_tensor.Width() - 1);\n";
c += " int xc1 = clamp(X, 0, args.src_tensor.Width() - 1);\n";
c += " int yc0 = clamp(Y - 1, 0, args.src_tensor.Height() - 1);\n";
c += " int yc1 = clamp(Y, 0, args.src_tensor.Height() - 1);\n";
c += " int addr_0 = args.src_tensor.GetAddress(xc0, yc0, 0);\n";
c += " int addr_1 = args.src_tensor.GetAddress(xc1, yc0, 0);\n";
c += " int addr_2 = args.src_tensor.GetAddress(xc0, yc1, 0);\n";
c += " int addr_3 = args.src_tensor.GetAddress(xc1, yc1, 0);\n";
c += " int dz = args.src_tensor.SliceStride();\n";
}
}
auto read_src = [&](int x, int y) {
if (src_desc.IsLinear()) {
const std::string id = std::to_string(y * 2 + x);
const std::string addr = "addr_" + std::to_string(y * 2 + x);
if (src_desc.ReturnsZeroForNegOneRead(gpu_info)) {
return "args.src_tensor.Read(" + addr + "); " + addr + " += dz_" + id +
";";
} else {
return "args.src_tensor.Read(" + addr + ") * INIT_FLT(in_x" +
std::to_string(x) + " && in_y" + std::to_string(y) + "); " +
addr + " += dz;";
}
} else {
std::string check = generate_check(x, y);
if (!check.empty()) {
check = " * INIT_FLT(" + check + ")";
}
return "args.src_tensor.Read(X + " + std::to_string(x - 1) + ", Y + " +
std::to_string(y - 1) + ", s)" + check + ";";
}
};
c += " for (int s = 0; s < args.src_tensor.Slices(); ++s) {\n";
if (need_local_mem) {
c += " " + barrier + ";\n";
}
if (weights_upload_type ==
ConvolutionTransposed4x4::WeightsUploadType::LOCAL_MEM_ASYNC) {
c += " async_work_group_copy(weights_cache, "
"args.weights.GetPtr(f_offset), 64, "
"0);\n";
} else if (weights_upload_type ==
ConvolutionTransposed4x4::WeightsUploadType::
LOCAL_MEM_BY_THREADS) {
c += " weights_cache[local_id] = args.weights.Read(f_offset + "
"local_id);\n";
c += " weights_cache[local_id + 32] = args.weights.Read(f_offset + "
"local_id + "
"32);\n";
} else {
c += " " + weights_space +
" FLT4* weights_cache = args.weights.GetPtr(f_offset);\n";
}
c += " FLT4 src0 = " + read_src(0, 0) + ";\n";
c += " FLT4 src1 = " + read_src(1, 0) + ";\n";
c += " FLT4 src2 = " + read_src(0, 1) + ";\n";
c += " FLT4 src3 = " + read_src(1, 1) + ";\n";
c += " f_offset += 64;\n";
if (need_local_mem) {
c += " " + barrier + ";\n";
}
c += " CONV(r0, src0, 0);\n";
c += " CONV(r1, src0, 4);\n";
c += " CONV(r2, src0, 8);\n";
c += " CONV(r3, src0, 12);\n";
c += " CONV(r0, src1, 16);\n";
c += " CONV(r1, src1, 20);\n";
c += " CONV(r2, src1, 24);\n";
c += " CONV(r3, src1, 28);\n";
c += " CONV(r0, src2, 32);\n";
c += " CONV(r1, src2, 36);\n";
c += " CONV(r2, src2, 40);\n";
c += " CONV(r3, src2, 44);\n";
c += " CONV(r0, src3, 48);\n";
c += " CONV(r1, src3, 52);\n";
c += " CONV(r2, src3, 56);\n";
c += " CONV(r3, src3, 60);\n";
c += " }\n";
c += "\n";
if (need_local_mem) {
c += " if (X * 2 > args.dst_tensor.Width() || Y * 2 > "
"args.dst_tensor.Height() || Z >= args.dst_tensor.Slices()) "
"return;\n";
}
c += " X = X * 2 - 1;\n";
c += " Y = Y * 2 - 1;\n";
c += "\n";
c += " FLT4 bias_val = args.biases.Read(Z);\n";
c += " if (X >= 0 && Y >= 0) {\n";
c += " FLT4 result = TO_FLT4(r0) + bias_val;\n";
c += " args.dst_tensor.Write(result, X, Y, Z);\n";
c += " }\n";
c += " if (X + 1 < args.dst_tensor.Width() && Y >= 0) {\n";
c += " FLT4 result = TO_FLT4(r1) + bias_val;\n";
c += " args.dst_tensor.Write(result, X + 1, Y, Z);\n";
c += " }\n";
c += " if (X >= 0 && Y + 1 < args.dst_tensor.Height()) {\n";
c += " FLT4 result = TO_FLT4(r2) + bias_val;\n";
c += " args.dst_tensor.Write(result, X, Y + 1, Z);\n";
c += " }\n";
c += " if (X + 1 < args.dst_tensor.Width() && Y + 1 < "
"args.dst_tensor.Height()) {\n";
c += " FLT4 result = TO_FLT4(r3) + bias_val;\n";
c += " args.dst_tensor.Write(result, X + 1, Y + 1, Z);\n";
c += " }\n";
c += "}\n";
return c;
}
absl::Status ConvolutionTransposed4x4::BindArguments(ArgumentsBinder* args) {
return args->SetInt("filter_offset", 4 * 16 * src_[0]->Slices());
}
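// Each work item writes a 2x2 output tile, so the grid is half the output
// extent, rounded up with slack for the X * 2 - 1 / Y * 2 - 1 offsets.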
int3 ConvolutionTransposed4x4::GetGridSize() const {
const int grid_x = DivideRoundUp(dst_[0]->Width() + 2, 2) * dst_[0]->Batch();
const int grid_y = DivideRoundUp(dst_[0]->Height() + 2, 2);
const int grid_z = dst_[0]->Slices();
return int3(grid_x, grid_y, grid_z);
}
std::vector<int> ConvolutionTransposed4x4::GetSpatialWeightsRemap() const {
return std::vector<int>{10, 11, 14, 15, 8, 9, 12, 13, 2, 3, 6, 7, 0, 1, 4, 5};
}
void ConvolutionTransposed4x4::UploadWeights(
const tflite::gpu::Tensor<OHWI, DataType::FLOAT32>& weights,
WeightsUploadType weights_upload_type) {
const auto weights_desc = GetWeightsDescription();
const int flt_count =
GetTotalElementsCountForLayout(weights_desc, weights.shape);
BufferDescriptor desc;
desc.element_type = weights_desc.type;
desc.element_size = 4;
desc.memory_type =
weights_upload_type ==
ConvolutionTransposed4x4::WeightsUploadType::CONSTANT_MEM
? MemoryType::CONSTANT
: MemoryType::GLOBAL;
desc.size = flt_count * SizeOf(desc.element_type);
desc.data.resize(desc.size);
RearrangeWeights(weights, weights_desc, absl::MakeSpan(desc.data));
args_.AddObject("weights",
std::make_unique<BufferDescriptor>(std::move(desc)));
}
bool IsConvolutionTransposed4x4Supported(
const OperationDef& definition,
const ConvolutionTransposedAttributes& attr) {
return attr.weights.shape.w == 4 && attr.weights.shape.h == 4 &&
attr.stride.w == 2 && attr.stride.h == 2 &&
attr.padding.prepended.w == 1 && attr.padding.prepended.h == 1;
}
ConvolutionTransposed4x4 CreateConvolutionTransposed4x4(
const GpuInfo& gpu_info, const OperationDef& definition,
const ConvolutionTransposedAttributes& attr) {
ConvolutionTransposed4x4 result(definition, gpu_info);
result.UploadWeights(attr.weights, GetBestWeightsUploadType(gpu_info));
TensorDescriptor bias_tensor_desc = CreateConstantLinearTensorDescriptor(
gpu_info, definition.src_tensors[0].GetDataType(), attr.bias);
result.args_.AddObject("biases", std::make_unique<TensorDescriptor>(
std::move(bias_tensor_desc)));
return result;
}
ConvolutionTransposed4x4 CreateConvolutionTransposed4x4DynamicWeights(
const GpuInfo& gpu_info, const OperationDef& definition,
const ConvolutionTransposedAttributes& attr) {
OperationDef new_def = definition;
new_def.src_tensors = {
definition.src_tensors[0]};
const DataType weights_type = definition.GetDataType();
new_def.src_tensors.push_back(
{weights_type, TensorStorageType::BUFFER, Layout::HWC});
ConvolutionTransposed4x4 result(new_def, gpu_info);
TensorDescriptor bias_tensor_desc = CreateConstantLinearTensorDescriptor(
gpu_info, definition.src_tensors[0].GetDataType(), attr.bias);
result.args_.AddObject("biases", std::make_unique<TensorDescriptor>(
std::move(bias_tensor_desc)));
return result;
}
}
} | #include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/delegates/gpu/cl/kernels/cl_test.h"
#include "tensorflow/lite/delegates/gpu/common/operations.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
#include "tensorflow/lite/delegates/gpu/common/tasks/convolution_transposed_4x4_test_util.h"
namespace tflite {
namespace gpu {
namespace cl {
namespace {
TEST_F(OpenCLOperationTest, ConvolutionTransposed4x4SimpleWeights) {
auto status = ConvolutionTransposed4x4SimpleWeightsTest(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/common/tasks/convolution_transposed_4x4.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/cl/kernels/convolution_transposed_4x4_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
ef97cfe5-51f4-4301-b684-664f141364be | cpp | tensorflow/tensorflow | tensor_format | tensorflow/core/util/tensor_format.cc | tensorflow/core/util/tensor_format_test.cc | #include "tensorflow/core/util/tensor_format.h"
namespace tensorflow {
string GetConvnetDataFormatAttrString() {
return "data_format: { 'NHWC', 'NCHW' } = 'NHWC' ";
}
string GetConvnet3dDataFormatAttrString() {
return "data_format: { 'NDHWC', 'NCDHW' } = 'NDHWC' ";
}
string GetConvnetDataFormat2D3DAttrString() {
return "data_format: { 'NHWC', 'NCHW', 'NDHWC', 'NCDHW' } = 'NHWC' ";
}
string GetConvnetFilterFormatAttrString() {
return "filter_format: { 'HWIO', 'OIHW' } = 'HWIO' ";
}
string GetConvnet3dFilterFormatAttrString() {
return "filter_format: { 'DHWIO', 'OIDHW' } = 'DHWIO' ";
}
string ToString(TensorFormat format) {
switch (format) {
case FORMAT_NHWC:
return "NHWC";
case FORMAT_NCHW:
return "NCHW";
case FORMAT_NCHW_VECT_C:
return "NCHW_VECT_C";
case FORMAT_NHWC_VECT_W:
return "NHWC_VECT_W";
case FORMAT_HWNC:
return "HWNC";
case FORMAT_HWCN:
return "HWCN";
default:
LOG(FATAL) << "Invalid Format: " << static_cast<int32>(format);
return "INVALID_FORMAT";
}
}
string ToString(FilterTensorFormat format) {
switch (format) {
case FORMAT_HWIO:
return "HWIO";
case FORMAT_OIHW:
return "OIHW";
case FORMAT_OHWI:
return "OHWI";
case FORMAT_OIHW_VECT_I:
return "OIHW_VECT_I";
default:
LOG(FATAL) << "Invalid Filter Format: " << static_cast<int32>(format);
return "INVALID_FORMAT";
}
}
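// Parses a data-format string; the 3D spellings ("NDHWC", "NCDHW") map to
// the same enum values as their 2D counterparts.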
bool FormatFromString(absl::string_view format_str, TensorFormat* format) {
if (format_str == "NHWC" || format_str == "NDHWC") {
*format = FORMAT_NHWC;
return true;
}
if (format_str == "NCHW" || format_str == "NCDHW") {
*format = FORMAT_NCHW;
return true;
}
if (format_str == "NCHW_VECT_C") {
*format = FORMAT_NCHW_VECT_C;
return true;
}
if (format_str == "NHWC_VECT_W") {
*format = FORMAT_NHWC_VECT_W;
return true;
}
if (format_str == "HWNC") {
*format = FORMAT_HWNC;
return true;
}
if (format_str == "HWCN") {
*format = FORMAT_HWCN;
return true;
}
return false;
}
bool FilterFormatFromString(absl::string_view format_str,
FilterTensorFormat* format) {
if (format_str == "HWIO" || format_str == "DHWIO") {
*format = FORMAT_HWIO;
return true;
}
if (format_str == "OIHW" || format_str == "OIDHW") {
*format = FORMAT_OIHW;
return true;
}
if (format_str == "OIHW_VECT_I") {
*format = FORMAT_OIHW_VECT_I;
return true;
}
return false;
}
} | #include <utility>
#include "tensorflow/core/util/tensor_format.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
#define EnumStringPair(val) \
{ val, #val }
std::pair<TensorFormat, const char*> test_data_formats[] = {
EnumStringPair(FORMAT_NHWC), EnumStringPair(FORMAT_NCHW),
EnumStringPair(FORMAT_NCHW_VECT_C), EnumStringPair(FORMAT_NHWC_VECT_W),
EnumStringPair(FORMAT_HWNC), EnumStringPair(FORMAT_HWCN),
};
std::pair<FilterTensorFormat, const char*> test_filter_formats[] = {
EnumStringPair(FORMAT_HWIO),
EnumStringPair(FORMAT_OIHW),
EnumStringPair(FORMAT_OIHW_VECT_I),
};
struct TensorDimMap {
int n() const { return dim_n; }
int h() const { return dim_h; }
int w() const { return dim_w; }
int c() const { return dim_c; }
int spatial(int spatial_index) const { return spatial_dim[spatial_index]; }
int dim_n, dim_h, dim_w, dim_c;
int spatial_dim[3];
};
struct FilterDimMap {
int h() const { return dim_h; }
int w() const { return dim_w; }
int i() const { return dim_i; }
int o() const { return dim_o; }
int spatial(int spatial_index) const { return spatial_dim[spatial_index]; }
int dim_h, dim_w, dim_i, dim_o;
int spatial_dim[3];
};
struct DimMaps {
#define StaCoExTensorDm static constexpr TensorDimMap
StaCoExTensorDm kTdmInvalid = { -1, -1, -1, -1, { -1, -1, -1 } };
StaCoExTensorDm kTdmNHWC[4] = { kTdmInvalid,
{ 0, -1, 1, 2, { 1, -1, -1 } },
{ 0, 1, 2, 3, { 1, 2, -1 } },
{ 0, 2, 3, 4, { 1, 2, 3 } }
};
StaCoExTensorDm kTdmNCHW[4] = { kTdmInvalid,
{ 0, -1, 2, 1, { 2, -1, -1 } },
{ 0, 2, 3, 1, { 2, 3, -1 } },
{ 0, 3, 4, 1, { 2, 3, 4 } }
};
StaCoExTensorDm kTdmHWNC[4] = { kTdmInvalid,
{ 1, -1, 0, 2, { 0, -1, -1 } },
{ 2, 0, 1, 3, { 0, 1, -1 } },
{ 3, 1, 2, 4, { 0, 1, 2 } }
};
StaCoExTensorDm kTdmHWCN[4] = { kTdmInvalid,
{ 2, -1, 0, 1, { 0, -1, -1 } },
{ 3, 0, 1, 2, { 0, 1, -1 } },
{ 4, 1, 2, 3, { 0, 1, 2 } }
};
#undef StaCoExTensorDm
#define StaCoExFilterDm static constexpr FilterDimMap
StaCoExFilterDm kFdmInvalid = { -1, -1, -1, -1, { -1, -1, -1 } };
StaCoExFilterDm kFdmHWIO[4] = { kFdmInvalid,
{ -1, 0, 1, 2, { 0, -1, -1 } },
{ 0, 1, 2, 3, { 0, 1, -1 } },
{ 1, 2, 3, 4, { 0, 1, 2 } }
};
StaCoExFilterDm kFdmOIHW[4] = { kFdmInvalid,
{ -1, 2, 1, 0, { 2, -1, -1 } },
{ 2, 3, 1, 0, { 2, 3, -1 } },
{ 3, 4, 1, 0, { 2, 3, 4 } }
};
#undef StaCoExFilterDm
};
inline constexpr const TensorDimMap&
GetTensorDimMap(const int num_spatial_dims, const TensorFormat format) {
return
(format == FORMAT_NHWC ||
format == FORMAT_NHWC_VECT_W) ? DimMaps::kTdmNHWC[num_spatial_dims] :
(format == FORMAT_NCHW ||
format == FORMAT_NCHW_VECT_C) ? DimMaps::kTdmNCHW[num_spatial_dims] :
(format == FORMAT_HWNC) ? DimMaps::kTdmHWNC[num_spatial_dims] :
(format == FORMAT_HWCN) ? DimMaps::kTdmHWCN[num_spatial_dims]
: DimMaps::kTdmInvalid;
}
inline constexpr const FilterDimMap&
GetFilterDimMap(const int num_spatial_dims,
const FilterTensorFormat format) {
return
(format == FORMAT_HWIO) ? DimMaps::kFdmHWIO[num_spatial_dims] :
(format == FORMAT_OIHW ||
format == FORMAT_OIHW_VECT_I) ? DimMaps::kFdmOIHW[num_spatial_dims]
: DimMaps::kFdmInvalid;
}
constexpr TensorDimMap DimMaps::kTdmInvalid;
constexpr TensorDimMap DimMaps::kTdmNHWC[4];
constexpr TensorDimMap DimMaps::kTdmNCHW[4];
constexpr TensorDimMap DimMaps::kTdmHWNC[4];
constexpr TensorDimMap DimMaps::kTdmHWCN[4];
constexpr FilterDimMap DimMaps::kFdmInvalid;
constexpr FilterDimMap DimMaps::kFdmHWIO[4];
constexpr FilterDimMap DimMaps::kFdmOIHW[4];
TEST(TensorFormatTest, FormatEnumsAndStrings) {
const string prefix = "FORMAT_";
for (auto& test_data_format : test_data_formats) {
const char* stringified_format_enum = test_data_format.second;
LOG(INFO) << stringified_format_enum << " = " << test_data_format.first;
string expected_format_str = &stringified_format_enum[prefix.size()];
TensorFormat format;
EXPECT_TRUE(FormatFromString(expected_format_str, &format));
string format_str = ToString(format);
EXPECT_EQ(expected_format_str, format_str);
EXPECT_EQ(test_data_format.first, format);
}
for (auto& test_filter_format : test_filter_formats) {
const char* stringified_format_enum = test_filter_format.second;
LOG(INFO) << stringified_format_enum << " = " << test_filter_format.first;
string expected_format_str = &stringified_format_enum[prefix.size()];
FilterTensorFormat format;
EXPECT_TRUE(FilterFormatFromString(expected_format_str, &format));
string format_str = ToString(format);
EXPECT_EQ(expected_format_str, format_str);
EXPECT_EQ(test_filter_format.first, format);
}
}
template <int num_spatial_dims>
void RunDimensionIndexesTest() {
for (auto& test_data_format : test_data_formats) {
TensorFormat format = test_data_format.first;
auto& tdm = GetTensorDimMap(num_spatial_dims, format);
int num_dims = GetTensorDimsFromSpatialDims(num_spatial_dims, format);
LOG(INFO) << ToString(format) << ", num_spatial_dims=" << num_spatial_dims
<< ", num_dims=" << num_dims;
EXPECT_EQ(GetTensorBatchDimIndex(num_dims, format), tdm.n());
EXPECT_EQ(GetTensorDimIndex<num_spatial_dims>(format, 'N'), tdm.n());
EXPECT_EQ(GetTensorFeatureDimIndex(num_dims, format), tdm.c());
EXPECT_EQ(GetTensorDimIndex<num_spatial_dims>(format, 'C'), tdm.c());
for (int i = 0; i < num_spatial_dims; ++i) {
EXPECT_EQ(GetTensorSpatialDimIndex(num_dims, format, i), tdm.spatial(i));
EXPECT_EQ(GetTensorDimIndex<num_spatial_dims>(format, '0' + i),
tdm.spatial(i));
}
}
for (auto& test_filter_format : test_filter_formats) {
FilterTensorFormat format = test_filter_format.first;
auto& fdm = GetFilterDimMap(num_spatial_dims, format);
int num_dims = GetFilterTensorDimsFromSpatialDims(num_spatial_dims, format);
LOG(INFO) << ToString(format) << ", num_spatial_dims=" << num_spatial_dims
<< ", num_dims=" << num_dims;
EXPECT_EQ(GetFilterTensorOutputChannelsDimIndex(num_dims, format), fdm.o());
EXPECT_EQ(GetFilterDimIndex<num_spatial_dims>(format, 'O'), fdm.o());
EXPECT_EQ(GetFilterTensorInputChannelsDimIndex(num_dims, format), fdm.i());
EXPECT_EQ(GetFilterDimIndex<num_spatial_dims>(format, 'I'), fdm.i());
for (int i = 0; i < num_spatial_dims; ++i) {
EXPECT_EQ(GetFilterTensorSpatialDimIndex(num_dims, format, i),
fdm.spatial(i));
EXPECT_EQ(GetFilterDimIndex<num_spatial_dims>(format, '0' + i),
fdm.spatial(i));
}
}
}
TEST(TensorFormatTest, DimensionIndexes) {
RunDimensionIndexesTest<1>();
RunDimensionIndexesTest<2>();
RunDimensionIndexesTest<3>();
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/util/tensor_format.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/util/tensor_format_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
fa6d2b11-f649-4767-8a92-432331a7711b | cpp | tensorflow/tensorflow | variable_ops | tensorflow/compiler/tf2xla/kernels/variable_ops.cc | tensorflow/c/experimental/saved_model/core/ops/variable_ops_test.cc | #include <functional>
#include <utility>
#include "absl/status/status.h"
#include "tensorflow/compiler/tf2xla/kernels/gather_op_helpers.h"
#include "tensorflow/compiler/tf2xla/kernels/shape_util.h"
#include "tensorflow/compiler/tf2xla/lib/scatter.h"
#include "tensorflow/compiler/tf2xla/xla_op_kernel.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
#include "tensorflow/compiler/tf2xla/xla_resource.h"
#include "xla/hlo/builder/lib/slicing.h"
#include "xla/hlo/builder/xla_builder.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/op_requires.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/kernels/resource_variable_util.h"
#include "tensorflow/core/kernels/scatter_nd_util.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/types.h"
#include "tsl/platform/errors.h"
namespace tensorflow {
namespace {
Status ValidateAssignUpdateVariableOpShapes(XlaOpKernelContext* ctx) {
DataType variable_dtype;
TensorShape variable_shape;
TensorShape value_shape = ctx->InputShape(1);
TF_RETURN_IF_ERROR(
ctx->GetVariableTypeAndShape(0, &variable_dtype, &variable_shape));
TF_RETURN_IF_ERROR(
ValidateAssignUpdateVariableOpShapes(variable_shape, value_shape));
return absl::OkStatus();
}
class VarIsInitializedOp : public XlaOpKernel {
public:
explicit VarIsInitializedOp(OpKernelConstruction* ctx) : XlaOpKernel(ctx) {}
void Compile(XlaOpKernelContext* ctx) override {
XlaResource* variable;
OP_REQUIRES_OK(ctx, ctx->GetResourceInput(0, &variable));
ctx->SetOutput(
0, xla::ConstantR0<bool>(ctx->builder(), variable->initialized()));
}
};
REGISTER_XLA_OP(Name("VarIsInitializedOp"), VarIsInitializedOp);
class VariableShapeOp : public XlaOpKernel {
public:
explicit VariableShapeOp(OpKernelConstruction* ctx) : XlaOpKernel(ctx) {
OP_REQUIRES_OK(ctx, ctx->GetAttr("out_type", &out_dtype_));
}
void Compile(XlaOpKernelContext* ctx) override {
DataType variable_dtype;
TensorShape shape;
OP_REQUIRES_OK(ctx,
ctx->GetVariableTypeAndShape(0, &variable_dtype, &shape));
Tensor shape_constant(out_dtype_, TensorShape({shape.dims()}));
OP_REQUIRES_OK(ctx, TensorShapeToConstant(shape, &shape_constant));
ctx->SetConstantOutput(0, shape_constant);
}
private:
DataType out_dtype_;
};
REGISTER_XLA_OP(Name("VariableShape").CompilationOnly().IsMetadataOp(),
VariableShapeOp);
class ReadVariableOp : public XlaOpKernel {
public:
explicit ReadVariableOp(OpKernelConstruction* ctx) : XlaOpKernel(ctx) {
OP_REQUIRES_OK(ctx, ctx->GetAttr("dtype", &dtype_));
}
void Compile(XlaOpKernelContext* ctx) override {
xla::XlaOp handle;
    OP_REQUIRES_OK(
        ctx, ctx->ReadVariableInput(0, dtype_, /*shape=*/nullptr, &handle));
ctx->SetOutput(0, handle);
}
private:
DataType dtype_;
};
REGISTER_XLA_OP(Name("ReadVariableOp").CompilationOnly(), ReadVariableOp);
class AssignVariableOp : public XlaOpKernel {
public:
explicit AssignVariableOp(OpKernelConstruction* ctx) : XlaOpKernel(ctx) {}
void Compile(XlaOpKernelContext* ctx) override {
OP_REQUIRES_OK(ctx,
ctx->AssignVariable(0, ctx->input_type(1), ctx->Input(1)));
}
};
REGISTER_XLA_OP(Name("AssignVariableOp").CompilationOnly(), AssignVariableOp);
class AssignAddVariableOp : public XlaOpKernel {
public:
explicit AssignAddVariableOp(OpKernelConstruction* ctx) : XlaOpKernel(ctx) {}
void Compile(XlaOpKernelContext* ctx) override {
DataType type = ctx->input_type(1);
xla::XlaOp handle;
    OP_REQUIRES_OK(ctx,
                   ctx->ReadVariableInput(0, type, /*shape=*/nullptr, &handle));
OP_REQUIRES_OK(ctx, ValidateAssignUpdateVariableOpShapes(ctx));
handle = xla::Add(handle, ctx->Input(1));
OP_REQUIRES_OK(ctx, ctx->AssignVariable(0, type, handle));
}
};
REGISTER_XLA_OP(
Name("AssignAddVariableOp").TypeConstraint("dtype", kNumericTypes),
AssignAddVariableOp);
class AssignSubVariableOp : public XlaOpKernel {
public:
explicit AssignSubVariableOp(OpKernelConstruction* ctx) : XlaOpKernel(ctx) {}
void Compile(XlaOpKernelContext* ctx) override {
DataType type = ctx->input_type(1);
xla::XlaOp handle;
    OP_REQUIRES_OK(ctx,
                   ctx->ReadVariableInput(0, type, /*shape=*/nullptr, &handle));
OP_REQUIRES_OK(ctx, ValidateAssignUpdateVariableOpShapes(ctx));
handle = xla::Sub(handle, ctx->Input(1));
OP_REQUIRES_OK(ctx, ctx->AssignVariable(0, type, handle));
}
};
REGISTER_XLA_OP(
Name("AssignSubVariableOp").TypeConstraint("dtype", kNumericTypes),
AssignSubVariableOp);
class ResourceGatherOp : public XlaOpKernel {
public:
explicit ResourceGatherOp(OpKernelConstruction* ctx) : XlaOpKernel(ctx) {
OP_REQUIRES_OK(ctx, ctx->GetAttr("batch_dims", &batch_dims_));
}
void Compile(XlaOpKernelContext* ctx) override {
DataType type = ctx->expected_output_dtype(0);
TensorShape input_shape;
xla::XlaOp input;
OP_REQUIRES_OK(ctx, ctx->ReadVariableInput(0, type, &input_shape, &input));
xla::XlaOp gather;
OP_REQUIRES_OK(ctx, XlaGatherWithBatchDimsOpImpl(ctx, input, input_shape,
batch_dims_, &gather));
ctx->SetOutput(0, gather);
}
private:
int32 batch_dims_;
};
REGISTER_XLA_OP(Name("ResourceGather"), ResourceGatherOp);
class ResourceScatterOp : public XlaOpKernel {
public:
explicit ResourceScatterOp(
OpKernelConstruction* context, bool indices_are_vectors,
std::function<xla::XlaOp(const xla::XlaOp&, const xla::XlaOp&,
xla::XlaBuilder*)>
combiner)
: XlaOpKernel(context),
indices_are_vectors_(indices_are_vectors),
combiner_(std::move(combiner)) {}
void Compile(XlaOpKernelContext* context) override {
xla::XlaBuilder* builder = context->builder();
DataType dtype = context->input_type(2);
TensorShape var_shape;
xla::XlaOp var_value;
OP_REQUIRES_OK(
context, context->ReadVariableInput(0, dtype, &var_shape, &var_value));
if (indices_are_vectors_) {
OP_REQUIRES_OK(context, ValidateScatterNdUpdateShape(
var_shape, context->InputShape(1),
context->InputShape(2)));
}
const xla::XlaOp indices = context->Input(1);
const xla::XlaOp updates = context->Input(2);
    auto result = XlaScatter(var_value, updates, indices, indices_are_vectors_,
                             /*indices_are_sorted=*/false, combiner_, builder);
OP_REQUIRES_OK(context, result.status());
OP_REQUIRES_OK(context, context->AssignVariable(0, dtype, result.value()));
}
private:
const bool indices_are_vectors_;
const std::function<xla::XlaOp(const xla::XlaOp&, const xla::XlaOp&,
xla::XlaBuilder*)>
combiner_;
};
class ResourceScatterAddOp : public ResourceScatterOp {
public:
explicit ResourceScatterAddOp(OpKernelConstruction* context)
: ResourceScatterOp(context, false, Combine) {}
private:
static xla::XlaOp Combine(const xla::XlaOp x, const xla::XlaOp y,
xla::XlaBuilder* builder) {
return xla::Add(x, y);
}
};
REGISTER_XLA_OP(Name("ResourceScatterAdd"), ResourceScatterAddOp);
class ResourceScatterSubOp : public ResourceScatterOp {
public:
explicit ResourceScatterSubOp(OpKernelConstruction* context)
: ResourceScatterOp(context, false, Combine) {}
private:
static xla::XlaOp Combine(const xla::XlaOp x, const xla::XlaOp y,
xla::XlaBuilder* builder) {
return xla::Sub(x, y);
}
};
REGISTER_XLA_OP(Name("ResourceScatterSub"), ResourceScatterSubOp);
class ResourceScatterMulOp : public ResourceScatterOp {
public:
explicit ResourceScatterMulOp(OpKernelConstruction* context)
: ResourceScatterOp(context, false, Combine) {}
private:
static xla::XlaOp Combine(const xla::XlaOp x, const xla::XlaOp y,
xla::XlaBuilder* builder) {
return xla::Mul(x, y);
}
};
REGISTER_XLA_OP(Name("ResourceScatterMul"), ResourceScatterMulOp);
class ResourceScatterDivOp : public ResourceScatterOp {
public:
explicit ResourceScatterDivOp(OpKernelConstruction* context)
: ResourceScatterOp(context, false, Combine) {}
private:
static xla::XlaOp Combine(const xla::XlaOp x, const xla::XlaOp y,
xla::XlaBuilder* builder) {
return xla::Div(x, y);
}
};
REGISTER_XLA_OP(Name("ResourceScatterDiv"), ResourceScatterDivOp);
class ResourceScatterMinOp : public ResourceScatterOp {
public:
explicit ResourceScatterMinOp(OpKernelConstruction* context)
: ResourceScatterOp(context, false, Combine) {}
private:
static xla::XlaOp Combine(const xla::XlaOp x, const xla::XlaOp y,
xla::XlaBuilder* builder) {
return xla::Min(x, y);
}
};
REGISTER_XLA_OP(Name("ResourceScatterMin"), ResourceScatterMinOp);
class ResourceScatterMaxOp : public ResourceScatterOp {
public:
explicit ResourceScatterMaxOp(OpKernelConstruction* context)
: ResourceScatterOp(context, false, Combine) {}
private:
static xla::XlaOp Combine(const xla::XlaOp x, const xla::XlaOp y,
xla::XlaBuilder* builder) {
return xla::Max(x, y);
}
};
REGISTER_XLA_OP(Name("ResourceScatterMax"), ResourceScatterMaxOp);
class ResourceScatterUpdateOp : public ResourceScatterOp {
public:
explicit ResourceScatterUpdateOp(OpKernelConstruction* context)
      : ResourceScatterOp(context, /*indices_are_vectors=*/false,
                          /*combiner=*/{}) {}
};
REGISTER_XLA_OP(Name("ResourceScatterUpdate"), ResourceScatterUpdateOp);
class ResourceScatterNdUpdateOp : public ResourceScatterOp {
public:
explicit ResourceScatterNdUpdateOp(OpKernelConstruction* context)
      : ResourceScatterOp(context, /*indices_are_vectors=*/true,
                          /*combiner=*/{}) {}
};
REGISTER_XLA_OP(Name("ResourceScatterNdUpdate"), ResourceScatterNdUpdateOp);
class ResourceScatterNdAddOp : public ResourceScatterOp {
public:
explicit ResourceScatterNdAddOp(OpKernelConstruction* context)
      : ResourceScatterOp(context, /*indices_are_vectors=*/true,
                          Combine) {}
private:
static xla::XlaOp Combine(const xla::XlaOp x, const xla::XlaOp y,
xla::XlaBuilder* builder) {
return xla::Add(x, y);
}
};
REGISTER_XLA_OP(Name("ResourceScatterNdAdd"), ResourceScatterNdAddOp);
class ResourceScatterNdSubOp : public ResourceScatterOp {
public:
explicit ResourceScatterNdSubOp(OpKernelConstruction* context)
      : ResourceScatterOp(context, /*indices_are_vectors=*/true,
                          Combine) {}
private:
static xla::XlaOp Combine(const xla::XlaOp x, const xla::XlaOp y,
xla::XlaBuilder* builder) {
return xla::Sub(x, y);
}
};
REGISTER_XLA_OP(Name("ResourceScatterNdSub"), ResourceScatterNdSubOp);
}  // namespace
}  // namespace tensorflow
|
#include "tensorflow/c/experimental/saved_model/core/ops/variable_ops.h"
#include <memory>
#include "tensorflow/c/eager/immediate_execution_tensor_handle.h"
#include "tensorflow/c/experimental/saved_model/core/test_utils.h"
#include "tensorflow/c/tensor_interface.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tensorflow/core/common_runtime/device_mgr.h"
#include "tensorflow/core/common_runtime/eager/context.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
ImmediateTensorHandlePtr CreateScalarTensorHandle(EagerContext* context,
float value) {
AbstractTensorPtr tensor(context->CreateFloatScalar(value));
ImmediateTensorHandlePtr handle(context->CreateLocalHandle(tensor.get()));
return handle;
}
class VariableOpsTest : public ::testing::Test {
public:
VariableOpsTest()
: device_mgr_(testing::CreateTestingDeviceMgr()),
ctx_(testing::CreateTestingEagerContext(device_mgr_.get())) {}
EagerContext* context() { return ctx_.get(); }
private:
std::unique_ptr<StaticDeviceMgr> device_mgr_;
EagerContextPtr ctx_;
};
TEST_F(VariableOpsTest, CreateVariableSuccessful) {
ImmediateTensorHandlePtr handle;
TF_EXPECT_OK(internal::CreateUninitializedResourceVariable(
context(), DT_FLOAT, {}, nullptr, &handle));
EXPECT_EQ(handle->DataType(), DT_RESOURCE);
}
TEST_F(VariableOpsTest, DestroyVariableSuccessful) {
ImmediateTensorHandlePtr handle;
TF_EXPECT_OK(internal::CreateUninitializedResourceVariable(
context(), DT_FLOAT, {}, nullptr, &handle));
TF_EXPECT_OK(internal::DestroyResource(context(), handle.get()));
}
TEST_F(VariableOpsTest, AssignVariableAndReadSuccessful) {
ImmediateTensorHandlePtr variable;
TF_EXPECT_OK(internal::CreateUninitializedResourceVariable(
context(), DT_FLOAT, {}, nullptr, &variable));
ImmediateTensorHandlePtr my_value = CreateScalarTensorHandle(context(), 42.0);
TF_EXPECT_OK(internal::AssignVariable(context(), variable.get(), DT_FLOAT,
my_value.get()));
ImmediateTensorHandlePtr read_value_handle;
TF_EXPECT_OK(internal::ReadVariable(context(), variable.get(), DT_FLOAT,
&read_value_handle));
Status status;
AbstractTensorPtr read_value(read_value_handle->Resolve(&status));
TF_EXPECT_OK(status);
EXPECT_FLOAT_EQ(42.0, *static_cast<float*>(read_value->Data()));
}
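// A minimal end-to-end sketch added for illustration; it reuses only the
// helpers already exercised above (create, assign, destroy), and the assigned
// value is arbitrary.
TEST_F(VariableOpsTest, AssignThenDestroySketch) {
  ImmediateTensorHandlePtr variable;
  TF_EXPECT_OK(internal::CreateUninitializedResourceVariable(
      context(), DT_FLOAT, {}, nullptr, &variable));
  ImmediateTensorHandlePtr my_value = CreateScalarTensorHandle(context(), 1.0);
  TF_EXPECT_OK(internal::AssignVariable(context(), variable.get(), DT_FLOAT,
                                        my_value.get()));
  TF_EXPECT_OK(internal::DestroyResource(context(), variable.get()));
}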
}  // namespace
}  // namespace tensorflow
| https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2xla/kernels/variable_ops.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/c/experimental/saved_model/core/ops/variable_ops_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
| 09d4c8e3-37f0-41f7-a410-f45ec5df047e | cpp | abseil/abseil-cpp | hashtablez_sampler | absl/container/internal/hashtablez_sampler.cc | absl/container/internal/hashtablez_sampler_test.cc |
#include "absl/container/internal/hashtablez_sampler.h"
#include <algorithm>
#include <atomic>
#include <cassert>
#include <cmath>
#include <cstddef>
#include <cstdint>
#include <functional>
#include <limits>
#include "absl/base/attributes.h"
#include "absl/base/config.h"
#include "absl/base/internal/per_thread_tls.h"
#include "absl/base/internal/raw_logging.h"
#include "absl/base/macros.h"
#include "absl/base/no_destructor.h"
#include "absl/base/optimization.h"
#include "absl/debugging/stacktrace.h"
#include "absl/memory/memory.h"
#include "absl/profiling/internal/exponential_biased.h"
#include "absl/profiling/internal/sample_recorder.h"
#include "absl/synchronization/mutex.h"
#include "absl/time/clock.h"
#include "absl/utility/utility.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace container_internal {
#ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL
constexpr int HashtablezInfo::kMaxStackDepth;
#endif
namespace {
ABSL_CONST_INIT std::atomic<bool> g_hashtablez_enabled{false};
ABSL_CONST_INIT std::atomic<int32_t> g_hashtablez_sample_parameter{1 << 10};
std::atomic<HashtablezConfigListener> g_hashtablez_config_listener{nullptr};
#if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
ABSL_PER_THREAD_TLS_KEYWORD absl::profiling_internal::ExponentialBiased
g_exponential_biased_generator;
#endif
void TriggerHashtablezConfigListener() {
auto* listener = g_hashtablez_config_listener.load(std::memory_order_acquire);
if (listener != nullptr) listener();
}
}  // namespace
#if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
ABSL_PER_THREAD_TLS_KEYWORD SamplingState global_next_sample = {0, 0};
#endif
HashtablezSampler& GlobalHashtablezSampler() {
static absl::NoDestructor<HashtablezSampler> sampler;
return *sampler;
}
HashtablezInfo::HashtablezInfo() = default;
HashtablezInfo::~HashtablezInfo() = default;
void HashtablezInfo::PrepareForSampling(int64_t stride,
size_t inline_element_size_value,
size_t key_size_value,
size_t value_size_value,
uint16_t soo_capacity_value) {
capacity.store(0, std::memory_order_relaxed);
size.store(0, std::memory_order_relaxed);
num_erases.store(0, std::memory_order_relaxed);
num_rehashes.store(0, std::memory_order_relaxed);
max_probe_length.store(0, std::memory_order_relaxed);
total_probe_length.store(0, std::memory_order_relaxed);
hashes_bitwise_or.store(0, std::memory_order_relaxed);
hashes_bitwise_and.store(~size_t{}, std::memory_order_relaxed);
hashes_bitwise_xor.store(0, std::memory_order_relaxed);
max_reserve.store(0, std::memory_order_relaxed);
create_time = absl::Now();
weight = stride;
  depth = absl::GetStackTrace(stack, HashtablezInfo::kMaxStackDepth,
                              /*skip_count=*/0);
inline_element_size = inline_element_size_value;
key_size = key_size_value;
value_size = value_size_value;
soo_capacity = soo_capacity_value;
}
static bool ShouldForceSampling() {
enum ForceState {
kDontForce,
kForce,
kUninitialized
};
ABSL_CONST_INIT static std::atomic<ForceState> global_state{
kUninitialized};
ForceState state = global_state.load(std::memory_order_relaxed);
if (ABSL_PREDICT_TRUE(state == kDontForce)) return false;
if (state == kUninitialized) {
state = ABSL_INTERNAL_C_SYMBOL(AbslContainerInternalSampleEverything)()
? kForce
: kDontForce;
global_state.store(state, std::memory_order_relaxed);
}
return state == kForce;
}
HashtablezInfo* SampleSlow(SamplingState& next_sample,
size_t inline_element_size, size_t key_size,
size_t value_size, uint16_t soo_capacity) {
if (ABSL_PREDICT_FALSE(ShouldForceSampling())) {
next_sample.next_sample = 1;
const int64_t old_stride = exchange(next_sample.sample_stride, 1);
HashtablezInfo* result = GlobalHashtablezSampler().Register(
old_stride, inline_element_size, key_size, value_size, soo_capacity);
return result;
}
#if !defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
next_sample = {
std::numeric_limits<int64_t>::max(),
std::numeric_limits<int64_t>::max(),
};
return nullptr;
#else
bool first = next_sample.next_sample < 0;
const int64_t next_stride = g_exponential_biased_generator.GetStride(
g_hashtablez_sample_parameter.load(std::memory_order_relaxed));
next_sample.next_sample = next_stride;
const int64_t old_stride = exchange(next_sample.sample_stride, next_stride);
ABSL_ASSERT(next_stride >= 1);
if (!g_hashtablez_enabled.load(std::memory_order_relaxed)) return nullptr;
if (first) {
if (ABSL_PREDICT_TRUE(--next_sample.next_sample > 0)) return nullptr;
return SampleSlow(next_sample, inline_element_size, key_size, value_size,
soo_capacity);
}
return GlobalHashtablezSampler().Register(old_stride, inline_element_size,
key_size, value_size, soo_capacity);
#endif
}
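// Editorial sketch (assumptions labelled): callers typically pair the
// per-thread countdown declared above with SampleSlow along these lines;
// `slot_type`, `key_type`, and `mapped_type` stand in for whatever the
// caller's table actually stores.
//
//   if (ABSL_PREDICT_FALSE(--global_next_sample.next_sample <= 0)) {
//     HashtablezInfo* info =
//         SampleSlow(global_next_sample, sizeof(slot_type), sizeof(key_type),
//                    sizeof(mapped_type), /*soo_capacity=*/0);
//     // ... wrap `info` in the caller's handle type ...
//   }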
void UnsampleSlow(HashtablezInfo* info) {
GlobalHashtablezSampler().Unregister(info);
}
void RecordRehashSlow(HashtablezInfo* info, size_t total_probe_length) {
#ifdef ABSL_INTERNAL_HAVE_SSE2
total_probe_length /= 16;
#else
total_probe_length /= 8;
#endif
info->total_probe_length.store(total_probe_length, std::memory_order_relaxed);
info->num_erases.store(0, std::memory_order_relaxed);
info->num_rehashes.store(
1 + info->num_rehashes.load(std::memory_order_relaxed),
std::memory_order_relaxed);
}
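// Editorial note: probe lengths are recorded in units of probe groups, not
// slots, which is why the raw count above is divided by the group width
// (16 slots with SSE2, 8 otherwise). For example, with SSE2 a total probe
// distance of 48 slots is stored as 3.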
void RecordReservationSlow(HashtablezInfo* info, size_t target_capacity) {
info->max_reserve.store(
(std::max)(info->max_reserve.load(std::memory_order_relaxed),
target_capacity),
std::memory_order_relaxed);
}
void RecordClearedReservationSlow(HashtablezInfo* info) {
info->max_reserve.store(0, std::memory_order_relaxed);
}
void RecordStorageChangedSlow(HashtablezInfo* info, size_t size,
size_t capacity) {
info->size.store(size, std::memory_order_relaxed);
info->capacity.store(capacity, std::memory_order_relaxed);
if (size == 0) {
info->total_probe_length.store(0, std::memory_order_relaxed);
info->num_erases.store(0, std::memory_order_relaxed);
}
}
void RecordInsertSlow(HashtablezInfo* info, size_t hash,
size_t distance_from_desired) {
size_t probe_length = distance_from_desired;
#ifdef ABSL_INTERNAL_HAVE_SSE2
probe_length /= 16;
#else
probe_length /= 8;
#endif
info->hashes_bitwise_and.fetch_and(hash, std::memory_order_relaxed);
info->hashes_bitwise_or.fetch_or(hash, std::memory_order_relaxed);
info->hashes_bitwise_xor.fetch_xor(hash, std::memory_order_relaxed);
info->max_probe_length.store(
std::max(info->max_probe_length.load(std::memory_order_relaxed),
probe_length),
std::memory_order_relaxed);
info->total_probe_length.fetch_add(probe_length, std::memory_order_relaxed);
info->size.fetch_add(1, std::memory_order_relaxed);
}
void RecordEraseSlow(HashtablezInfo* info) {
info->size.fetch_sub(1, std::memory_order_relaxed);
info->num_erases.store(1 + info->num_erases.load(std::memory_order_relaxed),
std::memory_order_relaxed);
}
void SetHashtablezConfigListener(HashtablezConfigListener l) {
g_hashtablez_config_listener.store(l, std::memory_order_release);
}
bool IsHashtablezEnabled() {
return g_hashtablez_enabled.load(std::memory_order_acquire);
}
void SetHashtablezEnabled(bool enabled) {
SetHashtablezEnabledInternal(enabled);
TriggerHashtablezConfigListener();
}
void SetHashtablezEnabledInternal(bool enabled) {
g_hashtablez_enabled.store(enabled, std::memory_order_release);
}
int32_t GetHashtablezSampleParameter() {
return g_hashtablez_sample_parameter.load(std::memory_order_acquire);
}
void SetHashtablezSampleParameter(int32_t rate) {
SetHashtablezSampleParameterInternal(rate);
TriggerHashtablezConfigListener();
}
void SetHashtablezSampleParameterInternal(int32_t rate) {
if (rate > 0) {
g_hashtablez_sample_parameter.store(rate, std::memory_order_release);
} else {
ABSL_RAW_LOG(ERROR, "Invalid hashtablez sample rate: %lld",
static_cast<long long>(rate));
}
}
size_t GetHashtablezMaxSamples() {
return GlobalHashtablezSampler().GetMaxSamples();
}
void SetHashtablezMaxSamples(size_t max) {
SetHashtablezMaxSamplesInternal(max);
TriggerHashtablezConfigListener();
}
void SetHashtablezMaxSamplesInternal(size_t max) {
if (max > 0) {
GlobalHashtablezSampler().SetMaxSamples(max);
} else {
ABSL_RAW_LOG(ERROR, "Invalid hashtablez max samples: 0");
}
}
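// Editorial usage sketch: a process that wants sampling enabled with a mean
// stride of 1024 and a cap on retained samples would call the public setters
// roughly as follows (the values are illustrative only):
//
//   absl::container_internal::SetHashtablezEnabled(true);
//   absl::container_internal::SetHashtablezSampleParameter(1 << 10);
//   absl::container_internal::SetHashtablezMaxSamples(1 << 20);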
}  // namespace container_internal
ABSL_NAMESPACE_END
}  // namespace absl
|
#include "absl/container/internal/hashtablez_sampler.h"
#include <atomic>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <limits>
#include <random>
#include <vector>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/base/attributes.h"
#include "absl/base/config.h"
#include "absl/profiling/internal/sample_recorder.h"
#include "absl/synchronization/blocking_counter.h"
#include "absl/synchronization/internal/thread_pool.h"
#include "absl/synchronization/mutex.h"
#include "absl/synchronization/notification.h"
#include "absl/time/clock.h"
#include "absl/time/time.h"
#ifdef ABSL_INTERNAL_HAVE_SSE2
constexpr int kProbeLength = 16;
#else
constexpr int kProbeLength = 8;
#endif
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace container_internal {
#if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
class HashtablezInfoHandlePeer {
public:
static HashtablezInfo* GetInfo(HashtablezInfoHandle* h) { return h->info_; }
};
#else
class HashtablezInfoHandlePeer {
public:
static HashtablezInfo* GetInfo(HashtablezInfoHandle*) { return nullptr; }
};
#endif
namespace {
using ::absl::synchronization_internal::ThreadPool;
using ::testing::IsEmpty;
using ::testing::UnorderedElementsAre;
std::vector<size_t> GetSizes(HashtablezSampler* s) {
std::vector<size_t> res;
s->Iterate([&](const HashtablezInfo& info) {
res.push_back(info.size.load(std::memory_order_acquire));
});
return res;
}
HashtablezInfo* Register(HashtablezSampler* s, size_t size) {
const int64_t test_stride = 123;
const size_t test_element_size = 17;
const size_t test_key_size = 3;
const size_t test_value_size = 5;
  auto* info = s->Register(test_stride, test_element_size, test_key_size,
                           test_value_size, /*soo_capacity=*/0);
assert(info != nullptr);
info->size.store(size);
return info;
}
TEST(HashtablezInfoTest, PrepareForSampling) {
absl::Time test_start = absl::Now();
const int64_t test_stride = 123;
const size_t test_element_size = 17;
const size_t test_key_size = 15;
const size_t test_value_size = 13;
HashtablezInfo info;
absl::MutexLock l(&info.init_mu);
  info.PrepareForSampling(test_stride, test_element_size, test_key_size,
                          test_value_size, /*soo_capacity=*/1);
EXPECT_EQ(info.capacity.load(), 0);
EXPECT_EQ(info.size.load(), 0);
EXPECT_EQ(info.num_erases.load(), 0);
EXPECT_EQ(info.num_rehashes.load(), 0);
EXPECT_EQ(info.max_probe_length.load(), 0);
EXPECT_EQ(info.total_probe_length.load(), 0);
EXPECT_EQ(info.hashes_bitwise_or.load(), 0);
EXPECT_EQ(info.hashes_bitwise_and.load(), ~size_t{});
EXPECT_EQ(info.hashes_bitwise_xor.load(), 0);
EXPECT_EQ(info.max_reserve.load(), 0);
EXPECT_GE(info.create_time, test_start);
EXPECT_EQ(info.weight, test_stride);
EXPECT_EQ(info.inline_element_size, test_element_size);
EXPECT_EQ(info.key_size, test_key_size);
EXPECT_EQ(info.value_size, test_value_size);
EXPECT_EQ(info.soo_capacity, 1);
info.capacity.store(1, std::memory_order_relaxed);
info.size.store(1, std::memory_order_relaxed);
info.num_erases.store(1, std::memory_order_relaxed);
info.max_probe_length.store(1, std::memory_order_relaxed);
info.total_probe_length.store(1, std::memory_order_relaxed);
info.hashes_bitwise_or.store(1, std::memory_order_relaxed);
info.hashes_bitwise_and.store(1, std::memory_order_relaxed);
info.hashes_bitwise_xor.store(1, std::memory_order_relaxed);
info.max_reserve.store(1, std::memory_order_relaxed);
info.create_time = test_start - absl::Hours(20);
  info.PrepareForSampling(test_stride * 2, test_element_size, test_key_size,
                          test_value_size, /*soo_capacity=*/0);
EXPECT_EQ(info.capacity.load(), 0);
EXPECT_EQ(info.size.load(), 0);
EXPECT_EQ(info.num_erases.load(), 0);
EXPECT_EQ(info.num_rehashes.load(), 0);
EXPECT_EQ(info.max_probe_length.load(), 0);
EXPECT_EQ(info.total_probe_length.load(), 0);
EXPECT_EQ(info.hashes_bitwise_or.load(), 0);
EXPECT_EQ(info.hashes_bitwise_and.load(), ~size_t{});
EXPECT_EQ(info.hashes_bitwise_xor.load(), 0);
EXPECT_EQ(info.max_reserve.load(), 0);
EXPECT_EQ(info.weight, 2 * test_stride);
EXPECT_EQ(info.inline_element_size, test_element_size);
EXPECT_EQ(info.key_size, test_key_size);
EXPECT_EQ(info.value_size, test_value_size);
EXPECT_GE(info.create_time, test_start);
EXPECT_EQ(info.soo_capacity, 0);
}
TEST(HashtablezInfoTest, RecordStorageChanged) {
HashtablezInfo info;
absl::MutexLock l(&info.init_mu);
const int64_t test_stride = 21;
const size_t test_element_size = 19;
const size_t test_key_size = 17;
const size_t test_value_size = 15;
  info.PrepareForSampling(test_stride, test_element_size, test_key_size,
                          test_value_size, /*soo_capacity=*/0);
RecordStorageChangedSlow(&info, 17, 47);
EXPECT_EQ(info.size.load(), 17);
EXPECT_EQ(info.capacity.load(), 47);
RecordStorageChangedSlow(&info, 20, 20);
EXPECT_EQ(info.size.load(), 20);
EXPECT_EQ(info.capacity.load(), 20);
}
TEST(HashtablezInfoTest, RecordInsert) {
HashtablezInfo info;
absl::MutexLock l(&info.init_mu);
const int64_t test_stride = 25;
const size_t test_element_size = 23;
const size_t test_key_size = 21;
const size_t test_value_size = 19;
  info.PrepareForSampling(test_stride, test_element_size, test_key_size,
                          test_value_size, /*soo_capacity=*/0);
EXPECT_EQ(info.max_probe_length.load(), 0);
RecordInsertSlow(&info, 0x0000FF00, 6 * kProbeLength);
EXPECT_EQ(info.max_probe_length.load(), 6);
EXPECT_EQ(info.hashes_bitwise_and.load(), 0x0000FF00);
EXPECT_EQ(info.hashes_bitwise_or.load(), 0x0000FF00);
EXPECT_EQ(info.hashes_bitwise_xor.load(), 0x0000FF00);
RecordInsertSlow(&info, 0x000FF000, 4 * kProbeLength);
EXPECT_EQ(info.max_probe_length.load(), 6);
EXPECT_EQ(info.hashes_bitwise_and.load(), 0x0000F000);
EXPECT_EQ(info.hashes_bitwise_or.load(), 0x000FFF00);
EXPECT_EQ(info.hashes_bitwise_xor.load(), 0x000F0F00);
RecordInsertSlow(&info, 0x00FF0000, 12 * kProbeLength);
EXPECT_EQ(info.max_probe_length.load(), 12);
EXPECT_EQ(info.hashes_bitwise_and.load(), 0x00000000);
EXPECT_EQ(info.hashes_bitwise_or.load(), 0x00FFFF00);
EXPECT_EQ(info.hashes_bitwise_xor.load(), 0x00F00F00);
}
TEST(HashtablezInfoTest, RecordErase) {
const int64_t test_stride = 31;
const size_t test_element_size = 29;
const size_t test_key_size = 27;
const size_t test_value_size = 25;
HashtablezInfo info;
absl::MutexLock l(&info.init_mu);
  info.PrepareForSampling(test_stride, test_element_size, test_key_size,
                          test_value_size, /*soo_capacity=*/1);
EXPECT_EQ(info.num_erases.load(), 0);
EXPECT_EQ(info.size.load(), 0);
RecordInsertSlow(&info, 0x0000FF00, 6 * kProbeLength);
EXPECT_EQ(info.size.load(), 1);
RecordEraseSlow(&info);
EXPECT_EQ(info.size.load(), 0);
EXPECT_EQ(info.num_erases.load(), 1);
EXPECT_EQ(info.inline_element_size, test_element_size);
EXPECT_EQ(info.key_size, test_key_size);
EXPECT_EQ(info.value_size, test_value_size);
EXPECT_EQ(info.soo_capacity, 1);
}
TEST(HashtablezInfoTest, RecordRehash) {
const int64_t test_stride = 33;
const size_t test_element_size = 31;
const size_t test_key_size = 29;
const size_t test_value_size = 27;
HashtablezInfo info;
absl::MutexLock l(&info.init_mu);
  info.PrepareForSampling(test_stride, test_element_size, test_key_size,
                          test_value_size, /*soo_capacity=*/0);
RecordInsertSlow(&info, 0x1, 0);
RecordInsertSlow(&info, 0x2, kProbeLength);
RecordInsertSlow(&info, 0x4, kProbeLength);
RecordInsertSlow(&info, 0x8, 2 * kProbeLength);
EXPECT_EQ(info.size.load(), 4);
EXPECT_EQ(info.total_probe_length.load(), 4);
RecordEraseSlow(&info);
RecordEraseSlow(&info);
EXPECT_EQ(info.size.load(), 2);
EXPECT_EQ(info.total_probe_length.load(), 4);
EXPECT_EQ(info.num_erases.load(), 2);
RecordRehashSlow(&info, 3 * kProbeLength);
EXPECT_EQ(info.size.load(), 2);
EXPECT_EQ(info.total_probe_length.load(), 3);
EXPECT_EQ(info.num_erases.load(), 0);
EXPECT_EQ(info.num_rehashes.load(), 1);
EXPECT_EQ(info.inline_element_size, test_element_size);
EXPECT_EQ(info.key_size, test_key_size);
EXPECT_EQ(info.value_size, test_value_size);
EXPECT_EQ(info.soo_capacity, 0);
}
TEST(HashtablezInfoTest, RecordReservation) {
HashtablezInfo info;
absl::MutexLock l(&info.init_mu);
const int64_t test_stride = 35;
const size_t test_element_size = 33;
const size_t test_key_size = 31;
const size_t test_value_size = 29;
  info.PrepareForSampling(test_stride, test_element_size, test_key_size,
                          test_value_size, /*soo_capacity=*/0);
RecordReservationSlow(&info, 3);
EXPECT_EQ(info.max_reserve.load(), 3);
RecordReservationSlow(&info, 2);
EXPECT_EQ(info.max_reserve.load(), 3);
RecordReservationSlow(&info, 10);
EXPECT_EQ(info.max_reserve.load(), 10);
}
#if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
TEST(HashtablezSamplerTest, SmallSampleParameter) {
const size_t test_element_size = 31;
const size_t test_key_size = 33;
const size_t test_value_size = 35;
SetHashtablezEnabled(true);
SetHashtablezSampleParameter(100);
for (int i = 0; i < 1000; ++i) {
SamplingState next_sample = {0, 0};
    HashtablezInfo* sample =
        SampleSlow(next_sample, test_element_size, test_key_size,
                   test_value_size, /*soo_capacity=*/0);
EXPECT_GT(next_sample.next_sample, 0);
EXPECT_EQ(next_sample.next_sample, next_sample.sample_stride);
EXPECT_NE(sample, nullptr);
UnsampleSlow(sample);
}
}
TEST(HashtablezSamplerTest, LargeSampleParameter) {
const size_t test_element_size = 31;
const size_t test_key_size = 33;
const size_t test_value_size = 35;
SetHashtablezEnabled(true);
SetHashtablezSampleParameter(std::numeric_limits<int32_t>::max());
for (int i = 0; i < 1000; ++i) {
SamplingState next_sample = {0, 0};
    HashtablezInfo* sample =
        SampleSlow(next_sample, test_element_size, test_key_size,
                   test_value_size, /*soo_capacity=*/0);
EXPECT_GT(next_sample.next_sample, 0);
EXPECT_EQ(next_sample.next_sample, next_sample.sample_stride);
EXPECT_NE(sample, nullptr);
UnsampleSlow(sample);
}
}
TEST(HashtablezSamplerTest, Sample) {
const size_t test_element_size = 31;
const size_t test_key_size = 33;
const size_t test_value_size = 35;
SetHashtablezEnabled(true);
SetHashtablezSampleParameter(100);
int64_t num_sampled = 0;
int64_t total = 0;
double sample_rate = 0.0;
for (int i = 0; i < 1000000; ++i) {
    HashtablezInfoHandle h = Sample(test_element_size, test_key_size,
                                    test_value_size, /*soo_capacity=*/0);
++total;
if (h.IsSampled()) {
++num_sampled;
}
sample_rate = static_cast<double>(num_sampled) / total;
if (0.005 < sample_rate && sample_rate < 0.015) break;
}
EXPECT_NEAR(sample_rate, 0.01, 0.005);
}
TEST(HashtablezSamplerTest, Handle) {
auto& sampler = GlobalHashtablezSampler();
const int64_t test_stride = 41;
const size_t test_element_size = 39;
const size_t test_key_size = 37;
const size_t test_value_size = 35;
  HashtablezInfoHandle h(sampler.Register(test_stride, test_element_size,
                                          test_key_size, test_value_size,
                                          /*soo_capacity=*/0));
auto* info = HashtablezInfoHandlePeer::GetInfo(&h);
info->hashes_bitwise_and.store(0x12345678, std::memory_order_relaxed);
bool found = false;
sampler.Iterate([&](const HashtablezInfo& h) {
if (&h == info) {
EXPECT_EQ(h.weight, test_stride);
EXPECT_EQ(h.hashes_bitwise_and.load(), 0x12345678);
found = true;
}
});
EXPECT_TRUE(found);
h.Unregister();
h = HashtablezInfoHandle();
found = false;
sampler.Iterate([&](const HashtablezInfo& h) {
if (&h == info) {
if (h.hashes_bitwise_and.load() == 0x12345678) {
found = true;
}
}
});
EXPECT_FALSE(found);
}
#endif
TEST(HashtablezSamplerTest, Registration) {
HashtablezSampler sampler;
auto* info1 = Register(&sampler, 1);
EXPECT_THAT(GetSizes(&sampler), UnorderedElementsAre(1));
auto* info2 = Register(&sampler, 2);
EXPECT_THAT(GetSizes(&sampler), UnorderedElementsAre(1, 2));
info1->size.store(3);
EXPECT_THAT(GetSizes(&sampler), UnorderedElementsAre(3, 2));
sampler.Unregister(info1);
sampler.Unregister(info2);
}
TEST(HashtablezSamplerTest, Unregistration) {
HashtablezSampler sampler;
std::vector<HashtablezInfo*> infos;
for (size_t i = 0; i < 3; ++i) {
infos.push_back(Register(&sampler, i));
}
EXPECT_THAT(GetSizes(&sampler), UnorderedElementsAre(0, 1, 2));
sampler.Unregister(infos[1]);
EXPECT_THAT(GetSizes(&sampler), UnorderedElementsAre(0, 2));
infos.push_back(Register(&sampler, 3));
infos.push_back(Register(&sampler, 4));
EXPECT_THAT(GetSizes(&sampler), UnorderedElementsAre(0, 2, 3, 4));
sampler.Unregister(infos[3]);
EXPECT_THAT(GetSizes(&sampler), UnorderedElementsAre(0, 2, 4));
sampler.Unregister(infos[0]);
sampler.Unregister(infos[2]);
sampler.Unregister(infos[4]);
EXPECT_THAT(GetSizes(&sampler), IsEmpty());
}
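// Illustrative sketch only: a single-entry round trip through the same
// Register/GetSizes helpers used by the tests above.
TEST(HashtablezSamplerTest, SingleEntryRoundTripSketch) {
  HashtablezSampler sampler;
  auto* info = Register(&sampler, 5);
  EXPECT_THAT(GetSizes(&sampler), UnorderedElementsAre(5));
  sampler.Unregister(info);
  EXPECT_THAT(GetSizes(&sampler), IsEmpty());
}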
TEST(HashtablezSamplerTest, MultiThreaded) {
HashtablezSampler sampler;
Notification stop;
ThreadPool pool(10);
for (int i = 0; i < 10; ++i) {
const int64_t sampling_stride = 11 + i % 3;
const size_t elt_size = 10 + i % 2;
const size_t key_size = 12 + i % 4;
const size_t value_size = 13 + i % 5;
pool.Schedule([&sampler, &stop, sampling_stride, elt_size, key_size,
value_size]() {
std::random_device rd;
std::mt19937 gen(rd());
std::vector<HashtablezInfo*> infoz;
while (!stop.HasBeenNotified()) {
if (infoz.empty()) {
          infoz.push_back(sampler.Register(sampling_stride, elt_size, key_size,
                                           value_size, /*soo_capacity=*/0));
}
switch (std::uniform_int_distribution<>(0, 2)(gen)) {
case 0: {
            infoz.push_back(sampler.Register(sampling_stride, elt_size,
                                             key_size, value_size,
                                             /*soo_capacity=*/0));
break;
}
case 1: {
size_t p =
std::uniform_int_distribution<>(0, infoz.size() - 1)(gen);
HashtablezInfo* info = infoz[p];
infoz[p] = infoz.back();
infoz.pop_back();
EXPECT_EQ(info->weight, sampling_stride);
sampler.Unregister(info);
break;
}
case 2: {
absl::Duration oldest = absl::ZeroDuration();
sampler.Iterate([&](const HashtablezInfo& info) {
oldest = std::max(oldest, absl::Now() - info.create_time);
});
ASSERT_GE(oldest, absl::ZeroDuration());
break;
}
}
}
});
}
absl::SleepFor(absl::Seconds(3));
stop.Notify();
}
TEST(HashtablezSamplerTest, Callback) {
HashtablezSampler sampler;
auto* info1 = Register(&sampler, 1);
auto* info2 = Register(&sampler, 2);
static const HashtablezInfo* expected;
auto callback = [](const HashtablezInfo& info) {
EXPECT_EQ(&info, expected);
};
EXPECT_EQ(sampler.SetDisposeCallback(callback), nullptr);
expected = info1;
sampler.Unregister(info1);
EXPECT_EQ(callback, sampler.SetDisposeCallback(nullptr));
expected = nullptr;
sampler.Unregister(info2);
}
}  // namespace
}  // namespace container_internal
ABSL_NAMESPACE_END
}  // namespace absl
| https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/container/internal/hashtablez_sampler.cc | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/container/internal/hashtablez_sampler_test.cc | 03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4
| be319639-b305-4ef3-ad85-bd35f5ecac40 | cpp | tensorflow/tensorflow | hlo_op_profiles | third_party/xla/xla/service/gpu/model/hlo_op_profiles.cc | third_party/xla/xla/service/gpu/model/hlo_op_profiles_test.cc |
#include "xla/service/gpu/model/hlo_op_profiles.h"
#include <memory>
#include <string>
#include <string_view>
#include <utility>
#include <variant>
#include "absl/memory/memory.h"
#include "absl/strings/str_cat.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/gpu/model/hlo_op_profile.pb.h"
#include "xla/service/gpu/model/hlo_op_profiles_data.h"
#include "xla/stream_executor/device_description.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/protobuf.h"
namespace xla {
namespace gpu {
const HloOpProfiles& HloOpProfiles::Singleton() {
  static const auto* hlo_op_profiles =
      HloOpProfiles::Load(kDeviceHloOpProfiles,
                          /*default_profile_name=*/"sm_86")
          .release();
return *hlo_op_profiles;
}
std::string HloOpProfiles::GetProfileName(
const se::DeviceDescription& device_info) {
if (auto* ptr = std::get_if<stream_executor::CudaComputeCapability>(
&device_info.gpu_compute_capability())) {
return absl::StrCat("sm_", ptr->major, ptr->minor);
}
return "<unknown>";
}
std::unique_ptr<HloOpProfiles> HloOpProfiles::Load(
std::string_view profiles_text_proto,
std::string_view default_profile_name) {
ProfilesNestedMap profiles_map;
DeviceHloInstructionProfiles all_device_profiles;
CHECK(tsl::protobuf::TextFormat::ParseFromString(
std::string(profiles_text_proto), &all_device_profiles));
for (const auto& device_profile : all_device_profiles.entries()) {
for (const auto& entry : device_profile.second.entries()) {
auto op_code = StringToHloOpcode(entry.instruction().opcode()).value();
auto element_type = entry.instruction().shape().element_type();
profiles_map[device_profile.first][std::make_pair(
op_code, element_type)] = entry.clock_cycles();
}
}
return absl::WrapUnique(
new HloOpProfiles(std::move(profiles_map), default_profile_name));
}
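// Editorial sketch of the text-proto shape the ParseFromString call above
// expects (it mirrors the fixture used by this file's unit test):
//
//   entries {
//     key: "sm_90"
//     value {
//       entries {
//         instruction { opcode: "divide" shape { element_type: F32 } }
//         clock_cycles: 32
//       }
//     }
//   }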
const HloOpProfiles::HloOpProfile& HloOpProfiles::GetProfile(
const se::DeviceDescription& device_info) const {
auto it = profiles_.find(GetProfileName(device_info));
if (it != profiles_.end()) return it->second;
return default_profile_;
}
}  // namespace gpu
}  // namespace xla
|
#include "xla/service/gpu/model/hlo_op_profiles.h"
#include <gtest/gtest.h>
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/gpu/gpu_device_info_for_tests.h"
#include "xla/stream_executor/device_description.h"
#include "xla/xla_data.pb.h"
namespace xla {
namespace gpu {
namespace {
constexpr char kDeviceHloOpProfiles[] = R"pb(
entries {
key: "sm_90"
value {
entries {
instruction {
opcode: "divide"
shape { element_type: F32 }
}
clock_cycles: 32
}
}
}
entries {
key: "sm_80"
value {
entries {
instruction {
opcode: "multiply"
shape { element_type: F32 }
}
clock_cycles: 64
}
}
}
)pb";
using HloOpProfilesTest = ::testing::Test;
TEST_F(HloOpProfilesTest, GetProfile) {
  auto hlo_op_profiles = HloOpProfiles::Load(
      kDeviceHloOpProfiles, /*default_profile_name=*/"sm_80");
auto device_info_sm_90 = TestGpuDeviceInfo::RTXA6000DeviceInfo(
stream_executor::CudaComputeCapability(9, 0));
const auto& op_profile = hlo_op_profiles->GetProfile(device_info_sm_90);
ASSERT_TRUE(op_profile.contains(
std::make_pair(HloOpcode::kDivide, PrimitiveType::F32)));
EXPECT_EQ(
op_profile.at(std::make_pair(HloOpcode::kDivide, PrimitiveType::F32)),
32);
}
TEST_F(HloOpProfilesTest, GetProfileDefault) {
  auto hlo_op_profiles = HloOpProfiles::Load(
      kDeviceHloOpProfiles, /*default_profile_name=*/"sm_80");
auto device_info_sm_85 = TestGpuDeviceInfo::RTXA6000DeviceInfo(
stream_executor::CudaComputeCapability(8, 5));
const auto& op_profile = hlo_op_profiles->GetProfile(device_info_sm_85);
ASSERT_TRUE(op_profile.contains(
std::make_pair(HloOpcode::kMultiply, PrimitiveType::F32)));
EXPECT_EQ(
op_profile.at(std::make_pair(HloOpcode::kMultiply, PrimitiveType::F32)),
64);
}
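// Illustrative sketch only: an exact compute-capability match selects that
// device's profile rather than the default, reusing the fixture above.
TEST_F(HloOpProfilesTest, GetProfileExactMatchSketch) {
  auto hlo_op_profiles = HloOpProfiles::Load(
      kDeviceHloOpProfiles, /*default_profile_name=*/"sm_90");
  auto device_info_sm_80 = TestGpuDeviceInfo::RTXA6000DeviceInfo(
      stream_executor::CudaComputeCapability(8, 0));
  const auto& op_profile = hlo_op_profiles->GetProfile(device_info_sm_80);
  EXPECT_TRUE(op_profile.contains(
      std::make_pair(HloOpcode::kMultiply, PrimitiveType::F32)));
}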
}  // namespace
}  // namespace gpu
}  // namespace xla
| https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/model/hlo_op_profiles.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/model/hlo_op_profiles_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
| f56698df-bac0-4617-9f71-d61e1f535cee | cpp | google/tensorstore | any_sender | tensorstore/util/execution/any_sender.h | tensorstore/util/execution/any_sender_test.cc |
#ifndef TENSORSTORE_UTIL_EXECUTION_ANY_SENDER_H_
#define TENSORSTORE_UTIL_EXECUTION_ANY_SENDER_H_
#include <utility>
#include "absl/base/attributes.h"
#include "tensorstore/internal/poly/poly.h"
#include "tensorstore/util/execution/any_receiver.h"
#include "tensorstore/util/execution/execution.h"
#include "tensorstore/util/execution/sender.h"
namespace tensorstore {
namespace internal_sender {
template <typename E, typename... V>
using SenderPoly =
poly::Poly<(sizeof(V) + ... + 0), false,
void(internal_execution::submit_t, AnyReceiver<E, V...>)>;
template <typename E, typename... V>
using FlowSenderPoly =
poly::Poly<(sizeof(V) + ... + 0), false,
void(internal_execution::submit_t, AnyFlowReceiver<E, V...>)>;
}  // namespace internal_sender
template <typename E, typename... V>
class AnySender : public internal_sender::SenderPoly<E, V...> {
using Base = internal_sender::SenderPoly<E, V...>;
public:
using Base::Base;
AnySender() : Base(NullSender{}) {}
ABSL_ATTRIBUTE_ALWAYS_INLINE void submit(AnyReceiver<E, V...> receiver) {
(*this)(internal_execution::submit_t{}, std::move(receiver));
}
};
template <typename E, typename... V>
class AnyFlowSender : public internal_sender::FlowSenderPoly<E, V...> {
using Base = internal_sender::FlowSenderPoly<E, V...>;
public:
using Base::Base;
AnyFlowSender() : Base(NullSender{}) {}
ABSL_ATTRIBUTE_ALWAYS_INLINE void submit(AnyFlowReceiver<E, V...> receiver) {
(*this)(internal_execution::submit_t{}, std::move(receiver));
}
};
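// Example usage (editorial sketch, mirroring this header's unit tests):
// type-erase any concrete sender and submit it to a compatible receiver.
// `MyReceiver` is a placeholder for any type convertible to
// AnyReceiver<int, int, std::string>.
//
//   tensorstore::AnySender<int, int, std::string> sender(
//       tensorstore::ValueSender<int, std::string>{3, "hello"});
//   tensorstore::execution::submit(std::move(sender), MyReceiver{});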
}  // namespace tensorstore
#endif  // TENSORSTORE_UTIL_EXECUTION_ANY_SENDER_H_
|
#include "tensorstore/util/execution/any_sender.h"
#include <string>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorstore/util/execution/execution.h"
#include "tensorstore/util/execution/sender.h"
#include "tensorstore/util/execution/sender_testutil.h"
#include "tensorstore/util/executor.h"
namespace {
TEST(AnySenderTest, Construct) {
tensorstore::AnySender<int, std::string> sender(tensorstore::CancelSender{});
}
TEST(AnySenderTest, Assignment) {
tensorstore::AnySender<int, std::string> sender;
sender = tensorstore::CancelSender{};
}
TEST(AnySenderTest, Submit) {
  tensorstore::AnySender<int, std::string> sender;  // defaults to NullSender
  tensorstore::execution::submit(std::move(sender),
                                 tensorstore::NullReceiver{});
}
TEST(AnySenderTest, CancelSender) {
std::vector<std::string> log;
tensorstore::execution::submit(
tensorstore::AnySender<int>(tensorstore::CancelSender{}),
tensorstore::LoggingReceiver{&log});
EXPECT_THAT(log, ::testing::ElementsAre("set_cancel"));
}
TEST(AnySenderTest, ErrorSender) {
std::vector<std::string> log;
tensorstore::execution::submit(
tensorstore::AnySender<int>(tensorstore::ErrorSender<int>{3}),
tensorstore::LoggingReceiver{&log});
EXPECT_THAT(log, ::testing::ElementsAre("set_error: 3"));
}
TEST(AnySenderTest, ValueSender) {
std::vector<std::string> log;
tensorstore::execution::submit(
tensorstore::AnySender<int, int, std::string>(
tensorstore::ValueSender<int, std::string>{3, "hello"}),
tensorstore::LoggingReceiver{&log});
EXPECT_THAT(log, ::testing::ElementsAre("set_value: 3, hello"));
}
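// Illustrative sketch only: a default-constructed AnySender holds a
// NullSender, so submitting it delivers no signal at all.
TEST(AnySenderTest, DefaultConstructedIsNullSketch) {
  std::vector<std::string> log;
  tensorstore::AnySender<int> sender;
  tensorstore::execution::submit(std::move(sender),
                                 tensorstore::LoggingReceiver{&log});
  EXPECT_THAT(log, ::testing::ElementsAre());
}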
template <typename Sender, typename Executor>
struct SenderWithExecutor {
Executor executor;
Sender sender;
template <typename Receiver>
void submit(Receiver receiver) {
struct Callback {
Sender sender;
Receiver receiver;
void operator()() {
tensorstore::execution::submit(sender, std::move(receiver));
}
};
executor(Callback{std::move(sender), std::move(receiver)});
}
};
struct QueueExecutor {
std::vector<tensorstore::ExecutorTask>* queue;
void operator()(tensorstore::ExecutorTask task) const {
queue->push_back(std::move(task));
}
};
TEST(AnySenderWithExecutor, SetValue) {
std::vector<tensorstore::ExecutorTask> queue;
std::vector<std::string> log;
QueueExecutor executor{&queue};
tensorstore::execution::submit(
tensorstore::AnySender<int, int, std::string>(
SenderWithExecutor<tensorstore::ValueSender<int, std::string>,
tensorstore::Executor>{executor, {3, "hello"}}),
tensorstore::LoggingReceiver{&log});
EXPECT_THAT(log, ::testing::ElementsAre());
EXPECT_EQ(1, queue.size());
std::move(queue[0])();
EXPECT_THAT(log, ::testing::ElementsAre("set_value: 3, hello"));
}
TEST(AnySenderWithExecutor, SetCancel) {
std::vector<tensorstore::ExecutorTask> queue;
std::vector<std::string> log;
QueueExecutor executor{&queue};
tensorstore::execution::submit(
tensorstore::AnySender<int>(
SenderWithExecutor<tensorstore::CancelSender, tensorstore::Executor>{
executor}),
tensorstore::LoggingReceiver{&log});
EXPECT_THAT(log, ::testing::ElementsAre());
EXPECT_EQ(1, queue.size());
std::move(queue[0])();
EXPECT_THAT(log, ::testing::ElementsAre("set_cancel"));
}
TEST(AnySenderWithExecutor, SetError) {
std::vector<tensorstore::ExecutorTask> queue;
std::vector<std::string> log;
QueueExecutor executor{&queue};
tensorstore::execution::submit(
tensorstore::AnySender<int>(
SenderWithExecutor<tensorstore::ErrorSender<int>,
tensorstore::Executor>{executor, {3}}),
tensorstore::LoggingReceiver{&log});
EXPECT_THAT(log, ::testing::ElementsAre());
EXPECT_EQ(1, queue.size());
std::move(queue[0])();
EXPECT_THAT(log, ::testing::ElementsAre("set_error: 3"));
}
TEST(AnyFlowSenderTest, Construct) {
tensorstore::AnyFlowSender<int, std::string> sender(
tensorstore::NullSender{});
}
TEST(AnyFlowSenderTest, Assignment) {
tensorstore::AnyFlowSender<int, std::string> sender;
sender = tensorstore::NullSender{};
}
TEST(AnyFlowSenderTest, Submit) {
tensorstore::AnyFlowSender<int, std::string> sender;
tensorstore::execution::submit(std::move(sender),
tensorstore::NullReceiver{});
}
TEST(AnyFlowSenderTest, ValueSender) {
std::vector<std::string> log;
tensorstore::AnyFlowSender<int, std::string> sender(
tensorstore::ValueSender("A"));
tensorstore::execution::submit(std::move(sender),
tensorstore::LoggingReceiver{&log});
EXPECT_THAT(log, ::testing::ElementsAre("set_value: A"));
}
TEST(AnyFlowSenderTest, ErrorSender) {
std::vector<std::string> log;
tensorstore::AnyFlowSender<int, std::string> sender(
tensorstore::ErrorSender<int>{4});
tensorstore::execution::submit(std::move(sender),
tensorstore::LoggingReceiver{&log});
EXPECT_THAT(log, ::testing::ElementsAre("set_error: 4"));
}
struct MySender {
template <typename Receiver>
void submit(Receiver receiver) {
tensorstore::execution::set_starting(receiver, []() {});
tensorstore::execution::set_value(receiver, "B");
tensorstore::execution::set_value(receiver, "C");
tensorstore::execution::set_done(receiver);
tensorstore::execution::set_stopping(receiver);
}
};
TEST(AnyFlowSenderTest, MySender) {
std::vector<std::string> log;
tensorstore::AnyFlowSender<int, std::string> sender(MySender{});
tensorstore::execution::submit(std::move(sender),
tensorstore::LoggingReceiver{&log});
EXPECT_THAT(
log, ::testing::ElementsAre("set_starting", "set_value: B",
"set_value: C", "set_done", "set_stopping"));
}
}  // namespace
| https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/util/execution/any_sender.h | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/util/execution/any_sender_test.cc | 4f887a6430414cd6088e1743555015b10f116d50
| f82ffd9e-8b2e-4ecd-b255-4dcf792fe58e | cpp | abseil/abseil-cpp | container | absl/algorithm/container.h | absl/algorithm/container_test.cc |
#ifndef ABSL_ALGORITHM_CONTAINER_H_
#define ABSL_ALGORITHM_CONTAINER_H_
#include <algorithm>
#include <cassert>
#include <iterator>
#include <numeric>
#include <random>
#include <type_traits>
#include <unordered_map>
#include <unordered_set>
#include <utility>
#include <vector>
#include "absl/algorithm/algorithm.h"
#include "absl/base/config.h"
#include "absl/base/macros.h"
#include "absl/base/nullability.h"
#include "absl/meta/type_traits.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace container_algorithm_internal {
using std::begin;
using std::end;
template <typename C>
using ContainerIter = decltype(begin(std::declval<C&>()));
template <typename C1, typename C2>
using ContainerIterPairType =
decltype(std::make_pair(ContainerIter<C1>(), ContainerIter<C2>()));
template <typename C>
using ContainerDifferenceType = decltype(std::distance(
std::declval<ContainerIter<C>>(), std::declval<ContainerIter<C>>()));
template <typename C>
using ContainerPointerType =
typename std::iterator_traits<ContainerIter<C>>::pointer;
template <typename C>
ABSL_INTERNAL_CONSTEXPR_SINCE_CXX17 ContainerIter<C> c_begin(C& c) {
return begin(c);
}
template <typename C>
ABSL_INTERNAL_CONSTEXPR_SINCE_CXX17 ContainerIter<C> c_end(C& c) {
return end(c);
}
template <typename T>
struct IsUnorderedContainer : std::false_type {};
template <class Key, class T, class Hash, class KeyEqual, class Allocator>
struct IsUnorderedContainer<
std::unordered_map<Key, T, Hash, KeyEqual, Allocator>> : std::true_type {};
template <class Key, class Hash, class KeyEqual, class Allocator>
struct IsUnorderedContainer<std::unordered_set<Key, Hash, KeyEqual, Allocator>>
: std::true_type {};
}  // namespace container_algorithm_internal
template <typename C, typename EqualityComparable>
ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 bool c_linear_search(
const C& c, EqualityComparable&& value) {
return absl::linear_search(container_algorithm_internal::c_begin(c),
container_algorithm_internal::c_end(c),
std::forward<EqualityComparable>(value));
}
template <typename C>
ABSL_INTERNAL_CONSTEXPR_SINCE_CXX17
container_algorithm_internal::ContainerDifferenceType<const C>
c_distance(const C& c) {
return std::distance(container_algorithm_internal::c_begin(c),
container_algorithm_internal::c_end(c));
}
template <typename C, typename Pred>
ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 bool c_all_of(const C& c, Pred&& pred) {
return std::all_of(container_algorithm_internal::c_begin(c),
container_algorithm_internal::c_end(c),
std::forward<Pred>(pred));
}
template <typename C, typename Pred>
ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 bool c_any_of(const C& c, Pred&& pred) {
return std::any_of(container_algorithm_internal::c_begin(c),
container_algorithm_internal::c_end(c),
std::forward<Pred>(pred));
}
template <typename C, typename Pred>
ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 bool c_none_of(const C& c, Pred&& pred) {
return std::none_of(container_algorithm_internal::c_begin(c),
container_algorithm_internal::c_end(c),
std::forward<Pred>(pred));
}
template <typename C, typename Function>
ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 decay_t<Function> c_for_each(C&& c,
Function&& f) {
return std::for_each(container_algorithm_internal::c_begin(c),
container_algorithm_internal::c_end(c),
std::forward<Function>(f));
}
template <typename C, typename T>
ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20
container_algorithm_internal::ContainerIter<C>
c_find(C& c, T&& value) {
return std::find(container_algorithm_internal::c_begin(c),
container_algorithm_internal::c_end(c),
std::forward<T>(value));
}
template <typename Sequence, typename T>
ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 bool c_contains(const Sequence& sequence,
T&& value) {
return absl::c_find(sequence, std::forward<T>(value)) !=
container_algorithm_internal::c_end(sequence);
}
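// Example (editorial sketch):
//
//   std::vector<int> v = {1, 2, 3};
//   assert(absl::c_contains(v, 2));
//   assert(!absl::c_contains(v, 4));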
template <typename C, typename Pred>
ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20
container_algorithm_internal::ContainerIter<C>
c_find_if(C& c, Pred&& pred) {
return std::find_if(container_algorithm_internal::c_begin(c),
container_algorithm_internal::c_end(c),
std::forward<Pred>(pred));
}
template <typename C, typename Pred>
ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20
container_algorithm_internal::ContainerIter<C>
c_find_if_not(C& c, Pred&& pred) {
return std::find_if_not(container_algorithm_internal::c_begin(c),
container_algorithm_internal::c_end(c),
std::forward<Pred>(pred));
}
template <typename Sequence1, typename Sequence2>
ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20
container_algorithm_internal::ContainerIter<Sequence1>
c_find_end(Sequence1& sequence, Sequence2& subsequence) {
return std::find_end(container_algorithm_internal::c_begin(sequence),
container_algorithm_internal::c_end(sequence),
container_algorithm_internal::c_begin(subsequence),
container_algorithm_internal::c_end(subsequence));
}
template <typename Sequence1, typename Sequence2, typename BinaryPredicate>
ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20
container_algorithm_internal::ContainerIter<Sequence1>
c_find_end(Sequence1& sequence, Sequence2& subsequence,
BinaryPredicate&& pred) {
return std::find_end(container_algorithm_internal::c_begin(sequence),
container_algorithm_internal::c_end(sequence),
container_algorithm_internal::c_begin(subsequence),
container_algorithm_internal::c_end(subsequence),
std::forward<BinaryPredicate>(pred));
}
template <typename C1, typename C2>
ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20
container_algorithm_internal::ContainerIter<C1>
c_find_first_of(C1& container, C2& options) {
return std::find_first_of(container_algorithm_internal::c_begin(container),
container_algorithm_internal::c_end(container),
container_algorithm_internal::c_begin(options),
container_algorithm_internal::c_end(options));
}
template <typename C1, typename C2, typename BinaryPredicate>
ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20
container_algorithm_internal::ContainerIter<C1>
c_find_first_of(C1& container, C2& options, BinaryPredicate&& pred) {
return std::find_first_of(container_algorithm_internal::c_begin(container),
container_algorithm_internal::c_end(container),
container_algorithm_internal::c_begin(options),
container_algorithm_internal::c_end(options),
std::forward<BinaryPredicate>(pred));
}
template <typename Sequence>
ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20
container_algorithm_internal::ContainerIter<Sequence>
c_adjacent_find(Sequence& sequence) {
return std::adjacent_find(container_algorithm_internal::c_begin(sequence),
container_algorithm_internal::c_end(sequence));
}
template <typename Sequence, typename BinaryPredicate>
ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20
container_algorithm_internal::ContainerIter<Sequence>
c_adjacent_find(Sequence& sequence, BinaryPredicate&& pred) {
return std::adjacent_find(container_algorithm_internal::c_begin(sequence),
container_algorithm_internal::c_end(sequence),
std::forward<BinaryPredicate>(pred));
}
template <typename C, typename T>
ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20
container_algorithm_internal::ContainerDifferenceType<const C>
c_count(const C& c, T&& value) {
return std::count(container_algorithm_internal::c_begin(c),
container_algorithm_internal::c_end(c),
std::forward<T>(value));
}
template <typename C, typename Pred>
ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20
container_algorithm_internal::ContainerDifferenceType<const C>
c_count_if(const C& c, Pred&& pred) {
return std::count_if(container_algorithm_internal::c_begin(c),
container_algorithm_internal::c_end(c),
std::forward<Pred>(pred));
}
template <typename C1, typename C2>
ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20
container_algorithm_internal::ContainerIterPairType<C1, C2>
c_mismatch(C1& c1, C2& c2) {
return std::mismatch(container_algorithm_internal::c_begin(c1),
container_algorithm_internal::c_end(c1),
container_algorithm_internal::c_begin(c2),
container_algorithm_internal::c_end(c2));
}
template <typename C1, typename C2, typename BinaryPredicate>
ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20
container_algorithm_internal::ContainerIterPairType<C1, C2>
c_mismatch(C1& c1, C2& c2, BinaryPredicate pred) {
return std::mismatch(container_algorithm_internal::c_begin(c1),
container_algorithm_internal::c_end(c1),
container_algorithm_internal::c_begin(c2),
container_algorithm_internal::c_end(c2), pred);
}
template <typename C1, typename C2>
ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 bool c_equal(const C1& c1, const C2& c2) {
return std::equal(container_algorithm_internal::c_begin(c1),
container_algorithm_internal::c_end(c1),
container_algorithm_internal::c_begin(c2),
container_algorithm_internal::c_end(c2));
}
template <typename C1, typename C2, typename BinaryPredicate>
ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 bool c_equal(const C1& c1, const C2& c2,
BinaryPredicate&& pred) {
return std::equal(container_algorithm_internal::c_begin(c1),
container_algorithm_internal::c_end(c1),
container_algorithm_internal::c_begin(c2),
container_algorithm_internal::c_end(c2),
std::forward<BinaryPredicate>(pred));
}
template <typename C1, typename C2>
ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 bool c_is_permutation(const C1& c1,
const C2& c2) {
return std::is_permutation(container_algorithm_internal::c_begin(c1),
container_algorithm_internal::c_end(c1),
container_algorithm_internal::c_begin(c2),
container_algorithm_internal::c_end(c2));
}
template <typename C1, typename C2, typename BinaryPredicate>
ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 bool c_is_permutation(
const C1& c1, const C2& c2, BinaryPredicate&& pred) {
return std::is_permutation(container_algorithm_internal::c_begin(c1),
container_algorithm_internal::c_end(c1),
container_algorithm_internal::c_begin(c2),
container_algorithm_internal::c_end(c2),
std::forward<BinaryPredicate>(pred));
}
template <typename Sequence1, typename Sequence2>
ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20
container_algorithm_internal::ContainerIter<Sequence1>
c_search(Sequence1& sequence, Sequence2& subsequence) {
return std::search(container_algorithm_internal::c_begin(sequence),
container_algorithm_internal::c_end(sequence),
container_algorithm_internal::c_begin(subsequence),
container_algorithm_internal::c_end(subsequence));
}
template <typename Sequence1, typename Sequence2, typename BinaryPredicate>
ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20
container_algorithm_internal::ContainerIter<Sequence1>
c_search(Sequence1& sequence, Sequence2& subsequence,
BinaryPredicate&& pred) {
return std::search(container_algorithm_internal::c_begin(sequence),
container_algorithm_internal::c_end(sequence),
container_algorithm_internal::c_begin(subsequence),
container_algorithm_internal::c_end(subsequence),
std::forward<BinaryPredicate>(pred));
}
template <typename Sequence1, typename Sequence2>
ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 bool c_contains_subrange(
Sequence1& sequence, Sequence2& subsequence) {
return absl::c_search(sequence, subsequence) !=
container_algorithm_internal::c_end(sequence);
}
template <typename Sequence1, typename Sequence2, typename BinaryPredicate>
ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 bool c_contains_subrange(
Sequence1& sequence, Sequence2& subsequence, BinaryPredicate&& pred) {
return absl::c_search(sequence, subsequence,
std::forward<BinaryPredicate>(pred)) !=
container_algorithm_internal::c_end(sequence);
}
template <typename Sequence, typename Size, typename T>
ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20
container_algorithm_internal::ContainerIter<Sequence>
c_search_n(Sequence& sequence, Size count, T&& value) {
return std::search_n(container_algorithm_internal::c_begin(sequence),
container_algorithm_internal::c_end(sequence), count,
std::forward<T>(value));
}
template <typename Sequence, typename Size, typename T,
typename BinaryPredicate>
ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20
container_algorithm_internal::ContainerIter<Sequence>
c_search_n(Sequence& sequence, Size count, T&& value,
BinaryPredicate&& pred) {
return std::search_n(container_algorithm_internal::c_begin(sequence),
container_algorithm_internal::c_end(sequence), count,
std::forward<T>(value),
std::forward<BinaryPredicate>(pred));
}
template <typename InputSequence, typename OutputIterator>
OutputIterator c_copy(const InputSequence& input, OutputIterator output) {
return std::copy(container_algorithm_internal::c_begin(input),
container_algorithm_internal::c_end(input), output);
}
template <typename C, typename Size, typename OutputIterator>
OutputIterator c_copy_n(const C& input, Size n, OutputIterator output) {
return std::copy_n(container_algorithm_internal::c_begin(input), n, output);
}
template <typename InputSequence, typename OutputIterator, typename Pred>
OutputIterator c_copy_if(const InputSequence& input, OutputIterator output,
Pred&& pred) {
return std::copy_if(container_algorithm_internal::c_begin(input),
container_algorithm_internal::c_end(input), output,
std::forward<Pred>(pred));
}
template <typename C, typename BidirectionalIterator>
BidirectionalIterator c_copy_backward(const C& src,
BidirectionalIterator dest) {
return std::copy_backward(container_algorithm_internal::c_begin(src),
container_algorithm_internal::c_end(src), dest);
}
template <typename C, typename OutputIterator>
OutputIterator c_move(C&& src, OutputIterator dest) {
return std::move(container_algorithm_internal::c_begin(src),
container_algorithm_internal::c_end(src), dest);
}
template <typename C, typename BidirectionalIterator>
BidirectionalIterator c_move_backward(C&& src, BidirectionalIterator dest) {
return std::move_backward(container_algorithm_internal::c_begin(src),
container_algorithm_internal::c_end(src), dest);
}
template <typename C1, typename C2>
container_algorithm_internal::ContainerIter<C2> c_swap_ranges(C1& c1, C2& c2) {
auto first1 = container_algorithm_internal::c_begin(c1);
auto last1 = container_algorithm_internal::c_end(c1);
auto first2 = container_algorithm_internal::c_begin(c2);
auto last2 = container_algorithm_internal::c_end(c2);
using std::swap;
for (; first1 != last1 && first2 != last2; ++first1, (void)++first2) {
swap(*first1, *first2);
}
return first2;
}
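// Editorial note: unlike std::swap_ranges, the loop above bounds *both*
// ranges, so only min(size(c1), size(c2)) elements are swapped:
//
//   std::vector<int> a = {1, 2, 3}, b = {4, 5};
//   absl::c_swap_ranges(a, b);  // a == {4, 5, 3}, b == {1, 2}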
template <typename InputSequence, typename OutputIterator, typename UnaryOp>
OutputIterator c_transform(const InputSequence& input, OutputIterator output,
UnaryOp&& unary_op) {
return std::transform(container_algorithm_internal::c_begin(input),
container_algorithm_internal::c_end(input), output,
std::forward<UnaryOp>(unary_op));
}
template <typename InputSequence1, typename InputSequence2,
typename OutputIterator, typename BinaryOp>
OutputIterator c_transform(const InputSequence1& input1,
const InputSequence2& input2, OutputIterator output,
BinaryOp&& binary_op) {
auto first1 = container_algorithm_internal::c_begin(input1);
auto last1 = container_algorithm_internal::c_end(input1);
auto first2 = container_algorithm_internal::c_begin(input2);
auto last2 = container_algorithm_internal::c_end(input2);
for (; first1 != last1 && first2 != last2;
++first1, (void)++first2, ++output) {
*output = binary_op(*first1, *first2);
}
return output;
}
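// Editorial note: as with c_swap_ranges above, this binary overload stops at
// the end of the shorter input, producing min(size1, size2) results:
//
//   std::vector<int> x = {1, 2, 3}, y = {10, 20}, out;
//   absl::c_transform(x, y, std::back_inserter(out), std::plus<>());
//   // out == {11, 22}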
template <typename Sequence, typename T>
void c_replace(Sequence& sequence, const T& old_value, const T& new_value) {
std::replace(container_algorithm_internal::c_begin(sequence),
container_algorithm_internal::c_end(sequence), old_value,
new_value);
}
template <typename C, typename Pred, typename T>
void c_replace_if(C& c, Pred&& pred, T&& new_value) {
std::replace_if(container_algorithm_internal::c_begin(c),
container_algorithm_internal::c_end(c),
std::forward<Pred>(pred), std::forward<T>(new_value));
}
template <typename C, typename OutputIterator, typename T>
OutputIterator c_replace_copy(const C& c, OutputIterator result, T&& old_value,
T&& new_value) {
return std::replace_copy(container_algorithm_internal::c_begin(c),
container_algorithm_internal::c_end(c), result,
std::forward<T>(old_value),
std::forward<T>(new_value));
}
template <typename C, typename OutputIterator, typename Pred, typename T>
OutputIterator c_replace_copy_if(const C& c, OutputIterator result, Pred&& pred,
const T& new_value) {
return std::replace_copy_if(container_algorithm_internal::c_begin(c),
container_algorithm_internal::c_end(c), result,
std::forward<Pred>(pred), new_value);
}
template <typename C, typename T>
void c_fill(C& c, const T& value) {
std::fill(container_algorithm_internal::c_begin(c),
container_algorithm_internal::c_end(c), value);
}
template <typename C, typename Size, typename T>
void c_fill_n(C& c, Size n, const T& value) {
std::fill_n(container_algorithm_internal::c_begin(c), n, value);
}
template <typename C, typename Generator>
void c_generate(C& c, Generator&& gen) {
std::generate(container_algorithm_internal::c_begin(c),
container_algorithm_internal::c_end(c),
std::forward<Generator>(gen));
}
template <typename C, typename Size, typename Generator>
container_algorithm_internal::ContainerIter<C> c_generate_n(C& c, Size n,
Generator&& gen) {
return std::generate_n(container_algorithm_internal::c_begin(c), n,
std::forward<Generator>(gen));
}
template <typename C, typename OutputIterator, typename T>
OutputIterator c_remove_copy(const C& c, OutputIterator result,
const T& value) {
return std::remove_copy(container_algorithm_internal::c_begin(c),
container_algorithm_internal::c_end(c), result,
value);
}
template <typename C, typename OutputIterator, typename Pred>
OutputIterator c_remove_copy_if(const C& c, OutputIterator result,
Pred&& pred) {
return std::remove_copy_if(container_algorithm_internal::c_begin(c),
container_algorithm_internal::c_end(c), result,
std::forward<Pred>(pred));
}
template <typename C, typename OutputIterator>
OutputIterator c_unique_copy(const C& c, OutputIterator result) {
return std::unique_copy(container_algorithm_internal::c_begin(c),
container_algorithm_internal::c_end(c), result);
}
template <typename C, typename OutputIterator, typename BinaryPredicate>
OutputIterator c_unique_copy(const C& c, OutputIterator result,
BinaryPredicate&& pred) {
return std::unique_copy(container_algorithm_internal::c_begin(c),
container_algorithm_internal::c_end(c), result,
std::forward<BinaryPredicate>(pred));
}
template <typename Sequence>
void c_reverse(Sequence& sequence) {
std::reverse(container_algorithm_internal::c_begin(sequence),
container_algorithm_internal::c_end(sequence));
}
template <typename C, typename OutputIterator>
OutputIterator c_reverse_copy(const C& sequence, OutputIterator result) {
return std::reverse_copy(container_algorithm_internal::c_begin(sequence),
container_algorithm_internal::c_end(sequence),
result);
}
template <typename C,
typename Iterator = container_algorithm_internal::ContainerIter<C>>
Iterator c_rotate(C& sequence, Iterator middle) {
return absl::rotate(container_algorithm_internal::c_begin(sequence), middle,
container_algorithm_internal::c_end(sequence));
}
template <typename C, typename OutputIterator>
OutputIterator c_rotate_copy(
const C& sequence,
container_algorithm_internal::ContainerIter<const C> middle,
OutputIterator result) {
return std::rotate_copy(container_algorithm_internal::c_begin(sequence),
middle, container_algorithm_internal::c_end(sequence),
result);
}
template <typename RandomAccessContainer, typename UniformRandomBitGenerator>
void c_shuffle(RandomAccessContainer& c, UniformRandomBitGenerator&& gen) {
std::shuffle(container_algorithm_internal::c_begin(c),
container_algorithm_internal::c_end(c),
std::forward<UniformRandomBitGenerator>(gen));
}
template <typename C, typename OutputIterator, typename Distance,
typename UniformRandomBitGenerator>
OutputIterator c_sample(const C& c, OutputIterator result, Distance n,
UniformRandomBitGenerator&& gen) {
#if defined(__cpp_lib_sample) && __cpp_lib_sample >= 201603L
return std::sample(container_algorithm_internal::c_begin(c),
container_algorithm_internal::c_end(c), result, n,
std::forward<UniformRandomBitGenerator>(gen));
#else
auto first = container_algorithm_internal::c_begin(c);
Distance unsampled_elements = c_distance(c);
n = (std::min)(n, unsampled_elements);
for (; n != 0; ++first) {
Distance r =
std::uniform_int_distribution<Distance>(0, --unsampled_elements)(gen);
if (r < n) {
*result++ = *first;
--n;
}
}
return result;
#endif
}
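// The fallback branch above is selection sampling (Knuth's Algorithm S): each
// element is kept with probability n / unsampled_elements, which yields a
// uniformly random n-subset in one forward pass and, like std::sample over
// forward iterators, preserves input order. Minimal sketch (illustrative):
//
//   std::vector<int> out;
//   absl::c_sample(std::vector<int>{1, 2, 3, 4, 5},
//                  std::back_inserter(out), 2, std::mt19937{});
//   // `out` holds 2 distinct input elements, in input order.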
template <typename C, typename Pred>
bool c_is_partitioned(const C& c, Pred&& pred) {
return std::is_partitioned(container_algorithm_internal::c_begin(c),
container_algorithm_internal::c_end(c),
std::forward<Pred>(pred));
}
template <typename C, typename Pred>
container_algorithm_internal::ContainerIter<C> c_partition(C& c, Pred&& pred) {
return std::partition(container_algorithm_internal::c_begin(c),
container_algorithm_internal::c_end(c),
std::forward<Pred>(pred));
}
template <typename C, typename Pred>
container_algorithm_internal::ContainerIter<C> c_stable_partition(C& c,
Pred&& pred) {
return std::stable_partition(container_algorithm_internal::c_begin(c),
container_algorithm_internal::c_end(c),
std::forward<Pred>(pred));
}
template <typename C, typename OutputIterator1, typename OutputIterator2,
typename Pred>
std::pair<OutputIterator1, OutputIterator2> c_partition_copy(
const C& c, OutputIterator1 out_true, OutputIterator2 out_false,
Pred&& pred) {
return std::partition_copy(container_algorithm_internal::c_begin(c),
container_algorithm_internal::c_end(c), out_true,
out_false, std::forward<Pred>(pred));
}
template <typename C, typename Pred>
container_algorithm_internal::ContainerIter<C> c_partition_point(C& c,
Pred&& pred) {
return std::partition_point(container_algorithm_internal::c_begin(c),
container_algorithm_internal::c_end(c),
std::forward<Pred>(pred));
}
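// Partition sketch (illustrative): c_partition_point assumes the range is
// already partitioned by `pred` and returns the first element for which
// `pred` is false.
//
//   std::vector<int> v = {1, 3, 5, 2, 4};  // odd elements first
//   auto mid = absl::c_partition_point(v, [](int x) { return x % 2 != 0; });
//   // *mid == 2, the first even element.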
template <typename C>
void c_sort(C& c) {
std::sort(container_algorithm_internal::c_begin(c),
container_algorithm_internal::c_end(c));
}
template <typename C, typename LessThan>
void c_sort(C& c, LessThan&& comp) {
std::sort(container_algorithm_internal::c_begin(c),
container_algorithm_internal::c_end(c),
std::forward<LessThan>(comp));
}
template <typename C>
void c_stable_sort(C& c) {
std::stable_sort(container_algorithm_internal::c_begin(c),
container_algorithm_internal::c_end(c));
}
template <typename C, typename LessThan>
void c_stable_sort(C& c, LessThan&& comp) {
std::stable_sort(container_algorithm_internal::c_begin(c),
container_algorithm_internal::c_end(c),
std::forward<LessThan>(comp));
}
template <typename C>
bool c_is_sorted(const C& c) {
return std::is_sorted(container_algorithm_internal::c_begin(c),
container_algorithm_internal::c_end(c));
}
template <typename C, typename LessThan>
bool c_is_sorted(const C& c, LessThan&& comp) {
return std::is_sorted(container_algorithm_internal::c_begin(c),
container_algorithm_internal::c_end(c),
std::forward<LessThan>(comp));
}
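// Sorting sketch (illustrative): the comparator overloads sort in the
// comparator's order, and c_stable_sort additionally preserves the relative
// order of equivalent elements.
//
//   std::vector<int> v = {2, 3, 1, 4};
//   absl::c_sort(v, std::greater<int>());  // v == {4, 3, 2, 1}
//   assert(absl::c_is_sorted(v, std::greater<int>()));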
template <typename RandomAccessContainer>
void c_partial_sort(
RandomAccessContainer& sequence,
container_algorithm_internal::ContainerIter<RandomAccessContainer> middle) {
std::partial_sort(container_algorithm_internal::c_begin(sequence), middle,
container_algorithm_internal::c_end(sequence));
}
template <typename RandomAccessContainer, typename LessThan>
void c_partial_sort(
RandomAccessContainer& sequence,
container_algorithm_internal::ContainerIter<RandomAccessContainer> middle,
LessThan&& comp) {
std::partial_sort(container_algorithm_internal::c_begin(sequence), middle,
container_algorithm_internal::c_end(sequence),
std::forward<LessThan>(comp));
}
template <typename C, typename RandomAccessContainer>
container_algorithm_internal::ContainerIter<RandomAccessContainer>
c_partial_sort_copy(const C& sequence, RandomAccessContainer& result) {
return std::partial_sort_copy(container_algorithm_internal::c_begin(sequence),
container_algorithm_internal::c_end(sequence),
container_algorithm_internal::c_begin(result),
container_algorithm_internal::c_end(result));
}
template <typename C, typename RandomAccessContainer, typename LessThan>
container_algorithm_internal::ContainerIter<RandomAccessContainer>
c_partial_sort_copy(const C& sequence, RandomAccessContainer& result,
LessThan&& comp) {
return std::partial_sort_copy(container_algorithm_internal::c_begin(sequence),
container_algorithm_internal::c_end(sequence),
container_algorithm_internal::c_begin(result),
container_algorithm_internal::c_end(result),
std::forward<LessThan>(comp));
}
template <typename C>
container_algorithm_internal::ContainerIter<C> c_is_sorted_until(C& c) {
return std::is_sorted_until(container_algorithm_internal::c_begin(c),
container_algorithm_internal::c_end(c));
}
template <typename C, typename LessThan>
container_algorithm_internal::ContainerIter<C> c_is_sorted_until(
C& c, LessThan&& comp) {
return std::is_sorted_until(container_algorithm_internal::c_begin(c),
container_algorithm_internal::c_end(c),
std::forward<LessThan>(comp));
}
template <typename RandomAccessContainer>
void c_nth_element(
RandomAccessContainer& sequence,
container_algorithm_internal::ContainerIter<RandomAccessContainer> nth) {
std::nth_element(container_algorithm_internal::c_begin(sequence), nth,
container_algorithm_internal::c_end(sequence));
}
template <typename RandomAccessContainer, typename LessThan>
void c_nth_element(
RandomAccessContainer& sequence,
container_algorithm_internal::ContainerIter<RandomAccessContainer> nth,
LessThan&& comp) {
std::nth_element(container_algorithm_internal::c_begin(sequence), nth,
container_algorithm_internal::c_end(sequence),
std::forward<LessThan>(comp));
}
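// nth_element sketch (illustrative): afterwards the element at `nth` is the
// one a full sort would place there; earlier elements compare no greater and
// later elements compare no less.
//
//   std::vector<int> v = {2, 4, 1, 3};
//   absl::c_nth_element(v, v.begin() + 2);
//   // v[2] == 3; v[0] and v[1] are {1, 2} in some order; v[3] == 4.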
template <typename Sequence, typename T>
container_algorithm_internal::ContainerIter<Sequence> c_lower_bound(
Sequence& sequence, const T& value) {
return std::lower_bound(container_algorithm_internal::c_begin(sequence),
container_algorithm_internal::c_end(sequence), value);
}
template <typename Sequence, typename T, typename LessThan>
container_algorithm_internal::ContainerIter<Sequence> c_lower_bound(
Sequence& sequence, const T& value, LessThan&& comp) {
return std::lower_bound(container_algorithm_internal::c_begin(sequence),
container_algorithm_internal::c_end(sequence), value,
std::forward<LessThan>(comp));
}
template <typename Sequence, typename T>
container_algorithm_internal::ContainerIter<Sequence> c_upper_bound(
Sequence& sequence, const T& value) {
return std::upper_bound(container_algorithm_internal::c_begin(sequence),
container_algorithm_internal::c_end(sequence), value);
}
template <typename Sequence, typename T, typename LessThan>
container_algorithm_internal::ContainerIter<Sequence> c_upper_bound(
Sequence& sequence, const T& value, LessThan&& comp) {
return std::upper_bound(container_algorithm_internal::c_begin(sequence),
container_algorithm_internal::c_end(sequence), value,
std::forward<LessThan>(comp));
}
template <typename Sequence, typename T>
container_algorithm_internal::ContainerIterPairType<Sequence, Sequence>
c_equal_range(Sequence& sequence, const T& value) {
return std::equal_range(container_algorithm_internal::c_begin(sequence),
container_algorithm_internal::c_end(sequence), value);
}
template <typename Sequence, typename T, typename LessThan>
container_algorithm_internal::ContainerIterPairType<Sequence, Sequence>
c_equal_range(Sequence& sequence, const T& value, LessThan&& comp) {
return std::equal_range(container_algorithm_internal::c_begin(sequence),
container_algorithm_internal::c_end(sequence), value,
std::forward<LessThan>(comp));
}
template <typename Sequence, typename T>
bool c_binary_search(const Sequence& sequence, const T& value) {
return std::binary_search(container_algorithm_internal::c_begin(sequence),
container_algorithm_internal::c_end(sequence),
value);
}
template <typename Sequence, typename T, typename LessThan>
bool c_binary_search(const Sequence& sequence, const T& value,
LessThan&& comp) {
return std::binary_search(container_algorithm_internal::c_begin(sequence),
container_algorithm_internal::c_end(sequence),
value, std::forward<LessThan>(comp));
}
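// The binary-search family above requires input partitioned with respect to
// `value` (sorted input suffices). Sketch (illustrative):
//
//   std::vector<int> v = {1, 2, 3};
//   bool found = absl::c_binary_search(v, 2);  // true
//   auto lo = absl::c_lower_bound(v, 2);       // points at the 2
//   auto range = absl::c_equal_range(v, 2);    // {lo, lo + 1}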
template <typename C1, typename C2, typename OutputIterator>
OutputIterator c_merge(const C1& c1, const C2& c2, OutputIterator result) {
return std::merge(container_algorithm_internal::c_begin(c1),
container_algorithm_internal::c_end(c1),
container_algorithm_internal::c_begin(c2),
container_algorithm_internal::c_end(c2), result);
}
template <typename C1, typename C2, typename OutputIterator, typename LessThan>
OutputIterator c_merge(const C1& c1, const C2& c2, OutputIterator result,
LessThan&& comp) {
return std::merge(container_algorithm_internal::c_begin(c1),
container_algorithm_internal::c_end(c1),
container_algorithm_internal::c_begin(c2),
container_algorithm_internal::c_end(c2), result,
std::forward<LessThan>(comp));
}
template <typename C>
void c_inplace_merge(C& c,
container_algorithm_internal::ContainerIter<C> middle) {
std::inplace_merge(container_algorithm_internal::c_begin(c), middle,
container_algorithm_internal::c_end(c));
}
template <typename C, typename LessThan>
void c_inplace_merge(C& c,
container_algorithm_internal::ContainerIter<C> middle,
LessThan&& comp) {
std::inplace_merge(container_algorithm_internal::c_begin(c), middle,
container_algorithm_internal::c_end(c),
std::forward<LessThan>(comp));
}
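// Merge sketch (illustrative): c_inplace_merge merges the consecutive sorted
// subranges [begin, middle) and [middle, end) of a single container.
//
//   std::vector<int> v = {1, 3, 5, 2, 4};
//   absl::c_inplace_merge(v, v.begin() + 3);  // v == {1, 2, 3, 4, 5}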
template <typename C1, typename C2>
bool c_includes(const C1& c1, const C2& c2) {
return std::includes(container_algorithm_internal::c_begin(c1),
container_algorithm_internal::c_end(c1),
container_algorithm_internal::c_begin(c2),
container_algorithm_internal::c_end(c2));
}
template <typename C1, typename C2, typename LessThan>
bool c_includes(const C1& c1, const C2& c2, LessThan&& comp) {
return std::includes(container_algorithm_internal::c_begin(c1),
container_algorithm_internal::c_end(c1),
container_algorithm_internal::c_begin(c2),
container_algorithm_internal::c_end(c2),
std::forward<LessThan>(comp));
}
template <typename C1, typename C2, typename OutputIterator,
typename = typename std::enable_if<
!container_algorithm_internal::IsUnorderedContainer<C1>::value,
void>::type,
typename = typename std::enable_if<
!container_algorithm_internal::IsUnorderedContainer<C2>::value,
void>::type>
OutputIterator c_set_union(const C1& c1, const C2& c2, OutputIterator output) {
return std::set_union(container_algorithm_internal::c_begin(c1),
container_algorithm_internal::c_end(c1),
container_algorithm_internal::c_begin(c2),
container_algorithm_internal::c_end(c2), output);
}
template <typename C1, typename C2, typename OutputIterator, typename LessThan,
typename = typename std::enable_if<
!container_algorithm_internal::IsUnorderedContainer<C1>::value,
void>::type,
typename = typename std::enable_if<
!container_algorithm_internal::IsUnorderedContainer<C2>::value,
void>::type>
OutputIterator c_set_union(const C1& c1, const C2& c2, OutputIterator output,
LessThan&& comp) {
return std::set_union(container_algorithm_internal::c_begin(c1),
container_algorithm_internal::c_end(c1),
container_algorithm_internal::c_begin(c2),
container_algorithm_internal::c_end(c2), output,
std::forward<LessThan>(comp));
}
template <typename C1, typename C2, typename OutputIterator,
typename = typename std::enable_if<
!container_algorithm_internal::IsUnorderedContainer<C1>::value,
void>::type,
typename = typename std::enable_if<
!container_algorithm_internal::IsUnorderedContainer<C2>::value,
void>::type>
OutputIterator c_set_intersection(const C1& c1, const C2& c2,
OutputIterator output) {
assert(absl::c_is_sorted(c1));
assert(absl::c_is_sorted(c2));
return std::set_intersection(container_algorithm_internal::c_begin(c1),
container_algorithm_internal::c_end(c1),
container_algorithm_internal::c_begin(c2),
container_algorithm_internal::c_end(c2), output);
}
template <typename C1, typename C2, typename OutputIterator, typename LessThan,
typename = typename std::enable_if<
!container_algorithm_internal::IsUnorderedContainer<C1>::value,
void>::type,
typename = typename std::enable_if<
!container_algorithm_internal::IsUnorderedContainer<C2>::value,
void>::type>
OutputIterator c_set_intersection(const C1& c1, const C2& c2,
OutputIterator output, LessThan&& comp) {
assert(absl::c_is_sorted(c1, comp));
assert(absl::c_is_sorted(c2, comp));
return std::set_intersection(container_algorithm_internal::c_begin(c1),
container_algorithm_internal::c_end(c1),
container_algorithm_internal::c_begin(c2),
container_algorithm_internal::c_end(c2), output,
std::forward<LessThan>(comp));
}
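// The set operations above require both inputs sorted (c_set_intersection
// asserts this in debug builds) and are disabled via enable_if for unordered
// containers, whose iteration order carries no meaning. Sketch (illustrative):
//
//   std::vector<int> a = {1, 2, 3}, b = {1, 3, 5}, out;
//   absl::c_set_intersection(a, b, std::back_inserter(out));  // out == {1, 3}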
template <typename C1, typename C2, typename OutputIterator,
typename = typename std::enable_if<
!container_algorithm_internal::IsUnorderedContainer<C1>::value,
void>::type,
typename = typename std::enable_if<
!container_algorithm_internal::IsUnorderedContainer<C2>::value,
void>::type>
OutputIterator c_set_difference(const C1& c1, const C2& c2,
OutputIterator output) {
return std::set_difference(container_algorithm_internal::c_begin(c1),
container_algorithm_internal::c_end(c1),
container_algorithm_internal::c_begin(c2),
container_algorithm_internal::c_end(c2), output);
}
template <typename C1, typename C2, typename OutputIterator, typename LessThan,
typename = typename std::enable_if<
!container_algorithm_internal::IsUnorderedContainer<C1>::value,
void>::type,
typename = typename std::enable_if<
!container_algorithm_internal::IsUnorderedContainer<C2>::value,
void>::type>
OutputIterator c_set_difference(const C1& c1, const C2& c2,
OutputIterator output, LessThan&& comp) {
return std::set_difference(container_algorithm_internal::c_begin(c1),
container_algorithm_internal::c_end(c1),
container_algorithm_internal::c_begin(c2),
container_algorithm_internal::c_end(c2), output,
std::forward<LessThan>(comp));
}
template <typename C1, typename C2, typename OutputIterator,
typename = typename std::enable_if<
!container_algorithm_internal::IsUnorderedContainer<C1>::value,
void>::type,
typename = typename std::enable_if<
!container_algorithm_internal::IsUnorderedContainer<C2>::value,
void>::type>
OutputIterator c_set_symmetric_difference(const C1& c1, const C2& c2,
OutputIterator output) {
return std::set_symmetric_difference(
container_algorithm_internal::c_begin(c1),
container_algorithm_internal::c_end(c1),
container_algorithm_internal::c_begin(c2),
container_algorithm_internal::c_end(c2), output);
}
template <typename C1, typename C2, typename OutputIterator, typename LessThan,
typename = typename std::enable_if<
!container_algorithm_internal::IsUnorderedContainer<C1>::value,
void>::type,
typename = typename std::enable_if<
!container_algorithm_internal::IsUnorderedContainer<C2>::value,
void>::type>
OutputIterator c_set_symmetric_difference(const C1& c1, const C2& c2,
OutputIterator output,
LessThan&& comp) {
return std::set_symmetric_difference(
container_algorithm_internal::c_begin(c1),
container_algorithm_internal::c_end(c1),
container_algorithm_internal::c_begin(c2),
container_algorithm_internal::c_end(c2), output,
std::forward<LessThan>(comp));
}
template <typename RandomAccessContainer>
void c_push_heap(RandomAccessContainer& sequence) {
std::push_heap(container_algorithm_internal::c_begin(sequence),
container_algorithm_internal::c_end(sequence));
}
template <typename RandomAccessContainer, typename LessThan>
void c_push_heap(RandomAccessContainer& sequence, LessThan&& comp) {
std::push_heap(container_algorithm_internal::c_begin(sequence),
container_algorithm_internal::c_end(sequence),
std::forward<LessThan>(comp));
}
template <typename RandomAccessContainer>
void c_pop_heap(RandomAccessContainer& sequence) {
std::pop_heap(container_algorithm_internal::c_begin(sequence),
container_algorithm_internal::c_end(sequence));
}
template <typename RandomAccessContainer, typename LessThan>
void c_pop_heap(RandomAccessContainer& sequence, LessThan&& comp) {
std::pop_heap(container_algorithm_internal::c_begin(sequence),
container_algorithm_internal::c_end(sequence),
std::forward<LessThan>(comp));
}
template <typename RandomAccessContainer>
void c_make_heap(RandomAccessContainer& sequence) {
std::make_heap(container_algorithm_internal::c_begin(sequence),
container_algorithm_internal::c_end(sequence));
}
template <typename RandomAccessContainer, typename LessThan>
void c_make_heap(RandomAccessContainer& sequence, LessThan&& comp) {
std::make_heap(container_algorithm_internal::c_begin(sequence),
container_algorithm_internal::c_end(sequence),
std::forward<LessThan>(comp));
}
template <typename RandomAccessContainer>
void c_sort_heap(RandomAccessContainer& sequence) {
std::sort_heap(container_algorithm_internal::c_begin(sequence),
container_algorithm_internal::c_end(sequence));
}
template <typename RandomAccessContainer, typename LessThan>
void c_sort_heap(RandomAccessContainer& sequence, LessThan&& comp) {
std::sort_heap(container_algorithm_internal::c_begin(sequence),
container_algorithm_internal::c_end(sequence),
std::forward<LessThan>(comp));
}
template <typename RandomAccessContainer>
bool c_is_heap(const RandomAccessContainer& sequence) {
return std::is_heap(container_algorithm_internal::c_begin(sequence),
container_algorithm_internal::c_end(sequence));
}
template <typename RandomAccessContainer, typename LessThan>
bool c_is_heap(const RandomAccessContainer& sequence, LessThan&& comp) {
return std::is_heap(container_algorithm_internal::c_begin(sequence),
container_algorithm_internal::c_end(sequence),
std::forward<LessThan>(comp));
}
template <typename RandomAccessContainer>
container_algorithm_internal::ContainerIter<RandomAccessContainer>
c_is_heap_until(RandomAccessContainer& sequence) {
return std::is_heap_until(container_algorithm_internal::c_begin(sequence),
container_algorithm_internal::c_end(sequence));
}
template <typename RandomAccessContainer, typename LessThan>
container_algorithm_internal::ContainerIter<RandomAccessContainer>
c_is_heap_until(RandomAccessContainer& sequence, LessThan&& comp) {
return std::is_heap_until(container_algorithm_internal::c_begin(sequence),
container_algorithm_internal::c_end(sequence),
std::forward<LessThan>(comp));
}
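// Heap sketch (illustrative): like their std counterparts, these functions
// maintain a max-heap by default.
//
//   std::vector<int> h = {1, 2, 3};
//   absl::c_make_heap(h);  // h.front() == 3
//   h.push_back(4);
//   absl::c_push_heap(h);  // h.front() == 4
//   absl::c_pop_heap(h);   // max moved to h.back()
//   h.pop_back();          // h is again a heap of {1, 2, 3}
//   absl::c_sort_heap(h);  // sorts ascending; the heap property is consumed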
template <typename Sequence>
ABSL_INTERNAL_CONSTEXPR_SINCE_CXX17
container_algorithm_internal::ContainerIter<Sequence>
c_min_element(Sequence& sequence) {
return std::min_element(container_algorithm_internal::c_begin(sequence),
container_algorithm_internal::c_end(sequence));
}
template <typename Sequence, typename LessThan>
ABSL_INTERNAL_CONSTEXPR_SINCE_CXX17
container_algorithm_internal::ContainerIter<Sequence>
c_min_element(Sequence& sequence, LessThan&& comp) {
return std::min_element(container_algorithm_internal::c_begin(sequence),
container_algorithm_internal::c_end(sequence),
std::forward<LessThan>(comp));
}
template <typename Sequence>
ABSL_INTERNAL_CONSTEXPR_SINCE_CXX17
container_algorithm_internal::ContainerIter<Sequence>
c_max_element(Sequence& sequence) {
return std::max_element(container_algorithm_internal::c_begin(sequence),
container_algorithm_internal::c_end(sequence));
}
template <typename Sequence, typename LessThan>
ABSL_INTERNAL_CONSTEXPR_SINCE_CXX17
container_algorithm_internal::ContainerIter<Sequence>
c_max_element(Sequence& sequence, LessThan&& comp) {
return std::max_element(container_algorithm_internal::c_begin(sequence),
container_algorithm_internal::c_end(sequence),
std::forward<LessThan>(comp));
}
template <typename C>
ABSL_INTERNAL_CONSTEXPR_SINCE_CXX17
container_algorithm_internal::ContainerIterPairType<C, C>
c_minmax_element(C& c) {
return std::minmax_element(container_algorithm_internal::c_begin(c),
container_algorithm_internal::c_end(c));
}
template <typename C, typename LessThan>
ABSL_INTERNAL_CONSTEXPR_SINCE_CXX17
container_algorithm_internal::ContainerIterPairType<C, C>
c_minmax_element(C& c, LessThan&& comp) {
return std::minmax_element(container_algorithm_internal::c_begin(c),
container_algorithm_internal::c_end(c),
std::forward<LessThan>(comp));
}
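// The min/max element functions above are usable in constant expressions from
// C++17 on (ABSL_INTERNAL_CONSTEXPR_SINCE_CXX17 expands to `constexpr` at
// such language levels). Sketch (illustrative):
//
//   constexpr std::array<int, 3> a = {1, 2, 3};
//   static_assert(*absl::c_min_element(a) == 1, "");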
template <typename Sequence1, typename Sequence2>
bool c_lexicographical_compare(const Sequence1& sequence1,
const Sequence2& sequence2) {
return std::lexicographical_compare(
container_algorithm_internal::c_begin(sequence1),
container_algorithm_internal::c_end(sequence1),
container_algorithm_internal::c_begin(sequence2),
container_algorithm_internal::c_end(sequence2));
}
template <typename Sequence1, typename Sequence2, typename LessThan>
bool c_lexicographical_compare(const Sequence1& sequence1,
const Sequence2& sequence2, LessThan&& comp) {
return std::lexicographical_compare(
container_algorithm_internal::c_begin(sequence1),
container_algorithm_internal::c_end(sequence1),
container_algorithm_internal::c_begin(sequence2),
container_algorithm_internal::c_end(sequence2),
std::forward<LessThan>(comp));
}
template <typename C>
bool c_next_permutation(C& c) {
return std::next_permutation(container_algorithm_internal::c_begin(c),
container_algorithm_internal::c_end(c));
}
template <typename C, typename LessThan>
bool c_next_permutation(C& c, LessThan&& comp) {
return std::next_permutation(container_algorithm_internal::c_begin(c),
container_algorithm_internal::c_end(c),
std::forward<LessThan>(comp));
}
template <typename C>
bool c_prev_permutation(C& c) {
return std::prev_permutation(container_algorithm_internal::c_begin(c),
container_algorithm_internal::c_end(c));
}
template <typename C, typename LessThan>
bool c_prev_permutation(C& c, LessThan&& comp) {
return std::prev_permutation(container_algorithm_internal::c_begin(c),
container_algorithm_internal::c_end(c),
std::forward<LessThan>(comp));
}
template <typename Sequence, typename T>
void c_iota(Sequence& sequence, const T& value) {
std::iota(container_algorithm_internal::c_begin(sequence),
container_algorithm_internal::c_end(sequence), value);
}
template <typename Sequence, typename T>
decay_t<T> c_accumulate(const Sequence& sequence, T&& init) {
return std::accumulate(container_algorithm_internal::c_begin(sequence),
container_algorithm_internal::c_end(sequence),
std::forward<T>(init));
}
template <typename Sequence, typename T, typename BinaryOp>
decay_t<T> c_accumulate(const Sequence& sequence, T&& init,
BinaryOp&& binary_op) {
return std::accumulate(container_algorithm_internal::c_begin(sequence),
container_algorithm_internal::c_end(sequence),
std::forward<T>(init),
std::forward<BinaryOp>(binary_op));
}
template <typename Sequence1, typename Sequence2, typename T>
decay_t<T> c_inner_product(const Sequence1& factors1, const Sequence2& factors2,
T&& sum) {
return std::inner_product(container_algorithm_internal::c_begin(factors1),
container_algorithm_internal::c_end(factors1),
container_algorithm_internal::c_begin(factors2),
std::forward<T>(sum));
}
template <typename Sequence1, typename Sequence2, typename T,
typename BinaryOp1, typename BinaryOp2>
decay_t<T> c_inner_product(const Sequence1& factors1, const Sequence2& factors2,
T&& sum, BinaryOp1&& op1, BinaryOp2&& op2) {
return std::inner_product(container_algorithm_internal::c_begin(factors1),
container_algorithm_internal::c_end(factors1),
container_algorithm_internal::c_begin(factors2),
std::forward<T>(sum), std::forward<BinaryOp1>(op1),
std::forward<BinaryOp2>(op2));
}
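// Note on c_inner_product: only factors1 supplies bounds, so factors2 must
// contain at least as many elements. In the two-op overload, op1 replaces the
// accumulating `+` and op2 replaces the element-wise `*`. Sketch
// (illustrative):
//
//   std::vector<int> a = {1, 2, 3}, b = {1, 2, 3};
//   int dot = absl::c_inner_product(a, b, 0);  // 0 + 1*1 + 2*2 + 3*3 == 14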
template <typename InputSequence, typename OutputIt>
OutputIt c_adjacent_difference(const InputSequence& input,
OutputIt output_first) {
return std::adjacent_difference(container_algorithm_internal::c_begin(input),
container_algorithm_internal::c_end(input),
output_first);
}
template <typename InputSequence, typename OutputIt, typename BinaryOp>
OutputIt c_adjacent_difference(const InputSequence& input,
OutputIt output_first, BinaryOp&& op) {
return std::adjacent_difference(container_algorithm_internal::c_begin(input),
container_algorithm_internal::c_end(input),
output_first, std::forward<BinaryOp>(op));
}
template <typename InputSequence, typename OutputIt>
OutputIt c_partial_sum(const InputSequence& input, OutputIt output_first) {
return std::partial_sum(container_algorithm_internal::c_begin(input),
container_algorithm_internal::c_end(input),
output_first);
}
template <typename InputSequence, typename OutputIt, typename BinaryOp>
OutputIt c_partial_sum(const InputSequence& input, OutputIt output_first,
BinaryOp&& op) {
return std::partial_sum(container_algorithm_internal::c_begin(input),
container_algorithm_internal::c_end(input),
output_first, std::forward<BinaryOp>(op));
}
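// Numeric-scan sketch (illustrative): c_partial_sum writes running folds of
// the input, and c_adjacent_difference writes the first element followed by
// the pairwise differences.
//
//   std::vector<int> in = {1, 2, 3}, out;
//   absl::c_partial_sum(in, std::back_inserter(out));  // out == {1, 3, 6}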
ABSL_NAMESPACE_END
}
#endif | #include "absl/algorithm/container.h"
#include <algorithm>
#include <array>
#include <functional>
#include <initializer_list>
#include <iterator>
#include <list>
#include <memory>
#include <ostream>
#include <random>
#include <set>
#include <unordered_set>
#include <utility>
#include <valarray>
#include <vector>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/base/casts.h"
#include "absl/base/config.h"
#include "absl/base/macros.h"
#include "absl/memory/memory.h"
#include "absl/types/span.h"
namespace {
using ::testing::Each;
using ::testing::ElementsAre;
using ::testing::Gt;
using ::testing::IsNull;
using ::testing::IsSubsetOf;
using ::testing::Lt;
using ::testing::Pointee;
using ::testing::SizeIs;
using ::testing::Truly;
using ::testing::UnorderedElementsAre;
class NonMutatingTest : public testing::Test {
protected:
std::unordered_set<int> container_ = {1, 2, 3};
std::list<int> sequence_ = {1, 2, 3};
std::vector<int> vector_ = {1, 2, 3};
int array_[3] = {1, 2, 3};
};
struct AccumulateCalls {
void operator()(int value) { calls.push_back(value); }
std::vector<int> calls;
};
bool Predicate(int value) { return value < 3; }
bool BinPredicate(int v1, int v2) { return v1 < v2; }
bool Equals(int v1, int v2) { return v1 == v2; }
bool IsOdd(int x) { return x % 2 != 0; }
TEST_F(NonMutatingTest, Distance) {
EXPECT_EQ(container_.size(),
static_cast<size_t>(absl::c_distance(container_)));
EXPECT_EQ(sequence_.size(), static_cast<size_t>(absl::c_distance(sequence_)));
EXPECT_EQ(vector_.size(), static_cast<size_t>(absl::c_distance(vector_)));
EXPECT_EQ(ABSL_ARRAYSIZE(array_),
static_cast<size_t>(absl::c_distance(array_)));
EXPECT_EQ(vector_.size(),
static_cast<size_t>(absl::c_distance(std::vector<int>(vector_))));
}
TEST_F(NonMutatingTest, Distance_OverloadedBeginEnd) {
std::initializer_list<int> a = {1, 2, 3};
std::valarray<int> b = {1, 2, 3};
EXPECT_EQ(3, absl::c_distance(a));
EXPECT_EQ(3, absl::c_distance(b));
}
TEST_F(NonMutatingTest, ForEach) {
AccumulateCalls c = absl::c_for_each(container_, AccumulateCalls());
std::sort(c.calls.begin(), c.calls.end());
EXPECT_EQ(vector_, c.calls);
AccumulateCalls c2 =
absl::c_for_each(std::unordered_set<int>(container_), AccumulateCalls());
std::sort(c2.calls.begin(), c2.calls.end());
EXPECT_EQ(vector_, c2.calls);
}
TEST_F(NonMutatingTest, FindReturnsCorrectType) {
auto it = absl::c_find(container_, 3);
EXPECT_EQ(3, *it);
absl::c_find(absl::implicit_cast<const std::list<int>&>(sequence_), 3);
}
TEST_F(NonMutatingTest, Contains) {
EXPECT_TRUE(absl::c_contains(container_, 3));
EXPECT_FALSE(absl::c_contains(container_, 4));
}
TEST_F(NonMutatingTest, FindIf) { absl::c_find_if(container_, Predicate); }
TEST_F(NonMutatingTest, FindIfNot) {
absl::c_find_if_not(container_, Predicate);
}
TEST_F(NonMutatingTest, FindEnd) {
absl::c_find_end(sequence_, vector_);
absl::c_find_end(vector_, sequence_);
}
TEST_F(NonMutatingTest, FindEndWithPredicate) {
absl::c_find_end(sequence_, vector_, BinPredicate);
absl::c_find_end(vector_, sequence_, BinPredicate);
}
TEST_F(NonMutatingTest, FindFirstOf) {
absl::c_find_first_of(container_, sequence_);
absl::c_find_first_of(sequence_, container_);
}
TEST_F(NonMutatingTest, FindFirstOfWithPredicate) {
absl::c_find_first_of(container_, sequence_, BinPredicate);
absl::c_find_first_of(sequence_, container_, BinPredicate);
}
TEST_F(NonMutatingTest, AdjacentFind) { absl::c_adjacent_find(sequence_); }
TEST_F(NonMutatingTest, AdjacentFindWithPredicate) {
absl::c_adjacent_find(sequence_, BinPredicate);
}
TEST_F(NonMutatingTest, Count) { EXPECT_EQ(1, absl::c_count(container_, 3)); }
TEST_F(NonMutatingTest, CountIf) {
EXPECT_EQ(2, absl::c_count_if(container_, Predicate));
const std::unordered_set<int>& const_container = container_;
EXPECT_EQ(2, absl::c_count_if(const_container, Predicate));
}
TEST_F(NonMutatingTest, Mismatch) {
{
auto result = absl::c_mismatch(vector_, sequence_);
EXPECT_EQ(result.first, vector_.end());
EXPECT_EQ(result.second, sequence_.end());
}
{
auto result = absl::c_mismatch(sequence_, vector_);
EXPECT_EQ(result.first, sequence_.end());
EXPECT_EQ(result.second, vector_.end());
}
sequence_.back() = 5;
{
auto result = absl::c_mismatch(vector_, sequence_);
EXPECT_EQ(result.first, std::prev(vector_.end()));
EXPECT_EQ(result.second, std::prev(sequence_.end()));
}
{
auto result = absl::c_mismatch(sequence_, vector_);
EXPECT_EQ(result.first, std::prev(sequence_.end()));
EXPECT_EQ(result.second, std::prev(vector_.end()));
}
sequence_.pop_back();
{
auto result = absl::c_mismatch(vector_, sequence_);
EXPECT_EQ(result.first, std::prev(vector_.end()));
EXPECT_EQ(result.second, sequence_.end());
}
{
auto result = absl::c_mismatch(sequence_, vector_);
EXPECT_EQ(result.first, sequence_.end());
EXPECT_EQ(result.second, std::prev(vector_.end()));
}
{
struct NoNotEquals {
constexpr bool operator==(NoNotEquals) const { return true; }
constexpr bool operator!=(NoNotEquals) const = delete;
};
std::vector<NoNotEquals> first;
std::list<NoNotEquals> second;
absl::c_mismatch(first, second);
}
}
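// The NoNotEquals block above verifies that c_mismatch compares elements with
// operator== rather than operator!=, so element types that delete operator!=
// still compile.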
TEST_F(NonMutatingTest, MismatchWithPredicate) {
{
auto result = absl::c_mismatch(vector_, sequence_, BinPredicate);
EXPECT_EQ(result.first, vector_.begin());
EXPECT_EQ(result.second, sequence_.begin());
}
{
auto result = absl::c_mismatch(sequence_, vector_, BinPredicate);
EXPECT_EQ(result.first, sequence_.begin());
EXPECT_EQ(result.second, vector_.begin());
}
sequence_.front() = 0;
{
auto result = absl::c_mismatch(vector_, sequence_, BinPredicate);
EXPECT_EQ(result.first, vector_.begin());
EXPECT_EQ(result.second, sequence_.begin());
}
{
auto result = absl::c_mismatch(sequence_, vector_, BinPredicate);
EXPECT_EQ(result.first, std::next(sequence_.begin()));
EXPECT_EQ(result.second, std::next(vector_.begin()));
}
sequence_.clear();
{
auto result = absl::c_mismatch(vector_, sequence_, BinPredicate);
EXPECT_EQ(result.first, vector_.begin());
EXPECT_EQ(result.second, sequence_.end());
}
{
auto result = absl::c_mismatch(sequence_, vector_, BinPredicate);
EXPECT_EQ(result.first, sequence_.end());
EXPECT_EQ(result.second, vector_.begin());
}
}
TEST_F(NonMutatingTest, Equal) {
EXPECT_TRUE(absl::c_equal(vector_, sequence_));
EXPECT_TRUE(absl::c_equal(sequence_, vector_));
EXPECT_TRUE(absl::c_equal(sequence_, array_));
EXPECT_TRUE(absl::c_equal(array_, vector_));
std::vector<int> vector_plus = {1, 2, 3};
vector_plus.push_back(4);
EXPECT_FALSE(absl::c_equal(vector_plus, sequence_));
EXPECT_FALSE(absl::c_equal(sequence_, vector_plus));
EXPECT_FALSE(absl::c_equal(array_, vector_plus));
}
TEST_F(NonMutatingTest, EqualWithPredicate) {
EXPECT_TRUE(absl::c_equal(vector_, sequence_, Equals));
EXPECT_TRUE(absl::c_equal(sequence_, vector_, Equals));
EXPECT_TRUE(absl::c_equal(array_, sequence_, Equals));
EXPECT_TRUE(absl::c_equal(vector_, array_, Equals));
std::vector<int> vector_plus = {1, 2, 3};
vector_plus.push_back(4);
EXPECT_FALSE(absl::c_equal(vector_plus, sequence_, Equals));
EXPECT_FALSE(absl::c_equal(sequence_, vector_plus, Equals));
EXPECT_FALSE(absl::c_equal(vector_plus, array_, Equals));
}
TEST_F(NonMutatingTest, IsPermutation) {
auto vector_permut_ = vector_;
std::next_permutation(vector_permut_.begin(), vector_permut_.end());
EXPECT_TRUE(absl::c_is_permutation(vector_permut_, sequence_));
EXPECT_TRUE(absl::c_is_permutation(sequence_, vector_permut_));
std::vector<int> vector_plus = {1, 2, 3};
vector_plus.push_back(4);
EXPECT_FALSE(absl::c_is_permutation(vector_plus, sequence_));
EXPECT_FALSE(absl::c_is_permutation(sequence_, vector_plus));
}
TEST_F(NonMutatingTest, IsPermutationWithPredicate) {
auto vector_permut_ = vector_;
std::next_permutation(vector_permut_.begin(), vector_permut_.end());
EXPECT_TRUE(absl::c_is_permutation(vector_permut_, sequence_, Equals));
EXPECT_TRUE(absl::c_is_permutation(sequence_, vector_permut_, Equals));
std::vector<int> vector_plus = {1, 2, 3};
vector_plus.push_back(4);
EXPECT_FALSE(absl::c_is_permutation(vector_plus, sequence_, Equals));
EXPECT_FALSE(absl::c_is_permutation(sequence_, vector_plus, Equals));
}
TEST_F(NonMutatingTest, Search) {
absl::c_search(sequence_, vector_);
absl::c_search(vector_, sequence_);
absl::c_search(array_, sequence_);
}
TEST_F(NonMutatingTest, SearchWithPredicate) {
absl::c_search(sequence_, vector_, BinPredicate);
absl::c_search(vector_, sequence_, BinPredicate);
}
TEST_F(NonMutatingTest, ContainsSubrange) {
EXPECT_TRUE(absl::c_contains_subrange(sequence_, vector_));
EXPECT_TRUE(absl::c_contains_subrange(vector_, sequence_));
EXPECT_TRUE(absl::c_contains_subrange(array_, sequence_));
}
TEST_F(NonMutatingTest, ContainsSubrangeWithPredicate) {
EXPECT_TRUE(absl::c_contains_subrange(sequence_, vector_, Equals));
EXPECT_TRUE(absl::c_contains_subrange(vector_, sequence_, Equals));
}
TEST_F(NonMutatingTest, SearchN) { absl::c_search_n(sequence_, 3, 1); }
TEST_F(NonMutatingTest, SearchNWithPredicate) {
absl::c_search_n(sequence_, 3, 1, BinPredicate);
}
TEST_F(NonMutatingTest, LowerBound) {
std::list<int>::iterator i = absl::c_lower_bound(sequence_, 3);
ASSERT_TRUE(i != sequence_.end());
EXPECT_EQ(2, std::distance(sequence_.begin(), i));
EXPECT_EQ(3, *i);
}
TEST_F(NonMutatingTest, LowerBoundWithPredicate) {
std::vector<int> v(vector_);
std::sort(v.begin(), v.end(), std::greater<int>());
std::vector<int>::iterator i = absl::c_lower_bound(v, 3, std::greater<int>());
EXPECT_TRUE(i == v.begin());
EXPECT_EQ(3, *i);
}
TEST_F(NonMutatingTest, UpperBound) {
std::list<int>::iterator i = absl::c_upper_bound(sequence_, 1);
ASSERT_TRUE(i != sequence_.end());
EXPECT_EQ(1, std::distance(sequence_.begin(), i));
EXPECT_EQ(2, *i);
}
TEST_F(NonMutatingTest, UpperBoundWithPredicate) {
std::vector<int> v(vector_);
std::sort(v.begin(), v.end(), std::greater<int>());
std::vector<int>::iterator i = absl::c_upper_bound(v, 1, std::greater<int>());
EXPECT_EQ(3, i - v.begin());
EXPECT_TRUE(i == v.end());
}
TEST_F(NonMutatingTest, EqualRange) {
std::pair<std::list<int>::iterator, std::list<int>::iterator> p =
absl::c_equal_range(sequence_, 2);
EXPECT_EQ(1, std::distance(sequence_.begin(), p.first));
EXPECT_EQ(2, std::distance(sequence_.begin(), p.second));
}
TEST_F(NonMutatingTest, EqualRangeArray) {
auto p = absl::c_equal_range(array_, 2);
EXPECT_EQ(1, std::distance(std::begin(array_), p.first));
EXPECT_EQ(2, std::distance(std::begin(array_), p.second));
}
TEST_F(NonMutatingTest, EqualRangeWithPredicate) {
std::vector<int> v(vector_);
std::sort(v.begin(), v.end(), std::greater<int>());
std::pair<std::vector<int>::iterator, std::vector<int>::iterator> p =
absl::c_equal_range(v, 2, std::greater<int>());
EXPECT_EQ(1, std::distance(v.begin(), p.first));
EXPECT_EQ(2, std::distance(v.begin(), p.second));
}
TEST_F(NonMutatingTest, BinarySearch) {
EXPECT_TRUE(absl::c_binary_search(vector_, 2));
EXPECT_TRUE(absl::c_binary_search(std::vector<int>(vector_), 2));
}
TEST_F(NonMutatingTest, BinarySearchWithPredicate) {
std::vector<int> v(vector_);
std::sort(v.begin(), v.end(), std::greater<int>());
EXPECT_TRUE(absl::c_binary_search(v, 2, std::greater<int>()));
EXPECT_TRUE(
absl::c_binary_search(std::vector<int>(v), 2, std::greater<int>()));
}
TEST_F(NonMutatingTest, MinElement) {
std::list<int>::iterator i = absl::c_min_element(sequence_);
ASSERT_TRUE(i != sequence_.end());
EXPECT_EQ(*i, 1);
}
TEST_F(NonMutatingTest, MinElementWithPredicate) {
std::list<int>::iterator i =
absl::c_min_element(sequence_, std::greater<int>());
ASSERT_TRUE(i != sequence_.end());
EXPECT_EQ(*i, 3);
}
TEST_F(NonMutatingTest, MaxElement) {
std::list<int>::iterator i = absl::c_max_element(sequence_);
ASSERT_TRUE(i != sequence_.end());
EXPECT_EQ(*i, 3);
}
TEST_F(NonMutatingTest, MaxElementWithPredicate) {
std::list<int>::iterator i =
absl::c_max_element(sequence_, std::greater<int>());
ASSERT_TRUE(i != sequence_.end());
EXPECT_EQ(*i, 1);
}
TEST_F(NonMutatingTest, LexicographicalCompare) {
EXPECT_FALSE(absl::c_lexicographical_compare(sequence_, sequence_));
std::vector<int> v;
v.push_back(1);
v.push_back(2);
v.push_back(4);
EXPECT_TRUE(absl::c_lexicographical_compare(sequence_, v));
EXPECT_TRUE(absl::c_lexicographical_compare(std::list<int>(sequence_), v));
}
TEST_F(NonMutatingTest, LexicographicalCompareWithPredicate) {
EXPECT_FALSE(absl::c_lexicographical_compare(sequence_, sequence_,
std::greater<int>()));
std::vector<int> v;
v.push_back(1);
v.push_back(2);
v.push_back(4);
EXPECT_TRUE(
absl::c_lexicographical_compare(v, sequence_, std::greater<int>()));
EXPECT_TRUE(absl::c_lexicographical_compare(
std::vector<int>(v), std::list<int>(sequence_), std::greater<int>()));
}
TEST_F(NonMutatingTest, Includes) {
std::set<int> s(vector_.begin(), vector_.end());
s.insert(4);
EXPECT_TRUE(absl::c_includes(s, vector_));
}
TEST_F(NonMutatingTest, IncludesWithPredicate) {
std::vector<int> v = {3, 2, 1};
std::set<int, std::greater<int>> s(v.begin(), v.end());
s.insert(4);
EXPECT_TRUE(absl::c_includes(s, v, std::greater<int>()));
}
class NumericMutatingTest : public testing::Test {
protected:
std::list<int> list_ = {1, 2, 3};
std::vector<int> output_;
};
TEST_F(NumericMutatingTest, Iota) {
absl::c_iota(list_, 5);
std::list<int> expected{5, 6, 7};
EXPECT_EQ(list_, expected);
}
TEST_F(NonMutatingTest, Accumulate) {
EXPECT_EQ(absl::c_accumulate(sequence_, 4), 1 + 2 + 3 + 4);
}
TEST_F(NonMutatingTest, AccumulateWithBinaryOp) {
EXPECT_EQ(absl::c_accumulate(sequence_, 4, std::multiplies<int>()),
1 * 2 * 3 * 4);
}
TEST_F(NonMutatingTest, AccumulateLvalueInit) {
int lvalue = 4;
EXPECT_EQ(absl::c_accumulate(sequence_, lvalue), 1 + 2 + 3 + 4);
}
TEST_F(NonMutatingTest, AccumulateWithBinaryOpLvalueInit) {
int lvalue = 4;
EXPECT_EQ(absl::c_accumulate(sequence_, lvalue, std::multiplies<int>()),
1 * 2 * 3 * 4);
}
TEST_F(NonMutatingTest, InnerProduct) {
EXPECT_EQ(absl::c_inner_product(sequence_, vector_, 1000),
1000 + 1 * 1 + 2 * 2 + 3 * 3);
}
TEST_F(NonMutatingTest, InnerProductWithBinaryOps) {
EXPECT_EQ(absl::c_inner_product(sequence_, vector_, 10,
std::multiplies<int>(), std::plus<int>()),
10 * (1 + 1) * (2 + 2) * (3 + 3));
}
TEST_F(NonMutatingTest, InnerProductLvalueInit) {
int lvalue = 1000;
EXPECT_EQ(absl::c_inner_product(sequence_, vector_, lvalue),
1000 + 1 * 1 + 2 * 2 + 3 * 3);
}
TEST_F(NonMutatingTest, InnerProductWithBinaryOpsLvalueInit) {
int lvalue = 10;
EXPECT_EQ(absl::c_inner_product(sequence_, vector_, lvalue,
std::multiplies<int>(), std::plus<int>()),
10 * (1 + 1) * (2 + 2) * (3 + 3));
}
TEST_F(NumericMutatingTest, AdjacentDifference) {
auto last = absl::c_adjacent_difference(list_, std::back_inserter(output_));
*last = 1000;
std::vector<int> expected{1, 2 - 1, 3 - 2, 1000};
EXPECT_EQ(output_, expected);
}
TEST_F(NumericMutatingTest, AdjacentDifferenceWithBinaryOp) {
auto last = absl::c_adjacent_difference(list_, std::back_inserter(output_),
std::multiplies<int>());
*last = 1000;
std::vector<int> expected{1, 2 * 1, 3 * 2, 1000};
EXPECT_EQ(output_, expected);
}
TEST_F(NumericMutatingTest, PartialSum) {
auto last = absl::c_partial_sum(list_, std::back_inserter(output_));
*last = 1000;
std::vector<int> expected{1, 1 + 2, 1 + 2 + 3, 1000};
EXPECT_EQ(output_, expected);
}
TEST_F(NumericMutatingTest, PartialSumWithBinaryOp) {
auto last = absl::c_partial_sum(list_, std::back_inserter(output_),
std::multiplies<int>());
*last = 1000;
std::vector<int> expected{1, 1 * 2, 1 * 2 * 3, 1000};
EXPECT_EQ(output_, expected);
}
TEST_F(NonMutatingTest, LinearSearch) {
EXPECT_TRUE(absl::c_linear_search(container_, 3));
EXPECT_FALSE(absl::c_linear_search(container_, 4));
}
TEST_F(NonMutatingTest, AllOf) {
const std::vector<int>& v = vector_;
EXPECT_FALSE(absl::c_all_of(v, [](int x) { return x > 1; }));
EXPECT_TRUE(absl::c_all_of(v, [](int x) { return x > 0; }));
}
TEST_F(NonMutatingTest, AnyOf) {
const std::vector<int>& v = vector_;
EXPECT_TRUE(absl::c_any_of(v, [](int x) { return x > 2; }));
EXPECT_FALSE(absl::c_any_of(v, [](int x) { return x > 5; }));
}
TEST_F(NonMutatingTest, NoneOf) {
const std::vector<int>& v = vector_;
EXPECT_FALSE(absl::c_none_of(v, [](int x) { return x > 2; }));
EXPECT_TRUE(absl::c_none_of(v, [](int x) { return x > 5; }));
}
TEST_F(NonMutatingTest, MinMaxElementLess) {
std::pair<std::vector<int>::const_iterator, std::vector<int>::const_iterator>
p = absl::c_minmax_element(vector_, std::less<int>());
EXPECT_TRUE(p.first == vector_.begin());
EXPECT_TRUE(p.second == vector_.begin() + 2);
}
TEST_F(NonMutatingTest, MinMaxElementGreater) {
std::pair<std::vector<int>::const_iterator, std::vector<int>::const_iterator>
p = absl::c_minmax_element(vector_, std::greater<int>());
EXPECT_TRUE(p.first == vector_.begin() + 2);
EXPECT_TRUE(p.second == vector_.begin());
}
TEST_F(NonMutatingTest, MinMaxElementNoPredicate) {
std::pair<std::vector<int>::const_iterator, std::vector<int>::const_iterator>
p = absl::c_minmax_element(vector_);
EXPECT_TRUE(p.first == vector_.begin());
EXPECT_TRUE(p.second == vector_.begin() + 2);
}
class SortingTest : public testing::Test {
protected:
std::list<int> sorted_ = {1, 2, 3, 4};
std::list<int> unsorted_ = {2, 4, 1, 3};
std::list<int> reversed_ = {4, 3, 2, 1};
};
TEST_F(SortingTest, IsSorted) {
EXPECT_TRUE(absl::c_is_sorted(sorted_));
EXPECT_FALSE(absl::c_is_sorted(unsorted_));
EXPECT_FALSE(absl::c_is_sorted(reversed_));
}
TEST_F(SortingTest, IsSortedWithPredicate) {
EXPECT_FALSE(absl::c_is_sorted(sorted_, std::greater<int>()));
EXPECT_FALSE(absl::c_is_sorted(unsorted_, std::greater<int>()));
EXPECT_TRUE(absl::c_is_sorted(reversed_, std::greater<int>()));
}
TEST_F(SortingTest, IsSortedUntil) {
EXPECT_EQ(1, *absl::c_is_sorted_until(unsorted_));
EXPECT_EQ(4, *absl::c_is_sorted_until(unsorted_, std::greater<int>()));
}
TEST_F(SortingTest, NthElement) {
std::vector<int> unsorted = {2, 4, 1, 3};
absl::c_nth_element(unsorted, unsorted.begin() + 2);
EXPECT_THAT(unsorted, ElementsAre(Lt(3), Lt(3), 3, Gt(3)));
absl::c_nth_element(unsorted, unsorted.begin() + 2, std::greater<int>());
EXPECT_THAT(unsorted, ElementsAre(Gt(2), Gt(2), 2, Lt(2)));
}
TEST(MutatingTest, IsPartitioned) {
EXPECT_TRUE(
absl::c_is_partitioned(std::vector<int>{1, 3, 5, 2, 4, 6}, IsOdd));
EXPECT_FALSE(
absl::c_is_partitioned(std::vector<int>{1, 2, 3, 4, 5, 6}, IsOdd));
EXPECT_FALSE(
absl::c_is_partitioned(std::vector<int>{2, 4, 6, 1, 3, 5}, IsOdd));
}
TEST(MutatingTest, Partition) {
std::vector<int> actual = {1, 2, 3, 4, 5};
absl::c_partition(actual, IsOdd);
EXPECT_THAT(actual, Truly([](const std::vector<int>& c) {
return absl::c_is_partitioned(c, IsOdd);
}));
}
TEST(MutatingTest, StablePartition) {
std::vector<int> actual = {1, 2, 3, 4, 5};
absl::c_stable_partition(actual, IsOdd);
EXPECT_THAT(actual, ElementsAre(1, 3, 5, 2, 4));
}
TEST(MutatingTest, PartitionCopy) {
const std::vector<int> initial = {1, 2, 3, 4, 5};
std::vector<int> odds, evens;
auto ends = absl::c_partition_copy(initial, back_inserter(odds),
back_inserter(evens), IsOdd);
*ends.first = 7;
*ends.second = 6;
EXPECT_THAT(odds, ElementsAre(1, 3, 5, 7));
EXPECT_THAT(evens, ElementsAre(2, 4, 6));
}
TEST(MutatingTest, PartitionPoint) {
const std::vector<int> initial = {1, 3, 5, 2, 4};
auto middle = absl::c_partition_point(initial, IsOdd);
EXPECT_EQ(2, *middle);
}
TEST(MutatingTest, CopyMiddle) {
const std::vector<int> initial = {4, -1, -2, -3, 5};
const std::list<int> input = {1, 2, 3};
const std::vector<int> expected = {4, 1, 2, 3, 5};
std::list<int> test_list(initial.begin(), initial.end());
absl::c_copy(input, ++test_list.begin());
EXPECT_EQ(std::list<int>(expected.begin(), expected.end()), test_list);
std::vector<int> test_vector = initial;
absl::c_copy(input, test_vector.begin() + 1);
EXPECT_EQ(expected, test_vector);
}
TEST(MutatingTest, CopyFrontInserter) {
const std::list<int> initial = {4, 5};
const std::list<int> input = {1, 2, 3};
const std::list<int> expected = {3, 2, 1, 4, 5};
std::list<int> test_list = initial;
absl::c_copy(input, std::front_inserter(test_list));
EXPECT_EQ(expected, test_list);
}
TEST(MutatingTest, CopyBackInserter) {
const std::vector<int> initial = {4, 5};
const std::list<int> input = {1, 2, 3};
const std::vector<int> expected = {4, 5, 1, 2, 3};
std::list<int> test_list(initial.begin(), initial.end());
absl::c_copy(input, std::back_inserter(test_list));
EXPECT_EQ(std::list<int>(expected.begin(), expected.end()), test_list);
std::vector<int> test_vector = initial;
absl::c_copy(input, std::back_inserter(test_vector));
EXPECT_EQ(expected, test_vector);
}
TEST(MutatingTest, CopyN) {
const std::vector<int> initial = {1, 2, 3, 4, 5};
const std::vector<int> expected = {1, 2};
std::vector<int> actual;
absl::c_copy_n(initial, 2, back_inserter(actual));
EXPECT_EQ(expected, actual);
}
TEST(MutatingTest, CopyIf) {
const std::list<int> input = {1, 2, 3};
std::vector<int> output;
absl::c_copy_if(input, std::back_inserter(output),
[](int i) { return i != 2; });
EXPECT_THAT(output, ElementsAre(1, 3));
}
TEST(MutatingTest, CopyBackward) {
std::vector<int> actual = {1, 2, 3, 4, 5};
std::vector<int> expected = {1, 2, 1, 2, 3};
absl::c_copy_backward(absl::MakeSpan(actual.data(), 3), actual.end());
EXPECT_EQ(expected, actual);
}
TEST(MutatingTest, Move) {
std::vector<std::unique_ptr<int>> src;
src.emplace_back(absl::make_unique<int>(1));
src.emplace_back(absl::make_unique<int>(2));
src.emplace_back(absl::make_unique<int>(3));
src.emplace_back(absl::make_unique<int>(4));
src.emplace_back(absl::make_unique<int>(5));
std::vector<std::unique_ptr<int>> dest = {};
absl::c_move(src, std::back_inserter(dest));
EXPECT_THAT(src, Each(IsNull()));
EXPECT_THAT(dest, ElementsAre(Pointee(1), Pointee(2), Pointee(3), Pointee(4),
Pointee(5)));
}
TEST(MutatingTest, MoveBackward) {
std::vector<std::unique_ptr<int>> actual;
actual.emplace_back(absl::make_unique<int>(1));
actual.emplace_back(absl::make_unique<int>(2));
actual.emplace_back(absl::make_unique<int>(3));
actual.emplace_back(absl::make_unique<int>(4));
actual.emplace_back(absl::make_unique<int>(5));
auto subrange = absl::MakeSpan(actual.data(), 3);
absl::c_move_backward(subrange, actual.end());
EXPECT_THAT(actual, ElementsAre(IsNull(), IsNull(), Pointee(1), Pointee(2),
Pointee(3)));
}
TEST(MutatingTest, MoveWithRvalue) {
auto MakeRValueSrc = [] {
std::vector<std::unique_ptr<int>> src;
src.emplace_back(absl::make_unique<int>(1));
src.emplace_back(absl::make_unique<int>(2));
src.emplace_back(absl::make_unique<int>(3));
return src;
};
std::vector<std::unique_ptr<int>> dest = MakeRValueSrc();
absl::c_move(MakeRValueSrc(), std::back_inserter(dest));
EXPECT_THAT(dest, ElementsAre(Pointee(1), Pointee(2), Pointee(3), Pointee(1),
Pointee(2), Pointee(3)));
}
TEST(MutatingTest, SwapRanges) {
std::vector<int> odds = {2, 4, 6};
std::vector<int> evens = {1, 3, 5};
absl::c_swap_ranges(odds, evens);
EXPECT_THAT(odds, ElementsAre(1, 3, 5));
EXPECT_THAT(evens, ElementsAre(2, 4, 6));
odds.pop_back();
absl::c_swap_ranges(odds, evens);
EXPECT_THAT(odds, ElementsAre(2, 4));
EXPECT_THAT(evens, ElementsAre(1, 3, 6));
absl::c_swap_ranges(evens, odds);
EXPECT_THAT(odds, ElementsAre(1, 3));
EXPECT_THAT(evens, ElementsAre(2, 4, 6));
}
TEST_F(NonMutatingTest, Transform) {
std::vector<int> x{0, 2, 4}, y, z;
auto end = absl::c_transform(x, back_inserter(y), std::negate<int>());
EXPECT_EQ(std::vector<int>({0, -2, -4}), y);
*end = 7;
EXPECT_EQ(std::vector<int>({0, -2, -4, 7}), y);
y = {1, 3, 0};
end = absl::c_transform(x, y, back_inserter(z), std::plus<int>());
EXPECT_EQ(std::vector<int>({1, 5, 4}), z);
*end = 7;
EXPECT_EQ(std::vector<int>({1, 5, 4, 7}), z);
z.clear();
y.pop_back();
end = absl::c_transform(x, y, std::back_inserter(z), std::plus<int>());
EXPECT_EQ(std::vector<int>({1, 5}), z);
*end = 7;
EXPECT_EQ(std::vector<int>({1, 5, 7}), z);
z.clear();
std::swap(x, y);
end = absl::c_transform(x, y, std::back_inserter(z), std::plus<int>());
EXPECT_EQ(std::vector<int>({1, 5}), z);
*end = 7;
EXPECT_EQ(std::vector<int>({1, 5, 7}), z);
}
TEST(MutatingTest, Replace) {
const std::vector<int> initial = {1, 2, 3, 1, 4, 5};
const std::vector<int> expected = {4, 2, 3, 4, 4, 5};
std::vector<int> test_vector = initial;
absl::c_replace(test_vector, 1, 4);
EXPECT_EQ(expected, test_vector);
std::list<int> test_list(initial.begin(), initial.end());
absl::c_replace(test_list, 1, 4);
EXPECT_EQ(std::list<int>(expected.begin(), expected.end()), test_list);
}
TEST(MutatingTest, ReplaceIf) {
std::vector<int> actual = {1, 2, 3, 4, 5};
const std::vector<int> expected = {0, 2, 0, 4, 0};
absl::c_replace_if(actual, IsOdd, 0);
EXPECT_EQ(expected, actual);
}
TEST(MutatingTest, ReplaceCopy) {
const std::vector<int> initial = {1, 2, 3, 1, 4, 5};
const std::vector<int> expected = {4, 2, 3, 4, 4, 5};
std::vector<int> actual;
absl::c_replace_copy(initial, back_inserter(actual), 1, 4);
EXPECT_EQ(expected, actual);
}
TEST(MutatingTest, Sort) {
std::vector<int> test_vector = {2, 3, 1, 4};
absl::c_sort(test_vector);
EXPECT_THAT(test_vector, ElementsAre(1, 2, 3, 4));
}
TEST(MutatingTest, SortWithPredicate) {
std::vector<int> test_vector = {2, 3, 1, 4};
absl::c_sort(test_vector, std::greater<int>());
EXPECT_THAT(test_vector, ElementsAre(4, 3, 2, 1));
}
struct Element {
int key;
int value;
friend bool operator<(const Element& e1, const Element& e2) {
return e1.key < e2.key;
}
friend std::ostream& operator<<(std::ostream& o, const Element& e) {
return o << "{" << e.key << ", " << e.value << "}";
}
};
MATCHER_P2(IsElement, key, value, "") {
return arg.key == key && arg.value == value;
}
TEST(MutatingTest, StableSort) {
std::vector<Element> test_vector = {{1, 1}, {2, 1}, {2, 0}, {1, 0}, {2, 2}};
absl::c_stable_sort(test_vector);
EXPECT_THAT(test_vector,
ElementsAre(IsElement(1, 1), IsElement(1, 0), IsElement(2, 1),
IsElement(2, 0), IsElement(2, 2)));
}
TEST(MutatingTest, StableSortWithPredicate) {
std::vector<Element> test_vector = {{1, 1}, {2, 1}, {2, 0}, {1, 0}, {2, 2}};
absl::c_stable_sort(test_vector, [](const Element& e1, const Element& e2) {
return e2 < e1;
});
EXPECT_THAT(test_vector,
ElementsAre(IsElement(2, 1), IsElement(2, 0), IsElement(2, 2),
IsElement(1, 1), IsElement(1, 0)));
}
TEST(MutatingTest, ReplaceCopyIf) {
const std::vector<int> initial = {1, 2, 3, 4, 5};
const std::vector<int> expected = {0, 2, 0, 4, 0};
std::vector<int> actual;
absl::c_replace_copy_if(initial, back_inserter(actual), IsOdd, 0);
EXPECT_EQ(expected, actual);
}
TEST(MutatingTest, Fill) {
std::vector<int> actual(5);
absl::c_fill(actual, 1);
EXPECT_THAT(actual, ElementsAre(1, 1, 1, 1, 1));
}
TEST(MutatingTest, FillN) {
std::vector<int> actual(5, 0);
absl::c_fill_n(actual, 2, 1);
EXPECT_THAT(actual, ElementsAre(1, 1, 0, 0, 0));
}
TEST(MutatingTest, Generate) {
std::vector<int> actual(5);
int x = 0;
absl::c_generate(actual, [&x]() { return ++x; });
EXPECT_THAT(actual, ElementsAre(1, 2, 3, 4, 5));
}
TEST(MutatingTest, GenerateN) {
std::vector<int> actual(5, 0);
int x = 0;
absl::c_generate_n(actual, 3, [&x]() { return ++x; });
EXPECT_THAT(actual, ElementsAre(1, 2, 3, 0, 0));
}
TEST(MutatingTest, RemoveCopy) {
std::vector<int> actual;
absl::c_remove_copy(std::vector<int>{1, 2, 3}, back_inserter(actual), 2);
EXPECT_THAT(actual, ElementsAre(1, 3));
}
TEST(MutatingTest, RemoveCopyIf) {
std::vector<int> actual;
absl::c_remove_copy_if(std::vector<int>{1, 2, 3}, back_inserter(actual),
IsOdd);
EXPECT_THAT(actual, ElementsAre(2));
}
TEST(MutatingTest, UniqueCopy) {
std::vector<int> actual;
absl::c_unique_copy(std::vector<int>{1, 2, 2, 2, 3, 3, 2},
back_inserter(actual));
EXPECT_THAT(actual, ElementsAre(1, 2, 3, 2));
}
TEST(MutatingTest, UniqueCopyWithPredicate) {
std::vector<int> actual;
absl::c_unique_copy(std::vector<int>{1, 2, 3, -1, -2, -3, 1},
back_inserter(actual),
[](int x, int y) { return (x < 0) == (y < 0); });
EXPECT_THAT(actual, ElementsAre(1, -1, 1));
}
TEST(MutatingTest, Reverse) {
std::vector<int> test_vector = {1, 2, 3, 4};
absl::c_reverse(test_vector);
EXPECT_THAT(test_vector, ElementsAre(4, 3, 2, 1));
std::list<int> test_list = {1, 2, 3, 4};
absl::c_reverse(test_list);
EXPECT_THAT(test_list, ElementsAre(4, 3, 2, 1));
}
TEST(MutatingTest, ReverseCopy) {
std::vector<int> actual;
absl::c_reverse_copy(std::vector<int>{1, 2, 3, 4}, back_inserter(actual));
EXPECT_THAT(actual, ElementsAre(4, 3, 2, 1));
}
TEST(MutatingTest, Rotate) {
std::vector<int> actual = {1, 2, 3, 4};
auto it = absl::c_rotate(actual, actual.begin() + 2);
EXPECT_THAT(actual, testing::ElementsAreArray({3, 4, 1, 2}));
EXPECT_EQ(*it, 1);
}
TEST(MutatingTest, RotateCopy) {
std::vector<int> initial = {1, 2, 3, 4};
std::vector<int> actual;
auto end =
absl::c_rotate_copy(initial, initial.begin() + 2, back_inserter(actual));
*end = 5;
EXPECT_THAT(actual, ElementsAre(3, 4, 1, 2, 5));
}
template <typename T>
T RandomlySeededPrng() {
std::random_device rdev;
std::seed_seq::result_type data[T::state_size];
std::generate_n(data, T::state_size, std::ref(rdev));
std::seed_seq prng_seed(data, data + T::state_size);
return T(prng_seed);
}
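// RandomlySeededPrng seeds the engine with state_size words of entropy from
// std::random_device via std::seed_seq, rather than a single value, so the
// engine starts from an (almost) uniformly chosen internal state. It is used
// by the Shuffle and Sample tests below.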
TEST(MutatingTest, Shuffle) {
std::vector<int> actual = {1, 2, 3, 4, 5};
absl::c_shuffle(actual, RandomlySeededPrng<std::mt19937_64>());
EXPECT_THAT(actual, UnorderedElementsAre(1, 2, 3, 4, 5));
}
TEST(MutatingTest, Sample) {
std::vector<int> actual;
absl::c_sample(std::vector<int>{1, 2, 3, 4, 5}, std::back_inserter(actual), 3,
RandomlySeededPrng<std::mt19937_64>());
EXPECT_THAT(actual, IsSubsetOf({1, 2, 3, 4, 5}));
EXPECT_THAT(actual, SizeIs(3));
}
TEST(MutatingTest, PartialSort) {
std::vector<int> sequence{5, 3, 42, 0};
absl::c_partial_sort(sequence, sequence.begin() + 2);
EXPECT_THAT(absl::MakeSpan(sequence.data(), 2), ElementsAre(0, 3));
absl::c_partial_sort(sequence, sequence.begin() + 2, std::greater<int>());
EXPECT_THAT(absl::MakeSpan(sequence.data(), 2), ElementsAre(42, 5));
}
TEST(MutatingTest, PartialSortCopy) {
const std::vector<int> initial = {5, 3, 42, 0};
std::vector<int> actual(2);
absl::c_partial_sort_copy(initial, actual);
EXPECT_THAT(actual, ElementsAre(0, 3));
absl::c_partial_sort_copy(initial, actual, std::greater<int>());
EXPECT_THAT(actual, ElementsAre(42, 5));
}
TEST(MutatingTest, Merge) {
std::vector<int> actual;
absl::c_merge(std::vector<int>{1, 3, 5}, std::vector<int>{2, 4},
back_inserter(actual));
EXPECT_THAT(actual, ElementsAre(1, 2, 3, 4, 5));
}
TEST(MutatingTest, MergeWithComparator) {
std::vector<int> actual;
absl::c_merge(std::vector<int>{5, 3, 1}, std::vector<int>{4, 2},
back_inserter(actual), std::greater<int>());
EXPECT_THAT(actual, ElementsAre(5, 4, 3, 2, 1));
}
TEST(MutatingTest, InplaceMerge) {
std::vector<int> actual = {1, 3, 5, 2, 4};
absl::c_inplace_merge(actual, actual.begin() + 3);
EXPECT_THAT(actual, ElementsAre(1, 2, 3, 4, 5));
}
TEST(MutatingTest, InplaceMergeWithComparator) {
std::vector<int> actual = {5, 3, 1, 4, 2};
absl::c_inplace_merge(actual, actual.begin() + 3, std::greater<int>());
EXPECT_THAT(actual, ElementsAre(5, 4, 3, 2, 1));
}
class SetOperationsTest : public testing::Test {
protected:
std::vector<int> a_ = {1, 2, 3};
std::vector<int> b_ = {1, 3, 5};
std::vector<int> a_reversed_ = {3, 2, 1};
std::vector<int> b_reversed_ = {5, 3, 1};
};
TEST_F(SetOperationsTest, SetUnion) {
std::vector<int> actual;
absl::c_set_union(a_, b_, back_inserter(actual));
EXPECT_THAT(actual, ElementsAre(1, 2, 3, 5));
}
TEST_F(SetOperationsTest, SetUnionWithComparator) {
std::vector<int> actual;
absl::c_set_union(a_reversed_, b_reversed_, back_inserter(actual),
std::greater<int>());
EXPECT_THAT(actual, ElementsAre(5, 3, 2, 1));
}
TEST_F(SetOperationsTest, SetIntersection) {
std::vector<int> actual;
absl::c_set_intersection(a_, b_, back_inserter(actual));
EXPECT_THAT(actual, ElementsAre(1, 3));
}
TEST_F(SetOperationsTest, SetIntersectionWithComparator) {
std::vector<int> actual;
absl::c_set_intersection(a_reversed_, b_reversed_, back_inserter(actual),
std::greater<int>());
EXPECT_THAT(actual, ElementsAre(3, 1));
}
TEST_F(SetOperationsTest, SetDifference) {
std::vector<int> actual;
absl::c_set_difference(a_, b_, back_inserter(actual));
EXPECT_THAT(actual, ElementsAre(2));
}
TEST_F(SetOperationsTest, SetDifferenceWithComparator) {
std::vector<int> actual;
absl::c_set_difference(a_reversed_, b_reversed_, back_inserter(actual),
std::greater<int>());
EXPECT_THAT(actual, ElementsAre(2));
}
TEST_F(SetOperationsTest, SetSymmetricDifference) {
std::vector<int> actual;
absl::c_set_symmetric_difference(a_, b_, back_inserter(actual));
EXPECT_THAT(actual, ElementsAre(2, 5));
}
TEST_F(SetOperationsTest, SetSymmetricDifferenceWithComparator) {
std::vector<int> actual;
absl::c_set_symmetric_difference(a_reversed_, b_reversed_,
back_inserter(actual), std::greater<int>());
EXPECT_THAT(actual, ElementsAre(5, 2));
}
TEST(HeapOperationsTest, WithoutComparator) {
std::vector<int> heap = {1, 2, 3};
EXPECT_FALSE(absl::c_is_heap(heap));
absl::c_make_heap(heap);
EXPECT_TRUE(absl::c_is_heap(heap));
heap.push_back(4);
EXPECT_EQ(3, absl::c_is_heap_until(heap) - heap.begin());
absl::c_push_heap(heap);
EXPECT_EQ(4, heap[0]);
absl::c_pop_heap(heap);
EXPECT_EQ(4, heap[3]);
absl::c_make_heap(heap);
absl::c_sort_heap(heap);
EXPECT_THAT(heap, ElementsAre(1, 2, 3, 4));
EXPECT_FALSE(absl::c_is_heap(heap));
}
TEST(HeapOperationsTest, WithComparator) {
using greater = std::greater<int>;
std::vector<int> heap = {3, 2, 1};
EXPECT_FALSE(absl::c_is_heap(heap, greater()));
absl::c_make_heap(heap, greater());
EXPECT_TRUE(absl::c_is_heap(heap, greater()));
heap.push_back(0);
EXPECT_EQ(3, absl::c_is_heap_until(heap, greater()) - heap.begin());
absl::c_push_heap(heap, greater());
EXPECT_EQ(0, heap[0]);
absl::c_pop_heap(heap, greater());
EXPECT_EQ(0, heap[3]);
absl::c_make_heap(heap, greater());
absl::c_sort_heap(heap, greater());
EXPECT_THAT(heap, ElementsAre(3, 2, 1, 0));
EXPECT_FALSE(absl::c_is_heap(heap, greater()));
}
TEST(MutatingTest, PermutationOperations) {
std::vector<int> initial = {1, 2, 3, 4};
std::vector<int> permuted = initial;
absl::c_next_permutation(permuted);
EXPECT_TRUE(absl::c_is_permutation(initial, permuted));
EXPECT_TRUE(absl::c_is_permutation(initial, permuted, std::equal_to<int>()));
std::vector<int> permuted2 = initial;
absl::c_prev_permutation(permuted2, std::greater<int>());
EXPECT_EQ(permuted, permuted2);
absl::c_prev_permutation(permuted);
EXPECT_EQ(initial, permuted);
}
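// The remaining tests exercise constexpr evaluation and are gated on the
// language version: the first block needs C++17, the second needs C++20,
// where more of the underlying std:: algorithms became constexpr.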
#if defined(ABSL_INTERNAL_CPLUSPLUS_LANG) && \
ABSL_INTERNAL_CPLUSPLUS_LANG >= 201703L
TEST(ConstexprTest, Distance) {
static_assert(absl::c_distance(std::array<int, 3>()) == 3);
}
TEST(ConstexprTest, MinElement) {
constexpr std::array<int, 3> kArray = {1, 2, 3};
static_assert(*absl::c_min_element(kArray) == 1);
}
TEST(ConstexprTest, MinElementWithPredicate) {
constexpr std::array<int, 3> kArray = {1, 2, 3};
static_assert(*absl::c_min_element(kArray, std::greater<int>()) == 3);
}
TEST(ConstexprTest, MaxElement) {
constexpr std::array<int, 3> kArray = {1, 2, 3};
static_assert(*absl::c_max_element(kArray) == 3);
}
TEST(ConstexprTest, MaxElementWithPredicate) {
constexpr std::array<int, 3> kArray = {1, 2, 3};
static_assert(*absl::c_max_element(kArray, std::greater<int>()) == 1);
}
TEST(ConstexprTest, MinMaxElement) {
static constexpr std::array<int, 3> kArray = {1, 2, 3};
constexpr auto kMinMaxPair = absl::c_minmax_element(kArray);
static_assert(*kMinMaxPair.first == 1);
static_assert(*kMinMaxPair.second == 3);
}
TEST(ConstexprTest, MinMaxElementWithPredicate) {
static constexpr std::array<int, 3> kArray = {1, 2, 3};
constexpr auto kMinMaxPair =
absl::c_minmax_element(kArray, std::greater<int>());
static_assert(*kMinMaxPair.first == 3);
static_assert(*kMinMaxPair.second == 1);
}
#endif
#if defined(ABSL_INTERNAL_CPLUSPLUS_LANG) && \
ABSL_INTERNAL_CPLUSPLUS_LANG >= 202002L
TEST(ConstexprTest, LinearSearch) {
static constexpr std::array<int, 3> kArray = {1, 2, 3};
static_assert(absl::c_linear_search(kArray, 3));
static_assert(!absl::c_linear_search(kArray, 4));
}
TEST(ConstexprTest, AllOf) {
static constexpr std::array<int, 3> kArray = {1, 2, 3};
static_assert(!absl::c_all_of(kArray, [](int x) { return x > 1; }));
static_assert(absl::c_all_of(kArray, [](int x) { return x > 0; }));
}
TEST(ConstexprTest, AnyOf) {
static constexpr std::array<int, 3> kArray = {1, 2, 3};
static_assert(absl::c_any_of(kArray, [](int x) { return x > 2; }));
static_assert(!absl::c_any_of(kArray, [](int x) { return x > 5; }));
}
TEST(ConstexprTest, NoneOf) {
static constexpr std::array<int, 3> kArray = {1, 2, 3};
static_assert(!absl::c_none_of(kArray, [](int x) { return x > 2; }));
static_assert(absl::c_none_of(kArray, [](int x) { return x > 5; }));
}
TEST(ConstexprTest, ForEach) {
static constexpr std::array<int, 3> kArray = [] {
std::array<int, 3> array = {1, 2, 3};
absl::c_for_each(array, [](int& x) { x += 1; });
return array;
}();
static_assert(kArray == std::array{2, 3, 4});
}
TEST(ConstexprTest, Find) {
static constexpr std::array<int, 3> kArray = {1, 2, 3};
static_assert(absl::c_find(kArray, 1) == kArray.begin());
static_assert(absl::c_find(kArray, 4) == kArray.end());
}
TEST(ConstexprTest, Contains) {
static constexpr std::array<int, 3> kArray = {1, 2, 3};
static_assert(absl::c_contains(kArray, 1));
static_assert(!absl::c_contains(kArray, 4));
}
TEST(ConstexprTest, FindIf) {
static constexpr std::array<int, 3> kArray = {1, 2, 3};
static_assert(absl::c_find_if(kArray, [](int x) { return x > 2; }) ==
kArray.begin() + 2);
static_assert(absl::c_find_if(kArray, [](int x) { return x > 5; }) ==
kArray.end());
}
TEST(ConstexprTest, FindIfNot) {
static constexpr std::array<int, 3> kArray = {1, 2, 3};
static_assert(absl::c_find_if_not(kArray, [](int x) { return x > 1; }) ==
kArray.begin());
static_assert(absl::c_find_if_not(kArray, [](int x) { return x > 0; }) ==
kArray.end());
}
TEST(ConstexprTest, FindEnd) {
static constexpr std::array<int, 5> kHaystack = {1, 2, 3, 2, 3};
static constexpr std::array<int, 2> kNeedle = {2, 3};
static_assert(absl::c_find_end(kHaystack, kNeedle) == kHaystack.begin() + 3);
}
TEST(ConstexprTest, FindFirstOf) {
static constexpr std::array<int, 3> kArray = {1, 2, 3};
static_assert(absl::c_find_first_of(kArray, kArray) == kArray.begin());
}
TEST(ConstexprTest, AdjacentFind) {
static constexpr std::array<int, 4> kArray = {1, 2, 2, 3};
static_assert(absl::c_adjacent_find(kArray) == kArray.begin() + 1);
}
TEST(ConstexprTest, AdjacentFindWithPredicate) {
static constexpr std::array<int, 3> kArray = {1, 2, 3};
static_assert(absl::c_adjacent_find(kArray, std::less<int>()) ==
kArray.begin());
}
TEST(ConstexprTest, Count) {
static constexpr std::array<int, 3> kArray = {1, 2, 3};
static_assert(absl::c_count(kArray, 1) == 1);
static_assert(absl::c_count(kArray, 2) == 1);
static_assert(absl::c_count(kArray, 3) == 1);
static_assert(absl::c_count(kArray, 4) == 0);
}
TEST(ConstexprTest, CountIf) {
static constexpr std::array<int, 3> kArray = {1, 2, 3};
static_assert(absl::c_count_if(kArray, [](int x) { return x > 0; }) == 3);
static_assert(absl::c_count_if(kArray, [](int x) { return x > 1; }) == 2);
}
TEST(ConstexprTest, Mismatch) {
static constexpr std::array<int, 3> kArray1 = {1, 2, 3};
static constexpr std::array<int, 3> kArray2 = {1, 2, 3};
static constexpr std::array<int, 3> kArray3 = {2, 3, 4};
static_assert(absl::c_mismatch(kArray1, kArray2) ==
std::pair{kArray1.end(), kArray2.end()});
static_assert(absl::c_mismatch(kArray1, kArray3) ==
std::pair{kArray1.begin(), kArray3.begin()});
}
TEST(ConstexprTest, MismatchWithPredicate) {
static constexpr std::array<int, 3> kArray1 = {1, 2, 3};
static constexpr std::array<int, 3> kArray2 = {1, 2, 3};
static constexpr std::array<int, 3> kArray3 = {2, 3, 4};
static_assert(absl::c_mismatch(kArray1, kArray2, std::not_equal_to<int>()) ==
std::pair{kArray1.begin(), kArray2.begin()});
static_assert(absl::c_mismatch(kArray1, kArray3, std::not_equal_to<int>()) ==
std::pair{kArray1.end(), kArray3.end()});
}
TEST(ConstexprTest, Equal) {
static constexpr std::array<int, 3> kArray1 = {1, 2, 3};
static constexpr std::array<int, 3> kArray2 = {1, 2, 3};
static constexpr std::array<int, 3> kArray3 = {2, 3, 4};
static_assert(absl::c_equal(kArray1, kArray2));
static_assert(!absl::c_equal(kArray1, kArray3));
}
TEST(ConstexprTest, EqualWithPredicate) {
static constexpr std::array<int, 3> kArray1 = {1, 2, 3};
static constexpr std::array<int, 3> kArray2 = {1, 2, 3};
static constexpr std::array<int, 3> kArray3 = {2, 3, 4};
static_assert(!absl::c_equal(kArray1, kArray2, std::not_equal_to<int>()));
static_assert(absl::c_equal(kArray1, kArray3, std::not_equal_to<int>()));
}
TEST(ConstexprTest, IsPermutation) {
static constexpr std::array<int, 3> kArray1 = {1, 2, 3};
static constexpr std::array<int, 3> kArray2 = {3, 2, 1};
static constexpr std::array<int, 3> kArray3 = {2, 3, 4};
static_assert(absl::c_is_permutation(kArray1, kArray2));
static_assert(!absl::c_is_permutation(kArray1, kArray3));
}
TEST(ConstexprTest, IsPermutationWithPredicate) {
static constexpr std::array<int, 3> kArray1 = {1, 2, 3};
static constexpr std::array<int, 3> kArray2 = {3, 2, 1};
static constexpr std::array<int, 3> kArray3 = {2, 3, 4};
static_assert(absl::c_is_permutation(kArray1, kArray2, std::equal_to<int>()));
static_assert(
!absl::c_is_permutation(kArray1, kArray3, std::equal_to<int>()));
}
TEST(ConstexprTest, Search) {
static constexpr std::array<int, 3> kArray1 = {1, 2, 3};
static constexpr std::array<int, 3> kArray2 = {1, 2, 3};
static constexpr std::array<int, 3> kArray3 = {2, 3, 4};
static_assert(absl::c_search(kArray1, kArray2) == kArray1.begin());
static_assert(absl::c_search(kArray1, kArray3) == kArray1.end());
}
TEST(ConstexprTest, SearchWithPredicate) {
static constexpr std::array<int, 3> kArray1 = {1, 2, 3};
static constexpr std::array<int, 3> kArray2 = {1, 2, 3};
static constexpr std::array<int, 3> kArray3 = {2, 3, 4};
static_assert(absl::c_search(kArray1, kArray2, std::not_equal_to<int>()) ==
kArray1.end());
static_assert(absl::c_search(kArray1, kArray3, std::not_equal_to<int>()) ==
kArray1.begin());
}
TEST(ConstexprTest, ContainsSubrange) {
static constexpr std::array<int, 3> kArray1 = {1, 2, 3};
static constexpr std::array<int, 3> kArray2 = {1, 2, 3};
static constexpr std::array<int, 3> kArray3 = {2, 3, 4};
static_assert(absl::c_contains_subrange(kArray1, kArray2));
static_assert(!absl::c_contains_subrange(kArray1, kArray3));
}
TEST(ConstexprTest, ContainsSubrangeWithPredicate) {
static constexpr std::array<int, 3> kArray1 = {1, 2, 3};
static constexpr std::array<int, 3> kArray2 = {1, 2, 3};
static constexpr std::array<int, 3> kArray3 = {2, 3, 4};
static_assert(
!absl::c_contains_subrange(kArray1, kArray2, std::not_equal_to<>()));
static_assert(
absl::c_contains_subrange(kArray1, kArray3, std::not_equal_to<>()));
}
TEST(ConstexprTest, SearchN) {
static constexpr std::array<int, 4> kArray = {1, 2, 2, 3};
static_assert(absl::c_search_n(kArray, 1, 1) == kArray.begin());
static_assert(absl::c_search_n(kArray, 2, 2) == kArray.begin() + 1);
static_assert(absl::c_search_n(kArray, 1, 4) == kArray.end());
}
TEST(ConstexprTest, SearchNWithPredicate) {
static constexpr std::array<int, 4> kArray = {1, 2, 2, 3};
static_assert(absl::c_search_n(kArray, 1, 1, std::not_equal_to<int>()) ==
kArray.begin() + 1);
static_assert(absl::c_search_n(kArray, 2, 2, std::not_equal_to<int>()) ==
kArray.end());
static_assert(absl::c_search_n(kArray, 1, 4, std::not_equal_to<int>()) ==
kArray.begin());
}
#endif
} | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/algorithm/container.h | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/algorithm/container_test.cc | 03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4 |
9acf0627-e7b8-4eef-a6d0-4196bf7a131e | cpp | google/arolla | compile_where_operator | arolla/expr/eval/compile_where_operator.cc | arolla/expr/eval/compile_where_operator_test.cc | #include "arolla/expr/eval/compile_where_operator.h"
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <functional>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_format.h"
#include "absl/types/span.h"
#include "arolla/algorithm/control_flow_graph.h"
#include "arolla/expr/annotation_utils.h"
#include "arolla/expr/basic_expr_operator.h"
#include "arolla/expr/eval/dynamic_compiled_operator.h"
#include "arolla/expr/eval/eval.h"
#include "arolla/expr/eval/executable_builder.h"
#include "arolla/expr/eval/expr_utils.h"
#include "arolla/expr/expr.h"
#include "arolla/expr/expr_attributes.h"
#include "arolla/expr/expr_debug_string.h"
#include "arolla/expr/expr_node.h"
#include "arolla/expr/expr_operator.h"
#include "arolla/expr/expr_operator_signature.h"
#include "arolla/expr/expr_visitor.h"
#include "arolla/expr/qtype_utils.h"
#include "arolla/expr/registered_expr_operator.h"
#include "arolla/memory/optional_value.h"
#include "arolla/qexpr/bound_operators.h"
#include "arolla/qtype/base_types.h"
#include "arolla/qtype/qtype.h"
#include "arolla/qtype/qtype_traits.h"
#include "arolla/qtype/typed_slot.h"
#include "arolla/util/fingerprint.h"
#include "arolla/util/status_macros_backport.h"
namespace arolla::expr::eval_internal {
namespace {
using Stage = DynamicEvaluationEngineOptions::PreparationStage;
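// Dominator tree over the expression DAG. Used below to decide whether a
// subexpression is reachable only through a given branch of
// core._short_circuit_where and can therefore be short-circuited.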
class ExprDominatorTree {
public:
static absl::StatusOr<ExprDominatorTree> Build(const ExprNodePtr& root) {
auto node_order = expr::VisitorOrder(root);
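    // VisitorOrder lists dependencies before the nodes that use them;
    // reversing it makes the root node 0, the entry node of the AcyclicCFG.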
std::reverse(node_order.begin(), node_order.end());
absl::flat_hash_map<Fingerprint, AcyclicCFG::NodeId> node_ids;
node_ids.reserve(node_order.size());
for (size_t i = 0; i < node_order.size(); ++i) {
node_ids[node_order[i]->fingerprint()] = i;
}
std::vector<std::vector<AcyclicCFG::NodeId>> deps;
deps.reserve(node_order.size());
for (const auto& node : node_order) {
deps.emplace_back();
deps.back().reserve(node->node_deps().size());
for (const auto& dep : node->node_deps()) {
deps.back().push_back(node_ids.at(dep->fingerprint()));
}
}
ASSIGN_OR_RETURN(auto graph, AcyclicCFG::Create(std::move(deps)));
DominatorTree tree(*graph);
return ExprDominatorTree(std::move(graph), std::move(tree),
std::move(node_ids));
}
bool StrictlyDominates(const ExprNodePtr& descendant,
const ExprNodePtr& ancestor) const {
int64_t descendant_id = GetNodeId(descendant);
int64_t ancestor_id = GetNodeId(ancestor);
return tree_.depth(descendant_id) > tree_.depth(ancestor_id);
}
bool HasSingleParentInExprDag(const ExprNodePtr& node) const {
int64_t id = GetNodeId(node);
return graph_->reverse_deps(id).size() == 1;
}
void AddNodeAlias(const ExprNodePtr& new_node, const ExprNodePtr& old_node) {
node_ids_.emplace(new_node->fingerprint(), GetNodeId(old_node));
}
private:
AcyclicCFG::NodeId GetNodeId(const ExprNodePtr& node) const {
DCHECK(node_ids_.contains(node->fingerprint()))
<< "No node id registered for node " << GetDebugSnippet(node);
return node_ids_.at(node->fingerprint());
}
ExprDominatorTree(
std::unique_ptr<AcyclicCFG> graph, DominatorTree tree,
absl::flat_hash_map<Fingerprint, AcyclicCFG::NodeId> node_ids)
: graph_(std::move(graph)),
tree_(std::move(tree)),
node_ids_(std::move(node_ids)) {}
std::unique_ptr<AcyclicCFG> graph_;
DominatorTree tree_;
absl::flat_hash_map<Fingerprint, AcyclicCFG::NodeId> node_ids_;
};
absl::Status VerifyArgQTypes(const QType* cond_qtype, const QType* true_qtype,
const QType* false_qtype) {
if (cond_qtype == nullptr || true_qtype == nullptr ||
false_qtype == nullptr) {
return absl::InternalError(
"all types must be known before core._short_circuit_where "
"transformation");
}
if (cond_qtype != GetQType<OptionalUnit>()) {
return absl::InternalError(
absl::StrFormat("core._short_circuit_where operator supports only "
"OPTIONAL_UNIT conditions, got %s",
cond_qtype->name()));
}
if (true_qtype != false_qtype) {
return absl::InternalError(
absl::StrFormat("true and false branches of core._short_circuit_where "
"must have the same QType; got %s and %s",
true_qtype->name(), false_qtype->name()));
}
return absl::OkStatus();
}
absl::Status CheckTypesUnchangedOrStripped(
absl::Span<const QTypePtr> expected,
absl::Span<const ExprAttributes> given) {
if (expected.size() != given.size()) {
return absl::InternalError(
"number of args for internal.packed_where operator changed during "
"compilation");
}
for (size_t i = 0; i < expected.size(); ++i) {
if (given[i].qtype() != nullptr && given[i].qtype() != expected[i]) {
return absl::InternalError(
"input types for internal.packed_where operator changed during "
"compilation");
}
}
return absl::OkStatus();
}
}
absl::StatusOr<ExprOperatorPtr> PackedWhereOp::Create(
DynamicCompiledOperator true_op, DynamicCompiledOperator false_op) {
if (true_op.output_qtype() != false_op.output_qtype()) {
return absl::InternalError(
"inconsistent output types for internal.packed_where operator "
"branches");
}
return std::make_shared<PackedWhereOp>(
PrivateConstructorTag{}, std::move(true_op), std::move(false_op));
}
PackedWhereOp::PackedWhereOp(PrivateConstructorTag,
DynamicCompiledOperator true_op,
DynamicCompiledOperator false_op)
: ExprOperatorWithFixedSignature(
"internal.packed_where",
ExprOperatorSignature{{.name = "condition"},
{.name = "_leaves",
.kind = ExprOperatorSignature::Parameter::
Kind::kVariadicPositional}},
"(Internal) Stateful short circuit where operator.",
FingerprintHasher("arolla::expr::PackedWhereOp")
.Combine(true_op.fingerprint(), false_op.fingerprint())
.Finish()),
true_op_(std::move(true_op)),
false_op_(std::move(false_op)) {}
absl::StatusOr<ExprAttributes> PackedWhereOp::InferAttributes(
absl::Span<const ExprAttributes> inputs) const {
size_t expected_arg_count =
1 + true_op_.input_qtypes().size() + false_op_.input_qtypes().size();
if (expected_arg_count != inputs.size()) {
return absl::InternalError(
"number of args for internal.packed_where operator changed during "
"compilation");
}
auto true_inputs = inputs.subspan(1, true_op_.input_qtypes().size());
RETURN_IF_ERROR(
CheckTypesUnchangedOrStripped(true_op_.input_qtypes(), true_inputs));
auto false_inputs = inputs.subspan(1 + true_op_.input_qtypes().size());
RETURN_IF_ERROR(
CheckTypesUnchangedOrStripped(false_op_.input_qtypes(), false_inputs));
return ExprAttributes(true_op_.output_qtype());
}
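// Rewrites a core._short_circuit_where node into internal.packed_where with
// separately precompiled branch bodies when at least one branch contains
// computation reachable only through that branch; otherwise lowers it to the
// unconditional core.where backend operator.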
absl::StatusOr<ExprNodePtr> WhereOperatorTransformationImpl(
const DynamicEvaluationEngineOptions& options, ExprNodePtr node,
const ExprDominatorTree& dominator_tree) {
ASSIGN_OR_RETURN(auto op, DecayRegisteredOperator(node->op()));
if (!IsBackendOperator(op, "core._short_circuit_where")) {
return node;
}
const auto& deps = node->node_deps();
if (deps.size() != 3) {
return absl::InternalError(absl::StrFormat(
"incorrect number of dependencies passed to an "
"core._short_circuit_where operator node: expected 3 but got %d.",
deps.size()));
}
const ExprNodePtr& condition_branch = deps[0];
const ExprNodePtr& true_branch = deps[1];
const ExprNodePtr& false_branch = deps[2];
RETURN_IF_ERROR(VerifyArgQTypes(condition_branch->qtype(),
true_branch->qtype(), false_branch->qtype()));
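  // Returns a predicate that is true for nodes that should be evaluated
  // lazily: non-leaf, non-literal nodes reachable only through `branch_root`
  // (i.e. strictly dominated by it), or the branch root itself when it has
  // no other parent in the DAG.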
auto must_be_short_circuited = [&](ExprNodePtr branch_root) {
return [branch_root = std::move(branch_root),
&dominator_tree](const ExprNodePtr& n) -> absl::StatusOr<bool> {
ASSIGN_OR_RETURN(auto annotationless_n, StripTopmostAnnotations(n));
if (annotationless_n->is_leaf()) {
return false;
}
if (annotationless_n.get() != n.get()) {
return absl::InternalError(
absl::StrFormat("WhereOperatorGlobalTransformation does not "
"support annotations except for leaves, got %s",
GetDebugSnippet(n)));
}
if (n->is_literal()) {
return false;
}
if (n.get() == branch_root.get()) {
return dominator_tree.HasSingleParentInExprDag(n);
}
return dominator_tree.StrictlyDominates(annotationless_n, branch_root);
};
};
ASSIGN_OR_RETURN(bool true_branch_must_be_short_circuited,
must_be_short_circuited(true_branch)(true_branch));
ASSIGN_OR_RETURN(bool false_branch_must_be_short_circuited,
must_be_short_circuited(false_branch)(false_branch));
if (!true_branch_must_be_short_circuited &&
!false_branch_must_be_short_circuited) {
ASSIGN_OR_RETURN(ExprOperatorPtr core_where_op,
LookupOperator("core.where"));
ASSIGN_OR_RETURN(core_where_op, DecayRegisteredOperator(core_where_op));
if (!HasBackendExprOperatorTag(core_where_op)) {
return absl::InternalError(
"core.where operator must be a backend operator");
}
return MakeOpNode(core_where_op,
{condition_branch, true_branch, false_branch});
}
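  // At least one branch benefits from lazy evaluation: extract each branch
  // into a lambda whose body contains the nodes selected by the predicate
  // above, then precompile the lambda bodies into standalone sub-programs.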
DynamicEvaluationEngineOptions subexpression_options(options);
subexpression_options.enabled_preparation_stages =
Stage::kPopulateQTypes | Stage::kToLower;
subexpression_options.allow_overriding_input_slots = false;
ASSIGN_OR_RETURN(
ExprNodePtr true_lambda_expr,
ExtractLambda(true_branch, must_be_short_circuited(true_branch)));
ASSIGN_OR_RETURN(auto precompiled_true,
DynamicCompiledOperator::Build(
subexpression_options, true_lambda_expr->op(),
GetExprQTypes(true_lambda_expr->node_deps())));
ASSIGN_OR_RETURN(
ExprNodePtr false_lambda_expr,
ExtractLambda(false_branch, must_be_short_circuited(false_branch)));
ASSIGN_OR_RETURN(auto precompiled_false,
DynamicCompiledOperator::Build(
subexpression_options, false_lambda_expr->op(),
GetExprQTypes(false_lambda_expr->node_deps())));
ASSIGN_OR_RETURN(ExprOperatorPtr packed_op,
PackedWhereOp::Create(std::move(precompiled_true),
std::move(precompiled_false)));
std::vector<ExprNodePtr> args = {condition_branch};
args.insert(args.end(), true_lambda_expr->node_deps().begin(),
true_lambda_expr->node_deps().end());
args.insert(args.end(), false_lambda_expr->node_deps().begin(),
false_lambda_expr->node_deps().end());
return MakeOpNode(std::move(packed_op), std::move(args));
}
absl::StatusOr<ExprNodePtr> WhereOperatorGlobalTransformation(
const DynamicEvaluationEngineOptions& options, ExprNodePtr node) {
ASSIGN_OR_RETURN(auto dominator_tree, ExprDominatorTree::Build(node));
return PostOrderTraverse(
node,
[&](const ExprNodePtr& node,
absl::Span<const ExprNodePtr* const> arg_visits)
-> absl::StatusOr<ExprNodePtr> {
ASSIGN_OR_RETURN(
auto transformed_node,
WithNewDependencies(node, DereferenceVisitPointers(arg_visits)));
ASSIGN_OR_RETURN(
transformed_node,
WhereOperatorTransformationImpl(
options, std::move(transformed_node), dominator_tree));
dominator_tree.AddNodeAlias(transformed_node, node);
return transformed_node;
});
}
absl::StatusOr<TypedSlot> CompileWhereOperator(
const DynamicEvaluationEngineOptions& options,
const PackedWhereOp& where_op, absl::Span<const TypedSlot> input_slots,
TypedSlot output_slot,
eval_internal::ExecutableBuilder* executable_builder) {
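  // Emits: [jump_if_not cond -> false] [true-branch ops] [jump -> end]
  // [false-branch ops]. SkipEvalOp() reserves a placeholder slot whose jump
  // offset is patched below, once the size of each branch is known.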
size_t expected_arg_count = 1 + where_op.true_op().input_qtypes().size() +
where_op.false_op().input_qtypes().size();
if (expected_arg_count != input_slots.size()) {
return absl::InternalError(
"incorrect number of input slots passed to internal.packed_where "
"operator");
}
auto true_input_slots =
input_slots.subspan(1, where_op.true_op().input_qtypes().size());
auto before_true_branch = executable_builder->SkipEvalOp();
RETURN_IF_ERROR(where_op.true_op().BindTo(*executable_builder,
true_input_slots, output_slot));
auto false_input_slots =
input_slots.subspan(1 + where_op.true_op().input_qtypes().size());
auto before_false_branch = executable_builder->SkipEvalOp();
RETURN_IF_ERROR(where_op.false_op().BindTo(*executable_builder,
false_input_slots, output_slot));
if (input_slots[0].GetType() != GetQType<OptionalUnit>()) {
return absl::InternalError(
"unexpected condition slot type for internal.packed_where operator");
}
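  // OPTIONAL_UNIT keeps its presence flag in a bool subslot at index 0; the
  // conditional jump reads that flag directly.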
ASSIGN_OR_RETURN(auto cond_slot, input_slots[0].SubSlot(0).ToSlot<bool>());
int64_t jump_to_false_branch = before_false_branch - before_true_branch;
auto before_true_branch_op_name =
absl::StrFormat("jump_if_not<%+d>", jump_to_false_branch);
if (jump_to_false_branch == 0) {
return absl::InternalError(
"true branch of internal.packed_where compiled into no operators");
}
RETURN_IF_ERROR(executable_builder->SetEvalOp(
before_true_branch,
JumpIfNotBoundOperator(cond_slot, jump_to_false_branch),
eval_internal::FormatOperatorCall(before_true_branch_op_name,
{input_slots[0]}, {}),
before_true_branch_op_name));
int64_t jump_after_false_branch =
executable_builder->current_eval_ops_size() - before_false_branch - 1;
auto before_false_branch_op_name =
absl::StrFormat("jump<%+d>", jump_after_false_branch);
if (jump_after_false_branch == 0) {
return absl::InternalError(
"false branch of internal.packed_where compiled into no operators");
}
RETURN_IF_ERROR(executable_builder->SetEvalOp(
before_false_branch, JumpBoundOperator(jump_after_false_branch),
eval_internal::FormatOperatorCall(before_false_branch_op_name, {}, {}),
before_false_branch_op_name));
return output_slot;
}
} | #include "arolla/expr/eval/compile_where_operator.h"
#include <algorithm>
#include <cstdint>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/container/flat_hash_map.h"
#include "absl/status/status.h"
#include "absl/status/status_matchers.h"
#include "absl/status/statusor.h"
#include "arolla/array/qtype/types.h"
#include "arolla/dense_array/qtype/types.h"
#include "arolla/expr/eval/dynamic_compiled_operator.h"
#include "arolla/expr/eval/eval.h"
#include "arolla/expr/eval/invoke.h"
#include "arolla/expr/eval/prepare_expression.h"
#include "arolla/expr/eval/test_utils.h"
#include "arolla/expr/expr.h"
#include "arolla/expr/expr_attributes.h"
#include "arolla/expr/expr_node.h"
#include "arolla/expr/expr_operator.h"
#include "arolla/expr/optimization/optimizer.h"
#include "arolla/expr/optimization/peephole_optimizations/short_circuit_where.h"
#include "arolla/expr/optimization/peephole_optimizer.h"
#include "arolla/expr/qtype_utils.h"
#include "arolla/expr/registered_expr_operator.h"
#include "arolla/expr/testing/testing.h"
#include "arolla/expr/visitors/substitution.h"
#include "arolla/memory/frame.h"
#include "arolla/memory/optional_value.h"
#include "arolla/qexpr/evaluation_engine.h"
#include "arolla/qtype/base_types.h"
#include "arolla/qtype/optional_qtype.h"
#include "arolla/qtype/qtype.h"
#include "arolla/qtype/qtype_traits.h"
#include "arolla/qtype/testing/qtype.h"
#include "arolla/qtype/typed_slot.h"
#include "arolla/qtype/typed_value.h"
#include "arolla/util/bytes.h"
#include "arolla/util/unit.h"
namespace arolla::expr::eval_internal {
namespace {
using ::absl_testing::IsOkAndHolds;
using ::absl_testing::StatusIs;
using ::arolla::testing::EqualsExpr;
using ::arolla::testing::TypedValueWith;
using ::arolla::testing::WithNameAnnotation;
using ::arolla::testing::WithQTypeAnnotation;
using ::testing::AllOf;
using ::testing::ElementsAre;
using ::testing::Eq;
using ::testing::HasSubstr;
using ::testing::NotNull;
absl::StatusOr<std::unique_ptr<BoundExpr>> CompileExprWithTypes(
DynamicEvaluationEngineOptions options, ExprNodePtr expr,
absl::flat_hash_map<std::string, QTypePtr> leaf_qtypes) {
std::vector<std::string> leaves_in_order;
for (const auto& [leaf, _] : leaf_qtypes) {
leaves_in_order.push_back(leaf);
}
std::sort(leaves_in_order.begin(), leaves_in_order.end());
absl::flat_hash_map<std::string, TypedSlot> input_slots;
FrameLayout::Builder layout_builder;
for (const auto& leaf : leaves_in_order) {
input_slots.emplace(leaf, AddSlot(leaf_qtypes.at(leaf), &layout_builder));
}
return CompileAndBindForDynamicEvaluation(options, &layout_builder, expr,
input_slots);
}
class WhereOperatorTest
: public ::testing::TestWithParam<DynamicEvaluationEngineOptions> {
protected:
DynamicEvaluationEngineOptions GetOptions() const { return GetParam(); }
};
INSTANTIATE_TEST_SUITE_P(
GarbageCollection, WhereOperatorTest,
::testing::Values(
DynamicEvaluationEngineOptions{.collect_op_descriptions = true,
.allow_overriding_input_slots = false},
DynamicEvaluationEngineOptions{.collect_op_descriptions = true,
.allow_overriding_input_slots = true}));
TEST_P(WhereOperatorTest,
WhereOperatorGlobalTransformation_AnnotationHandling) {
ASSERT_OK_AND_ASSIGN(
auto cond, WithQTypeAnnotation(Leaf("cond"), GetOptionalQType<Unit>()));
ASSERT_OK_AND_ASSIGN(auto x,
WithQTypeAnnotation(Leaf("x"), GetQType<float>()));
ASSERT_OK_AND_ASSIGN(auto y,
WithQTypeAnnotation(Leaf("y"), GetQType<float>()));
ASSERT_OK_AND_ASSIGN(auto x_plus_y, CallOp("math.add", {x, y}));
ASSERT_OK_AND_ASSIGN(auto named_x_plus_y,
WithNameAnnotation(x_plus_y, "name_for_x_plus_y"));
ASSERT_OK_AND_ASSIGN(
auto expr,
CallOp(
"core._short_circuit_where",
{
cond,
WithQTypeAnnotation(CallOp("math.multiply", {named_x_plus_y, y}),
GetQType<float>()),
CallOp("math.multiply",
{x_plus_y, CallOp("math.add", {y, Literal<float>(1.)})}),
}));
EXPECT_THAT(WhereOperatorGlobalTransformation(GetOptions(), expr),
StatusIs(absl::StatusCode::kInternal,
HasSubstr("WhereOperatorGlobalTransformation does not "
"support annotations except for leaves")));
ASSERT_OK_AND_ASSIGN(auto prepared_expr,
PrepareExpression(expr, {}, GetOptions()));
const auto* packed_where =
dynamic_cast<const PackedWhereOp*>(prepared_expr->op().get());
ASSERT_THAT(packed_where, NotNull());
ASSERT_THAT(prepared_expr->node_deps(),
ElementsAre(EqualsExpr(cond),
EqualsExpr(x_plus_y), EqualsExpr(y),
EqualsExpr(x_plus_y), EqualsExpr(y),
EqualsExpr(Literal<float>(1))));
}
TEST_P(WhereOperatorTest, SimpleWhere) {
ASSERT_OK_AND_ASSIGN(
auto expr,
CallOp("core._short_circuit_where",
{Leaf("cond"), CallOp("math.add", {Leaf("x"), Leaf("y")}),
CallOp("math.subtract", {Leaf("x"), Leaf("y")})}));
EXPECT_THAT(
CompileExprWithTypes(GetOptions(), expr,
{{"cond", GetQType<OptionalUnit>()},
{"x", GetQType<int32_t>()},
{"y", GetQType<int32_t>()}}),
IsOkAndHolds(AllOf(
InitOperationsAre(),
EvalOperationsAre(
"jump_if_not<+2>(OPTIONAL_UNIT [0x00])",
"INT32 [0x0C] = math.add(INT32 [0x04], INT32 [0x08])",
"jump<+1>()",
"INT32 [0x0C] = math.subtract(INT32 [0x04], INT32 [0x08])"))));
EXPECT_THAT(Invoke(expr,
{{"cond", TypedValue::FromValue(kPresent)},
{"x", TypedValue::FromValue(1)},
{"y", TypedValue::FromValue(2)}},
GetOptions()),
IsOkAndHolds(TypedValueWith<int32_t>(Eq(3))));
EXPECT_THAT(Invoke(expr,
{{"cond", TypedValue::FromValue(kMissing)},
{"x", TypedValue::FromValue(1)},
{"y", TypedValue::FromValue(2)}},
GetOptions()),
IsOkAndHolds(TypedValueWith<int32_t>(Eq(-1))));
}
TEST_P(WhereOperatorTest, PackedWhereOpComputeOutputQType) {
ASSERT_OK_AND_ASSIGN(ExprOperatorPtr math_add, LookupOperator("math.add"));
ASSERT_OK_AND_ASSIGN(
auto add_float_double,
DynamicCompiledOperator::Build(GetOptions(), math_add,
{GetQType<float>(), GetQType<double>()}));
ASSERT_OK_AND_ASSIGN(
auto add_doubles,
DynamicCompiledOperator::Build(GetOptions(), math_add,
{GetQType<double>(), GetQType<double>()}));
ASSERT_OK_AND_ASSIGN(ExprOperatorPtr packed_where,
PackedWhereOp::Create(std::move(add_float_double),
std::move(add_doubles)));
EXPECT_THAT(packed_where->InferAttributes({}),
StatusIs(absl::StatusCode::kInternal,
"number of args for internal.packed_where operator "
"changed during compilation"));
auto b = ExprAttributes(GetQType<bool>());
auto f = ExprAttributes(GetQType<float>());
auto d = ExprAttributes(GetQType<double>());
EXPECT_THAT(packed_where->InferAttributes({b, f, f, d, d}),
StatusIs(absl::StatusCode::kInternal,
"input types for internal.packed_where operator changed "
"during compilation"));
{
ASSERT_OK_AND_ASSIGN(auto attr,
packed_where->InferAttributes({b, f, d, d, d}));
EXPECT_THAT(attr.qtype(), Eq(GetQType<double>()));
}
{
ASSERT_OK_AND_ASSIGN(
auto attr, packed_where->InferAttributes(
{ExprAttributes{}, ExprAttributes{}, ExprAttributes{},
ExprAttributes{}, ExprAttributes{}}));
EXPECT_THAT(attr.qtype(), Eq(GetQType<double>()));
}
}
TEST_P(WhereOperatorTest, WhereWithTypeCasting) {
ASSERT_OK_AND_ASSIGN(auto expr, CallOp("core._short_circuit_where",
{Leaf("cond"), Leaf("x"), Leaf("y")}));
EXPECT_THAT(
CompileExprWithTypes(GetOptions(), expr,
{{"cond", GetQType<OptionalUnit>()},
{"x", GetQType<int32_t>()},
{"y", GetOptionalQType<int32_t>()}}),
IsOkAndHolds(AllOf(
InitOperationsAre(),
EvalOperationsAre(
"jump_if_not<+2>(OPTIONAL_UNIT [0x00])",
"OPTIONAL_INT32 [0x10] = core.to_optional._scalar(INT32 [0x04])",
"jump<+1>()",
"OPTIONAL_INT32 [0x10] = core._copy(OPTIONAL_INT32 [0x08])"))));
EXPECT_THAT(Invoke(expr,
{{"cond", TypedValue::FromValue(kPresent)},
{"x", TypedValue::FromValue(1)},
{"y", TypedValue::FromValue(OptionalValue(0))}},
GetOptions()),
IsOkAndHolds(TypedValueWith<OptionalValue<int32_t>>(Eq(1))));
}
TEST_P(WhereOperatorTest, WhereWithEqualBranches) {
ASSERT_OK_AND_ASSIGN(auto x_plus_y,
CallOp("math.add", {Leaf("x"), Leaf("y")}));
ASSERT_OK_AND_ASSIGN(auto expr, CallOp("core._short_circuit_where",
{Leaf("cond"), x_plus_y, x_plus_y}));
EXPECT_THAT(CompileExprWithTypes(GetOptions(), expr,
{{"cond", GetQType<OptionalUnit>()},
{"x", GetQType<int32_t>()},
{"y", GetQType<int32_t>()}}),
IsOkAndHolds(AllOf(
InitOperationsAre(),
EvalOperationsAre(
"INT32 [0x10] = math.add(INT32 [0x04], INT32 [0x08])",
"INT32 [0x0C] = core.where(OPTIONAL_UNIT [0x00], INT32 "
"[0x10], INT32 [0x10])"))));
EXPECT_THAT(Invoke(expr,
{{"cond", TypedValue::FromValue(kPresent)},
{"x", TypedValue::FromValue(1)},
{"y", TypedValue::FromValue(2)}},
GetOptions()),
IsOkAndHolds(TypedValueWith<int32_t>(Eq(3))));
}
TEST_P(WhereOperatorTest, NothingToShortCircuit) {
auto x_plus_y = CallOp("math.add", {Leaf("x"), Leaf("y")});
auto cond = Leaf("cond");
ASSERT_OK_AND_ASSIGN(
auto expr,
CallOp("math.add", {CallOp("core._short_circuit_where",
{Leaf("cond"), x_plus_y, Leaf("y")}),
x_plus_y}));
DynamicEvaluationEngineOptions options = GetOptions();
options.allow_overriding_input_slots = true;
EXPECT_THAT(CompileExprWithTypes(options, expr,
{{"cond", GetQType<OptionalUnit>()},
{"x", GetQType<int32_t>()},
{"y", GetQType<int32_t>()}}),
IsOkAndHolds(AllOf(
InitOperationsAre(),
EvalOperationsAre(
"INT32 [0x10] = math.add(INT32 [0x04], INT32 [0x08])",
"INT32 [0x04] = core.where(OPTIONAL_UNIT [0x00], INT32 "
"[0x10], INT32 [0x08])",
"INT32 [0x0C] = math.add(INT32 [0x04], INT32 [0x10])"))));
EXPECT_THAT(
CompileExprWithTypes(options, expr,
{{"cond", GetQType<OptionalUnit>()},
{"x", GetDenseArrayQType<int32_t>()},
{"y", GetDenseArrayQType<int32_t>()}}),
IsOkAndHolds(AllOf(
InitOperationsAre(),
EvalOperationsAre(
"DENSE_ARRAY_INT32 [0xE0] = math.add(DENSE_ARRAY_INT32 [0x08], "
"DENSE_ARRAY_INT32 [0x50])",
"DENSE_ARRAY_INT32 [0x08] = core.where(OPTIONAL_UNIT [0x00], "
"DENSE_ARRAY_INT32 [0xE0], DENSE_ARRAY_INT32 [0x50])",
"DENSE_ARRAY_INT32 [0x98] = math.add(DENSE_ARRAY_INT32 [0x08], "
"DENSE_ARRAY_INT32 [0xE0])"))));
EXPECT_THAT(
CompileExprWithTypes(options, expr,
{{"cond", GetQType<OptionalUnit>()},
{"x", GetArrayQType<int32_t>()},
{"y", GetArrayQType<int32_t>()}}),
IsOkAndHolds(AllOf(
InitOperationsAre(),
EvalOperationsAre("ARRAY_INT32 [0x1A0] = math.add(ARRAY_INT32 "
"[0x08], ARRAY_INT32 [0x90])",
"ARRAY_INT32 [0x08] = core.where(OPTIONAL_UNIT "
"[0x00], ARRAY_INT32 [0x1A0], ARRAY_INT32 [0x90])",
"ARRAY_INT32 [0x118] = math.add(ARRAY_INT32 "
"[0x08], ARRAY_INT32 [0x1A0])"))));
}
TEST_P(WhereOperatorTest, WhereWithIndependentBranches) {
ASSERT_OK_AND_ASSIGN(
auto expr,
CallOp("core._short_circuit_where",
{Leaf("cond"), CallOp("math.add", {Literal(1), Literal(2)}),
CallOp("math.add", {Literal(2), Literal(3)})}));
auto options = GetOptions();
options.enabled_preparation_stages &=
~DynamicEvaluationEngineOptions::PreparationStage::kLiteralFolding;
EXPECT_THAT(
CompileExprWithTypes(options, expr, {{"cond", GetQType<OptionalUnit>()}}),
IsOkAndHolds(
AllOf(InitOperationsAre("INT32 [0x08] = 1\n"
"INT32 [0x0C] = 2\n"
"INT32 [0x10] = 3"),
EvalOperationsAre(
"jump_if_not<+2>(OPTIONAL_UNIT [0x00])",
"INT32 [0x04] = math.add(INT32 [0x08], INT32 [0x0C])",
"jump<+1>()",
"INT32 [0x04] = math.add(INT32 [0x0C], INT32 [0x10])"))));
EXPECT_THAT(
Invoke(expr, {{"cond", TypedValue::FromValue(kPresent)}}, options),
IsOkAndHolds(TypedValueWith<int32_t>(Eq(3))));
EXPECT_THAT(
Invoke(expr, {{"cond", TypedValue::FromValue(kMissing)}}, options),
IsOkAndHolds(TypedValueWith<int32_t>(Eq(5))));
}
TEST_P(WhereOperatorTest, WhereWithIncompatibleTypes) {
ASSERT_OK_AND_ASSIGN(auto expr, CallOp("core._short_circuit_where",
{Leaf("cond"), Leaf("x"), Leaf("y")}));
EXPECT_THAT(CompileExprWithTypes(GetOptions(), expr,
{{"cond", GetQType<OptionalUnit>()},
{"x", GetQType<int32_t>()},
{"y", GetQType<Bytes>()}}),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("incompatible types true_branch: INT32 and "
"false_branch: BYTES")));
}
TEST_P(WhereOperatorTest, WhereWithExpressions) {
auto cond = Leaf("cond");
auto x = Leaf("x");
auto y = Leaf("y");
ASSERT_OK_AND_ASSIGN(auto x_mul_y, CallOp("math.multiply", {x, y}));
ASSERT_OK_AND_ASSIGN(auto x_plus_y, CallOp("math.add", {x, y}));
ASSERT_OK_AND_ASSIGN(ExprNodePtr expr, CallOp("core._short_circuit_where",
{cond, x_mul_y, x_plus_y}));
EXPECT_THAT(
CompileExprWithTypes(GetOptions(), expr,
{{"cond", GetQType<OptionalUnit>()},
{"x", GetQType<int32_t>()},
{"y", GetQType<int32_t>()}}),
IsOkAndHolds(
AllOf(InitOperationsAre(),
EvalOperationsAre(
"jump_if_not<+2>(OPTIONAL_UNIT [0x00])",
"INT32 [0x0C] = math.multiply(INT32 [0x04], INT32 [0x08])",
"jump<+1>()",
"INT32 [0x0C] = math.add(INT32 [0x04], INT32 [0x08])"))));
EXPECT_THAT(Invoke(expr,
{{"cond", TypedValue::FromValue(kPresent)},
{"x", TypedValue::FromValue(3)},
{"y", TypedValue::FromValue(19)}},
GetOptions()),
IsOkAndHolds(TypedValueWith<int32_t>(Eq(57))));
EXPECT_THAT(Invoke(expr,
{{"cond", TypedValue::FromValue(kMissing)},
{"x", TypedValue::FromValue(50)},
{"y", TypedValue::FromValue(7)}},
GetOptions()),
IsOkAndHolds(TypedValueWith<int32_t>(Eq(57))));
}
TEST_P(WhereOperatorTest, WhereWithInputSlotsOverwriting) {
auto cond = Leaf("cond");
auto x = Leaf("x");
ASSERT_OK_AND_ASSIGN(auto mult, CallOp("math.multiply", {x, x}));
ASSERT_OK_AND_ASSIGN(mult, CallOp("math.multiply", {mult, mult}));
ASSERT_OK_AND_ASSIGN(mult, CallOp("math.multiply", {mult, mult}));
ASSERT_OK_AND_ASSIGN(auto sum, CallOp("math.add", {mult, mult}));
ASSERT_OK_AND_ASSIGN(sum, CallOp("math.add", {sum, sum}));
ASSERT_OK_AND_ASSIGN(sum, CallOp("math.add", {sum, sum}));
ASSERT_OK_AND_ASSIGN(auto sub, CallOp("math.subtract", {mult, mult}));
ASSERT_OK_AND_ASSIGN(sub, CallOp("math.subtract", {sub, sub}));
ASSERT_OK_AND_ASSIGN(sub, CallOp("math.subtract", {sub, sub}));
ASSERT_OK_AND_ASSIGN(ExprNodePtr expr,
CallOp("core._short_circuit_where", {cond, sum, sub}));
if (GetOptions().allow_overriding_input_slots) {
EXPECT_THAT(
CompileExprWithTypes(
GetOptions(), expr,
{{"cond", GetQType<OptionalUnit>()}, {"x", GetQType<int32_t>()}}),
IsOkAndHolds(AllOf(
InitOperationsAre(),
EvalOperationsAre(
"INT32 [0x0C] = math.multiply(INT32 [0x04], INT32 [0x04])",
"INT32 [0x04] = math.multiply(INT32 [0x0C], INT32 [0x0C])",
"INT32 [0x0C] = math.multiply(INT32 [0x04], INT32 [0x04])",
"jump_if_not<+4>(OPTIONAL_UNIT [0x00])",
"INT32 [0x10] = math.add(INT32 [0x0C], INT32 [0x0C])",
"INT32 [0x14] = math.add(INT32 [0x10], INT32 [0x10])",
"INT32 [0x08] = math.add(INT32 [0x14], INT32 [0x14])",
"jump<+3>()",
"INT32 [0x18] = math.subtract(INT32 [0x0C], INT32 [0x0C])",
"INT32 [0x1C] = math.subtract(INT32 [0x18], INT32 [0x18])",
"INT32 [0x08] = math.subtract(INT32 [0x1C], INT32 [0x1C])"))));
} else {
EXPECT_THAT(
CompileExprWithTypes(
GetOptions(), expr,
{{"cond", GetQType<OptionalUnit>()}, {"x", GetQType<int32_t>()}}),
IsOkAndHolds(AllOf(
InitOperationsAre(),
EvalOperationsAre(
"INT32 [0x0C] = math.multiply(INT32 [0x04], INT32 [0x04])",
"INT32 [0x10] = math.multiply(INT32 [0x0C], INT32 [0x0C])",
"INT32 [0x0C] = math.multiply(INT32 [0x10], INT32 [0x10])",
"jump_if_not<+4>(OPTIONAL_UNIT [0x00])",
"INT32 [0x14] = math.add(INT32 [0x0C], INT32 [0x0C])",
"INT32 [0x18] = math.add(INT32 [0x14], INT32 [0x14])",
"INT32 [0x08] = math.add(INT32 [0x18], INT32 [0x18])",
"jump<+3>()",
"INT32 [0x1C] = math.subtract(INT32 [0x0C], INT32 [0x0C])",
"INT32 [0x20] = math.subtract(INT32 [0x1C], INT32 [0x1C])",
"INT32 [0x08] = math.subtract(INT32 [0x20], INT32 [0x20])"))));
}
EXPECT_THAT(Invoke(expr,
{{"cond", TypedValue::FromValue(kPresent)},
{"x", TypedValue::FromValue(2)}},
GetOptions()),
IsOkAndHolds(TypedValueWith<int32_t>(Eq(2048))));
EXPECT_THAT(Invoke(expr,
{{"cond", TypedValue::FromValue(kMissing)},
{"x", TypedValue::FromValue(2)}},
GetOptions()),
IsOkAndHolds(TypedValueWith<int32_t>(Eq(0))));
}
TEST_P(WhereOperatorTest, ShortCircuit) {
ASSERT_OK_AND_ASSIGN(ExprNodePtr x_plus_1,
CallOp("math.add", {Leaf("x"), Literal(1)}));
ASSERT_OK_AND_ASSIGN(ExprNodePtr x_div_0,
CallOp("math.floordiv", {Leaf("x"), Literal(0)}));
ASSERT_OK_AND_ASSIGN(
ExprNodePtr expr,
CallOp("core._short_circuit_where", {Leaf("cond"), x_plus_1, x_div_0}));
EXPECT_THAT(Invoke(expr,
{{"cond", TypedValue::FromValue(kPresent)},
{"x", TypedValue::FromValue(56)}},
GetOptions()),
IsOkAndHolds(TypedValueWith<int32_t>(Eq(57))));
EXPECT_THAT(Invoke(expr,
{{"cond", TypedValue::FromValue(kMissing)},
{"x", TypedValue::FromValue(56)}},
GetOptions()),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("division by zero")));
}
TEST_P(WhereOperatorTest, WhereWithLiteral) {
ASSERT_OK_AND_ASSIGN(
ExprNodePtr expr,
CallOp("core._short_circuit_where",
{Leaf("cond"), CallOp("math.add", {Leaf("x"), Literal(27)}),
CallOp("math.subtract", {Leaf("x"), Literal(27)})}));
EXPECT_THAT(
CompileExprWithTypes(GetOptions(), expr,
{{"cond", GetQType<OptionalUnit>()},
{"x", GetQType<int32_t>()},
{"y", GetQType<int32_t>()}}),
IsOkAndHolds(AllOf(
InitOperationsAre("INT32 [0x10] = 27"),
EvalOperationsAre(
"jump_if_not<+2>(OPTIONAL_UNIT [0x00])",
"INT32 [0x0C] = math.add(INT32 [0x04], INT32 [0x10])",
"jump<+1>()",
"INT32 [0x0C] = math.subtract(INT32 [0x04], INT32 [0x10])"))));
EXPECT_THAT(Invoke(expr,
{{"cond", TypedValue::FromValue(kPresent)},
{"x", TypedValue::FromValue(30)}},
GetOptions()),
IsOkAndHolds(TypedValueWith<int32_t>(Eq(57))));
EXPECT_THAT(Invoke(expr,
{{"cond", TypedValue::FromValue(kMissing)},
{"x", TypedValue::FromValue(30)}},
GetOptions()),
IsOkAndHolds(TypedValueWith<int32_t>(Eq(3))));
}
TEST_P(WhereOperatorTest, WhereWithCommonBranches) {
ASSERT_OK_AND_ASSIGN(ExprNodePtr x_plus_y,
CallOp("math.add", {Leaf("x"), Leaf("y")}));
ASSERT_OK_AND_ASSIGN(ExprNodePtr x_plus_y_plus_1,
CallOp("math.add", {x_plus_y, Literal(1)}));
ASSERT_OK_AND_ASSIGN(ExprNodePtr expr,
CallOp("core._short_circuit_where",
{Leaf("cond"), x_plus_y, x_plus_y_plus_1}));
EXPECT_THAT(CompileExprWithTypes(GetOptions(), expr,
{{"cond", GetQType<OptionalUnit>()},
{"x", GetQType<int32_t>()},
{"y", GetQType<int32_t>()}}),
IsOkAndHolds(AllOf(
InitOperationsAre("INT32 [0x14] = 1"),
EvalOperationsAre(
"INT32 [0x10] = math.add(INT32 [0x04], INT32 [0x08])",
"jump_if_not<+2>(OPTIONAL_UNIT [0x00])",
"INT32 [0x0C] = core._copy(INT32 [0x10])",
"jump<+1>()",
"INT32 [0x0C] = math.add(INT32 [0x10], INT32 [0x14])"))));
EXPECT_THAT(Invoke(expr,
{{"cond", TypedValue::FromValue(kPresent)},
{"x", TypedValue::FromValue(50)},
{"y", TypedValue::FromValue(7)}},
GetOptions()),
IsOkAndHolds(TypedValueWith<int32_t>(Eq(57))));
EXPECT_THAT(Invoke(expr,
{{"cond", TypedValue::FromValue(kMissing)},
{"x", TypedValue::FromValue(50)},
{"y", TypedValue::FromValue(7)}},
GetOptions()),
IsOkAndHolds(TypedValueWith<int32_t>(Eq(58))));
}
TEST_P(WhereOperatorTest, NestedWhere) {
auto cond1 = Leaf("cond1");
auto cond2 = Leaf("cond2");
auto x = Leaf("x");
auto y = Leaf("y");
ASSERT_OK_AND_ASSIGN(auto true_case_where,
CallOp("core._short_circuit_where",
{cond2, CallOp("math.add", {x, y}), y}));
ASSERT_OK_AND_ASSIGN(auto false_case_where,
CallOp("core._short_circuit_where",
{cond2, CallOp("math.subtract", {x, y}), x}));
ASSERT_OK_AND_ASSIGN(ExprNodePtr expr,
CallOp("core._short_circuit_where",
{cond1, true_case_where, false_case_where}));
EXPECT_THAT(
CompileExprWithTypes(GetOptions(), expr,
{{"cond1", GetQType<OptionalUnit>()},
{"cond2", GetQType<OptionalUnit>()},
{"x", GetQType<int32_t>()},
{"y", GetQType<int32_t>()}}),
IsOkAndHolds(
AllOf(InitOperationsAre(),
EvalOperationsAre(
"jump_if_not<+5>(OPTIONAL_UNIT [0x00])",
"jump_if_not<+2>(OPTIONAL_UNIT [0x01])",
"INT32 [0x0C] = math.add(INT32 [0x04], INT32 [0x08])",
"jump<+1>()",
"INT32 [0x0C] = core._copy(INT32 [0x08])",
"jump<+4>()",
"jump_if_not<+2>(OPTIONAL_UNIT [0x01])",
"INT32 [0x0C] = math.subtract(INT32 [0x04], INT32 [0x08])",
"jump<+1>()",
"INT32 [0x0C] = core._copy(INT32 [0x04])"))));
EXPECT_THAT(Invoke(expr,
{{"cond1", TypedValue::FromValue(kPresent)},
{"cond2", TypedValue::FromValue(kPresent)},
{"x", TypedValue::FromValue(50)},
{"y", TypedValue::FromValue(7)}},
GetOptions()),
IsOkAndHolds(TypedValueWith<int32_t>(Eq(57))));
EXPECT_THAT(Invoke(expr,
{{"cond1", TypedValue::FromValue(kPresent)},
{"cond2", TypedValue::FromValue(kMissing)},
{"x", TypedValue::FromValue(50)},
{"y", TypedValue::FromValue(7)}},
GetOptions()),
IsOkAndHolds(TypedValueWith<int32_t>(Eq(7))));
EXPECT_THAT(Invoke(expr,
{{"cond1", TypedValue::FromValue(kMissing)},
{"cond2", TypedValue::FromValue(kPresent)},
{"x", TypedValue::FromValue(50)},
{"y", TypedValue::FromValue(7)}},
GetOptions()),
IsOkAndHolds(TypedValueWith<int32_t>(Eq(43))));
EXPECT_THAT(Invoke(expr,
{{"cond1", TypedValue::FromValue(kMissing)},
{"cond2", TypedValue::FromValue(kMissing)},
{"x", TypedValue::FromValue(50)},
{"y", TypedValue::FromValue(7)}},
GetOptions()),
IsOkAndHolds(TypedValueWith<int32_t>(Eq(50))));
}
TEST_P(WhereOperatorTest, Optimizations) {
auto cond = Placeholder("cond");
auto x = Leaf("x");
auto y = Leaf("y");
ASSERT_OK_AND_ASSIGN(auto x_plus_y, CallOp("math.add", {x, y}));
ASSERT_OK_AND_ASSIGN(auto x_mul_y, CallOp("math.multiply", {x, y}));
ASSERT_OK_AND_ASSIGN(ExprNodePtr where,
CallOp("core.where", {cond, x_plus_y, x_mul_y}));
ASSERT_OK_AND_ASSIGN(auto true_condition,
CallOp("core.equal", {Literal(5), Literal(5)}));
ASSERT_OK_AND_ASSIGN(auto false_condition,
CallOp("core.equal", {Literal(5), Literal(7)}));
ASSERT_OK_AND_ASSIGN(
auto true_nested_condition,
CallOp("core.where", {false_condition, false_condition, true_condition}));
absl::flat_hash_map<std::string, QTypePtr> input_types = {
{"x", GetQType<int64_t>()}, {"y", GetQType<int64_t>()}};
ASSERT_OK_AND_ASSIGN(auto lower_x_plus_y,
PopulateQTypes(x_plus_y, input_types));
ASSERT_OK_AND_ASSIGN(lower_x_plus_y, ToLowest(lower_x_plus_y));
ASSERT_OK_AND_ASSIGN(auto lower_x_mul_y,
PopulateQTypes(x_mul_y, input_types));
ASSERT_OK_AND_ASSIGN(lower_x_mul_y, ToLowest(lower_x_mul_y));
auto options = GetOptions();
ASSERT_OK_AND_ASSIGN(
auto peephole_optimizer,
CreatePeepholeOptimizer({ShortCircuitWhereOptimizations}));
options.optimizer = MakeOptimizer(std::move(peephole_optimizer));
{
ASSERT_OK_AND_ASSIGN(
auto expr, SubstitutePlaceholders(where, {{"cond", true_condition}}));
EXPECT_THAT(PrepareExpression(expr, input_types, options),
IsOkAndHolds(EqualsExpr(lower_x_plus_y)));
}
{
ASSERT_OK_AND_ASSIGN(
auto expr, SubstitutePlaceholders(where, {{"cond", false_condition}}));
EXPECT_THAT(PrepareExpression(expr, input_types, options),
IsOkAndHolds(EqualsExpr(lower_x_mul_y)));
}
{
ASSERT_OK_AND_ASSIGN(
auto expr,
SubstitutePlaceholders(where, {{"cond", true_nested_condition}}));
EXPECT_THAT(PrepareExpression(expr, input_types, options),
IsOkAndHolds(EqualsExpr(lower_x_plus_y)));
}
}
}
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/expr/eval/compile_where_operator.cc | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/expr/eval/compile_where_operator_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
2cef0ad9-29f3-4fb7-a084-6fed5363bd83 | cpp | tensorflow/tensorflow | pjrt_compile_util | tensorflow/compiler/jit/pjrt_compile_util.cc | tensorflow/compiler/jit/pjrt_compile_util_test.cc | #include "tensorflow/compiler/jit/pjrt_compile_util.h"
#include <vector>
#include "tensorflow/compiler/jit/device_compilation_profiler.h"
#include "tensorflow/compiler/jit/device_compiler.h"
#include "tensorflow/compiler/jit/xla_compile_util.h"
#include "tensorflow/compiler/jit/xla_compiler_options_util.h"
#include "tensorflow/compiler/jit/xla_platform_info.h"
#include "tensorflow/compiler/tf2xla/xla_compiler.h"
#include "xla/pjrt/pjrt_client.h"
#include "tensorflow/core/framework/device_base.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/resource_mgr.h"
#include "tensorflow/core/lib/core/refcount.h"
#include "tensorflow/core/platform/status.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace tensorflow {
using PjRtDeviceCompiler =
DeviceCompiler<xla::PjRtLoadedExecutable, xla::PjRtClient>;
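// Compiles `function` with the PjRt DeviceCompiler cached in `rm` (creating
// it on first use) and returns the compilation result, the PjRtClient and
// the loaded executable through the out-parameters.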
Status CompileToPjRtLoadedExecutable(
const DeviceBase* device, const XlaPlatformInfo& platform_info,
const NameAttrList& function,
const std::vector<XlaCompiler::Argument>& args,
DeviceCompileMode compile_mode, bool has_ref_vars,
bool may_alias_resource_update, FunctionLibraryRuntime* flr,
ResourceMgr* rm, const XlaCompiler::CompilationResult** compilation_result,
xla::PjRtClient** client, xla::PjRtLoadedExecutable** executable) {
PjRtDeviceCompiler* pjrt_device_compiler;
DeviceCompilationProfiler* profiler;
TF_RETURN_IF_ERROR(GetOrCreatePjRtDeviceCompilerAndProfiler(
platform_info, rm, flr, &pjrt_device_compiler, &profiler));
core::ScopedUnref pjrt_device_compiler_ref(pjrt_device_compiler);
core::ScopedUnref profiler_ref(profiler);
*client = pjrt_device_compiler->client();
XlaCompiler::Options options = GenerateCompilerOptionsForPjRt(
*flr, device, platform_info, pjrt_device_compiler);
XlaCompiler::CompileOptions compile_options =
GenerateCompileOptions(has_ref_vars, may_alias_resource_update);
return pjrt_device_compiler->CompileIfNeeded(
options, function, args, compile_options, compile_mode, profiler,
compilation_result, executable);
}
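// Convenience overload that derives the ResourceMgr and
// FunctionLibraryRuntime from the OpKernelContext.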
Status CompileToPjRtLoadedExecutable(
const OpKernelContext& ctx, const XlaPlatformInfo& platform_info,
const NameAttrList& function,
const std::vector<XlaCompiler::Argument>& args,
DeviceCompileMode compile_mode, bool has_ref_vars,
bool may_alias_resource_update,
const XlaCompiler::CompilationResult** compilation_result,
xla::PjRtClient** client, xla::PjRtLoadedExecutable** executable) {
TF_ASSIGN_OR_RETURN(ResourceMgr * rm, GetResourceMgrForDeviceCompiler(
ctx, platform_info.device_type()));
return CompileToPjRtLoadedExecutable(
ctx.device(), platform_info, function, args, compile_mode, has_ref_vars,
may_alias_resource_update, ctx.function_library(), rm, compilation_result,
client, executable);
}
} | #if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
#include "tensorflow/compiler/jit/pjrt_compile_util.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/cc/framework/scope.h"
#include "tensorflow/cc/ops/function_ops.h"
#include "tensorflow/cc/ops/math_ops.h"
#include "tensorflow/compiler/jit/test_util.h"
#include "tensorflow/core/framework/device_base.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/graph_to_functiondef.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/statusor.h"
namespace tensorflow {
namespace {
StatusOr<std::unique_ptr<Graph>> SampleGraphAddXY() {
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
Scope scope = Scope::NewRootScope().ExitOnError();
auto a = ops::_Arg(scope.WithOpName("A"), DT_INT32, 0);
auto b = ops::_Arg(scope.WithOpName("B"), DT_INT32, 1);
auto c = ops::Add(scope.WithOpName("C"), a, b);
auto d = ops::_Retval(scope.WithOpName("D"), c, 0);
TF_RETURN_IF_ERROR(scope.ToGraph(graph.get()));
return graph;
}
StatusOr<FunctionDef> SampleFunctionAddXY(const std::string& name) {
TF_ASSIGN_OR_RETURN(auto graph, SampleGraphAddXY());
FunctionDef fdef;
TF_RETURN_IF_ERROR(GraphToFunctionDef(*graph, name, &fdef));
return fdef;
}
std::vector<XlaCompiler::Argument> SampleArgsForAddXY() {
std::vector<XlaCompiler::Argument> args(2);
args[0].kind = XlaCompiler::Argument::kParameter;
args[0].type = DT_INT32;
args[0].shape = TensorShape({2});
args[1].kind = XlaCompiler::Argument::kParameter;
args[1].type = DT_INT32;
args[1].shape = TensorShape({2});
return args;
}
TEST(PjrtCompileUtilTest, CompileToPjRtLoadedExecutable) {
DeviceSetup device_setup;
  TF_ASSERT_OK_AND_ASSIGN(auto fdef, SampleFunctionAddXY("foo"));
device_setup.AddDevicesAndSetUp({DEVICE_GPU}, fdef);
Device* device = device_setup.GetDevice(DEVICE_GPU);
const XlaPlatformInfo platform_info = XlaPlatformInfoFromDevice(device);
NameAttrList function;
function.set_name("foo");
ResourceMgr resource_mgr("");
const XlaCompiler::CompilationResult* compilation_result = nullptr;
xla::PjRtLoadedExecutable* pjrt_executable = nullptr;
xla::PjRtClient* pjrt_client = nullptr;
TF_EXPECT_OK(CompileToPjRtLoadedExecutable(
device, platform_info, function, SampleArgsForAddXY(),
      DeviceCompileMode::kStrict, /*has_ref_vars=*/true,
      /*may_alias_resource_update=*/true, device_setup.flr(), &resource_mgr,
&compilation_result, &pjrt_client, &pjrt_executable));
EXPECT_TRUE(compilation_result != nullptr);
EXPECT_TRUE(pjrt_executable != nullptr);
EXPECT_TRUE(pjrt_client != nullptr);
}
TEST(PjrtCompileUtilTest, CompileToPjRtLoadedExecutableWithOpKernelContext) {
DeviceSetup device_setup;
  TF_ASSERT_OK_AND_ASSIGN(auto fdef, SampleFunctionAddXY("foo"));
device_setup.AddDevicesAndSetUp({DEVICE_GPU}, fdef);
Device* device = device_setup.GetDevice(DEVICE_GPU);
const XlaPlatformInfo platform_info = XlaPlatformInfoFromDevice(device);
NameAttrList function;
function.set_name("foo");
ResourceMgr resource_mgr("");
OpKernelContext::Params params;
params.resource_manager = &resource_mgr;
params.device = device;
params.function_library = device_setup.flr();
OpKernelContext ctx(¶ms, 1);
const XlaCompiler::CompilationResult* compilation_result = nullptr;
xla::PjRtLoadedExecutable* pjrt_executable = nullptr;
xla::PjRtClient* pjrt_client = nullptr;
TF_EXPECT_OK(CompileToPjRtLoadedExecutable(
ctx, platform_info, function, SampleArgsForAddXY(),
      DeviceCompileMode::kStrict, /*has_ref_vars=*/true,
      /*may_alias_resource_update=*/true, &compilation_result, &pjrt_client,
&pjrt_executable));
EXPECT_TRUE(compilation_result != nullptr);
EXPECT_TRUE(pjrt_executable != nullptr);
EXPECT_TRUE(pjrt_client != nullptr);
}
}
}
#endif | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/jit/pjrt_compile_util.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/jit/pjrt_compile_util_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
b8b2ea9d-441c-403b-a467-54034bd1807f | cpp | google/quiche | quic_sent_packet_manager | quiche/quic/core/quic_sent_packet_manager.cc | quiche/quic/core/quic_sent_packet_manager_test.cc | #include "quiche/quic/core/quic_sent_packet_manager.h"
#include <algorithm>
#include <cstddef>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "quiche/quic/core/congestion_control/general_loss_algorithm.h"
#include "quiche/quic/core/congestion_control/pacing_sender.h"
#include "quiche/quic/core/congestion_control/send_algorithm_interface.h"
#include "quiche/quic/core/crypto/crypto_protocol.h"
#include "quiche/quic/core/frames/quic_ack_frequency_frame.h"
#include "quiche/quic/core/proto/cached_network_parameters_proto.h"
#include "quiche/quic/core/quic_connection_stats.h"
#include "quiche/quic/core/quic_constants.h"
#include "quiche/quic/core/quic_packet_number.h"
#include "quiche/quic/core/quic_transmission_info.h"
#include "quiche/quic/core/quic_types.h"
#include "quiche/quic/core/quic_utils.h"
#include "quiche/quic/platform/api/quic_bug_tracker.h"
#include "quiche/quic/platform/api/quic_flag_utils.h"
#include "quiche/quic/platform/api/quic_flags.h"
#include "quiche/quic/platform/api/quic_logging.h"
#include "quiche/common/print_elements.h"
namespace quic {
namespace {
static const int64_t kDefaultRetransmissionTimeMs = 500;
static const int64_t kMinHandshakeTimeoutMs = 10;
static const size_t kDefaultMaxTailLossProbes = 2;
static const float kPtoMultiplierWithoutRttSamples = 3;
inline bool ShouldForceRetransmission(TransmissionType transmission_type) {
return transmission_type == HANDSHAKE_RETRANSMISSION ||
transmission_type == PTO_RETRANSMISSION;
}
static const uint32_t kConservativeUnpacedBurst = 2;
static const uint32_t kNumProbeTimeoutsForPathDegradingDelay = 4;
}
#define ENDPOINT \
(unacked_packets_.perspective() == Perspective::IS_SERVER ? "Server: " \
: "Client: ")
QuicSentPacketManager::QuicSentPacketManager(
Perspective perspective, const QuicClock* clock, QuicRandom* random,
QuicConnectionStats* stats, CongestionControlType congestion_control_type)
: unacked_packets_(perspective),
clock_(clock),
random_(random),
stats_(stats),
debug_delegate_(nullptr),
network_change_visitor_(nullptr),
initial_congestion_window_(kInitialCongestionWindow),
loss_algorithm_(&uber_loss_algorithm_),
consecutive_crypto_retransmission_count_(0),
pending_timer_transmission_count_(0),
using_pacing_(false),
conservative_handshake_retransmits_(false),
largest_mtu_acked_(0),
handshake_finished_(false),
peer_max_ack_delay_(
QuicTime::Delta::FromMilliseconds(kDefaultPeerDelayedAckTimeMs)),
rtt_updated_(false),
acked_packets_iter_(last_ack_frame_.packets.rbegin()),
consecutive_pto_count_(0),
handshake_mode_disabled_(false),
handshake_packet_acked_(false),
zero_rtt_packet_acked_(false),
one_rtt_packet_acked_(false),
num_ptos_for_path_degrading_(kNumProbeTimeoutsForPathDegradingDelay),
ignore_pings_(false),
ignore_ack_delay_(false) {
SetSendAlgorithm(congestion_control_type);
}
QuicSentPacketManager::~QuicSentPacketManager() {}
void QuicSentPacketManager::SetFromConfig(const QuicConfig& config) {
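  // Applies negotiated transport parameters and client connection options:
  // initial RTT, peer ack-delay bounds, congestion controller selection,
  // initial congestion window, loss-detection tuning, and pacing.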
const Perspective perspective = unacked_packets_.perspective();
if (config.HasReceivedInitialRoundTripTimeUs() &&
config.ReceivedInitialRoundTripTimeUs() > 0) {
if (!config.HasClientSentConnectionOption(kNRTT, perspective)) {
SetInitialRtt(QuicTime::Delta::FromMicroseconds(
config.ReceivedInitialRoundTripTimeUs()),
false);
}
} else if (config.HasInitialRoundTripTimeUsToSend() &&
config.GetInitialRoundTripTimeUsToSend() > 0) {
    SetInitialRtt(QuicTime::Delta::FromMicroseconds(
                      config.GetInitialRoundTripTimeUsToSend()),
                  /*trusted=*/false);
}
if (config.HasReceivedMaxAckDelayMs()) {
peer_max_ack_delay_ =
QuicTime::Delta::FromMilliseconds(config.ReceivedMaxAckDelayMs());
}
if (GetQuicReloadableFlag(quic_can_send_ack_frequency) &&
perspective == Perspective::IS_SERVER) {
if (config.HasReceivedMinAckDelayMs()) {
peer_min_ack_delay_ =
QuicTime::Delta::FromMilliseconds(config.ReceivedMinAckDelayMs());
}
if (config.HasClientSentConnectionOption(kAFF1, perspective)) {
use_smoothed_rtt_in_ack_delay_ = true;
}
}
if (config.HasClientSentConnectionOption(kMAD0, perspective)) {
ignore_ack_delay_ = true;
}
if (config.HasClientRequestedIndependentOption(kTBBR, perspective)) {
SetSendAlgorithm(kBBR);
}
if (GetQuicReloadableFlag(quic_allow_client_enabled_bbr_v2) &&
config.HasClientRequestedIndependentOption(kB2ON, perspective)) {
QUIC_RELOADABLE_FLAG_COUNT(quic_allow_client_enabled_bbr_v2);
SetSendAlgorithm(kBBRv2);
}
if (config.HasClientRequestedIndependentOption(kRENO, perspective)) {
SetSendAlgorithm(kRenoBytes);
} else if (config.HasClientRequestedIndependentOption(kBYTE, perspective) ||
(GetQuicReloadableFlag(quic_default_to_bbr) &&
config.HasClientRequestedIndependentOption(kQBIC, perspective))) {
SetSendAlgorithm(kCubicBytes);
}
if (config.HasClientRequestedIndependentOption(kIW03, perspective)) {
initial_congestion_window_ = 3;
send_algorithm_->SetInitialCongestionWindowInPackets(3);
}
if (config.HasClientRequestedIndependentOption(kIW10, perspective)) {
initial_congestion_window_ = 10;
send_algorithm_->SetInitialCongestionWindowInPackets(10);
}
if (config.HasClientRequestedIndependentOption(kIW20, perspective)) {
initial_congestion_window_ = 20;
send_algorithm_->SetInitialCongestionWindowInPackets(20);
}
if (config.HasClientRequestedIndependentOption(kIW50, perspective)) {
initial_congestion_window_ = 50;
send_algorithm_->SetInitialCongestionWindowInPackets(50);
}
if (config.HasClientRequestedIndependentOption(kBWS5, perspective)) {
initial_congestion_window_ = 10;
send_algorithm_->SetInitialCongestionWindowInPackets(10);
}
if (config.HasClientRequestedIndependentOption(kIGNP, perspective)) {
ignore_pings_ = true;
}
using_pacing_ = !GetQuicFlag(quic_disable_pacing_for_perf_tests);
if (config.HasClientRequestedIndependentOption(kILD0, perspective)) {
uber_loss_algorithm_.SetReorderingShift(kDefaultIetfLossDelayShift);
uber_loss_algorithm_.DisableAdaptiveReorderingThreshold();
}
if (config.HasClientRequestedIndependentOption(kILD1, perspective)) {
uber_loss_algorithm_.SetReorderingShift(kDefaultLossDelayShift);
uber_loss_algorithm_.DisableAdaptiveReorderingThreshold();
}
if (config.HasClientRequestedIndependentOption(kILD2, perspective)) {
uber_loss_algorithm_.EnableAdaptiveReorderingThreshold();
uber_loss_algorithm_.SetReorderingShift(kDefaultIetfLossDelayShift);
}
if (config.HasClientRequestedIndependentOption(kILD3, perspective)) {
uber_loss_algorithm_.SetReorderingShift(kDefaultLossDelayShift);
uber_loss_algorithm_.EnableAdaptiveReorderingThreshold();
}
if (config.HasClientRequestedIndependentOption(kILD4, perspective)) {
uber_loss_algorithm_.SetReorderingShift(kDefaultLossDelayShift);
uber_loss_algorithm_.EnableAdaptiveReorderingThreshold();
uber_loss_algorithm_.EnableAdaptiveTimeThreshold();
}
if (config.HasClientRequestedIndependentOption(kRUNT, perspective)) {
uber_loss_algorithm_.DisablePacketThresholdForRuntPackets();
}
if (config.HasClientSentConnectionOption(kCONH, perspective)) {
conservative_handshake_retransmits_ = true;
}
if (config.HasClientSentConnectionOption(kRNIB, perspective)) {
pacing_sender_.set_remove_non_initial_burst();
}
send_algorithm_->SetFromConfig(config, perspective);
loss_algorithm_->SetFromConfig(config, perspective);
if (network_change_visitor_ != nullptr) {
network_change_visitor_->OnCongestionChange();
}
if (debug_delegate_ != nullptr) {
DebugDelegate::SendParameters parameters;
parameters.congestion_control_type =
send_algorithm_->GetCongestionControlType();
parameters.use_pacing = using_pacing_;
parameters.initial_congestion_window = initial_congestion_window_;
debug_delegate_->OnConfigProcessed(parameters);
}
}
void QuicSentPacketManager::ApplyConnectionOptions(
const QuicTagVector& connection_options) {
std::optional<CongestionControlType> cc_type;
if (ContainsQuicTag(connection_options, kB2ON)) {
cc_type = kBBRv2;
} else if (ContainsQuicTag(connection_options, kTBBR)) {
cc_type = kBBR;
} else if (ContainsQuicTag(connection_options, kRENO)) {
cc_type = kRenoBytes;
} else if (ContainsQuicTag(connection_options, kQBIC)) {
cc_type = kCubicBytes;
}
if (cc_type.has_value()) {
SetSendAlgorithm(*cc_type);
}
send_algorithm_->ApplyConnectionOptions(connection_options);
}
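// Seeds the congestion controller with bandwidth and min RTT cached from a
// previous connection so it can start from a trusted prior estimate.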
void QuicSentPacketManager::ResumeConnectionState(
const CachedNetworkParameters& cached_network_params,
bool max_bandwidth_resumption) {
QuicBandwidth bandwidth = QuicBandwidth::FromBytesPerSecond(
max_bandwidth_resumption
? cached_network_params.max_bandwidth_estimate_bytes_per_second()
: cached_network_params.bandwidth_estimate_bytes_per_second());
QuicTime::Delta rtt =
QuicTime::Delta::FromMilliseconds(cached_network_params.min_rtt_ms());
  SendAlgorithmInterface::NetworkParams params(
      bandwidth, rtt, /*allow_cwnd_to_decrease=*/false);
params.is_rtt_trusted = true;
AdjustNetworkParameters(params);
}
void QuicSentPacketManager::AdjustNetworkParameters(
const SendAlgorithmInterface::NetworkParams& params) {
const QuicBandwidth& bandwidth = params.bandwidth;
const QuicTime::Delta& rtt = params.rtt;
if (!rtt.IsZero()) {
if (params.is_rtt_trusted) {
      SetInitialRtt(rtt, /*trusted=*/true);
} else if (rtt_stats_.initial_rtt() ==
QuicTime::Delta::FromMilliseconds(kInitialRttMs)) {
      SetInitialRtt(rtt, /*trusted=*/false);
}
}
const QuicByteCount old_cwnd = send_algorithm_->GetCongestionWindow();
if (GetQuicReloadableFlag(quic_conservative_bursts) && using_pacing_ &&
!bandwidth.IsZero()) {
QUIC_RELOADABLE_FLAG_COUNT(quic_conservative_bursts);
pacing_sender_.SetBurstTokens(kConservativeUnpacedBurst);
}
send_algorithm_->AdjustNetworkParameters(params);
if (debug_delegate_ != nullptr) {
debug_delegate_->OnAdjustNetworkParameters(
bandwidth, rtt.IsZero() ? rtt_stats_.MinOrInitialRtt() : rtt, old_cwnd,
send_algorithm_->GetCongestionWindow());
}
}
void QuicSentPacketManager::SetLossDetectionTuner(
std::unique_ptr<LossDetectionTunerInterface> tuner) {
uber_loss_algorithm_.SetLossDetectionTuner(std::move(tuner));
}
void QuicSentPacketManager::OnConfigNegotiated() {
loss_algorithm_->OnConfigNegotiated();
}
void QuicSentPacketManager::OnConnectionClosed() {
loss_algorithm_->OnConnectionClosed();
}
void QuicSentPacketManager::SetHandshakeConfirmed() {
if (!handshake_finished_) {
handshake_finished_ = true;
NeuterHandshakePackets();
}
}
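// Finishes ack processing: runs loss detection, fires the congestion
// event, records the bandwidth estimate, and resets the consecutive PTO
// and crypto retransmission counters whenever the ack yielded a new RTT
// sample (i.e. forward progress was made).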
void QuicSentPacketManager::PostProcessNewlyAckedPackets(
QuicPacketNumber ack_packet_number, EncryptionLevel ack_decrypted_level,
const QuicAckFrame& ack_frame, QuicTime ack_receive_time, bool rtt_updated,
QuicByteCount prior_bytes_in_flight,
std::optional<QuicEcnCounts> ecn_counts) {
unacked_packets_.NotifyAggregatedStreamFrameAcked(
last_ack_frame_.ack_delay_time);
InvokeLossDetection(ack_receive_time);
MaybeInvokeCongestionEvent(
rtt_updated, prior_bytes_in_flight, ack_receive_time, ecn_counts,
peer_ack_ecn_counts_[QuicUtils::GetPacketNumberSpace(
ack_decrypted_level)]);
unacked_packets_.RemoveObsoletePackets();
sustained_bandwidth_recorder_.RecordEstimate(
send_algorithm_->InRecovery(), send_algorithm_->InSlowStart(),
send_algorithm_->BandwidthEstimate(), ack_receive_time, clock_->WallNow(),
rtt_stats_.smoothed_rtt());
if (rtt_updated) {
if (consecutive_pto_count_ >
stats_->max_consecutive_rto_with_forward_progress) {
stats_->max_consecutive_rto_with_forward_progress =
consecutive_pto_count_;
}
consecutive_pto_count_ = 0;
consecutive_crypto_retransmission_count_ = 0;
}
if (debug_delegate_ != nullptr) {
debug_delegate_->OnIncomingAck(
ack_packet_number, ack_decrypted_level, ack_frame, ack_receive_time,
LargestAcked(ack_frame), rtt_updated, GetLeastUnacked());
}
last_ack_frame_.packets.RemoveUpTo(unacked_packets_.GetLeastUnacked());
last_ack_frame_.received_packet_times.clear();
}
void QuicSentPacketManager::MaybeInvokeCongestionEvent(
bool rtt_updated, QuicByteCount prior_in_flight, QuicTime event_time,
std::optional<QuicEcnCounts> ecn_counts,
const QuicEcnCounts& previous_counts) {
if (!rtt_updated && packets_acked_.empty() && packets_lost_.empty()) {
return;
}
const bool overshooting_detected =
stats_->overshooting_detected_with_network_parameters_adjusted;
QuicPacketCount newly_acked_ect = 0, newly_acked_ce = 0;
if (ecn_counts.has_value()) {
QUICHE_DCHECK(GetQuicRestartFlag(quic_support_ect1));
newly_acked_ect = ecn_counts->ect1 - previous_counts.ect1;
if (newly_acked_ect == 0) {
newly_acked_ect = ecn_counts->ect0 - previous_counts.ect0;
} else {
QUIC_BUG_IF(quic_bug_518619343_04,
ecn_counts->ect0 - previous_counts.ect0)
<< "Sent ECT(0) and ECT(1) newly acked in the same ACK.";
}
newly_acked_ce = ecn_counts->ce - previous_counts.ce;
}
if (using_pacing_) {
pacing_sender_.OnCongestionEvent(rtt_updated, prior_in_flight, event_time,
packets_acked_, packets_lost_,
newly_acked_ect, newly_acked_ce);
} else {
send_algorithm_->OnCongestionEvent(rtt_updated, prior_in_flight, event_time,
packets_acked_, packets_lost_,
newly_acked_ect, newly_acked_ce);
}
if (debug_delegate_ != nullptr && !overshooting_detected &&
stats_->overshooting_detected_with_network_parameters_adjusted) {
debug_delegate_->OnOvershootingDetected();
}
packets_acked_.clear();
packets_lost_.clear();
if (network_change_visitor_ != nullptr) {
network_change_visitor_->OnCongestionChange();
}
}
void QuicSentPacketManager::MarkInitialPacketsForRetransmission() {
if (unacked_packets_.empty()) {
return;
}
QuicPacketNumber packet_number = unacked_packets_.GetLeastUnacked();
QuicPacketNumber largest_sent_packet = unacked_packets_.largest_sent_packet();
for (; packet_number <= largest_sent_packet; ++packet_number) {
QuicTransmissionInfo* transmission_info =
unacked_packets_.GetMutableTransmissionInfo(packet_number);
if (transmission_info->encryption_level == ENCRYPTION_INITIAL) {
if (transmission_info->in_flight) {
unacked_packets_.RemoveFromInFlight(transmission_info);
}
if (unacked_packets_.HasRetransmittableFrames(*transmission_info)) {
MarkForRetransmission(packet_number, ALL_INITIAL_RETRANSMISSION);
}
}
}
}
void QuicSentPacketManager::MarkZeroRttPacketsForRetransmission() {
if (unacked_packets_.empty()) {
return;
}
QuicPacketNumber packet_number = unacked_packets_.GetLeastUnacked();
QuicPacketNumber largest_sent_packet = unacked_packets_.largest_sent_packet();
for (; packet_number <= largest_sent_packet; ++packet_number) {
QuicTransmissionInfo* transmission_info =
unacked_packets_.GetMutableTransmissionInfo(packet_number);
if (transmission_info->encryption_level == ENCRYPTION_ZERO_RTT) {
if (transmission_info->in_flight) {
unacked_packets_.RemoveFromInFlight(transmission_info);
}
if (unacked_packets_.HasRetransmittableFrames(*transmission_info)) {
MarkForRetransmission(packet_number, ALL_ZERO_RTT_RETRANSMISSION);
}
}
}
}
void QuicSentPacketManager::NeuterUnencryptedPackets() {
for (QuicPacketNumber packet_number :
unacked_packets_.NeuterUnencryptedPackets()) {
send_algorithm_->OnPacketNeutered(packet_number);
}
if (handshake_mode_disabled_) {
consecutive_pto_count_ = 0;
uber_loss_algorithm_.ResetLossDetection(INITIAL_DATA);
}
}
void QuicSentPacketManager::NeuterHandshakePackets() {
for (QuicPacketNumber packet_number :
unacked_packets_.NeuterHandshakePackets()) {
send_algorithm_->OnPacketNeutered(packet_number);
}
if (handshake_mode_disabled_) {
consecutive_pto_count_ = 0;
uber_loss_algorithm_.ResetLossDetection(HANDSHAKE_DATA);
}
}
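// The peer only delays acks for application data, so max_ack_delay is
// added to the PTO only for that space (or when packet number spaces are
// not separated).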
bool QuicSentPacketManager::ShouldAddMaxAckDelay(
PacketNumberSpace space) const {
return !supports_multiple_packet_number_spaces() || space == APPLICATION_DATA;
}
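// Finds the packet number space whose last in-flight packet was sent
// earliest; application data is skipped until the handshake completes.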
QuicTime QuicSentPacketManager::GetEarliestPacketSentTimeForPto(
PacketNumberSpace* packet_number_space) const {
QUICHE_DCHECK(supports_multiple_packet_number_spaces());
QuicTime earliest_sent_time = QuicTime::Zero();
for (int8_t i = 0; i < NUM_PACKET_NUMBER_SPACES; ++i) {
const QuicTime sent_time = unacked_packets_.GetLastInFlightPacketSentTime(
static_cast<PacketNumberSpace>(i));
if (!handshake_finished_ && i == APPLICATION_DATA) {
continue;
}
if (!sent_time.IsInitialized() || (earliest_sent_time.IsInitialized() &&
earliest_sent_time <= sent_time)) {
continue;
}
earliest_sent_time = sent_time;
*packet_number_space = static_cast<PacketNumberSpace>(i);
}
return earliest_sent_time;
}
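// Handshake/PTO retransmissions retransmit the frames right away; loss
// retransmissions only notify that frames were lost and record
// first_sent_after_loss, which later helps detect spurious loss.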
void QuicSentPacketManager::MarkForRetransmission(
QuicPacketNumber packet_number, TransmissionType transmission_type) {
QuicTransmissionInfo* transmission_info =
unacked_packets_.GetMutableTransmissionInfo(packet_number);
QUIC_BUG_IF(quic_bug_12552_2, transmission_type != LOSS_RETRANSMISSION &&
!unacked_packets_.HasRetransmittableFrames(
*transmission_info))
<< "packet number " << packet_number
<< " transmission_type: " << transmission_type << " transmission_info "
<< transmission_info->DebugString();
if (ShouldForceRetransmission(transmission_type)) {
if (!unacked_packets_.RetransmitFrames(
QuicFrames(transmission_info->retransmittable_frames),
transmission_type)) {
QUIC_CODE_COUNT(quic_retransmit_frames_failed);
return;
}
QUIC_CODE_COUNT(quic_retransmit_frames_succeeded);
} else {
unacked_packets_.NotifyFramesLost(*transmission_info, transmission_type);
if (!transmission_info->retransmittable_frames.empty()) {
if (transmission_type == LOSS_RETRANSMISSION) {
transmission_info->first_sent_after_loss =
unacked_packets_.largest_sent_packet() + 1;
} else {
transmission_info->first_sent_after_loss.Clear();
}
}
}
transmission_info =
unacked_packets_.GetMutableTransmissionInfo(packet_number);
transmission_info->state =
QuicUtils::RetransmissionTypeToPacketState(transmission_type);
}
void QuicSentPacketManager::RecordOneSpuriousRetransmission(
const QuicTransmissionInfo& info) {
stats_->bytes_spuriously_retransmitted += info.bytes_sent;
++stats_->packets_spuriously_retransmitted;
if (debug_delegate_ != nullptr) {
debug_delegate_->OnSpuriousPacketRetransmission(info.transmission_type,
info.bytes_sent);
}
}
void QuicSentPacketManager::MarkPacketHandled(QuicPacketNumber packet_number,
QuicTransmissionInfo* info,
QuicTime ack_receive_time,
QuicTime::Delta ack_delay_time,
QuicTime receive_timestamp) {
if (info->has_ack_frequency) {
for (const auto& frame : info->retransmittable_frames) {
if (frame.type == ACK_FREQUENCY_FRAME) {
OnAckFrequencyFrameAcked(*frame.ack_frequency_frame);
}
}
}
if (info->transmission_type == NOT_RETRANSMISSION) {
unacked_packets_.MaybeAggregateAckedStreamFrame(*info, ack_delay_time,
receive_timestamp);
} else {
unacked_packets_.NotifyAggregatedStreamFrameAcked(ack_delay_time);
const bool new_data_acked = unacked_packets_.NotifyFramesAcked(
*info, ack_delay_time, receive_timestamp);
if (!new_data_acked && info->transmission_type != NOT_RETRANSMISSION) {
QUIC_DVLOG(1) << "Detect spurious retransmitted packet " << packet_number
<< " transmission type: " << info->transmission_type;
RecordOneSpuriousRetransmission(*info);
}
}
if (info->state == LOST) {
const PacketNumberSpace packet_number_space =
unacked_packets_.GetPacketNumberSpace(info->encryption_level);
const QuicPacketNumber previous_largest_acked =
supports_multiple_packet_number_spaces()
? unacked_packets_.GetLargestAckedOfPacketNumberSpace(
packet_number_space)
: unacked_packets_.largest_acked();
QUIC_DVLOG(1) << "Packet " << packet_number
<< " was detected lost spuriously, "
"previous_largest_acked: "
<< previous_largest_acked;
loss_algorithm_->SpuriousLossDetected(unacked_packets_, rtt_stats_,
ack_receive_time, packet_number,
previous_largest_acked);
++stats_->packet_spuriously_detected_lost;
}
if (network_change_visitor_ != nullptr &&
info->bytes_sent > largest_mtu_acked_) {
largest_mtu_acked_ = info->bytes_sent;
network_change_visitor_->OnPathMtuIncreased(largest_mtu_acked_);
}
unacked_packets_.RemoveFromInFlight(info);
unacked_packets_.RemoveRetransmittability(info);
info->state = ACKED;
}
bool QuicSentPacketManager::CanSendAckFrequency() const {
return !peer_min_ack_delay_.IsInfinite() && handshake_finished_;
}
QuicAckFrequencyFrame QuicSentPacketManager::GetUpdatedAckFrequencyFrame()
const {
QuicAckFrequencyFrame frame;
if (!CanSendAckFrequency()) {
QUIC_BUG(quic_bug_10750_1)
<< "New AckFrequencyFrame is created while it shouldn't.";
return frame;
}
QUIC_RELOADABLE_FLAG_COUNT_N(quic_can_send_ack_frequency, 1, 3);
frame.packet_tolerance = kMaxRetransmittablePacketsBeforeAck;
auto rtt = use_smoothed_rtt_in_ack_delay_ ? rtt_stats_.SmoothedOrInitialRtt()
: rtt_stats_.MinOrInitialRtt();
frame.max_ack_delay = rtt * kPeerAckDecimationDelay;
frame.max_ack_delay = std::max(frame.max_ack_delay, peer_min_ack_delay_);
frame.max_ack_delay =
std::max(frame.max_ack_delay,
QuicTime::Delta::FromMilliseconds(kDefaultMinAckDelayTimeMs));
return frame;
}
void QuicSentPacketManager::RecordEcnMarkingSent(QuicEcnCodepoint ecn_codepoint,
EncryptionLevel level) {
PacketNumberSpace space = QuicUtils::GetPacketNumberSpace(level);
switch (ecn_codepoint) {
case ECN_NOT_ECT:
break;
case ECN_ECT0:
++ect0_packets_sent_[space];
break;
case ECN_ECT1:
++ect1_packets_sent_[space];
break;
case ECN_CE:
++ect0_packets_sent_[space];
++ect1_packets_sent_[space];
break;
}
}
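// Registers a newly sent packet with the pacer (or send algorithm) and the
// unacked packet map. With the IGNP option, pure PING packets neither
// count as in flight nor produce RTT samples. Returns true if the packet
// counts toward bytes in flight.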
bool QuicSentPacketManager::OnPacketSent(
SerializedPacket* mutable_packet, QuicTime sent_time,
TransmissionType transmission_type,
HasRetransmittableData has_retransmittable_data, bool measure_rtt,
QuicEcnCodepoint ecn_codepoint) {
const SerializedPacket& packet = *mutable_packet;
QuicPacketNumber packet_number = packet.packet_number;
QUICHE_DCHECK_LE(FirstSendingPacketNumber(), packet_number);
QUICHE_DCHECK(!unacked_packets_.IsUnacked(packet_number));
QUIC_BUG_IF(quic_bug_10750_2, packet.encrypted_length == 0)
<< "Cannot send empty packets.";
if (pending_timer_transmission_count_ > 0) {
--pending_timer_transmission_count_;
}
bool in_flight = has_retransmittable_data == HAS_RETRANSMITTABLE_DATA;
if (ignore_pings_ && mutable_packet->retransmittable_frames.size() == 1 &&
mutable_packet->retransmittable_frames[0].type == PING_FRAME) {
in_flight = false;
measure_rtt = false;
}
if (using_pacing_) {
pacing_sender_.OnPacketSent(sent_time, unacked_packets_.bytes_in_flight(),
packet_number, packet.encrypted_length,
has_retransmittable_data);
} else {
send_algorithm_->OnPacketSent(sent_time, unacked_packets_.bytes_in_flight(),
packet_number, packet.encrypted_length,
has_retransmittable_data);
}
if (packet.has_message) {
for (auto& frame : mutable_packet->retransmittable_frames) {
if (frame.type == MESSAGE_FRAME) {
frame.message_frame->message_data.clear();
frame.message_frame->message_length = 0;
}
}
}
if (packet.has_ack_frequency) {
for (const auto& frame : packet.retransmittable_frames) {
if (frame.type == ACK_FREQUENCY_FRAME) {
OnAckFrequencyFrameSent(*frame.ack_frequency_frame);
}
}
}
RecordEcnMarkingSent(ecn_codepoint, packet.encryption_level);
unacked_packets_.AddSentPacket(mutable_packet, transmission_type, sent_time,
in_flight, measure_rtt, ecn_codepoint);
return in_flight;
}
const QuicTransmissionInfo& QuicSentPacketManager::AddDispatcherSentPacket(
const DispatcherSentPacket& packet) {
QUIC_DVLOG(1) << "QuicSPM: Adding dispatcher sent packet "
<< packet.packet_number << ", size: " << packet.bytes_sent
<< ", sent_time: " << packet.sent_time
<< ", largest_acked: " << packet.largest_acked;
if (using_pacing_) {
pacing_sender_.OnPacketSent(
packet.sent_time, unacked_packets_.bytes_in_flight(),
packet.packet_number, packet.bytes_sent, NO_RETRANSMITTABLE_DATA);
} else {
send_algorithm_->OnPacketSent(
packet.sent_time, unacked_packets_.bytes_in_flight(),
packet.packet_number, packet.bytes_sent, NO_RETRANSMITTABLE_DATA);
}
return unacked_packets_.AddDispatcherSentPacket(packet);
}
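// Handles the retransmission alarm in one of three modes: retransmit
// pending crypto packets, run time-based loss detection, or arm a single
// probe (PTO) transmission with exponential backoff.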
QuicSentPacketManager::RetransmissionTimeoutMode
QuicSentPacketManager::OnRetransmissionTimeout() {
QUICHE_DCHECK(unacked_packets_.HasInFlightPackets() ||
(handshake_mode_disabled_ && !handshake_finished_));
QUICHE_DCHECK_EQ(0u, pending_timer_transmission_count_);
switch (GetRetransmissionMode()) {
case HANDSHAKE_MODE:
QUICHE_DCHECK(!handshake_mode_disabled_);
++stats_->crypto_retransmit_count;
RetransmitCryptoPackets();
return HANDSHAKE_MODE;
case LOSS_MODE: {
++stats_->loss_timeout_count;
QuicByteCount prior_in_flight = unacked_packets_.bytes_in_flight();
const QuicTime now = clock_->Now();
InvokeLossDetection(now);
      MaybeInvokeCongestionEvent(/*rtt_updated=*/false, prior_in_flight, now,
                                 std::optional<QuicEcnCounts>(),
                                 peer_ack_ecn_counts_[APPLICATION_DATA]);
return LOSS_MODE;
}
case PTO_MODE:
QUIC_DVLOG(1) << ENDPOINT << "PTO mode";
++stats_->pto_count;
if (handshake_mode_disabled_ && !handshake_finished_) {
++stats_->crypto_retransmit_count;
}
++consecutive_pto_count_;
pending_timer_transmission_count_ = 1;
return PTO_MODE;
}
QUIC_BUG(quic_bug_10750_3)
<< "Unknown retransmission mode " << GetRetransmissionMode();
return GetRetransmissionMode();
}
void QuicSentPacketManager::RetransmitCryptoPackets() {
QUICHE_DCHECK_EQ(HANDSHAKE_MODE, GetRetransmissionMode());
++consecutive_crypto_retransmission_count_;
bool packet_retransmitted = false;
std::vector<QuicPacketNumber> crypto_retransmissions;
if (!unacked_packets_.empty()) {
QuicPacketNumber packet_number = unacked_packets_.GetLeastUnacked();
QuicPacketNumber largest_sent_packet =
unacked_packets_.largest_sent_packet();
for (; packet_number <= largest_sent_packet; ++packet_number) {
QuicTransmissionInfo* transmission_info =
unacked_packets_.GetMutableTransmissionInfo(packet_number);
if (!transmission_info->in_flight ||
transmission_info->state != OUTSTANDING ||
!transmission_info->has_crypto_handshake ||
!unacked_packets_.HasRetransmittableFrames(*transmission_info)) {
continue;
}
packet_retransmitted = true;
crypto_retransmissions.push_back(packet_number);
++pending_timer_transmission_count_;
}
}
QUICHE_DCHECK(packet_retransmitted)
<< "No crypto packets found to retransmit.";
for (QuicPacketNumber retransmission : crypto_retransmissions) {
MarkForRetransmission(retransmission, HANDSHAKE_RETRANSMISSION);
}
}
bool QuicSentPacketManager::MaybeRetransmitOldestPacket(TransmissionType type) {
if (!unacked_packets_.empty()) {
QuicPacketNumber packet_number = unacked_packets_.GetLeastUnacked();
QuicPacketNumber largest_sent_packet =
unacked_packets_.largest_sent_packet();
for (; packet_number <= largest_sent_packet; ++packet_number) {
QuicTransmissionInfo* transmission_info =
unacked_packets_.GetMutableTransmissionInfo(packet_number);
if (!transmission_info->in_flight ||
transmission_info->state != OUTSTANDING ||
!unacked_packets_.HasRetransmittableFrames(*transmission_info)) {
continue;
}
MarkForRetransmission(packet_number, type);
return true;
}
}
QUIC_DVLOG(1)
<< "No retransmittable packets, so RetransmitOldestPacket failed.";
return false;
}
void QuicSentPacketManager::MaybeSendProbePacket() {
if (pending_timer_transmission_count_ == 0) {
return;
}
PacketNumberSpace packet_number_space;
if (supports_multiple_packet_number_spaces()) {
if (!GetEarliestPacketSentTimeForPto(&packet_number_space)
.IsInitialized()) {
QUIC_BUG_IF(quic_earliest_sent_time_not_initialized,
unacked_packets_.perspective() == Perspective::IS_SERVER)
<< "earliest_sent_time not initialized when trying to send PTO "
"retransmissions";
return;
}
}
std::vector<QuicPacketNumber> probing_packets;
if (!unacked_packets_.empty()) {
QuicPacketNumber packet_number = unacked_packets_.GetLeastUnacked();
QuicPacketNumber largest_sent_packet =
unacked_packets_.largest_sent_packet();
for (; packet_number <= largest_sent_packet; ++packet_number) {
QuicTransmissionInfo* transmission_info =
unacked_packets_.GetMutableTransmissionInfo(packet_number);
if (transmission_info->state == OUTSTANDING &&
unacked_packets_.HasRetransmittableFrames(*transmission_info) &&
(!supports_multiple_packet_number_spaces() ||
unacked_packets_.GetPacketNumberSpace(
transmission_info->encryption_level) == packet_number_space)) {
QUICHE_DCHECK(transmission_info->in_flight);
probing_packets.push_back(packet_number);
if (probing_packets.size() == pending_timer_transmission_count_) {
break;
}
}
}
}
for (QuicPacketNumber retransmission : probing_packets) {
QUIC_DVLOG(1) << ENDPOINT << "Marking " << retransmission
<< " for probing retransmission";
MarkForRetransmission(retransmission, PTO_RETRANSMISSION);
}
}
void QuicSentPacketManager::EnableIetfPtoAndLossDetection() {
handshake_mode_disabled_ = true;
}
void QuicSentPacketManager::RetransmitDataOfSpaceIfAny(
PacketNumberSpace space) {
QUICHE_DCHECK(supports_multiple_packet_number_spaces());
if (!unacked_packets_.GetLastInFlightPacketSentTime(space).IsInitialized()) {
return;
}
if (unacked_packets_.empty()) {
return;
}
QuicPacketNumber packet_number = unacked_packets_.GetLeastUnacked();
QuicPacketNumber largest_sent_packet = unacked_packets_.largest_sent_packet();
for (; packet_number <= largest_sent_packet; ++packet_number) {
QuicTransmissionInfo* transmission_info =
unacked_packets_.GetMutableTransmissionInfo(packet_number);
if (transmission_info->state == OUTSTANDING &&
unacked_packets_.HasRetransmittableFrames(*transmission_info) &&
unacked_packets_.GetPacketNumberSpace(
transmission_info->encryption_level) == space) {
QUICHE_DCHECK(transmission_info->in_flight);
if (pending_timer_transmission_count_ == 0) {
pending_timer_transmission_count_ = 1;
}
MarkForRetransmission(packet_number, PTO_RETRANSMISSION);
return;
}
}
}
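// HANDSHAKE_MODE while crypto packets are pending, LOSS_MODE while the
// loss algorithm has a timeout armed, otherwise PTO_MODE.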
QuicSentPacketManager::RetransmissionTimeoutMode
QuicSentPacketManager::GetRetransmissionMode() const {
QUICHE_DCHECK(unacked_packets_.HasInFlightPackets() ||
(handshake_mode_disabled_ && !handshake_finished_));
if (!handshake_mode_disabled_ && !handshake_finished_ &&
unacked_packets_.HasPendingCryptoPackets()) {
return HANDSHAKE_MODE;
}
if (loss_algorithm_->GetLossTimeout() != QuicTime::Zero()) {
return LOSS_MODE;
}
return PTO_MODE;
}
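// Feeds newly acked packets to the loss algorithm, updates reordering
// statistics, and marks every packet declared lost for retransmission.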
void QuicSentPacketManager::InvokeLossDetection(QuicTime time) {
if (!packets_acked_.empty()) {
QUICHE_DCHECK_LE(packets_acked_.front().packet_number,
packets_acked_.back().packet_number);
largest_newly_acked_ = packets_acked_.back().packet_number;
}
LossDetectionInterface::DetectionStats detection_stats =
loss_algorithm_->DetectLosses(unacked_packets_, time, rtt_stats_,
largest_newly_acked_, packets_acked_,
&packets_lost_);
if (detection_stats.sent_packets_max_sequence_reordering >
stats_->sent_packets_max_sequence_reordering) {
stats_->sent_packets_max_sequence_reordering =
detection_stats.sent_packets_max_sequence_reordering;
}
stats_->sent_packets_num_borderline_time_reorderings +=
detection_stats.sent_packets_num_borderline_time_reorderings;
stats_->total_loss_detection_response_time +=
detection_stats.total_loss_detection_response_time;
for (const LostPacket& packet : packets_lost_) {
QuicTransmissionInfo* info =
unacked_packets_.GetMutableTransmissionInfo(packet.packet_number);
++stats_->packets_lost;
if (debug_delegate_ != nullptr) {
debug_delegate_->OnPacketLoss(packet.packet_number,
info->encryption_level, LOSS_RETRANSMISSION,
time);
}
unacked_packets_.RemoveFromInFlight(info);
MarkForRetransmission(packet.packet_number, LOSS_RETRANSMISSION);
}
}
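// Takes an RTT sample only when the ack's largest_acked is a tracked,
// RTT-contributing packet; the peer-reported ack delay is passed to the
// RTT stats along with the measured send delta.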
bool QuicSentPacketManager::MaybeUpdateRTT(QuicPacketNumber largest_acked,
QuicTime::Delta ack_delay_time,
QuicTime ack_receive_time) {
if (!unacked_packets_.IsUnacked(largest_acked)) {
return false;
}
const QuicTransmissionInfo& transmission_info =
unacked_packets_.GetTransmissionInfo(largest_acked);
if (transmission_info.sent_time == QuicTime::Zero()) {
QUIC_BUG(quic_bug_10750_4)
<< "Acked packet has zero sent time, largest_acked:" << largest_acked;
return false;
}
if (transmission_info.state == NOT_CONTRIBUTING_RTT) {
return false;
}
if (transmission_info.sent_time > ack_receive_time) {
QUIC_CODE_COUNT(quic_receive_acked_before_sending);
}
QuicTime::Delta send_delta = ack_receive_time - transmission_info.sent_time;
const bool min_rtt_available = !rtt_stats_.min_rtt().IsZero();
rtt_stats_.UpdateRtt(send_delta, ack_delay_time, ack_receive_time);
if (!min_rtt_available && !rtt_stats_.min_rtt().IsZero()) {
loss_algorithm_->OnMinRttAvailable();
}
return true;
}
QuicTime::Delta QuicSentPacketManager::TimeUntilSend(QuicTime now) const {
if (pending_timer_transmission_count_ > 0) {
return QuicTime::Delta::Zero();
}
if (using_pacing_) {
return pacing_sender_.TimeUntilSend(now,
unacked_packets_.bytes_in_flight());
}
return send_algorithm_->CanSend(unacked_packets_.bytes_in_flight())
? QuicTime::Delta::Zero()
: QuicTime::Delta::Infinite();
}
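// Deadline for the retransmission alarm. The first PTO is special-cased:
// it is anchored at the first in-flight packet's send time but never
// earlier than the last send time plus kFirstPtoSrttMultiplier * SRTT.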
const QuicTime QuicSentPacketManager::GetRetransmissionTime() const {
if (!unacked_packets_.HasInFlightPackets() &&
PeerCompletedAddressValidation()) {
return QuicTime::Zero();
}
if (pending_timer_transmission_count_ > 0) {
return QuicTime::Zero();
}
switch (GetRetransmissionMode()) {
case HANDSHAKE_MODE:
return unacked_packets_.GetLastCryptoPacketSentTime() +
GetCryptoRetransmissionDelay();
case LOSS_MODE:
return loss_algorithm_->GetLossTimeout();
case PTO_MODE: {
if (!supports_multiple_packet_number_spaces()) {
if (unacked_packets_.HasInFlightPackets() &&
consecutive_pto_count_ == 0) {
return std::max(
clock_->ApproximateNow(),
std::max(unacked_packets_.GetFirstInFlightTransmissionInfo()
->sent_time +
GetProbeTimeoutDelay(NUM_PACKET_NUMBER_SPACES),
unacked_packets_.GetLastInFlightPacketSentTime() +
kFirstPtoSrttMultiplier *
rtt_stats_.SmoothedOrInitialRtt()));
}
return std::max(clock_->ApproximateNow(),
unacked_packets_.GetLastInFlightPacketSentTime() +
GetProbeTimeoutDelay(NUM_PACKET_NUMBER_SPACES));
}
PacketNumberSpace packet_number_space = NUM_PACKET_NUMBER_SPACES;
QuicTime earliest_right_edge =
GetEarliestPacketSentTimeForPto(&packet_number_space);
if (!earliest_right_edge.IsInitialized()) {
earliest_right_edge = clock_->ApproximateNow();
}
if (packet_number_space == APPLICATION_DATA &&
consecutive_pto_count_ == 0) {
const QuicTransmissionInfo* first_application_info =
unacked_packets_.GetFirstInFlightTransmissionInfoOfSpace(
APPLICATION_DATA);
if (first_application_info != nullptr) {
return std::max(
clock_->ApproximateNow(),
std::max(
first_application_info->sent_time +
GetProbeTimeoutDelay(packet_number_space),
earliest_right_edge + kFirstPtoSrttMultiplier *
rtt_stats_.SmoothedOrInitialRtt()));
}
}
return std::max(
clock_->ApproximateNow(),
earliest_right_edge + GetProbeTimeoutDelay(packet_number_space));
}
}
QUICHE_DCHECK(false);
return QuicTime::Zero();
}
const QuicTime::Delta QuicSentPacketManager::GetPathDegradingDelay() const {
QUICHE_DCHECK_GT(num_ptos_for_path_degrading_, 0);
return num_ptos_for_path_degrading_ * GetPtoDelay();
}
const QuicTime::Delta QuicSentPacketManager::GetNetworkBlackholeDelay(
int8_t num_rtos_for_blackhole_detection) const {
return GetNConsecutiveRetransmissionTimeoutDelay(
kDefaultMaxTailLossProbes + num_rtos_for_blackhole_detection);
}
QuicTime::Delta QuicSentPacketManager::GetMtuReductionDelay(
int8_t num_rtos_for_blackhole_detection) const {
return GetNetworkBlackholeDelay(num_rtos_for_blackhole_detection / 2);
}
const QuicTime::Delta QuicSentPacketManager::GetCryptoRetransmissionDelay()
const {
QuicTime::Delta srtt = rtt_stats_.SmoothedOrInitialRtt();
int64_t delay_ms;
if (conservative_handshake_retransmits_) {
delay_ms = std::max(peer_max_ack_delay_.ToMilliseconds(),
static_cast<int64_t>(2 * srtt.ToMilliseconds()));
} else {
delay_ms = std::max(kMinHandshakeTimeoutMs,
static_cast<int64_t>(1.5 * srtt.ToMilliseconds()));
}
return QuicTime::Delta::FromMilliseconds(
delay_ms << consecutive_crypto_retransmission_count_);
}
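// PTO = SRTT + max(kPtoRttvarMultiplier * rttvar, alarm granularity)
// (+ peer max_ack_delay for application data), doubled per consecutive
// PTO. With no RTT sample yet, a multiple of the initial RTT is used.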
const QuicTime::Delta QuicSentPacketManager::GetProbeTimeoutDelay(
PacketNumberSpace space) const {
if (rtt_stats_.smoothed_rtt().IsZero()) {
QUIC_BUG_IF(quic_bug_12552_6, rtt_stats_.initial_rtt().IsZero());
return std::max(kPtoMultiplierWithoutRttSamples * rtt_stats_.initial_rtt(),
QuicTime::Delta::FromMilliseconds(kMinHandshakeTimeoutMs)) *
(1 << consecutive_pto_count_);
}
QuicTime::Delta pto_delay =
rtt_stats_.smoothed_rtt() +
std::max(kPtoRttvarMultiplier * rtt_stats_.mean_deviation(),
kAlarmGranularity) +
(ShouldAddMaxAckDelay(space) ? peer_max_ack_delay_
: QuicTime::Delta::Zero());
return pto_delay * (1 << consecutive_pto_count_);
}
QuicTime::Delta QuicSentPacketManager::GetSlowStartDuration() const {
if (send_algorithm_->GetCongestionControlType() == kBBR ||
send_algorithm_->GetCongestionControlType() == kBBRv2) {
return stats_->slowstart_duration.GetTotalElapsedTime(
clock_->ApproximateNow());
}
return QuicTime::Delta::Infinite();
}
QuicByteCount QuicSentPacketManager::GetAvailableCongestionWindowInBytes()
const {
QuicByteCount congestion_window = GetCongestionWindowInBytes();
QuicByteCount bytes_in_flight = GetBytesInFlight();
return congestion_window - std::min(congestion_window, bytes_in_flight);
}
std::string QuicSentPacketManager::GetDebugState() const {
return send_algorithm_->GetDebugState();
}
void QuicSentPacketManager::SetSendAlgorithm(
CongestionControlType congestion_control_type) {
if (send_algorithm_ &&
send_algorithm_->GetCongestionControlType() == congestion_control_type) {
return;
}
SetSendAlgorithm(SendAlgorithmInterface::Create(
clock_, &rtt_stats_, &unacked_packets_, congestion_control_type, random_,
stats_, initial_congestion_window_, send_algorithm_.get()));
}
void QuicSentPacketManager::SetSendAlgorithm(
SendAlgorithmInterface* send_algorithm) {
if (debug_delegate_ != nullptr && send_algorithm != nullptr) {
debug_delegate_->OnSendAlgorithmChanged(
send_algorithm->GetCongestionControlType());
}
send_algorithm_.reset(send_algorithm);
pacing_sender_.set_sender(send_algorithm);
}
std::unique_ptr<SendAlgorithmInterface>
QuicSentPacketManager::OnConnectionMigration(bool reset_send_algorithm) {
consecutive_pto_count_ = 0;
rtt_stats_.OnConnectionMigration();
if (!reset_send_algorithm) {
send_algorithm_->OnConnectionMigration();
return nullptr;
}
std::unique_ptr<SendAlgorithmInterface> old_send_algorithm =
std::move(send_algorithm_);
SetSendAlgorithm(old_send_algorithm->GetCongestionControlType());
QuicPacketNumber packet_number = unacked_packets_.GetLeastUnacked();
for (auto it = unacked_packets_.begin(); it != unacked_packets_.end();
++it, ++packet_number) {
if (it->in_flight) {
unacked_packets_.RemoveFromInFlight(packet_number);
if (unacked_packets_.HasRetransmittableFrames(packet_number)) {
MarkForRetransmission(packet_number, PATH_RETRANSMISSION);
QUICHE_DCHECK_EQ(it->state, NOT_CONTRIBUTING_RTT);
}
}
it->state = NOT_CONTRIBUTING_RTT;
}
return old_send_algorithm;
}
void QuicSentPacketManager::OnAckFrameStart(QuicPacketNumber largest_acked,
QuicTime::Delta ack_delay_time,
QuicTime ack_receive_time) {
QUICHE_DCHECK(packets_acked_.empty());
QUICHE_DCHECK_LE(largest_acked, unacked_packets_.largest_sent_packet());
if (!supports_multiple_packet_number_spaces() || handshake_finished_) {
if (ack_delay_time > peer_max_ack_delay()) {
ack_delay_time = peer_max_ack_delay();
}
if (ignore_ack_delay_) {
ack_delay_time = QuicTime::Delta::Zero();
}
}
rtt_updated_ =
MaybeUpdateRTT(largest_acked, ack_delay_time, ack_receive_time);
last_ack_frame_.ack_delay_time = ack_delay_time;
acked_packets_iter_ = last_ack_frame_.packets.rbegin();
}
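// Processes one [start, end) ack block, walking backwards against the
// ranges acked by previous frames so only newly acked packet numbers are
// appended to packets_acked_ (in descending order; reversed later in
// OnAckFrameEnd).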
void QuicSentPacketManager::OnAckRange(QuicPacketNumber start,
QuicPacketNumber end) {
if (!last_ack_frame_.largest_acked.IsInitialized() ||
end > last_ack_frame_.largest_acked + 1) {
unacked_packets_.IncreaseLargestAcked(end - 1);
last_ack_frame_.largest_acked = end - 1;
}
QuicPacketNumber least_unacked = unacked_packets_.GetLeastUnacked();
if (least_unacked.IsInitialized() && end <= least_unacked) {
return;
}
start = std::max(start, least_unacked);
do {
QuicPacketNumber newly_acked_start = start;
if (acked_packets_iter_ != last_ack_frame_.packets.rend()) {
newly_acked_start = std::max(start, acked_packets_iter_->max());
}
for (QuicPacketNumber acked = end - 1; acked >= newly_acked_start;
--acked) {
packets_acked_.push_back(AckedPacket(acked, 0, QuicTime::Zero()));
if (acked == FirstSendingPacketNumber()) {
break;
}
}
if (acked_packets_iter_ == last_ack_frame_.packets.rend() ||
start > acked_packets_iter_->min()) {
return;
}
end = std::min(end, acked_packets_iter_->min());
++acked_packets_iter_;
} while (start < end);
}
void QuicSentPacketManager::OnAckTimestamp(QuicPacketNumber packet_number,
QuicTime timestamp) {
last_ack_frame_.received_packet_times.push_back({packet_number, timestamp});
for (AckedPacket& packet : packets_acked_) {
if (packet.packet_number == packet_number) {
packet.receive_timestamp = timestamp;
return;
}
}
}
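// Sanity-checks the peer's ECN feedback: counts must not decrease, must
// not exceed the number of marked packets sent, and must account for every
// newly acked ECT-marked packet (CE marks may substitute for ECT counts).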
bool QuicSentPacketManager::IsEcnFeedbackValid(
PacketNumberSpace space, const std::optional<QuicEcnCounts>& ecn_counts,
QuicPacketCount newly_acked_ect0, QuicPacketCount newly_acked_ect1) {
if (!ecn_counts.has_value()) {
if (newly_acked_ect0 > 0 || newly_acked_ect1 > 0) {
QUIC_DVLOG(1) << ENDPOINT
<< "ECN packets acknowledged, no counts reported.";
return false;
}
return true;
}
if (ecn_counts->ect0 < peer_ack_ecn_counts_[space].ect0 ||
ecn_counts->ect1 < peer_ack_ecn_counts_[space].ect1 ||
ecn_counts->ce < peer_ack_ecn_counts_[space].ce) {
QUIC_DVLOG(1) << ENDPOINT << "Reported ECN count declined.";
return false;
}
if (ecn_counts->ect0 > ect0_packets_sent_[space] ||
ecn_counts->ect1 > ect1_packets_sent_[space] ||
(ecn_counts->ect0 + ecn_counts->ect1 + ecn_counts->ce >
ect0_packets_sent_[space] + ect1_packets_sent_[space])) {
QUIC_DVLOG(1) << ENDPOINT << "Reported ECT + CE exceeds packets sent:"
<< " reported " << ecn_counts->ToString() << " , ECT(0) sent "
<< ect0_packets_sent_[space] << " , ECT(1) sent "
<< ect1_packets_sent_[space];
return false;
}
if ((newly_acked_ect0 >
(ecn_counts->ect0 + ecn_counts->ce - peer_ack_ecn_counts_[space].ect0 +
peer_ack_ecn_counts_[space].ce)) ||
(newly_acked_ect1 >
(ecn_counts->ect1 + ecn_counts->ce - peer_ack_ecn_counts_[space].ect1 +
peer_ack_ecn_counts_[space].ce))) {
QUIC_DVLOG(1) << ENDPOINT
<< "Peer acked packet but did not report the ECN mark: "
<< " New ECN counts: " << ecn_counts->ToString()
<< " Old ECN counts: "
<< peer_ack_ecn_counts_[space].ToString()
<< " Newly acked ECT(0) : " << newly_acked_ect0
<< " Newly acked ECT(1) : " << newly_acked_ect1;
return false;
}
return true;
}
AckResult QuicSentPacketManager::OnAckFrameEnd(
QuicTime ack_receive_time, QuicPacketNumber ack_packet_number,
EncryptionLevel ack_decrypted_level,
const std::optional<QuicEcnCounts>& ecn_counts) {
QuicByteCount prior_bytes_in_flight = unacked_packets_.bytes_in_flight();
QuicPacketCount newly_acked_ect0 = 0;
QuicPacketCount newly_acked_ect1 = 0;
PacketNumberSpace acked_packet_number_space =
QuicUtils::GetPacketNumberSpace(ack_decrypted_level);
QuicPacketNumber old_largest_acked =
unacked_packets_.GetLargestAckedOfPacketNumberSpace(
acked_packet_number_space);
std::reverse(packets_acked_.begin(), packets_acked_.end());
for (AckedPacket& acked_packet : packets_acked_) {
QuicTransmissionInfo* info =
unacked_packets_.GetMutableTransmissionInfo(acked_packet.packet_number);
if (!QuicUtils::IsAckable(info->state)) {
if (info->state == ACKED) {
QUIC_BUG(quic_bug_10750_5)
<< "Trying to ack an already acked packet: "
<< acked_packet.packet_number
<< ", last_ack_frame_: " << last_ack_frame_
<< ", least_unacked: " << unacked_packets_.GetLeastUnacked()
<< ", packets_acked_: " << quiche::PrintElements(packets_acked_);
} else {
QUIC_PEER_BUG(quic_peer_bug_10750_6)
<< "Received " << ack_decrypted_level
<< " ack for unackable packet: " << acked_packet.packet_number
<< " with state: "
<< QuicUtils::SentPacketStateToString(info->state);
if (supports_multiple_packet_number_spaces()) {
if (info->state == NEVER_SENT) {
return UNSENT_PACKETS_ACKED;
}
return UNACKABLE_PACKETS_ACKED;
}
}
continue;
}
QUIC_DVLOG(1) << ENDPOINT << "Got an " << ack_decrypted_level
<< " ack for packet " << acked_packet.packet_number
<< " , state: "
<< QuicUtils::SentPacketStateToString(info->state);
const PacketNumberSpace packet_number_space =
unacked_packets_.GetPacketNumberSpace(info->encryption_level);
if (supports_multiple_packet_number_spaces() &&
QuicUtils::GetPacketNumberSpace(ack_decrypted_level) !=
packet_number_space) {
return PACKETS_ACKED_IN_WRONG_PACKET_NUMBER_SPACE;
}
last_ack_frame_.packets.Add(acked_packet.packet_number);
if (info->encryption_level == ENCRYPTION_HANDSHAKE) {
handshake_packet_acked_ = true;
} else if (info->encryption_level == ENCRYPTION_ZERO_RTT) {
zero_rtt_packet_acked_ = true;
} else if (info->encryption_level == ENCRYPTION_FORWARD_SECURE) {
one_rtt_packet_acked_ = true;
}
largest_packet_peer_knows_is_acked_.UpdateMax(info->largest_acked);
if (supports_multiple_packet_number_spaces()) {
largest_packets_peer_knows_is_acked_[packet_number_space].UpdateMax(
info->largest_acked);
}
if (info->in_flight) {
acked_packet.bytes_acked = info->bytes_sent;
} else {
acked_packet.spurious_loss = (info->state == LOST);
largest_newly_acked_ = acked_packet.packet_number;
}
switch (info->ecn_codepoint) {
case ECN_NOT_ECT:
break;
case ECN_CE:
break;
case ECN_ECT0:
++newly_acked_ect0;
if (info->in_flight) {
network_change_visitor_->OnInFlightEcnPacketAcked();
}
break;
case ECN_ECT1:
++newly_acked_ect1;
if (info->in_flight) {
network_change_visitor_->OnInFlightEcnPacketAcked();
}
break;
}
unacked_packets_.MaybeUpdateLargestAckedOfPacketNumberSpace(
packet_number_space, acked_packet.packet_number);
MarkPacketHandled(acked_packet.packet_number, info, ack_receive_time,
last_ack_frame_.ack_delay_time,
acked_packet.receive_timestamp);
}
std::optional<QuicEcnCounts> valid_ecn_counts;
if (GetQuicRestartFlag(quic_support_ect1)) {
QUIC_RESTART_FLAG_COUNT_N(quic_support_ect1, 1, 9);
if (IsEcnFeedbackValid(acked_packet_number_space, ecn_counts,
newly_acked_ect0, newly_acked_ect1)) {
valid_ecn_counts = ecn_counts;
} else if (!old_largest_acked.IsInitialized() ||
old_largest_acked <
unacked_packets_.GetLargestAckedOfPacketNumberSpace(
acked_packet_number_space)) {
network_change_visitor_->OnInvalidEcnFeedback();
}
}
const bool acked_new_packet = !packets_acked_.empty();
PostProcessNewlyAckedPackets(ack_packet_number, ack_decrypted_level,
last_ack_frame_, ack_receive_time, rtt_updated_,
prior_bytes_in_flight, valid_ecn_counts);
if (valid_ecn_counts.has_value()) {
peer_ack_ecn_counts_[acked_packet_number_space] = *valid_ecn_counts;
}
return acked_new_packet ? PACKETS_NEWLY_ACKED : NO_PACKETS_NEWLY_ACKED;
}
void QuicSentPacketManager::SetDebugDelegate(DebugDelegate* debug_delegate) {
debug_delegate_ = debug_delegate;
}
void QuicSentPacketManager::OnApplicationLimited() {
if (using_pacing_) {
pacing_sender_.OnApplicationLimited();
}
send_algorithm_->OnApplicationLimited(unacked_packets_.bytes_in_flight());
if (debug_delegate_ != nullptr) {
debug_delegate_->OnApplicationLimited();
}
}
NextReleaseTimeResult QuicSentPacketManager::GetNextReleaseTime() const {
if (!using_pacing_) {
return {QuicTime::Zero(), false};
}
return pacing_sender_.GetNextReleaseTime();
}
void QuicSentPacketManager::SetInitialRtt(QuicTime::Delta rtt, bool trusted) {
const QuicTime::Delta min_rtt = QuicTime::Delta::FromMicroseconds(
trusted ? kMinTrustedInitialRoundTripTimeUs
: kMinUntrustedInitialRoundTripTimeUs);
QuicTime::Delta max_rtt =
QuicTime::Delta::FromMicroseconds(kMaxInitialRoundTripTimeUs);
rtt_stats_.set_initial_rtt(std::max(min_rtt, std::min(max_rtt, rtt)));
}
void QuicSentPacketManager::EnableMultiplePacketNumberSpacesSupport() {
EnableIetfPtoAndLossDetection();
unacked_packets_.EnableMultiplePacketNumberSpacesSupport();
}
QuicPacketNumber QuicSentPacketManager::GetLargestAckedPacket(
EncryptionLevel decrypted_packet_level) const {
QUICHE_DCHECK(supports_multiple_packet_number_spaces());
return unacked_packets_.GetLargestAckedOfPacketNumberSpace(
QuicUtils::GetPacketNumberSpace(decrypted_packet_level));
}
QuicPacketNumber QuicSentPacketManager::GetLeastPacketAwaitedByPeer(
EncryptionLevel encryption_level) const {
QuicPacketNumber largest_acked;
if (supports_multiple_packet_number_spaces()) {
largest_acked = GetLargestAckedPacket(encryption_level);
} else {
largest_acked = GetLargestObserved();
}
if (!largest_acked.IsInitialized()) {
return FirstSendingPacketNumber();
}
QuicPacketNumber least_awaited = largest_acked + 1;
QuicPacketNumber least_unacked = GetLeastUnacked();
if (least_unacked.IsInitialized() && least_unacked < least_awaited) {
least_awaited = least_unacked;
}
return least_awaited;
}
QuicPacketNumber QuicSentPacketManager::GetLargestPacketPeerKnowsIsAcked(
EncryptionLevel decrypted_packet_level) const {
QUICHE_DCHECK(supports_multiple_packet_number_spaces());
return largest_packets_peer_knows_is_acked_[QuicUtils::GetPacketNumberSpace(
decrypted_packet_level)];
}
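// Models the legacy TLP + RTO schedule: the first kDefaultMaxTailLossProbes
// timeouts use the tail loss probe delay; each remaining timeout doubles
// the retransmission delay. Used for blackhole and MTU-reduction delays.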
QuicTime::Delta
QuicSentPacketManager::GetNConsecutiveRetransmissionTimeoutDelay(
int num_timeouts) const {
QuicTime::Delta total_delay = QuicTime::Delta::Zero();
const QuicTime::Delta srtt = rtt_stats_.SmoothedOrInitialRtt();
int num_tlps =
std::min(num_timeouts, static_cast<int>(kDefaultMaxTailLossProbes));
num_timeouts -= num_tlps;
if (num_tlps > 0) {
const QuicTime::Delta tlp_delay = std::max(
2 * srtt,
unacked_packets_.HasMultipleInFlightPackets()
? QuicTime::Delta::FromMilliseconds(kMinTailLossProbeTimeoutMs)
: (1.5 * srtt +
(QuicTime::Delta::FromMilliseconds(kMinRetransmissionTimeMs) *
0.5)));
total_delay = total_delay + num_tlps * tlp_delay;
}
if (num_timeouts == 0) {
return total_delay;
}
const QuicTime::Delta retransmission_delay =
rtt_stats_.smoothed_rtt().IsZero()
? QuicTime::Delta::FromMilliseconds(kDefaultRetransmissionTimeMs)
: std::max(
srtt + 4 * rtt_stats_.mean_deviation(),
QuicTime::Delta::FromMilliseconds(kMinRetransmissionTimeMs));
total_delay = total_delay + ((1 << num_timeouts) - 1) * retransmission_delay;
return total_delay;
}
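// Servers always treat the peer as validated; a client infers that the
// server validated its address once the handshake finishes or a handshake
// packet is acked.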
bool QuicSentPacketManager::PeerCompletedAddressValidation() const {
if (unacked_packets_.perspective() == Perspective::IS_SERVER ||
!handshake_mode_disabled_) {
return true;
}
return handshake_finished_ || handshake_packet_acked_;
}
bool QuicSentPacketManager::IsLessThanThreePTOs(QuicTime::Delta timeout) const {
return timeout < 3 * GetPtoDelay();
}
QuicTime::Delta QuicSentPacketManager::GetPtoDelay() const {
return GetProbeTimeoutDelay(APPLICATION_DATA);
}
void QuicSentPacketManager::OnAckFrequencyFrameSent(
const QuicAckFrequencyFrame& ack_frequency_frame) {
in_use_sent_ack_delays_.emplace_back(ack_frequency_frame.max_ack_delay,
ack_frequency_frame.sequence_number);
if (ack_frequency_frame.max_ack_delay > peer_max_ack_delay_) {
peer_max_ack_delay_ = ack_frequency_frame.max_ack_delay;
}
}
void QuicSentPacketManager::OnAckFrequencyFrameAcked(
const QuicAckFrequencyFrame& ack_frequency_frame) {
int stale_entry_count = 0;
for (auto it = in_use_sent_ack_delays_.cbegin();
it != in_use_sent_ack_delays_.cend(); ++it) {
if (it->second < ack_frequency_frame.sequence_number) {
++stale_entry_count;
} else {
break;
}
}
if (stale_entry_count > 0) {
in_use_sent_ack_delays_.pop_front_n(stale_entry_count);
}
if (in_use_sent_ack_delays_.empty()) {
QUIC_BUG(quic_bug_10750_7) << "in_use_sent_ack_delays_ is empty.";
return;
}
peer_max_ack_delay_ = std::max_element(in_use_sent_ack_delays_.cbegin(),
in_use_sent_ack_delays_.cend())
->first;
}
#undef ENDPOINT
} | #include "quiche/quic/core/quic_sent_packet_manager.h"
#include <algorithm>
#include <memory>
#include <optional>
#include <utility>
#include <vector>
#include "absl/base/macros.h"
#include "absl/strings/string_view.h"
#include "quiche/quic/core/frames/quic_ack_frequency_frame.h"
#include "quiche/quic/core/quic_time.h"
#include "quiche/quic/core/quic_types.h"
#include "quiche/quic/platform/api/quic_expect_bug.h"
#include "quiche/quic/platform/api/quic_flags.h"
#include "quiche/quic/platform/api/quic_test.h"
#include "quiche/quic/test_tools/quic_config_peer.h"
#include "quiche/quic/test_tools/quic_sent_packet_manager_peer.h"
#include "quiche/quic/test_tools/quic_test_utils.h"
#include "quiche/common/platform/api/quiche_mem_slice.h"
using testing::_;
using testing::AnyNumber;
using testing::Invoke;
using testing::InvokeWithoutArgs;
using testing::IsEmpty;
using testing::Not;
using testing::Pointwise;
using testing::Return;
using testing::StrictMock;
using testing::WithArgs;
namespace quic {
namespace test {
namespace {
const uint32_t kDefaultLength = 1000;
const QuicStreamId kStreamId = 7;
const std::optional<QuicEcnCounts> kEmptyCounts = std::nullopt;
MATCHER(PacketNumberEq, "") {
return std::get<0>(arg).packet_number == QuicPacketNumber(std::get<1>(arg));
}
class MockDebugDelegate : public QuicSentPacketManager::DebugDelegate {
public:
MOCK_METHOD(void, OnSpuriousPacketRetransmission,
(TransmissionType transmission_type, QuicByteCount byte_size),
(override));
MOCK_METHOD(void, OnPacketLoss,
(QuicPacketNumber lost_packet_number,
EncryptionLevel encryption_level,
TransmissionType transmission_type, QuicTime detection_time),
(override));
};
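// Test fixture: wires a server-perspective QuicSentPacketManager to strict
// mocks for the send algorithm, network change visitor, and session
// notifier, with helpers to send, retransmit, and ack packets.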
class QuicSentPacketManagerTest : public QuicTest {
public:
bool RetransmitCryptoPacket(uint64_t packet_number) {
EXPECT_CALL(
*send_algorithm_,
OnPacketSent(_, BytesInFlight(), QuicPacketNumber(packet_number),
kDefaultLength, HAS_RETRANSMITTABLE_DATA));
SerializedPacket packet(CreatePacket(packet_number, false));
packet.retransmittable_frames.push_back(
QuicFrame(QuicStreamFrame(1, false, 0, absl::string_view())));
packet.has_crypto_handshake = IS_HANDSHAKE;
manager_.OnPacketSent(&packet, clock_.Now(), HANDSHAKE_RETRANSMISSION,
HAS_RETRANSMITTABLE_DATA, true, ECN_NOT_ECT);
return true;
}
bool RetransmitDataPacket(uint64_t packet_number, TransmissionType type,
EncryptionLevel level) {
EXPECT_CALL(
*send_algorithm_,
OnPacketSent(_, BytesInFlight(), QuicPacketNumber(packet_number),
kDefaultLength, HAS_RETRANSMITTABLE_DATA));
SerializedPacket packet(CreatePacket(packet_number, true));
packet.encryption_level = level;
manager_.OnPacketSent(&packet, clock_.Now(), type, HAS_RETRANSMITTABLE_DATA,
true, ECN_NOT_ECT);
return true;
}
bool RetransmitDataPacket(uint64_t packet_number, TransmissionType type) {
return RetransmitDataPacket(packet_number, type, ENCRYPTION_INITIAL);
}
protected:
const CongestionControlType kInitialCongestionControlType = kCubicBytes;
QuicSentPacketManagerTest()
: manager_(Perspective::IS_SERVER, &clock_, QuicRandom::GetInstance(),
&stats_, kInitialCongestionControlType),
send_algorithm_(new StrictMock<MockSendAlgorithm>),
network_change_visitor_(new StrictMock<MockNetworkChangeVisitor>) {
QuicSentPacketManagerPeer::SetSendAlgorithm(&manager_, send_algorithm_);
clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(1000));
manager_.SetNetworkChangeVisitor(network_change_visitor_.get());
manager_.SetSessionNotifier(¬ifier_);
EXPECT_CALL(*send_algorithm_, GetCongestionControlType())
.WillRepeatedly(Return(kInitialCongestionControlType));
EXPECT_CALL(*send_algorithm_, BandwidthEstimate())
.Times(AnyNumber())
.WillRepeatedly(Return(QuicBandwidth::Zero()));
EXPECT_CALL(*send_algorithm_, InSlowStart()).Times(AnyNumber());
EXPECT_CALL(*send_algorithm_, InRecovery()).Times(AnyNumber());
EXPECT_CALL(*send_algorithm_, OnPacketNeutered(_)).Times(AnyNumber());
EXPECT_CALL(*network_change_visitor_, OnPathMtuIncreased(1000))
.Times(AnyNumber());
EXPECT_CALL(notifier_, IsFrameOutstanding(_)).WillRepeatedly(Return(true));
EXPECT_CALL(notifier_, HasUnackedCryptoData())
.WillRepeatedly(Return(false));
EXPECT_CALL(notifier_, OnStreamFrameRetransmitted(_)).Times(AnyNumber());
EXPECT_CALL(notifier_, OnFrameAcked(_, _, _)).WillRepeatedly(Return(true));
}
~QuicSentPacketManagerTest() override {}
QuicByteCount BytesInFlight() { return manager_.GetBytesInFlight(); }
void VerifyUnackedPackets(uint64_t* packets, size_t num_packets) {
if (num_packets == 0) {
EXPECT_TRUE(manager_.unacked_packets().empty());
EXPECT_EQ(0u, QuicSentPacketManagerPeer::GetNumRetransmittablePackets(
&manager_));
return;
}
EXPECT_FALSE(manager_.unacked_packets().empty());
EXPECT_EQ(QuicPacketNumber(packets[0]), manager_.GetLeastUnacked());
for (size_t i = 0; i < num_packets; ++i) {
EXPECT_TRUE(
manager_.unacked_packets().IsUnacked(QuicPacketNumber(packets[i])))
<< packets[i];
}
}
void VerifyRetransmittablePackets(uint64_t* packets, size_t num_packets) {
EXPECT_EQ(
num_packets,
QuicSentPacketManagerPeer::GetNumRetransmittablePackets(&manager_));
for (size_t i = 0; i < num_packets; ++i) {
EXPECT_TRUE(QuicSentPacketManagerPeer::HasRetransmittableFrames(
&manager_, packets[i]))
<< " packets[" << i << "]:" << packets[i];
}
}
void ExpectAck(uint64_t largest_observed) {
EXPECT_CALL(
*send_algorithm_,
OnCongestionEvent(true, _, _,
Pointwise(PacketNumberEq(), {largest_observed}),
IsEmpty(), _, _));
EXPECT_CALL(*network_change_visitor_, OnCongestionChange());
}
  void ExpectUpdatedRtt(uint64_t /*largest_observed*/) {
EXPECT_CALL(*send_algorithm_,
OnCongestionEvent(true, _, _, IsEmpty(), IsEmpty(), _, _));
EXPECT_CALL(*network_change_visitor_, OnCongestionChange());
}
void ExpectAckAndLoss(bool rtt_updated, uint64_t largest_observed,
uint64_t lost_packet) {
EXPECT_CALL(
*send_algorithm_,
OnCongestionEvent(rtt_updated, _, _,
Pointwise(PacketNumberEq(), {largest_observed}),
Pointwise(PacketNumberEq(), {lost_packet}), _, _));
EXPECT_CALL(*network_change_visitor_, OnCongestionChange());
}
void ExpectAcksAndLosses(bool rtt_updated, uint64_t* packets_acked,
size_t num_packets_acked, uint64_t* packets_lost,
size_t num_packets_lost) {
std::vector<QuicPacketNumber> ack_vector;
for (size_t i = 0; i < num_packets_acked; ++i) {
ack_vector.push_back(QuicPacketNumber(packets_acked[i]));
}
std::vector<QuicPacketNumber> lost_vector;
for (size_t i = 0; i < num_packets_lost; ++i) {
lost_vector.push_back(QuicPacketNumber(packets_lost[i]));
}
EXPECT_CALL(*send_algorithm_,
OnCongestionEvent(
rtt_updated, _, _, Pointwise(PacketNumberEq(), ack_vector),
Pointwise(PacketNumberEq(), lost_vector), _, _));
EXPECT_CALL(*network_change_visitor_, OnCongestionChange())
.Times(AnyNumber());
}
void RetransmitAndSendPacket(uint64_t old_packet_number,
uint64_t new_packet_number) {
RetransmitAndSendPacket(old_packet_number, new_packet_number,
PTO_RETRANSMISSION);
}
void RetransmitAndSendPacket(uint64_t old_packet_number,
uint64_t new_packet_number,
TransmissionType transmission_type) {
bool is_lost = false;
if (transmission_type == HANDSHAKE_RETRANSMISSION ||
transmission_type == PTO_RETRANSMISSION) {
EXPECT_CALL(notifier_, RetransmitFrames(_, _))
.WillOnce(WithArgs<1>(
Invoke([this, new_packet_number](TransmissionType type) {
return RetransmitDataPacket(new_packet_number, type);
})));
} else {
EXPECT_CALL(notifier_, OnFrameLost(_)).Times(1);
is_lost = true;
}
QuicSentPacketManagerPeer::MarkForRetransmission(
&manager_, old_packet_number, transmission_type);
if (!is_lost) {
return;
}
EXPECT_CALL(
*send_algorithm_,
OnPacketSent(_, BytesInFlight(), QuicPacketNumber(new_packet_number),
kDefaultLength, HAS_RETRANSMITTABLE_DATA));
SerializedPacket packet(CreatePacket(new_packet_number, true));
manager_.OnPacketSent(&packet, clock_.Now(), transmission_type,
HAS_RETRANSMITTABLE_DATA, true, ECN_NOT_ECT);
}
SerializedPacket CreateDataPacket(uint64_t packet_number) {
return CreatePacket(packet_number, true);
}
SerializedPacket CreatePacket(uint64_t packet_number, bool retransmittable) {
    SerializedPacket packet(QuicPacketNumber(packet_number),
                            PACKET_4BYTE_PACKET_NUMBER, nullptr, kDefaultLength,
                            /*has_ack=*/false, /*has_stop_waiting=*/false);
if (retransmittable) {
packet.retransmittable_frames.push_back(
QuicFrame(QuicStreamFrame(kStreamId, false, 0, absl::string_view())));
}
return packet;
}
SerializedPacket CreatePingPacket(uint64_t packet_number) {
    SerializedPacket packet(QuicPacketNumber(packet_number),
                            PACKET_4BYTE_PACKET_NUMBER, nullptr, kDefaultLength,
                            /*has_ack=*/false, /*has_stop_waiting=*/false);
packet.retransmittable_frames.push_back(QuicFrame(QuicPingFrame()));
return packet;
}
void SendDataPacket(uint64_t packet_number) {
SendDataPacket(packet_number, ENCRYPTION_INITIAL, ECN_NOT_ECT);
}
void SendDataPacket(uint64_t packet_number,
EncryptionLevel encryption_level) {
SendDataPacket(packet_number, encryption_level, ECN_NOT_ECT);
}
void SendDataPacket(uint64_t packet_number, EncryptionLevel encryption_level,
QuicEcnCodepoint ecn_codepoint) {
EXPECT_CALL(*send_algorithm_,
OnPacketSent(_, BytesInFlight(),
QuicPacketNumber(packet_number), _, _));
SerializedPacket packet(CreateDataPacket(packet_number));
packet.encryption_level = encryption_level;
manager_.OnPacketSent(&packet, clock_.Now(), NOT_RETRANSMISSION,
HAS_RETRANSMITTABLE_DATA, true, ecn_codepoint);
}
void SendPingPacket(uint64_t packet_number,
EncryptionLevel encryption_level) {
EXPECT_CALL(*send_algorithm_,
OnPacketSent(_, BytesInFlight(),
QuicPacketNumber(packet_number), _, _));
SerializedPacket packet(CreatePingPacket(packet_number));
packet.encryption_level = encryption_level;
manager_.OnPacketSent(&packet, clock_.Now(), NOT_RETRANSMISSION,
HAS_RETRANSMITTABLE_DATA, true, ECN_NOT_ECT);
}
void SendCryptoPacket(uint64_t packet_number) {
EXPECT_CALL(
*send_algorithm_,
OnPacketSent(_, BytesInFlight(), QuicPacketNumber(packet_number),
kDefaultLength, HAS_RETRANSMITTABLE_DATA));
SerializedPacket packet(CreatePacket(packet_number, false));
packet.retransmittable_frames.push_back(
QuicFrame(QuicStreamFrame(1, false, 0, absl::string_view())));
packet.has_crypto_handshake = IS_HANDSHAKE;
manager_.OnPacketSent(&packet, clock_.Now(), NOT_RETRANSMISSION,
HAS_RETRANSMITTABLE_DATA, true, ECN_NOT_ECT);
EXPECT_CALL(notifier_, HasUnackedCryptoData()).WillRepeatedly(Return(true));
}
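
  // Sends an ack-only packet (no retransmittable data) that acks up to
  // |largest_acked|.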
void SendAckPacket(uint64_t packet_number, uint64_t largest_acked) {
SendAckPacket(packet_number, largest_acked, ENCRYPTION_INITIAL);
}
void SendAckPacket(uint64_t packet_number, uint64_t largest_acked,
EncryptionLevel level) {
EXPECT_CALL(
*send_algorithm_,
OnPacketSent(_, BytesInFlight(), QuicPacketNumber(packet_number),
kDefaultLength, NO_RETRANSMITTABLE_DATA));
SerializedPacket packet(CreatePacket(packet_number, false));
packet.largest_acked = QuicPacketNumber(largest_acked);
packet.encryption_level = level;
manager_.OnPacketSent(&packet, clock_.Now(), NOT_RETRANSMISSION,
NO_RETRANSMITTABLE_DATA, true, ECN_NOT_ECT);
}
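
  // The manager under test and its mock collaborators.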
quiche::SimpleBufferAllocator allocator_;
QuicSentPacketManager manager_;
MockClock clock_;
QuicConnectionStats stats_;
MockSendAlgorithm* send_algorithm_;
std::unique_ptr<MockNetworkChangeVisitor> network_change_visitor_;
StrictMock<MockSessionNotifier> notifier_;
};
TEST_F(QuicSentPacketManagerTest, IsUnacked) {
VerifyUnackedPackets(nullptr, 0);
SendDataPacket(1);
uint64_t unacked[] = {1};
VerifyUnackedPackets(unacked, ABSL_ARRAYSIZE(unacked));
uint64_t retransmittable[] = {1};
VerifyRetransmittablePackets(retransmittable,
ABSL_ARRAYSIZE(retransmittable));
}
TEST_F(QuicSentPacketManagerTest, IsUnAckedRetransmit) {
SendDataPacket(1);
RetransmitAndSendPacket(1, 2);
EXPECT_TRUE(QuicSentPacketManagerPeer::IsRetransmission(&manager_, 2));
uint64_t unacked[] = {1, 2};
VerifyUnackedPackets(unacked, ABSL_ARRAYSIZE(unacked));
std::vector<uint64_t> retransmittable = {1, 2};
VerifyRetransmittablePackets(&retransmittable[0], retransmittable.size());
}
TEST_F(QuicSentPacketManagerTest, RetransmitThenAck) {
SendDataPacket(1);
RetransmitAndSendPacket(1, 2);
ExpectAck(2);
manager_.OnAckFrameStart(QuicPacketNumber(2), QuicTime::Delta::Infinite(),
clock_.Now());
manager_.OnAckRange(QuicPacketNumber(2), QuicPacketNumber(3));
EXPECT_EQ(PACKETS_NEWLY_ACKED,
manager_.OnAckFrameEnd(clock_.Now(), QuicPacketNumber(1),
ENCRYPTION_INITIAL, kEmptyCounts));
EXPECT_CALL(notifier_, IsFrameOutstanding(_)).WillRepeatedly(Return(false));
uint64_t unacked[] = {1};
VerifyUnackedPackets(unacked, ABSL_ARRAYSIZE(unacked));
EXPECT_TRUE(manager_.HasInFlightPackets());
VerifyRetransmittablePackets(nullptr, 0);
}
TEST_F(QuicSentPacketManagerTest, RetransmitThenAckBeforeSend) {
SendDataPacket(1);
EXPECT_CALL(notifier_, RetransmitFrames(_, _))
.WillOnce(WithArgs<1>(Invoke([this](TransmissionType type) {
return RetransmitDataPacket(2, type);
})));
QuicSentPacketManagerPeer::MarkForRetransmission(&manager_, 1,
PTO_RETRANSMISSION);
ExpectAck(1);
manager_.OnAckFrameStart(QuicPacketNumber(1), QuicTime::Delta::Infinite(),
clock_.Now());
manager_.OnAckRange(QuicPacketNumber(1), QuicPacketNumber(2));
EXPECT_EQ(PACKETS_NEWLY_ACKED,
manager_.OnAckFrameEnd(clock_.Now(), QuicPacketNumber(1),
ENCRYPTION_INITIAL, kEmptyCounts));
EXPECT_CALL(notifier_, IsFrameOutstanding(_)).WillRepeatedly(Return(false));
uint64_t unacked[] = {2};
VerifyUnackedPackets(unacked, ABSL_ARRAYSIZE(unacked));
VerifyRetransmittablePackets(nullptr, 0);
EXPECT_EQ(0u, stats_.packets_spuriously_retransmitted);
}
TEST_F(QuicSentPacketManagerTest, RetransmitThenStopRetransmittingBeforeSend) {
SendDataPacket(1);
EXPECT_CALL(notifier_, RetransmitFrames(_, _)).WillRepeatedly(Return(true));
QuicSentPacketManagerPeer::MarkForRetransmission(&manager_, 1,
PTO_RETRANSMISSION);
EXPECT_CALL(notifier_, IsFrameOutstanding(_)).WillRepeatedly(Return(false));
uint64_t unacked[] = {1};
VerifyUnackedPackets(unacked, ABSL_ARRAYSIZE(unacked));
VerifyRetransmittablePackets(nullptr, 0);
EXPECT_EQ(0u, stats_.packets_spuriously_retransmitted);
}
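
// Acking the original transmission after it has been retransmitted makes the
// retransmission spurious once the retransmitted copy is also acked.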
TEST_F(QuicSentPacketManagerTest, RetransmitThenAckPrevious) {
SendDataPacket(1);
RetransmitAndSendPacket(1, 2);
QuicTime::Delta rtt = QuicTime::Delta::FromMilliseconds(15);
clock_.AdvanceTime(rtt);
ExpectAck(1);
manager_.OnAckFrameStart(QuicPacketNumber(1), QuicTime::Delta::Infinite(),
clock_.Now());
manager_.OnAckRange(QuicPacketNumber(1), QuicPacketNumber(2));
EXPECT_EQ(PACKETS_NEWLY_ACKED,
manager_.OnAckFrameEnd(clock_.Now(), QuicPacketNumber(1),
ENCRYPTION_INITIAL, kEmptyCounts));
EXPECT_CALL(notifier_, IsFrameOutstanding(_)).WillRepeatedly(Return(false));
uint64_t unacked[] = {2};
VerifyUnackedPackets(unacked, ABSL_ARRAYSIZE(unacked));
EXPECT_TRUE(manager_.HasInFlightPackets());
VerifyRetransmittablePackets(nullptr, 0);
EXPECT_CALL(notifier_, OnFrameAcked(_, _, _)).WillOnce(Return(false));
ExpectAck(2);
manager_.OnAckFrameStart(QuicPacketNumber(2), QuicTime::Delta::Infinite(),
clock_.Now());
manager_.OnAckRange(QuicPacketNumber(1), QuicPacketNumber(3));
EXPECT_EQ(PACKETS_NEWLY_ACKED,
manager_.OnAckFrameEnd(clock_.Now(), QuicPacketNumber(2),
ENCRYPTION_INITIAL, kEmptyCounts));
EXPECT_EQ(1u, stats_.packets_spuriously_retransmitted);
}
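
// Ack the original transmission, then nack the retransmission via subsequent
// acks until it is declared lost.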
TEST_F(QuicSentPacketManagerTest, RetransmitThenAckPreviousThenNackRetransmit) {
SendDataPacket(1);
RetransmitAndSendPacket(1, 2);
QuicTime::Delta rtt = QuicTime::Delta::FromMilliseconds(15);
clock_.AdvanceTime(rtt);
ExpectAck(1);
manager_.OnAckFrameStart(QuicPacketNumber(1), QuicTime::Delta::Infinite(),
clock_.Now());
manager_.OnAckRange(QuicPacketNumber(1), QuicPacketNumber(2));
EXPECT_EQ(PACKETS_NEWLY_ACKED,
manager_.OnAckFrameEnd(clock_.Now(), QuicPacketNumber(1),
ENCRYPTION_INITIAL, kEmptyCounts));
SendDataPacket(3);
SendDataPacket(4);
SendDataPacket(5);
clock_.AdvanceTime(rtt);
EXPECT_CALL(notifier_, IsFrameOutstanding(_)).WillRepeatedly(Return(false));
EXPECT_CALL(notifier_, OnFrameLost(_)).Times(1);
ExpectAckAndLoss(true, 3, 2);
manager_.OnAckFrameStart(QuicPacketNumber(3), QuicTime::Delta::Infinite(),
clock_.Now());
manager_.OnAckRange(QuicPacketNumber(3), QuicPacketNumber(4));
manager_.OnAckRange(QuicPacketNumber(1), QuicPacketNumber(2));
EXPECT_EQ(PACKETS_NEWLY_ACKED,
manager_.OnAckFrameEnd(clock_.Now(), QuicPacketNumber(2),
ENCRYPTION_INITIAL, kEmptyCounts));
ExpectAck(4);
manager_.OnAckFrameStart(QuicPacketNumber(4), QuicTime::Delta::Infinite(),
clock_.Now());
manager_.OnAckRange(QuicPacketNumber(3), QuicPacketNumber(5));
manager_.OnAckRange(QuicPacketNumber(1), QuicPacketNumber(2));
EXPECT_EQ(PACKETS_NEWLY_ACKED,
manager_.OnAckFrameEnd(clock_.Now(), QuicPacketNumber(3),
ENCRYPTION_INITIAL, kEmptyCounts));
ExpectAck(5);
manager_.OnAckFrameStart(QuicPacketNumber(5), QuicTime::Delta::Infinite(),
clock_.Now());
manager_.OnAckRange(QuicPacketNumber(3), QuicPacketNumber(6));
manager_.OnAckRange(QuicPacketNumber(1), QuicPacketNumber(2));
EXPECT_EQ(PACKETS_NEWLY_ACKED,
manager_.OnAckFrameEnd(clock_.Now(), QuicPacketNumber(4),
ENCRYPTION_INITIAL, kEmptyCounts));
uint64_t unacked[] = {2};
VerifyUnackedPackets(unacked, ABSL_ARRAYSIZE(unacked));
EXPECT_FALSE(manager_.HasInFlightPackets());
VerifyRetransmittablePackets(nullptr, 0);
EXPECT_EQ(QuicTime::Zero(), manager_.GetRetransmissionTime());
}
TEST_F(QuicSentPacketManagerTest,
DISABLED_RetransmitTwiceThenAckPreviousBeforeSend) {
SendDataPacket(1);
RetransmitAndSendPacket(1, 2);
EXPECT_CALL(*send_algorithm_, OnRetransmissionTimeout(true));
EXPECT_CALL(*network_change_visitor_, OnCongestionChange());
manager_.OnRetransmissionTimeout();
ExpectUpdatedRtt(1);
manager_.OnAckFrameStart(QuicPacketNumber(1), QuicTime::Delta::Infinite(),
clock_.Now());
manager_.OnAckRange(QuicPacketNumber(1), QuicPacketNumber(2));
EXPECT_EQ(PACKETS_NEWLY_ACKED,
manager_.OnAckFrameEnd(clock_.Now(), QuicPacketNumber(1),
ENCRYPTION_INITIAL, kEmptyCounts));
uint64_t unacked[] = {2};
VerifyUnackedPackets(unacked, ABSL_ARRAYSIZE(unacked));
EXPECT_FALSE(manager_.HasInFlightPackets());
VerifyRetransmittablePackets(nullptr, 0);
EXPECT_EQ(QuicTime::Zero(), manager_.GetRetransmissionTime());
}
TEST_F(QuicSentPacketManagerTest, RetransmitTwiceThenAckFirst) {
StrictMock<MockDebugDelegate> debug_delegate;
EXPECT_CALL(debug_delegate, OnSpuriousPacketRetransmission(PTO_RETRANSMISSION,
kDefaultLength))
.Times(1);
manager_.SetDebugDelegate(&debug_delegate);
SendDataPacket(1);
RetransmitAndSendPacket(1, 2);
RetransmitAndSendPacket(2, 3);
QuicTime::Delta rtt = QuicTime::Delta::FromMilliseconds(15);
clock_.AdvanceTime(rtt);
ExpectAck(1);
manager_.OnAckFrameStart(QuicPacketNumber(1), QuicTime::Delta::Infinite(),
clock_.Now());
manager_.OnAckRange(QuicPacketNumber(1), QuicPacketNumber(2));
EXPECT_EQ(PACKETS_NEWLY_ACKED,
manager_.OnAckFrameEnd(clock_.Now(), QuicPacketNumber(1),
ENCRYPTION_INITIAL, kEmptyCounts));
EXPECT_CALL(notifier_, IsFrameOutstanding(_))
.Times(2)
.WillRepeatedly(Return(false));
uint64_t unacked[] = {2, 3};
VerifyUnackedPackets(unacked, ABSL_ARRAYSIZE(unacked));
EXPECT_TRUE(manager_.HasInFlightPackets());
VerifyRetransmittablePackets(nullptr, 0);
SendDataPacket(4);
EXPECT_CALL(notifier_, OnFrameAcked(_, _, _))
.WillOnce(Return(false))
.WillRepeatedly(Return(true));
uint64_t acked[] = {3, 4};
ExpectAcksAndLosses(true, acked, ABSL_ARRAYSIZE(acked), nullptr, 0);
manager_.OnAckFrameStart(QuicPacketNumber(4), QuicTime::Delta::Infinite(),
clock_.Now());
manager_.OnAckRange(QuicPacketNumber(3), QuicPacketNumber(5));
manager_.OnAckRange(QuicPacketNumber(1), QuicPacketNumber(2));
EXPECT_EQ(PACKETS_NEWLY_ACKED,
manager_.OnAckFrameEnd(clock_.Now(), QuicPacketNumber(2),
ENCRYPTION_INITIAL, kEmptyCounts));
uint64_t unacked2[] = {2};
VerifyUnackedPackets(unacked2, ABSL_ARRAYSIZE(unacked2));
EXPECT_TRUE(manager_.HasInFlightPackets());
SendDataPacket(5);
ExpectAckAndLoss(true, 5, 2);
EXPECT_CALL(debug_delegate,
OnPacketLoss(QuicPacketNumber(2), _, LOSS_RETRANSMISSION, _));
EXPECT_CALL(notifier_, IsFrameOutstanding(_)).WillRepeatedly(Return(false));
EXPECT_CALL(notifier_, OnFrameLost(_)).Times(1);
manager_.OnAckFrameStart(QuicPacketNumber(5), QuicTime::Delta::Infinite(),
clock_.Now());
manager_.OnAckRange(QuicPacketNumber(3), QuicPacketNumber(6));
manager_.OnAckRange(QuicPacketNumber(1), QuicPacketNumber(2));
EXPECT_EQ(PACKETS_NEWLY_ACKED,
manager_.OnAckFrameEnd(clock_.Now(), QuicPacketNumber(3),
ENCRYPTION_INITIAL, kEmptyCounts));
uint64_t unacked3[] = {2};
VerifyUnackedPackets(unacked3, ABSL_ARRAYSIZE(unacked3));
EXPECT_FALSE(manager_.HasInFlightPackets());
EXPECT_EQ(1u, stats_.packets_spuriously_retransmitted);
EXPECT_EQ(1u, stats_.packets_lost);
EXPECT_LT(0.0, stats_.total_loss_detection_response_time);
EXPECT_LE(1u, stats_.sent_packets_max_sequence_reordering);
}
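
// Acking the original transmission of a loss-retransmitted packet should
// report a spurious loss to the loss algorithm.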
TEST_F(QuicSentPacketManagerTest, AckOriginalTransmission) {
auto loss_algorithm = std::make_unique<MockLossAlgorithm>();
QuicSentPacketManagerPeer::SetLossAlgorithm(&manager_, loss_algorithm.get());
SendDataPacket(1);
RetransmitAndSendPacket(1, 2);
{
ExpectAck(1);
EXPECT_CALL(*loss_algorithm, DetectLosses(_, _, _, _, _, _));
manager_.OnAckFrameStart(QuicPacketNumber(1), QuicTime::Delta::Infinite(),
clock_.Now());
manager_.OnAckRange(QuicPacketNumber(1), QuicPacketNumber(2));
EXPECT_EQ(PACKETS_NEWLY_ACKED,
manager_.OnAckFrameEnd(clock_.Now(), QuicPacketNumber(1),
ENCRYPTION_INITIAL, kEmptyCounts));
}
SendDataPacket(3);
SendDataPacket(4);
{
ExpectAck(4);
EXPECT_CALL(*loss_algorithm, DetectLosses(_, _, _, _, _, _));
manager_.OnAckFrameStart(QuicPacketNumber(4), QuicTime::Delta::Infinite(),
clock_.Now());
manager_.OnAckRange(QuicPacketNumber(4), QuicPacketNumber(5));
manager_.OnAckRange(QuicPacketNumber(1), QuicPacketNumber(2));
EXPECT_EQ(PACKETS_NEWLY_ACKED,
manager_.OnAckFrameEnd(clock_.Now(), QuicPacketNumber(2),
ENCRYPTION_INITIAL, kEmptyCounts));
RetransmitAndSendPacket(3, 5, LOSS_RETRANSMISSION);
}
{
uint64_t acked[] = {3};
ExpectAcksAndLosses(false, acked, ABSL_ARRAYSIZE(acked), nullptr, 0);
EXPECT_CALL(*loss_algorithm, DetectLosses(_, _, _, _, _, _));
EXPECT_CALL(*loss_algorithm,
SpuriousLossDetected(_, _, _, QuicPacketNumber(3),
QuicPacketNumber(4)));
manager_.OnAckFrameStart(QuicPacketNumber(4), QuicTime::Delta::Infinite(),
clock_.Now());
manager_.OnAckRange(QuicPacketNumber(3), QuicPacketNumber(5));
manager_.OnAckRange(QuicPacketNumber(1), QuicPacketNumber(2));
EXPECT_EQ(0u, stats_.packet_spuriously_detected_lost);
EXPECT_EQ(PACKETS_NEWLY_ACKED,
manager_.OnAckFrameEnd(clock_.Now(), QuicPacketNumber(3),
ENCRYPTION_INITIAL, kEmptyCounts));
EXPECT_EQ(1u, stats_.packet_spuriously_detected_lost);
ExpectAck(5);
EXPECT_CALL(*loss_algorithm, DetectLosses(_, _, _, _, _, _));
EXPECT_CALL(notifier_, OnFrameAcked(_, _, _)).WillOnce(Return(false));
manager_.OnAckFrameStart(QuicPacketNumber(5), QuicTime::Delta::Infinite(),
clock_.Now());
manager_.OnAckRange(QuicPacketNumber(3), QuicPacketNumber(6));
manager_.OnAckRange(QuicPacketNumber(1), QuicPacketNumber(2));
EXPECT_EQ(PACKETS_NEWLY_ACKED,
manager_.OnAckFrameEnd(clock_.Now(), QuicPacketNumber(4),
ENCRYPTION_INITIAL, kEmptyCounts));
}
}
TEST_F(QuicSentPacketManagerTest, GetLeastUnacked) {
EXPECT_EQ(QuicPacketNumber(1u), manager_.GetLeastUnacked());
}
TEST_F(QuicSentPacketManagerTest, GetLeastUnackedUnacked) {
SendDataPacket(1);
EXPECT_EQ(QuicPacketNumber(1u), manager_.GetLeastUnacked());
}
TEST_F(QuicSentPacketManagerTest, AckAckAndUpdateRtt) {
EXPECT_FALSE(manager_.largest_packet_peer_knows_is_acked().IsInitialized());
SendDataPacket(1);
SendAckPacket(2, 1);
uint64_t acked[] = {1, 2};
ExpectAcksAndLosses(true, acked, ABSL_ARRAYSIZE(acked), nullptr, 0);
manager_.OnAckFrameStart(QuicPacketNumber(2),
QuicTime::Delta::FromMilliseconds(5), clock_.Now());
manager_.OnAckRange(QuicPacketNumber(1), QuicPacketNumber(3));
EXPECT_EQ(PACKETS_NEWLY_ACKED,
manager_.OnAckFrameEnd(clock_.Now(), QuicPacketNumber(1),
ENCRYPTION_INITIAL, kEmptyCounts));
EXPECT_EQ(QuicPacketNumber(1), manager_.largest_packet_peer_knows_is_acked());
SendAckPacket(3, 3);
uint64_t acked2[] = {3};
ExpectAcksAndLosses(true, acked2, ABSL_ARRAYSIZE(acked2), nullptr, 0);
manager_.OnAckFrameStart(QuicPacketNumber(3), QuicTime::Delta::Infinite(),
clock_.Now());
manager_.OnAckRange(QuicPacketNumber(1), QuicPacketNumber(4));
EXPECT_EQ(PACKETS_NEWLY_ACKED,
manager_.OnAckFrameEnd(clock_.Now(), QuicPacketNumber(2),
ENCRYPTION_INITIAL, kEmptyCounts));
EXPECT_EQ(QuicPacketNumber(3u),
manager_.largest_packet_peer_knows_is_acked());
}
TEST_F(QuicSentPacketManagerTest, Rtt) {
QuicTime::Delta expected_rtt = QuicTime::Delta::FromMilliseconds(20);
SendDataPacket(1);
clock_.AdvanceTime(expected_rtt);
ExpectAck(1);
manager_.OnAckFrameStart(QuicPacketNumber(1), QuicTime::Delta::Infinite(),
clock_.Now());
manager_.OnAckRange(QuicPacketNumber(1), QuicPacketNumber(2));
EXPECT_EQ(PACKETS_NEWLY_ACKED,
manager_.OnAckFrameEnd(clock_.Now(), QuicPacketNumber(1),
ENCRYPTION_INITIAL, kEmptyCounts));
EXPECT_EQ(expected_rtt, manager_.GetRttStats()->latest_rtt());
}
TEST_F(QuicSentPacketManagerTest, RttWithInvalidDelta) {
QuicTime::Delta expected_rtt = QuicTime::Delta::FromMilliseconds(10);
SendDataPacket(1);
clock_.AdvanceTime(expected_rtt);
ExpectAck(1);
manager_.OnAckFrameStart(QuicPacketNumber(1),
QuicTime::Delta::FromMilliseconds(11), clock_.Now());
manager_.OnAckRange(QuicPacketNumber(1), QuicPacketNumber(2));
EXPECT_EQ(PACKETS_NEWLY_ACKED,
manager_.OnAckFrameEnd(clock_.Now(), QuicPacketNumber(1),
ENCRYPTION_INITIAL, kEmptyCounts));
EXPECT_EQ(expected_rtt, manager_.GetRttStats()->latest_rtt());
}
TEST_F(QuicSentPacketManagerTest, RttWithInfiniteDelta) {
QuicTime::Delta expected_rtt = QuicTime::Delta::FromMilliseconds(10);
SendDataPacket(1);
clock_.AdvanceTime(expected_rtt);
ExpectAck(1);
manager_.OnAckFrameStart(QuicPacketNumber(1), QuicTime::Delta::Infinite(),
clock_.Now());
manager_.OnAckRange(QuicPacketNumber(1), QuicPacketNumber(2));
EXPECT_EQ(PACKETS_NEWLY_ACKED,
manager_.OnAckFrameEnd(clock_.Now(), QuicPacketNumber(1),
ENCRYPTION_INITIAL, kEmptyCounts));
EXPECT_EQ(expected_rtt, manager_.GetRttStats()->latest_rtt());
}
TEST_F(QuicSentPacketManagerTest, RttWithDeltaExceedingLimit) {
RttStats* rtt_stats = const_cast<RttStats*>(manager_.GetRttStats());
rtt_stats->UpdateRtt(QuicTime::Delta::FromMilliseconds(10),
QuicTime::Delta::Zero(), QuicTime::Zero());
QuicTime::Delta send_delta = QuicTime::Delta::FromMilliseconds(100);
QuicTime::Delta ack_delay =
QuicTime::Delta::FromMilliseconds(5) + manager_.peer_max_ack_delay();
ASSERT_GT(send_delta - rtt_stats->min_rtt(), ack_delay);
SendDataPacket(1);
clock_.AdvanceTime(send_delta);
ExpectAck(1);
manager_.OnAckFrameStart(QuicPacketNumber(1), ack_delay, clock_.Now());
manager_.OnAckRange(QuicPacketNumber(1), QuicPacketNumber(2));
EXPECT_EQ(PACKETS_NEWLY_ACKED,
manager_.OnAckFrameEnd(clock_.Now(), QuicPacketNumber(1),
ENCRYPTION_FORWARD_SECURE, kEmptyCounts));
QuicTime::Delta expected_rtt_sample =
send_delta - manager_.peer_max_ack_delay();
EXPECT_EQ(expected_rtt_sample, manager_.GetRttStats()->latest_rtt());
}
TEST_F(QuicSentPacketManagerTest, RttZeroDelta) {
QuicTime::Delta expected_rtt = QuicTime::Delta::FromMilliseconds(10);
SendDataPacket(1);
clock_.AdvanceTime(expected_rtt);
ExpectAck(1);
manager_.OnAckFrameStart(QuicPacketNumber(1), QuicTime::Delta::Zero(),
clock_.Now());
manager_.OnAckRange(QuicPacketNumber(1), QuicPacketNumber(2));
EXPECT_EQ(PACKETS_NEWLY_ACKED,
manager_.OnAckFrameEnd(clock_.Now(), QuicPacketNumber(1),
ENCRYPTION_INITIAL, kEmptyCounts));
EXPECT_EQ(expected_rtt, manager_.GetRttStats()->latest_rtt());
}
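
// Each crypto handshake timeout retransmits all outstanding crypto packets.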
TEST_F(QuicSentPacketManagerTest, CryptoHandshakeTimeout) {
const size_t kNumSentCryptoPackets = 2;
for (size_t i = 1; i <= kNumSentCryptoPackets; ++i) {
SendCryptoPacket(i);
}
const size_t kNumSentDataPackets = 3;
for (size_t i = 1; i <= kNumSentDataPackets; ++i) {
SendDataPacket(kNumSentCryptoPackets + i);
}
EXPECT_TRUE(manager_.HasUnackedCryptoPackets());
EXPECT_EQ(5 * kDefaultLength, manager_.GetBytesInFlight());
EXPECT_CALL(notifier_, RetransmitFrames(_, _))
.Times(2)
.WillOnce(
InvokeWithoutArgs([this]() { return RetransmitCryptoPacket(6); }))
.WillOnce(
InvokeWithoutArgs([this]() { return RetransmitCryptoPacket(7); }));
manager_.OnRetransmissionTimeout();
EXPECT_EQ(7 * kDefaultLength, manager_.GetBytesInFlight());
EXPECT_TRUE(manager_.HasUnackedCryptoPackets());
EXPECT_CALL(notifier_, RetransmitFrames(_, _))
.Times(2)
.WillOnce(
InvokeWithoutArgs([this]() { return RetransmitCryptoPacket(8); }))
.WillOnce(
InvokeWithoutArgs([this]() { return RetransmitCryptoPacket(9); }));
manager_.OnRetransmissionTimeout();
EXPECT_EQ(9 * kDefaultLength, manager_.GetBytesInFlight());
EXPECT_TRUE(manager_.HasUnackedCryptoPackets());
uint64_t acked[] = {3, 4, 5, 8, 9};
uint64_t lost[] = {1, 2, 6};
ExpectAcksAndLosses(true, acked, ABSL_ARRAYSIZE(acked), lost,
ABSL_ARRAYSIZE(lost));
EXPECT_CALL(notifier_, OnFrameLost(_)).Times(3);
EXPECT_CALL(notifier_, HasUnackedCryptoData()).WillRepeatedly(Return(false));
manager_.OnAckFrameStart(QuicPacketNumber(9), QuicTime::Delta::Infinite(),
clock_.Now());
manager_.OnAckRange(QuicPacketNumber(8), QuicPacketNumber(10));
manager_.OnAckRange(QuicPacketNumber(3), QuicPacketNumber(6));
EXPECT_EQ(PACKETS_NEWLY_ACKED,
manager_.OnAckFrameEnd(clock_.Now(), QuicPacketNumber(1),
ENCRYPTION_INITIAL, kEmptyCounts));
EXPECT_FALSE(manager_.HasUnackedCryptoPackets());
}
TEST_F(QuicSentPacketManagerTest, CryptoHandshakeSpuriousRetransmission) {
SendCryptoPacket(1);
EXPECT_TRUE(manager_.HasUnackedCryptoPackets());
EXPECT_CALL(notifier_, RetransmitFrames(_, _))
.WillOnce(
InvokeWithoutArgs([this]() { return RetransmitCryptoPacket(2); }));
manager_.OnRetransmissionTimeout();
EXPECT_CALL(notifier_, RetransmitFrames(_, _))
.WillOnce(
InvokeWithoutArgs([this]() { return RetransmitCryptoPacket(3); }));
manager_.OnRetransmissionTimeout();
uint64_t acked[] = {2};
ExpectAcksAndLosses(true, acked, ABSL_ARRAYSIZE(acked), nullptr, 0);
EXPECT_CALL(notifier_, HasUnackedCryptoData()).WillRepeatedly(Return(false));
EXPECT_CALL(notifier_, IsFrameOutstanding(_)).WillRepeatedly(Return(false));
manager_.OnAckFrameStart(QuicPacketNumber(2), QuicTime::Delta::Infinite(),
clock_.Now());
manager_.OnAckRange(QuicPacketNumber(2), QuicPacketNumber(3));
EXPECT_EQ(PACKETS_NEWLY_ACKED,
manager_.OnAckFrameEnd(clock_.Now(), QuicPacketNumber(1),
ENCRYPTION_INITIAL, kEmptyCounts));
EXPECT_FALSE(manager_.HasUnackedCryptoPackets());
uint64_t unacked[] = {1, 3};
VerifyUnackedPackets(unacked, ABSL_ARRAYSIZE(unacked));
}
TEST_F(QuicSentPacketManagerTest, CryptoHandshakeTimeoutUnsentDataPacket) {
const size_t kNumSentCryptoPackets = 2;
for (size_t i = 1; i <= kNumSentCryptoPackets; ++i) {
SendCryptoPacket(i);
}
SendDataPacket(3);
EXPECT_TRUE(manager_.HasUnackedCryptoPackets());
EXPECT_CALL(notifier_, RetransmitFrames(_, _))
.Times(2)
.WillOnce(
InvokeWithoutArgs([this]() { return RetransmitCryptoPacket(4); }))
.WillOnce(
InvokeWithoutArgs([this]() { return RetransmitCryptoPacket(5); }));
manager_.OnRetransmissionTimeout();
EXPECT_TRUE(manager_.HasUnackedCryptoPackets());
}
TEST_F(QuicSentPacketManagerTest,
CryptoHandshakeRetransmissionThenNeuterAndAck) {
SendCryptoPacket(1);
EXPECT_TRUE(manager_.HasUnackedCryptoPackets());
EXPECT_CALL(notifier_, RetransmitFrames(_, _))
.WillOnce(
InvokeWithoutArgs([this]() { return RetransmitCryptoPacket(2); }));
manager_.OnRetransmissionTimeout();
EXPECT_TRUE(manager_.HasUnackedCryptoPackets());
EXPECT_CALL(notifier_, RetransmitFrames(_, _))
.WillOnce(
InvokeWithoutArgs([this]() { return RetransmitCryptoPacket(3); }));
manager_.OnRetransmissionTimeout();
EXPECT_TRUE(manager_.HasUnackedCryptoPackets());
EXPECT_CALL(notifier_, HasUnackedCryptoData()).WillRepeatedly(Return(false));
EXPECT_CALL(notifier_, IsFrameOutstanding(_)).WillRepeatedly(Return(false));
manager_.NeuterUnencryptedPackets();
EXPECT_FALSE(manager_.HasUnackedCryptoPackets());
uint64_t unacked[] = {1, 2, 3};
VerifyUnackedPackets(unacked, ABSL_ARRAYSIZE(unacked));
VerifyRetransmittablePackets(nullptr, 0);
EXPECT_FALSE(manager_.HasUnackedCryptoPackets());
EXPECT_FALSE(manager_.HasInFlightPackets());
uint64_t acked[] = {3};
ExpectAcksAndLosses(true, acked, ABSL_ARRAYSIZE(acked), nullptr, 0);
manager_.OnAckFrameStart(QuicPacketNumber(3), QuicTime::Delta::Infinite(),
clock_.Now());
manager_.OnAckRange(QuicPacketNumber(3), QuicPacketNumber(4));
EXPECT_EQ(PACKETS_NEWLY_ACKED,
manager_.OnAckFrameEnd(clock_.Now(), QuicPacketNumber(1),
ENCRYPTION_INITIAL, kEmptyCounts));
VerifyUnackedPackets(nullptr, 0);
VerifyRetransmittablePackets(nullptr, 0);
}
TEST_F(QuicSentPacketManagerTest, GetTransmissionTime) {
EXPECT_EQ(QuicTime::Zero(), manager_.GetRetransmissionTime());
}
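
// The crypto retransmission delay starts at 1.5 * srtt (with a 10ms floor)
// and doubles on each consecutive crypto retransmission.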
TEST_F(QuicSentPacketManagerTest, GetTransmissionTimeCryptoHandshake) {
QuicTime crypto_packet_send_time = clock_.Now();
SendCryptoPacket(1);
RttStats* rtt_stats = const_cast<RttStats*>(manager_.GetRttStats());
rtt_stats->set_initial_rtt(QuicTime::Delta::FromMilliseconds(1));
EXPECT_EQ(clock_.Now() + QuicTime::Delta::FromMilliseconds(10),
manager_.GetRetransmissionTime());
rtt_stats->set_initial_rtt(QuicTime::Delta::FromMilliseconds(100));
QuicTime::Delta srtt = rtt_stats->initial_rtt();
QuicTime expected_time = clock_.Now() + 1.5 * srtt;
EXPECT_EQ(expected_time, manager_.GetRetransmissionTime());
clock_.AdvanceTime(1.5 * srtt);
EXPECT_CALL(notifier_, RetransmitFrames(_, _))
.WillOnce(
InvokeWithoutArgs([this]() { return RetransmitCryptoPacket(2); }));
crypto_packet_send_time = clock_.Now();
manager_.OnRetransmissionTimeout();
expected_time = crypto_packet_send_time + srtt * 2 * 1.5;
EXPECT_EQ(expected_time, manager_.GetRetransmissionTime());
clock_.AdvanceTime(2 * 1.5 * srtt);
EXPECT_CALL(notifier_, RetransmitFrames(_, _))
.WillOnce(
InvokeWithoutArgs([this]() { return RetransmitCryptoPacket(3); }));
crypto_packet_send_time = clock_.Now();
manager_.OnRetransmissionTimeout();
expected_time = crypto_packet_send_time + srtt * 4 * 1.5;
EXPECT_EQ(expected_time, manager_.GetRetransmissionTime());
}
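
// With the kCONH connection option, the crypto retransmission delay is a more
// conservative 2 * srtt (with a 25ms floor), and still doubles on timeout.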
TEST_F(QuicSentPacketManagerTest,
GetConservativeTransmissionTimeCryptoHandshake) {
QuicConfig config;
QuicTagVector options;
options.push_back(kCONH);
QuicConfigPeer::SetReceivedConnectionOptions(&config, options);
EXPECT_CALL(*send_algorithm_, SetFromConfig(_, _));
EXPECT_CALL(*network_change_visitor_, OnCongestionChange());
manager_.SetFromConfig(config);
EXPECT_CALL(*send_algorithm_, PacingRate(_))
.WillRepeatedly(Return(QuicBandwidth::Zero()));
EXPECT_CALL(*send_algorithm_, GetCongestionWindow())
.WillRepeatedly(Return(10 * kDefaultTCPMSS));
QuicTime crypto_packet_send_time = clock_.Now();
SendCryptoPacket(1);
RttStats* rtt_stats = const_cast<RttStats*>(manager_.GetRttStats());
rtt_stats->set_initial_rtt(QuicTime::Delta::FromMilliseconds(1));
EXPECT_EQ(clock_.Now() + QuicTime::Delta::FromMilliseconds(25),
manager_.GetRetransmissionTime());
rtt_stats->set_initial_rtt(QuicTime::Delta::FromMilliseconds(100));
QuicTime::Delta srtt = rtt_stats->initial_rtt();
QuicTime expected_time = clock_.Now() + 2 * srtt;
EXPECT_EQ(expected_time, manager_.GetRetransmissionTime());
clock_.AdvanceTime(2 * srtt);
EXPECT_CALL(notifier_, RetransmitFrames(_, _))
.WillOnce(
InvokeWithoutArgs([this]() { return RetransmitCryptoPacket(2); }));
crypto_packet_send_time = clock_.Now();
manager_.OnRetransmissionTimeout();
expected_time = crypto_packet_send_time + srtt * 2 * 2;
EXPECT_EQ(expected_time, manager_.GetRetransmissionTime());
}
TEST_F(QuicSentPacketManagerTest, GetLossDelay) {
auto loss_algorithm = std::make_unique<MockLossAlgorithm>();
QuicSentPacketManagerPeer::SetLossAlgorithm(&manager_, loss_algorithm.get());
EXPECT_CALL(*loss_algorithm, GetLossTimeout())
.WillRepeatedly(Return(QuicTime::Zero()));
SendDataPacket(1);
SendDataPacket(2);
ExpectAck(2);
EXPECT_CALL(*loss_algorithm, DetectLosses(_, _, _, _, _, _));
manager_.OnAckFrameStart(QuicPacketNumber(2), QuicTime::Delta::Infinite(),
clock_.Now());
manager_.OnAckRange(QuicPacketNumber(2), QuicPacketNumber(3));
EXPECT_EQ(PACKETS_NEWLY_ACKED,
manager_.OnAckFrameEnd(clock_.Now(), QuicPacketNumber(1),
ENCRYPTION_INITIAL, kEmptyCounts));
QuicTime timeout(clock_.Now() + QuicTime::Delta::FromMilliseconds(10));
EXPECT_CALL(*loss_algorithm, GetLossTimeout())
.WillRepeatedly(Return(timeout));
EXPECT_EQ(timeout, manager_.GetRetransmissionTime());
EXPECT_CALL(*loss_algorithm, DetectLosses(_, _, _, _, _, _));
manager_.OnRetransmissionTimeout();
}
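
// The kILD0..kILD4 connection options select IETF loss detection variants:
// the reordering shift, the adaptive reordering threshold, and the adaptive
// time threshold.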
TEST_F(QuicSentPacketManagerTest, NegotiateIetfLossDetectionFromOptions) {
EXPECT_TRUE(
QuicSentPacketManagerPeer::AdaptiveReorderingThresholdEnabled(&manager_));
EXPECT_FALSE(
QuicSentPacketManagerPeer::AdaptiveTimeThresholdEnabled(&manager_));
EXPECT_EQ(kDefaultLossDelayShift,
QuicSentPacketManagerPeer::GetReorderingShift(&manager_));
QuicConfig config;
QuicTagVector options;
options.push_back(kILD0);
QuicConfigPeer::SetReceivedConnectionOptions(&config, options);
EXPECT_CALL(*send_algorithm_, SetFromConfig(_, _));
EXPECT_CALL(*network_change_visitor_, OnCongestionChange());
manager_.SetFromConfig(config);
EXPECT_EQ(3, QuicSentPacketManagerPeer::GetReorderingShift(&manager_));
EXPECT_FALSE(
QuicSentPacketManagerPeer::AdaptiveReorderingThresholdEnabled(&manager_));
}
TEST_F(QuicSentPacketManagerTest,
NegotiateIetfLossDetectionOneFourthRttFromOptions) {
EXPECT_TRUE(
QuicSentPacketManagerPeer::AdaptiveReorderingThresholdEnabled(&manager_));
EXPECT_FALSE(
QuicSentPacketManagerPeer::AdaptiveTimeThresholdEnabled(&manager_));
EXPECT_EQ(kDefaultLossDelayShift,
QuicSentPacketManagerPeer::GetReorderingShift(&manager_));
QuicConfig config;
QuicTagVector options;
options.push_back(kILD1);
QuicConfigPeer::SetReceivedConnectionOptions(&config, options);
EXPECT_CALL(*send_algorithm_, SetFromConfig(_, _));
EXPECT_CALL(*network_change_visitor_, OnCongestionChange());
manager_.SetFromConfig(config);
EXPECT_EQ(kDefaultLossDelayShift,
QuicSentPacketManagerPeer::GetReorderingShift(&manager_));
EXPECT_FALSE(
QuicSentPacketManagerPeer::AdaptiveReorderingThresholdEnabled(&manager_));
}
TEST_F(QuicSentPacketManagerTest,
NegotiateIetfLossDetectionAdaptiveReorderingThreshold) {
EXPECT_TRUE(
QuicSentPacketManagerPeer::AdaptiveReorderingThresholdEnabled(&manager_));
EXPECT_FALSE(
QuicSentPacketManagerPeer::AdaptiveTimeThresholdEnabled(&manager_));
EXPECT_EQ(kDefaultLossDelayShift,
QuicSentPacketManagerPeer::GetReorderingShift(&manager_));
QuicConfig config;
QuicTagVector options;
options.push_back(kILD2);
QuicConfigPeer::SetReceivedConnectionOptions(&config, options);
EXPECT_CALL(*send_algorithm_, SetFromConfig(_, _));
EXPECT_CALL(*network_change_visitor_, OnCongestionChange());
manager_.SetFromConfig(config);
EXPECT_EQ(3, QuicSentPacketManagerPeer::GetReorderingShift(&manager_));
EXPECT_TRUE(
QuicSentPacketManagerPeer::AdaptiveReorderingThresholdEnabled(&manager_));
}
TEST_F(QuicSentPacketManagerTest,
NegotiateIetfLossDetectionAdaptiveReorderingThreshold2) {
EXPECT_TRUE(
QuicSentPacketManagerPeer::AdaptiveReorderingThresholdEnabled(&manager_));
EXPECT_FALSE(
QuicSentPacketManagerPeer::AdaptiveTimeThresholdEnabled(&manager_));
EXPECT_EQ(kDefaultLossDelayShift,
QuicSentPacketManagerPeer::GetReorderingShift(&manager_));
QuicConfig config;
QuicTagVector options;
options.push_back(kILD3);
QuicConfigPeer::SetReceivedConnectionOptions(&config, options);
EXPECT_CALL(*send_algorithm_, SetFromConfig(_, _));
EXPECT_CALL(*network_change_visitor_, OnCongestionChange());
manager_.SetFromConfig(config);
EXPECT_EQ(kDefaultLossDelayShift,
QuicSentPacketManagerPeer::GetReorderingShift(&manager_));
EXPECT_TRUE(
QuicSentPacketManagerPeer::AdaptiveReorderingThresholdEnabled(&manager_));
}
TEST_F(QuicSentPacketManagerTest,
NegotiateIetfLossDetectionAdaptiveReorderingAndTimeThreshold) {
EXPECT_TRUE(
QuicSentPacketManagerPeer::AdaptiveReorderingThresholdEnabled(&manager_));
EXPECT_FALSE(
QuicSentPacketManagerPeer::AdaptiveTimeThresholdEnabled(&manager_));
EXPECT_EQ(kDefaultLossDelayShift,
QuicSentPacketManagerPeer::GetReorderingShift(&manager_));
QuicConfig config;
QuicTagVector options;
options.push_back(kILD4);
QuicConfigPeer::SetReceivedConnectionOptions(&config, options);
EXPECT_CALL(*send_algorithm_, SetFromConfig(_, _));
EXPECT_CALL(*network_change_visitor_, OnCongestionChange());
manager_.SetFromConfig(config);
EXPECT_EQ(kDefaultLossDelayShift,
QuicSentPacketManagerPeer::GetReorderingShift(&manager_));
EXPECT_TRUE(
QuicSentPacketManagerPeer::AdaptiveReorderingThresholdEnabled(&manager_));
EXPECT_TRUE(
QuicSentPacketManagerPeer::AdaptiveTimeThresholdEnabled(&manager_));
}
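
// Connection options pick the congestion controller: kRENO selects Reno in
// bytes, kTBBR selects BBR, kBYTE selects CUBIC in bytes, and kRENO wins when
// combined with kBYTE.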
TEST_F(QuicSentPacketManagerTest, NegotiateCongestionControlFromOptions) {
QuicConfig config;
QuicTagVector options;
options.push_back(kRENO);
QuicConfigPeer::SetReceivedConnectionOptions(&config, options);
EXPECT_CALL(*network_change_visitor_, OnCongestionChange());
manager_.SetFromConfig(config);
EXPECT_EQ(kRenoBytes, QuicSentPacketManagerPeer::GetSendAlgorithm(manager_)
->GetCongestionControlType());
options.clear();
options.push_back(kTBBR);
QuicConfigPeer::SetReceivedConnectionOptions(&config, options);
EXPECT_CALL(*network_change_visitor_, OnCongestionChange());
manager_.SetFromConfig(config);
EXPECT_EQ(kBBR, QuicSentPacketManagerPeer::GetSendAlgorithm(manager_)
->GetCongestionControlType());
options.clear();
options.push_back(kBYTE);
QuicConfigPeer::SetReceivedConnectionOptions(&config, options);
EXPECT_CALL(*network_change_visitor_, OnCongestionChange());
manager_.SetFromConfig(config);
EXPECT_EQ(kCubicBytes, QuicSentPacketManagerPeer::GetSendAlgorithm(manager_)
->GetCongestionControlType());
options.clear();
options.push_back(kRENO);
options.push_back(kBYTE);
QuicConfigPeer::SetReceivedConnectionOptions(&config, options);
EXPECT_CALL(*network_change_visitor_, OnCongestionChange());
manager_.SetFromConfig(config);
EXPECT_EQ(kRenoBytes, QuicSentPacketManagerPeer::GetSendAlgorithm(manager_)
->GetCongestionControlType());
}
TEST_F(QuicSentPacketManagerTest, NegotiateClientCongestionControlFromOptions) {
QuicConfig config;
QuicTagVector options;
const SendAlgorithmInterface* mock_sender =
QuicSentPacketManagerPeer::GetSendAlgorithm(manager_);
options.push_back(kRENO);
config.SetClientConnectionOptions(options);
EXPECT_CALL(*send_algorithm_, SetFromConfig(_, _));
EXPECT_CALL(*network_change_visitor_, OnCongestionChange());
manager_.SetFromConfig(config);
EXPECT_EQ(mock_sender, QuicSentPacketManagerPeer::GetSendAlgorithm(manager_));
QuicSentPacketManagerPeer::SetPerspective(&manager_, Perspective::IS_CLIENT);
EXPECT_CALL(*network_change_visitor_, OnCongestionChange());
manager_.SetFromConfig(config);
EXPECT_EQ(kRenoBytes, QuicSentPacketManagerPeer::GetSendAlgorithm(manager_)
->GetCongestionControlType());
options.clear();
options.push_back(kTBBR);
config.SetClientConnectionOptions(options);
EXPECT_CALL(*network_change_visitor_, OnCongestionChange());
manager_.SetFromConfig(config);
EXPECT_EQ(kBBR, QuicSentPacketManagerPeer::GetSendAlgorithm(manager_)
->GetCongestionControlType());
options.clear();
options.push_back(kBYTE);
config.SetClientConnectionOptions(options);
EXPECT_CALL(*network_change_visitor_, OnCongestionChange());
manager_.SetFromConfig(config);
EXPECT_EQ(kCubicBytes, QuicSentPacketManagerPeer::GetSendAlgorithm(manager_)
->GetCongestionControlType());
options.clear();
options.push_back(kRENO);
options.push_back(kBYTE);
config.SetClientConnectionOptions(options);
EXPECT_CALL(*network_change_visitor_, OnCongestionChange());
manager_.SetFromConfig(config);
EXPECT_EQ(kRenoBytes, QuicSentPacketManagerPeer::GetSendAlgorithm(manager_)
->GetCongestionControlType());
}
TEST_F(QuicSentPacketManagerTest, UseInitialRoundTripTimeToSend) {
QuicTime::Delta initial_rtt = QuicTime::Delta::FromMilliseconds(325);
EXPECT_NE(initial_rtt, manager_.GetRttStats()->smoothed_rtt());
QuicConfig config;
config.SetInitialRoundTripTimeUsToSend(initial_rtt.ToMicroseconds());
EXPECT_CALL(*send_algorithm_, SetFromConfig(_, _));
EXPECT_CALL(*network_change_visitor_, OnCongestionChange());
manager_.SetFromConfig(config);
EXPECT_EQ(QuicTime::Delta::Zero(), manager_.GetRttStats()->smoothed_rtt());
EXPECT_EQ(initial_rtt, manager_.GetRttStats()->initial_rtt());
}
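
// Resuming a cached network state seeds the initial RTT estimate.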
TEST_F(QuicSentPacketManagerTest, ResumeConnectionState) {
const QuicTime::Delta kRtt = QuicTime::Delta::FromMilliseconds(123);
CachedNetworkParameters cached_network_params;
cached_network_params.set_min_rtt_ms(kRtt.ToMilliseconds());
SendAlgorithmInterface::NetworkParams params;
params.bandwidth = QuicBandwidth::Zero();
params.allow_cwnd_to_decrease = false;
params.rtt = kRtt;
params.is_rtt_trusted = true;
EXPECT_CALL(*send_algorithm_, AdjustNetworkParameters(params));
EXPECT_CALL(*send_algorithm_, GetCongestionWindow())
.Times(testing::AnyNumber());
  manager_.ResumeConnectionState(cached_network_params,
                                 /*max_bandwidth_resumption=*/false);
EXPECT_EQ(kRtt, manager_.GetRttStats()->initial_rtt());
}
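
// A connection migration with an unspecified address change resets RTT and
// PTO state but keeps the existing send algorithm.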
TEST_F(QuicSentPacketManagerTest, ConnectionMigrationUnspecifiedChange) {
RttStats* rtt_stats = const_cast<RttStats*>(manager_.GetRttStats());
QuicTime::Delta default_init_rtt = rtt_stats->initial_rtt();
rtt_stats->set_initial_rtt(default_init_rtt * 2);
EXPECT_EQ(2 * default_init_rtt, rtt_stats->initial_rtt());
QuicSentPacketManagerPeer::SetConsecutivePtoCount(&manager_, 1);
EXPECT_EQ(1u, manager_.GetConsecutivePtoCount());
EXPECT_CALL(*send_algorithm_, OnConnectionMigration());
  EXPECT_EQ(nullptr,
            manager_.OnConnectionMigration(/*reset_send_algorithm=*/false));
EXPECT_EQ(default_init_rtt, rtt_stats->initial_rtt());
EXPECT_EQ(0u, manager_.GetConsecutivePtoCount());
}
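
// When migration also resets the send algorithm, in-flight packets are
// abandoned, RTT state is cleared, and packets sent before the migration must
// not produce RTT samples afterwards.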
TEST_F(QuicSentPacketManagerTest,
ConnectionMigrationUnspecifiedChangeResetSendAlgorithm) {
auto loss_algorithm = std::make_unique<MockLossAlgorithm>();
QuicSentPacketManagerPeer::SetLossAlgorithm(&manager_, loss_algorithm.get());
RttStats* rtt_stats = const_cast<RttStats*>(manager_.GetRttStats());
QuicTime::Delta default_init_rtt = rtt_stats->initial_rtt();
rtt_stats->set_initial_rtt(default_init_rtt * 2);
EXPECT_EQ(2 * default_init_rtt, rtt_stats->initial_rtt());
QuicSentPacketManagerPeer::SetConsecutivePtoCount(&manager_, 1);
EXPECT_EQ(1u, manager_.GetConsecutivePtoCount());
SendDataPacket(1, ENCRYPTION_FORWARD_SECURE);
RttStats old_rtt_stats;
old_rtt_stats.CloneFrom(*manager_.GetRttStats());
EXPECT_CALL(notifier_, OnFrameLost(_));
  std::unique_ptr<SendAlgorithmInterface> old_send_algorithm =
      manager_.OnConnectionMigration(/*reset_send_algorithm=*/true);
EXPECT_NE(old_send_algorithm.get(), manager_.GetSendAlgorithm());
EXPECT_EQ(old_send_algorithm->GetCongestionControlType(),
manager_.GetSendAlgorithm()->GetCongestionControlType());
EXPECT_EQ(default_init_rtt, rtt_stats->initial_rtt());
EXPECT_EQ(0u, manager_.GetConsecutivePtoCount());
EXPECT_EQ(0u, BytesInFlight());
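  // Switch back to the original send algorithm and retransmit packet 1 as
  // packet 2.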
manager_.SetSendAlgorithm(old_send_algorithm.release());
clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(10));
RetransmitDataPacket(2, LOSS_RETRANSMISSION, ENCRYPTION_FORWARD_SECURE);
EXPECT_EQ(kDefaultLength, BytesInFlight());
clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(10));
  EXPECT_CALL(*send_algorithm_,
              OnCongestionEvent(/*rtt_updated=*/false, kDefaultLength, _, _, _,
                                _, _))
.WillOnce(testing::WithArg<3>(
Invoke([](const AckedPacketVector& acked_packets) {
EXPECT_EQ(1u, acked_packets.size());
EXPECT_EQ(QuicPacketNumber(1), acked_packets[0].packet_number);
EXPECT_EQ(0u, acked_packets[0].bytes_acked);
})));
EXPECT_CALL(*network_change_visitor_, OnCongestionChange());
manager_.OnAckFrameStart(QuicPacketNumber(1), QuicTime::Delta::Infinite(),
clock_.Now());
manager_.OnAckRange(QuicPacketNumber(1), QuicPacketNumber(2));
EXPECT_CALL(*loss_algorithm, DetectLosses(_, _, _, _, _, _));
EXPECT_CALL(*loss_algorithm, SpuriousLossDetected(_, _, _, _, _)).Times(0u);
EXPECT_EQ(PACKETS_NEWLY_ACKED,
manager_.OnAckFrameEnd(clock_.Now(), QuicPacketNumber(1),
ENCRYPTION_FORWARD_SECURE, kEmptyCounts));
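  // A packet sent before the migration must not yield an RTT sample.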
EXPECT_TRUE(manager_.GetRttStats()->latest_rtt().IsZero());
manager_.OnAckFrameStart(QuicPacketNumber(2), QuicTime::Delta::Infinite(),
clock_.Now());
manager_.OnAckRange(QuicPacketNumber(2), QuicPacketNumber(3));
EXPECT_CALL(*loss_algorithm, DetectLosses(_, _, _, _, _, _));
  EXPECT_CALL(*send_algorithm_,
              OnCongestionEvent(/*rtt_updated=*/true, kDefaultLength, _, _, _,
                                _, _))
.WillOnce(testing::WithArg<3>(
Invoke([](const AckedPacketVector& acked_packets) {
EXPECT_EQ(1u, acked_packets.size());
EXPECT_EQ(QuicPacketNumber(2), acked_packets[0].packet_number);
EXPECT_EQ(kDefaultLength, acked_packets[0].bytes_acked);
})));
EXPECT_CALL(*network_change_visitor_, OnCongestionChange());
EXPECT_EQ(PACKETS_NEWLY_ACKED,
manager_.OnAckFrameEnd(clock_.Now(), QuicPacketNumber(2),
ENCRYPTION_FORWARD_SECURE, kEmptyCounts));
EXPECT_EQ(0u, BytesInFlight());
EXPECT_EQ(QuicTime::Delta::FromMilliseconds(10),
manager_.GetRttStats()->latest_rtt());
SendDataPacket(3, ENCRYPTION_FORWARD_SECURE);
EXPECT_CALL(*loss_algorithm, GetLossTimeout())
.WillOnce(Return(clock_.Now() + QuicTime::Delta::FromMilliseconds(10)));
EXPECT_CALL(*loss_algorithm, DetectLosses(_, _, _, _, _, _))
.WillOnce(WithArgs<5>(Invoke([](LostPacketVector* packet_lost) {
packet_lost->emplace_back(QuicPacketNumber(3u), kDefaultLength);
return LossDetectionInterface::DetectionStats();
})));
EXPECT_CALL(notifier_, OnFrameLost(_));
  EXPECT_CALL(*send_algorithm_,
              OnCongestionEvent(/*rtt_updated=*/false, kDefaultLength, _, _, _,
                                _, _));
EXPECT_CALL(*network_change_visitor_, OnCongestionChange());
manager_.OnRetransmissionTimeout();
EXPECT_EQ(0u, BytesInFlight());
  old_send_algorithm =
      manager_.OnConnectionMigration(/*reset_send_algorithm=*/true);
EXPECT_NE(old_send_algorithm.get(), manager_.GetSendAlgorithm());
EXPECT_EQ(old_send_algorithm->GetCongestionControlType(),
manager_.GetSendAlgorithm()->GetCongestionControlType());
EXPECT_EQ(default_init_rtt, rtt_stats->initial_rtt());
EXPECT_EQ(0u, manager_.GetConsecutivePtoCount());
EXPECT_EQ(0u, BytesInFlight());
EXPECT_TRUE(manager_.GetRttStats()->latest_rtt().IsZero());
manager_.SetSendAlgorithm(old_send_algorithm.release());
clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(30));
manager_.OnAckFrameStart(QuicPacketNumber(3), QuicTime::Delta::Infinite(),
clock_.Now());
manager_.OnAckRange(QuicPacketNumber(3), QuicPacketNumber(4));
EXPECT_CALL(*loss_algorithm, DetectLosses(_, _, _, _, _, _));
EXPECT_CALL(*loss_algorithm, SpuriousLossDetected(_, _, _, _, _)).Times(0u);
  EXPECT_CALL(*send_algorithm_,
              OnCongestionEvent(/*rtt_updated=*/false, 0, _, _, _, _, _));
EXPECT_CALL(*network_change_visitor_, OnCongestionChange());
EXPECT_EQ(PACKETS_NEWLY_ACKED,
manager_.OnAckFrameEnd(clock_.Now(), QuicPacketNumber(3),
ENCRYPTION_FORWARD_SECURE, kEmptyCounts));
EXPECT_EQ(0u, BytesInFlight());
EXPECT_TRUE(manager_.GetRttStats()->latest_rtt().IsZero());
SendDataPacket(4, ENCRYPTION_FORWARD_SECURE);
EXPECT_CALL(*loss_algorithm, GetLossTimeout())
.WillOnce(Return(clock_.Now() + QuicTime::Delta::FromMilliseconds(10)));
EXPECT_CALL(*loss_algorithm, DetectLosses(_, _, _, _, _, _))
.WillOnce(WithArgs<5>(Invoke([](LostPacketVector* packet_lost) {
packet_lost->emplace_back(QuicPacketNumber(4u), kDefaultLength);
return LossDetectionInterface::DetectionStats();
})));
EXPECT_CALL(notifier_, OnFrameLost(_));
  EXPECT_CALL(*send_algorithm_,
              OnCongestionEvent(/*rtt_updated=*/false, kDefaultLength, _, _, _,
                                _, _));
EXPECT_CALL(*network_change_visitor_, OnCongestionChange());
manager_.OnRetransmissionTimeout();
EXPECT_EQ(0u, BytesInFlight());
RetransmitDataPacket(5, LOSS_RETRANSMISSION, ENCRYPTION_FORWARD_SECURE);
EXPECT_EQ(kDefaultLength, BytesInFlight());
clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(30));
manager_.OnAckFrameStart(QuicPacketNumber(4), QuicTime::Delta::Infinite(),
clock_.Now());
manager_.OnAckRange(QuicPacketNumber(4), QuicPacketNumber(5));
EXPECT_CALL(*loss_algorithm, DetectLosses(_, _, _, _, _, _));
EXPECT_CALL(*loss_algorithm, SpuriousLossDetected(_, _, _, _, _));
  EXPECT_CALL(*send_algorithm_,
              OnCongestionEvent(/*rtt_updated=*/true, kDefaultLength, _, _, _,
                                _, _));
EXPECT_CALL(*network_change_visitor_, OnCongestionChange());
EXPECT_CALL(notifier_, OnFrameAcked(_, _, _));
EXPECT_EQ(PACKETS_NEWLY_ACKED,
manager_.OnAckFrameEnd(clock_.Now(), QuicPacketNumber(3),
ENCRYPTION_FORWARD_SECURE, kEmptyCounts));
EXPECT_EQ(kDefaultLength, BytesInFlight());
EXPECT_EQ(QuicTime::Delta::FromMilliseconds(30),
manager_.GetRttStats()->latest_rtt());
EXPECT_CALL(notifier_, IsFrameOutstanding(_)).WillOnce(Return(false));
EXPECT_CALL(notifier_, OnFrameLost(_)).Times(0u);
  old_send_algorithm =
      manager_.OnConnectionMigration(/*reset_send_algorithm=*/true);
EXPECT_EQ(default_init_rtt, rtt_stats->initial_rtt());
EXPECT_EQ(0u, manager_.GetConsecutivePtoCount());
EXPECT_EQ(0u, BytesInFlight());
EXPECT_TRUE(manager_.GetRttStats()->latest_rtt().IsZero());
manager_.SetSendAlgorithm(old_send_algorithm.release());
clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(10));
manager_.OnAckFrameStart(QuicPacketNumber(5), QuicTime::Delta::Infinite(),
clock_.Now());
manager_.OnAckRange(QuicPacketNumber(5), QuicPacketNumber(6));
EXPECT_CALL(*loss_algorithm, DetectLosses(_, _, _, _, _, _));
EXPECT_CALL(*loss_algorithm, SpuriousLossDetected(_, _, _, _, _)).Times(0u);
  EXPECT_CALL(*send_algorithm_,
              OnCongestionEvent(/*rtt_updated=*/false, 0, _, _, _, _, _));
EXPECT_CALL(*network_change_visitor_, OnCongestionChange());
EXPECT_CALL(notifier_, OnFrameAcked(_, _, _));
EXPECT_EQ(PACKETS_NEWLY_ACKED,
manager_.OnAckFrameEnd(clock_.Now(), QuicPacketNumber(3),
ENCRYPTION_FORWARD_SECURE, kEmptyCounts));
EXPECT_EQ(0u, BytesInFlight());
EXPECT_TRUE(manager_.GetRttStats()->latest_rtt().IsZero());
}
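
// Acking a packet larger than the current MTU notifies the visitor of the
// increased path MTU.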
TEST_F(QuicSentPacketManagerTest, PathMtuIncreased) {
EXPECT_CALL(*send_algorithm_,
OnPacketSent(_, BytesInFlight(), QuicPacketNumber(1), _, _));
SerializedPacket packet(QuicPacketNumber(1), PACKET_4BYTE_PACKET_NUMBER,
nullptr, kDefaultLength + 100, false, false);
manager_.OnPacketSent(&packet, clock_.Now(), NOT_RETRANSMISSION,
HAS_RETRANSMITTABLE_DATA, true, ECN_NOT_ECT);
ExpectAck(1);
EXPECT_CALL(*network_change_visitor_,
OnPathMtuIncreased(kDefaultLength + 100));
QuicAckFrame ack_frame = InitAckFrame(1);
manager_.OnAckFrameStart(QuicPacketNumber(1), QuicTime::Delta::Infinite(),
clock_.Now());
manager_.OnAckRange(QuicPacketNumber(1), QuicPacketNumber(2));
EXPECT_EQ(PACKETS_NEWLY_ACKED,
manager_.OnAckFrameEnd(clock_.Now(), QuicPacketNumber(1),
ENCRYPTION_INITIAL, kEmptyCounts));
}
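
// An ack frame carrying many ranges exercises the slow path of OnAckRange.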
TEST_F(QuicSentPacketManagerTest, OnAckRangeSlowPath) {
for (size_t i = 1; i <= 20; ++i) {
SendDataPacket(i);
}
uint64_t acked1[] = {5, 6, 10, 11, 15, 16};
uint64_t lost1[] = {1, 2, 3, 4, 7, 8, 9, 12, 13};
ExpectAcksAndLosses(true, acked1, ABSL_ARRAYSIZE(acked1), lost1,
ABSL_ARRAYSIZE(lost1));
EXPECT_CALL(notifier_, OnFrameLost(_)).Times(AnyNumber());
manager_.OnAckFrameStart(QuicPacketNumber(16), QuicTime::Delta::Infinite(),
clock_.Now());
manager_.OnAckRange(QuicPacketNumber(15), QuicPacketNumber(17));
manager_.OnAckRange(QuicPacketNumber(10), QuicPacketNumber(12));
manager_.OnAckRange(QuicPacketNumber(5), QuicPacketNumber(7));
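  // Make sure an empty ack range does no harm.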
manager_.OnAckRange(QuicPacketNumber(4), QuicPacketNumber(4));
EXPECT_EQ(PACKETS_NEWLY_ACKED,
manager_.OnAckFrameEnd(clock_.Now(), QuicPacketNumber(1),
ENCRYPTION_INITIAL, kEmptyCounts));
uint64_t acked2[] = {4, 7, 9, 12, 14, 17, 18, 19, 20};
ExpectAcksAndLosses(true, acked2, ABSL_ARRAYSIZE(acked2), nullptr, 0);
manager_.OnAckFrameStart(QuicPacketNumber(20), QuicTime::Delta::Infinite(),
clock_.Now());
manager_.OnAckRange(QuicPacketNumber(14), QuicPacketNumber(21));
manager_.OnAckRange(QuicPacketNumber(9), QuicPacketNumber(13));
manager_.OnAckRange(QuicPacketNumber(4), QuicPacketNumber(8));
EXPECT_EQ(PACKETS_NEWLY_ACKED,
manager_.OnAckFrameEnd(clock_.Now(), QuicPacketNumber(2),
ENCRYPTION_INITIAL, kEmptyCounts));
}
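
// A later ack whose largest acked is smaller than an earlier one (reneging)
// is tolerated: GetLargestObserved() keeps the previous maximum.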
TEST_F(QuicSentPacketManagerTest, TolerateReneging) {
for (size_t i = 1; i <= 20; ++i) {
SendDataPacket(i);
}
uint64_t acked1[] = {5, 6, 10, 11, 15, 16};
uint64_t lost1[] = {1, 2, 3, 4, 7, 8, 9, 12, 13};
ExpectAcksAndLosses(true, acked1, ABSL_ARRAYSIZE(acked1), lost1,
ABSL_ARRAYSIZE(lost1));
EXPECT_CALL(notifier_, OnFrameLost(_)).Times(AnyNumber());
manager_.OnAckFrameStart(QuicPacketNumber(16), QuicTime::Delta::Infinite(),
clock_.Now());
manager_.OnAckRange(QuicPacketNumber(15), QuicPacketNumber(17));
manager_.OnAckRange(QuicPacketNumber(10), QuicPacketNumber(12));
manager_.OnAckRange(QuicPacketNumber(5), QuicPacketNumber(7));
EXPECT_EQ(PACKETS_NEWLY_ACKED,
manager_.OnAckFrameEnd(clock_.Now(), QuicPacketNumber(1),
ENCRYPTION_INITIAL, kEmptyCounts));
uint64_t acked2[] = {4, 7, 9, 12};
ExpectAcksAndLosses(true, acked2, ABSL_ARRAYSIZE(acked2), nullptr, 0);
manager_.OnAckFrameStart(QuicPacketNumber(12), QuicTime::Delta::Infinite(),
clock_.Now());
manager_.OnAckRange(QuicPacketNumber(9), QuicPacketNumber(13));
manager_.OnAckRange(QuicPacketNumber(4), QuicPacketNumber(8));
EXPECT_EQ(PACKETS_NEWLY_ACKED,
manager_.OnAckFrameEnd(clock_.Now(), QuicPacketNumber(2),
ENCRYPTION_INITIAL, kEmptyCounts));
EXPECT_EQ(QuicPacketNumber(16), manager_.GetLargestObserved());
}
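
// Largest sent and largest acked packets are tracked separately per packet
// number space (initial, handshake, and application data).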
TEST_F(QuicSentPacketManagerTest, MultiplePacketNumberSpaces) {
manager_.EnableMultiplePacketNumberSpacesSupport();
const QuicUnackedPacketMap* unacked_packets =
QuicSentPacketManagerPeer::GetUnackedPacketMap(&manager_);
EXPECT_FALSE(
unacked_packets
->GetLargestSentRetransmittableOfPacketNumberSpace(INITIAL_DATA)
.IsInitialized());
EXPECT_FALSE(
manager_.GetLargestAckedPacket(ENCRYPTION_INITIAL).IsInitialized());
SendDataPacket(1, ENCRYPTION_INITIAL);
EXPECT_EQ(QuicPacketNumber(1),
unacked_packets->GetLargestSentRetransmittableOfPacketNumberSpace(
INITIAL_DATA));
EXPECT_FALSE(
unacked_packets
->GetLargestSentRetransmittableOfPacketNumberSpace(HANDSHAKE_DATA)
.IsInitialized());
ExpectAck(1);
manager_.OnAckFrameStart(QuicPacketNumber(1), QuicTime::Delta::Infinite(),
clock_.Now());
manager_.OnAckRange(QuicPacketNumber(1), QuicPacketNumber(2));
EXPECT_EQ(PACKETS_NEWLY_ACKED,
manager_.OnAckFrameEnd(clock_.Now(), QuicPacketNumber(1),
ENCRYPTION_INITIAL, kEmptyCounts));
EXPECT_EQ(QuicPacketNumber(1),
manager_.GetLargestAckedPacket(ENCRYPTION_INITIAL));
EXPECT_FALSE(
manager_.GetLargestAckedPacket(ENCRYPTION_HANDSHAKE).IsInitialized());
SendDataPacket(2, ENCRYPTION_HANDSHAKE);
SendDataPacket(3, ENCRYPTION_HANDSHAKE);
EXPECT_EQ(QuicPacketNumber(1),
unacked_packets->GetLargestSentRetransmittableOfPacketNumberSpace(
INITIAL_DATA));
EXPECT_EQ(QuicPacketNumber(3),
unacked_packets->GetLargestSentRetransmittableOfPacketNumberSpace(
HANDSHAKE_DATA));
EXPECT_FALSE(
unacked_packets
->GetLargestSentRetransmittableOfPacketNumberSpace(APPLICATION_DATA)
.IsInitialized());
ExpectAck(2);
manager_.OnAckFrameStart(QuicPacketNumber(2), QuicTime::Delta::Infinite(),
clock_.Now());
manager_.OnAckRange(QuicPacketNumber(2), QuicPacketNumber(3));
EXPECT_EQ(PACKETS_NEWLY_ACKED,
manager_.OnAckFrameEnd(clock_.Now(), QuicPacketNumber(2),
ENCRYPTION_HANDSHAKE, kEmptyCounts));
EXPECT_EQ(QuicPacketNumber(2),
manager_.GetLargestAckedPacket(ENCRYPTION_HANDSHAKE));
EXPECT_FALSE(
manager_.GetLargestAckedPacket(ENCRYPTION_ZERO_RTT).IsInitialized());
ExpectAck(3);
manager_.OnAckFrameStart(QuicPacketNumber(3), QuicTime::Delta::Infinite(),
clock_.Now());
manager_.OnAckRange(QuicPacketNumber(2), QuicPacketNumber(4));
EXPECT_EQ(PACKETS_NEWLY_ACKED,
manager_.OnAckFrameEnd(clock_.Now(), QuicPacketNumber(3),
ENCRYPTION_HANDSHAKE, kEmptyCounts));
EXPECT_EQ(QuicPacketNumber(3),
manager_.GetLargestAckedPacket(ENCRYPTION_HANDSHAKE));
EXPECT_FALSE(
manager_.GetLargestAckedPacket(ENCRYPTION_ZERO_RTT).IsInitialized());
SendDataPacket(4, ENCRYPTION_ZERO_RTT);
SendDataPacket(5, ENCRYPTION_ZERO_RTT);
EXPECT_EQ(QuicPacketNumber(1),
unacked_packets->GetLargestSentRetransmittableOfPacketNumberSpace(
INITIAL_DATA));
EXPECT_EQ(QuicPacketNumber(3),
unacked_packets->GetLargestSentRetransmittableOfPacketNumberSpace(
HANDSHAKE_DATA));
EXPECT_EQ(QuicPacketNumber(5),
unacked_packets->GetLargestSentRetransmittableOfPacketNumberSpace(
APPLICATION_DATA));
ExpectAck(5);
manager_.OnAckFrameStart(QuicPacketNumber(5), QuicTime::Delta::Infinite(),
clock_.Now());
manager_.OnAckRange(QuicPacketNumber(5), QuicPacketNumber(6));
EXPECT_EQ(PACKETS_NEWLY_ACKED,
manager_.OnAckFrameEnd(clock_.Now(), QuicPacketNumber(4),
ENCRYPTION_FORWARD_SECURE, kEmptyCounts));
EXPECT_EQ(QuicPacketNumber(3),
manager_.GetLargestAckedPacket(ENCRYPTION_HANDSHAKE));
EXPECT_EQ(QuicPacketNumber(5),
manager_.GetLargestAckedPacket(ENCRYPTION_ZERO_RTT));
EXPECT_EQ(QuicPacketNumber(5),
manager_.GetLargestAckedPacket(ENCRYPTION_FORWARD_SECURE));
SendDataPacket(6, ENCRYPTION_FORWARD_SECURE);
SendDataPacket(7, ENCRYPTION_FORWARD_SECURE);
SendDataPacket(8, ENCRYPTION_FORWARD_SECURE);
EXPECT_EQ(QuicPacketNumber(1),
unacked_packets->GetLargestSentRetransmittableOfPacketNumberSpace(
INITIAL_DATA));
EXPECT_EQ(QuicPacketNumber(3),
unacked_packets->GetLargestSentRetransmittableOfPacketNumberSpace(
HANDSHAKE_DATA));
EXPECT_EQ(QuicPacketNumber(8),
unacked_packets->GetLargestSentRetransmittableOfPacketNumberSpace(
APPLICATION_DATA));
uint64_t acked[] = {4, 6, 7, 8};
ExpectAcksAndLosses(true, acked, ABSL_ARRAYSIZE(acked), nullptr, 0);
manager_.OnAckFrameStart(QuicPacketNumber(8), QuicTime::Delta::Infinite(),
clock_.Now());
manager_.OnAckRange(QuicPacketNumber(4), QuicPacketNumber(9));
EXPECT_EQ(PACKETS_NEWLY_ACKED,
manager_.OnAckFrameEnd(clock_.Now(), QuicPacketNumber(5),
ENCRYPTION_FORWARD_SECURE, kEmptyCounts));
EXPECT_EQ(QuicPacketNumber(3),
manager_.GetLargestAckedPacket(ENCRYPTION_HANDSHAKE));
EXPECT_EQ(QuicPacketNumber(8),
manager_.GetLargestAckedPacket(ENCRYPTION_ZERO_RTT));
EXPECT_EQ(QuicPacketNumber(8),
manager_.GetLargestAckedPacket(ENCRYPTION_FORWARD_SECURE));
}
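
// An ack that covers packets from a different packet number space is
// rejected.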
TEST_F(QuicSentPacketManagerTest, PacketsGetAckedInWrongPacketNumberSpace) {
manager_.EnableMultiplePacketNumberSpacesSupport();
SendDataPacket(1, ENCRYPTION_INITIAL);
SendDataPacket(2, ENCRYPTION_HANDSHAKE);
SendDataPacket(3, ENCRYPTION_HANDSHAKE);
manager_.OnAckFrameStart(QuicPacketNumber(3), QuicTime::Delta::Infinite(),
clock_.Now());
manager_.OnAckRange(QuicPacketNumber(1), QuicPacketNumber(4));
EXPECT_EQ(PACKETS_ACKED_IN_WRONG_PACKET_NUMBER_SPACE,
manager_.OnAckFrameEnd(clock_.Now(), QuicPacketNumber(1),
ENCRYPTION_INITIAL, kEmptyCounts));
}
TEST_F(QuicSentPacketManagerTest, PacketsGetAckedInWrongPacketNumberSpace2) {
manager_.EnableMultiplePacketNumberSpacesSupport();
SendDataPacket(1, ENCRYPTION_INITIAL);
SendDataPacket(2, ENCRYPTION_HANDSHAKE);
SendDataPacket(3, ENCRYPTION_HANDSHAKE);
manager_.OnAckFrameStart(QuicPacketNumber(3), QuicTime::Delta::Infinite(),
clock_.Now());
manager_.OnAckRange(QuicPacketNumber(1), QuicPacketNumber(4));
EXPECT_EQ(PACKETS_ACKED_IN_WRONG_PACKET_NUMBER_SPACE,
manager_.OnAckFrameEnd(clock_.Now(), QuicPacketNumber(1),
ENCRYPTION_HANDSHAKE, kEmptyCounts));
}
TEST_F(QuicSentPacketManagerTest,
ToleratePacketsGetAckedInWrongPacketNumberSpace) {
manager_.EnableMultiplePacketNumberSpacesSupport();
SendDataPacket(1, ENCRYPTION_INITIAL);
ExpectAck(1);
manager_.OnAckFrameStart(QuicPacketNumber(1), QuicTime::Delta::Infinite(),
clock_.Now());
manager_.OnAckRange(QuicPacketNumber(1), QuicPacketNumber(2));
EXPECT_EQ(PACKETS_NEWLY_ACKED,
manager_.OnAckFrameEnd(clock_.Now(), QuicPacketNumber(1),
ENCRYPTION_INITIAL, kEmptyCounts));
SendDataPacket(2, ENCRYPTION_HANDSHAKE);
SendDataPacket(3, ENCRYPTION_HANDSHAKE);
uint64_t acked[] = {2, 3};
ExpectAcksAndLosses(true, acked, ABSL_ARRAYSIZE(acked), nullptr, 0);
manager_.OnAckFrameStart(QuicPacketNumber(3), QuicTime::Delta::Infinite(),
clock_.Now());
manager_.OnAckRange(QuicPacketNumber(1), QuicPacketNumber(4));
EXPECT_EQ(PACKETS_NEWLY_ACKED,
manager_.OnAckFrameEnd(clock_.Now(), QuicPacketNumber(2),
ENCRYPTION_HANDSHAKE, kEmptyCounts));
}
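
// The probe timeout (PTO) delay is srtt + kPtoRttvarMultiplier * rttvar +
// the peer's max ack delay, and doubles with each consecutive PTO.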
TEST_F(QuicSentPacketManagerTest, ComputingProbeTimeout) {
EXPECT_CALL(*send_algorithm_, PacingRate(_))
.WillRepeatedly(Return(QuicBandwidth::Zero()));
EXPECT_CALL(*send_algorithm_, GetCongestionWindow())
.WillRepeatedly(Return(10 * kDefaultTCPMSS));
RttStats* rtt_stats = const_cast<RttStats*>(manager_.GetRttStats());
rtt_stats->UpdateRtt(QuicTime::Delta::FromMilliseconds(100),
QuicTime::Delta::Zero(), QuicTime::Zero());
QuicTime::Delta srtt = rtt_stats->smoothed_rtt();
SendDataPacket(1, ENCRYPTION_FORWARD_SECURE);
QuicTime::Delta expected_pto_delay =
srtt + kPtoRttvarMultiplier * rtt_stats->mean_deviation() +
QuicTime::Delta::FromMilliseconds(GetDefaultDelayedAckTimeMs());
QuicTime packet1_sent_time = clock_.Now();
EXPECT_EQ(clock_.Now() + expected_pto_delay,
manager_.GetRetransmissionTime());
clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(10));
SendDataPacket(2, ENCRYPTION_FORWARD_SECURE);
QuicTime deadline = packet1_sent_time + expected_pto_delay;
EXPECT_EQ(deadline, manager_.GetRetransmissionTime());
EXPECT_EQ(0u, stats_.pto_count);
clock_.AdvanceTime(deadline - clock_.Now());
manager_.OnRetransmissionTimeout();
EXPECT_EQ(QuicTime::Delta::Zero(), manager_.TimeUntilSend(clock_.Now()));
EXPECT_EQ(1u, stats_.pto_count);
EXPECT_EQ(0u, stats_.max_consecutive_rto_with_forward_progress);
EXPECT_CALL(notifier_, RetransmitFrames(_, _))
.WillOnce(WithArgs<1>(Invoke([this](TransmissionType type) {
return RetransmitDataPacket(3, type, ENCRYPTION_FORWARD_SECURE);
})));
manager_.MaybeSendProbePacket();
QuicTime sent_time = clock_.Now();
EXPECT_EQ(sent_time + expected_pto_delay * 2,
manager_.GetRetransmissionTime());
uint64_t acked[] = {1, 2};
ExpectAcksAndLosses(true, acked, ABSL_ARRAYSIZE(acked), nullptr, 0);
manager_.OnAckFrameStart(QuicPacketNumber(2), QuicTime::Delta::Infinite(),
clock_.Now());
manager_.OnAckRange(QuicPacketNumber(1), QuicPacketNumber(3));
EXPECT_EQ(PACKETS_NEWLY_ACKED,
manager_.OnAckFrameEnd(clock_.Now(), QuicPacketNumber(1),
ENCRYPTION_FORWARD_SECURE, kEmptyCounts));
expected_pto_delay =
rtt_stats->SmoothedOrInitialRtt() +
std::max(kPtoRttvarMultiplier * rtt_stats->mean_deviation(),
QuicTime::Delta::FromMilliseconds(1)) +
QuicTime::Delta::FromMilliseconds(GetDefaultDelayedAckTimeMs());
EXPECT_EQ(sent_time + expected_pto_delay, manager_.GetRetransmissionTime());
EXPECT_EQ(1u, stats_.max_consecutive_rto_with_forward_progress);
}
TEST_F(QuicSentPacketManagerTest, SendOneProbePacket) {
EXPECT_CALL(*send_algorithm_, PacingRate(_))
.WillRepeatedly(Return(QuicBandwidth::Zero()));
EXPECT_CALL(*send_algorithm_, GetCongestionWindow())
.WillRepeatedly(Return(10 * kDefaultTCPMSS));
SendDataPacket(1, ENCRYPTION_FORWARD_SECURE);
QuicTime packet1_sent_time = clock_.Now();
clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(10));
SendDataPacket(2, ENCRYPTION_FORWARD_SECURE);
RttStats* rtt_stats = const_cast<RttStats*>(manager_.GetRttStats());
rtt_stats->UpdateRtt(QuicTime::Delta::FromMilliseconds(100),
QuicTime::Delta::Zero(), QuicTime::Zero());
QuicTime::Delta srtt = rtt_stats->smoothed_rtt();
QuicTime::Delta expected_pto_delay =
srtt + kPtoRttvarMultiplier * rtt_stats->mean_deviation() +
QuicTime::Delta::FromMilliseconds(GetDefaultDelayedAckTimeMs());
QuicTime deadline = packet1_sent_time + expected_pto_delay;
EXPECT_EQ(deadline, manager_.GetRetransmissionTime());
clock_.AdvanceTime(deadline - clock_.Now());
manager_.OnRetransmissionTimeout();
EXPECT_EQ(QuicTime::Delta::Zero(), manager_.TimeUntilSend(clock_.Now()));
EXPECT_CALL(notifier_, RetransmitFrames(_, _))
.WillOnce(WithArgs<1>(Invoke([this](TransmissionType type) {
return RetransmitDataPacket(3, type, ENCRYPTION_FORWARD_SECURE);
})));
manager_.MaybeSendProbePacket();
}
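// A client keeps the retransmission timer armed in PTO mode even after all
// of its initial crypto data is acked, and only disarms it once handshake
// data is acked too.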
TEST_F(QuicSentPacketManagerTest, DisableHandshakeModeClient) {
QuicSentPacketManagerPeer::SetPerspective(&manager_, Perspective::IS_CLIENT);
manager_.EnableMultiplePacketNumberSpacesSupport();
SendCryptoPacket(1);
EXPECT_NE(QuicTime::Zero(), manager_.GetRetransmissionTime());
ExpectAck(1);
manager_.OnAckFrameStart(QuicPacketNumber(1), QuicTime::Delta::Infinite(),
clock_.Now());
manager_.OnAckRange(QuicPacketNumber(1), QuicPacketNumber(2));
EXPECT_EQ(PACKETS_NEWLY_ACKED,
manager_.OnAckFrameEnd(clock_.Now(), QuicPacketNumber(1),
ENCRYPTION_INITIAL, kEmptyCounts));
EXPECT_EQ(0u, manager_.GetBytesInFlight());
EXPECT_NE(QuicTime::Zero(), manager_.GetRetransmissionTime());
EXPECT_EQ(QuicSentPacketManager::PTO_MODE,
manager_.OnRetransmissionTimeout());
SendDataPacket(2, ENCRYPTION_HANDSHAKE);
ExpectAck(2);
manager_.OnAckFrameStart(QuicPacketNumber(2), QuicTime::Delta::Infinite(),
clock_.Now());
manager_.OnAckRange(QuicPacketNumber(2), QuicPacketNumber(3));
EXPECT_EQ(PACKETS_NEWLY_ACKED,
manager_.OnAckFrameEnd(clock_.Now(), QuicPacketNumber(2),
ENCRYPTION_HANDSHAKE, kEmptyCounts));
EXPECT_EQ(QuicTime::Zero(), manager_.GetRetransmissionTime());
}
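// A server, by contrast, disarms the timer as soon as its crypto data is
// acked and nothing is left in flight.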
TEST_F(QuicSentPacketManagerTest, DisableHandshakeModeServer) {
manager_.EnableIetfPtoAndLossDetection();
SendCryptoPacket(1);
EXPECT_NE(QuicTime::Zero(), manager_.GetRetransmissionTime());
ExpectAck(1);
manager_.OnAckFrameStart(QuicPacketNumber(1), QuicTime::Delta::Infinite(),
clock_.Now());
manager_.OnAckRange(QuicPacketNumber(1), QuicPacketNumber(2));
EXPECT_EQ(PACKETS_NEWLY_ACKED,
manager_.OnAckFrameEnd(clock_.Now(), QuicPacketNumber(1),
ENCRYPTION_INITIAL, kEmptyCounts));
EXPECT_EQ(0u, manager_.GetBytesInFlight());
EXPECT_EQ(QuicTime::Zero(), manager_.GetRetransmissionTime());
}
TEST_F(QuicSentPacketManagerTest, PtoTimeoutRttVarMultiple) {
EXPECT_CALL(*send_algorithm_, PacingRate(_))
.WillRepeatedly(Return(QuicBandwidth::Zero()));
EXPECT_CALL(*send_algorithm_, GetCongestionWindow())
.WillRepeatedly(Return(10 * kDefaultTCPMSS));
RttStats* rtt_stats = const_cast<RttStats*>(manager_.GetRttStats());
rtt_stats->UpdateRtt(QuicTime::Delta::FromMilliseconds(100),
QuicTime::Delta::Zero(), QuicTime::Zero());
QuicTime::Delta srtt = rtt_stats->smoothed_rtt();
SendDataPacket(1, ENCRYPTION_FORWARD_SECURE);
QuicTime::Delta expected_pto_delay =
srtt + kPtoRttvarMultiplier * rtt_stats->mean_deviation() +
QuicTime::Delta::FromMilliseconds(GetDefaultDelayedAckTimeMs());
EXPECT_EQ(clock_.Now() + expected_pto_delay,
manager_.GetRetransmissionTime());
}
TEST_F(QuicSentPacketManagerTest, IW10ForUpAndDown) {
QuicConfig config;
QuicTagVector options;
options.push_back(kBWS5);
QuicConfigPeer::SetReceivedConnectionOptions(&config, options);
EXPECT_CALL(*send_algorithm_, SetFromConfig(_, _));
EXPECT_CALL(*send_algorithm_, SetInitialCongestionWindowInPackets(10));
EXPECT_CALL(*network_change_visitor_, OnCongestionChange());
manager_.SetFromConfig(config);
EXPECT_EQ(10u, manager_.initial_congestion_window());
}
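// Client-side PTO across INITIAL, HANDSHAKE, 0-RTT and 1-RTT sends: only the
// application-data space adds the peer's max ack delay, and neutered INITIAL
// packets stop contributing to the deadline.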
TEST_F(QuicSentPacketManagerTest, ClientMultiplePacketNumberSpacePtoTimeout) {
manager_.EnableMultiplePacketNumberSpacesSupport();
EXPECT_CALL(*send_algorithm_, PacingRate(_))
.WillRepeatedly(Return(QuicBandwidth::Zero()));
EXPECT_CALL(*send_algorithm_, GetCongestionWindow())
.WillRepeatedly(Return(10 * kDefaultTCPMSS));
RttStats* rtt_stats = const_cast<RttStats*>(manager_.GetRttStats());
rtt_stats->UpdateRtt(QuicTime::Delta::FromMilliseconds(100),
QuicTime::Delta::Zero(), QuicTime::Zero());
QuicTime::Delta srtt = rtt_stats->smoothed_rtt();
QuicSentPacketManagerPeer::SetPerspective(&manager_, Perspective::IS_CLIENT);
SendDataPacket(1, ENCRYPTION_INITIAL);
QuicTime::Delta expected_pto_delay =
srtt + kPtoRttvarMultiplier * rtt_stats->mean_deviation() +
QuicTime::Delta::Zero();
EXPECT_EQ(clock_.Now() + expected_pto_delay,
manager_.GetRetransmissionTime());
clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(10));
EXPECT_CALL(notifier_, IsFrameOutstanding(_)).WillRepeatedly(Return(false));
manager_.NeuterUnencryptedPackets();
EXPECT_CALL(notifier_, IsFrameOutstanding(_)).WillRepeatedly(Return(true));
SendDataPacket(2, ENCRYPTION_HANDSHAKE);
EXPECT_EQ(clock_.Now() + expected_pto_delay,
manager_.GetRetransmissionTime());
clock_.AdvanceTime(expected_pto_delay);
manager_.OnRetransmissionTimeout();
EXPECT_EQ(QuicTime::Delta::Zero(), manager_.TimeUntilSend(clock_.Now()));
EXPECT_EQ(1u, stats_.pto_count);
EXPECT_EQ(1u, stats_.crypto_retransmit_count);
EXPECT_CALL(notifier_, RetransmitFrames(_, _))
.WillOnce(WithArgs<1>(Invoke([this](TransmissionType type) {
return RetransmitDataPacket(3, type, ENCRYPTION_HANDSHAKE);
})));
manager_.MaybeSendProbePacket();
const QuicTime packet3_sent_time = clock_.Now();
EXPECT_EQ(packet3_sent_time + expected_pto_delay * 2,
manager_.GetRetransmissionTime());
clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(10));
SendDataPacket(4, ENCRYPTION_ZERO_RTT);
const QuicTime packet4_sent_time = clock_.Now();
EXPECT_EQ(packet3_sent_time + expected_pto_delay * 2,
manager_.GetRetransmissionTime());
clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(10));
SendDataPacket(5, ENCRYPTION_HANDSHAKE);
const QuicTime packet5_sent_time = clock_.Now();
EXPECT_EQ(clock_.Now() + expected_pto_delay * 2,
manager_.GetRetransmissionTime());
clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(10));
SendDataPacket(6, ENCRYPTION_FORWARD_SECURE);
EXPECT_EQ(packet5_sent_time + expected_pto_delay * 2,
manager_.GetRetransmissionTime());
clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(10));
const QuicTime packet7_sent_time = clock_.Now();
SendDataPacket(7, ENCRYPTION_HANDSHAKE);
expected_pto_delay =
srtt + kPtoRttvarMultiplier * rtt_stats->mean_deviation();
EXPECT_EQ(packet7_sent_time + expected_pto_delay * 2,
manager_.GetRetransmissionTime());
manager_.SetHandshakeConfirmed();
expected_pto_delay =
srtt + kPtoRttvarMultiplier * rtt_stats->mean_deviation() +
QuicTime::Delta::FromMilliseconds(GetDefaultDelayedAckTimeMs());
EXPECT_EQ(packet4_sent_time + expected_pto_delay,
manager_.GetRetransmissionTime());
}
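// Server-side counterpart: after handshake confirmation the PTO anchors on
// the earliest in-flight application-data packet and regains the max ack
// delay term.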
TEST_F(QuicSentPacketManagerTest, ServerMultiplePacketNumberSpacePtoTimeout) {
manager_.EnableMultiplePacketNumberSpacesSupport();
EXPECT_CALL(*send_algorithm_, PacingRate(_))
.WillRepeatedly(Return(QuicBandwidth::Zero()));
EXPECT_CALL(*send_algorithm_, GetCongestionWindow())
.WillRepeatedly(Return(10 * kDefaultTCPMSS));
RttStats* rtt_stats = const_cast<RttStats*>(manager_.GetRttStats());
rtt_stats->UpdateRtt(QuicTime::Delta::FromMilliseconds(100),
QuicTime::Delta::Zero(), QuicTime::Zero());
QuicTime::Delta srtt = rtt_stats->smoothed_rtt();
SendDataPacket(1, ENCRYPTION_INITIAL);
const QuicTime packet1_sent_time = clock_.Now();
QuicTime::Delta expected_pto_delay =
srtt + kPtoRttvarMultiplier * rtt_stats->mean_deviation() +
QuicTime::Delta::Zero();
EXPECT_EQ(packet1_sent_time + expected_pto_delay,
manager_.GetRetransmissionTime());
clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(10));
SendDataPacket(2, ENCRYPTION_HANDSHAKE);
const QuicTime packet2_sent_time = clock_.Now();
EXPECT_EQ(packet1_sent_time + expected_pto_delay,
manager_.GetRetransmissionTime());
EXPECT_CALL(notifier_, IsFrameOutstanding(_)).WillRepeatedly(Return(false));
manager_.NeuterUnencryptedPackets();
clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(10));
SendDataPacket(3, ENCRYPTION_FORWARD_SECURE);
const QuicTime packet3_sent_time = clock_.Now();
EXPECT_EQ(packet2_sent_time + expected_pto_delay,
manager_.GetRetransmissionTime());
clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(10));
SendDataPacket(4, ENCRYPTION_HANDSHAKE);
EXPECT_EQ(clock_.Now() + expected_pto_delay,
manager_.GetRetransmissionTime());
manager_.SetHandshakeConfirmed();
expected_pto_delay =
srtt + kPtoRttvarMultiplier * rtt_stats->mean_deviation() +
QuicTime::Delta::FromMilliseconds(GetDefaultDelayedAckTimeMs());
EXPECT_EQ(packet3_sent_time + expected_pto_delay,
manager_.GetRetransmissionTime());
}
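// "Left edge" PTO: the deadline is computed from the earliest in-flight
// packet, so sending more data must not push it out.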
TEST_F(QuicSentPacketManagerTest, ComputingProbeTimeoutByLeftEdge) {
EXPECT_CALL(*send_algorithm_, CanSend(_)).WillRepeatedly(Return(true));
EXPECT_CALL(*send_algorithm_, PacingRate(_))
.WillRepeatedly(Return(QuicBandwidth::Zero()));
EXPECT_CALL(*send_algorithm_, GetCongestionWindow())
.WillRepeatedly(Return(10 * kDefaultTCPMSS));
RttStats* rtt_stats = const_cast<RttStats*>(manager_.GetRttStats());
rtt_stats->UpdateRtt(QuicTime::Delta::FromMilliseconds(100),
QuicTime::Delta::Zero(), QuicTime::Zero());
QuicTime::Delta srtt = rtt_stats->smoothed_rtt();
SendDataPacket(1, ENCRYPTION_FORWARD_SECURE);
QuicTime::Delta expected_pto_delay =
srtt + kPtoRttvarMultiplier * rtt_stats->mean_deviation() +
QuicTime::Delta::FromMilliseconds(GetDefaultDelayedAckTimeMs());
const QuicTime packet1_sent_time = clock_.Now();
EXPECT_EQ(packet1_sent_time + expected_pto_delay,
manager_.GetRetransmissionTime());
clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(10));
SendDataPacket(2, ENCRYPTION_FORWARD_SECURE);
EXPECT_EQ(packet1_sent_time + expected_pto_delay,
manager_.GetRetransmissionTime());
EXPECT_EQ(0u, stats_.pto_count);
clock_.AdvanceTime(expected_pto_delay);
manager_.OnRetransmissionTimeout();
EXPECT_EQ(QuicTime::Delta::Zero(), manager_.TimeUntilSend(clock_.Now()));
EXPECT_EQ(1u, stats_.pto_count);
EXPECT_CALL(notifier_, RetransmitFrames(_, _))
.WillOnce(WithArgs<1>(Invoke([this](TransmissionType type) {
return RetransmitDataPacket(3, type, ENCRYPTION_FORWARD_SECURE);
})));
manager_.MaybeSendProbePacket();
QuicTime packet3_sent_time = clock_.Now();
EXPECT_EQ(packet3_sent_time + expected_pto_delay * 2,
manager_.GetRetransmissionTime());
uint64_t acked[] = {1, 2};
ExpectAcksAndLosses(true, acked, ABSL_ARRAYSIZE(acked), nullptr, 0);
manager_.OnAckFrameStart(QuicPacketNumber(2), QuicTime::Delta::Infinite(),
clock_.Now());
manager_.OnAckRange(QuicPacketNumber(1), QuicPacketNumber(3));
EXPECT_EQ(PACKETS_NEWLY_ACKED,
manager_.OnAckFrameEnd(clock_.Now(), QuicPacketNumber(1),
ENCRYPTION_FORWARD_SECURE, kEmptyCounts));
expected_pto_delay =
rtt_stats->SmoothedOrInitialRtt() +
std::max(kPtoRttvarMultiplier * rtt_stats->mean_deviation(),
QuicTime::Delta::FromMilliseconds(1)) +
QuicTime::Delta::FromMilliseconds(GetDefaultDelayedAckTimeMs());
EXPECT_EQ(packet3_sent_time + expected_pto_delay,
manager_.GetRetransmissionTime());
}
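// Left-edge variant where the deadline would be nearly due at send time; the
// timer is then floored at kFirstPtoSrttMultiplier * srtt from now.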
TEST_F(QuicSentPacketManagerTest, ComputingProbeTimeoutByLeftEdge2) {
EXPECT_CALL(*send_algorithm_, CanSend(_)).WillRepeatedly(Return(true));
EXPECT_CALL(*send_algorithm_, PacingRate(_))
.WillRepeatedly(Return(QuicBandwidth::Zero()));
EXPECT_CALL(*send_algorithm_, GetCongestionWindow())
.WillRepeatedly(Return(10 * kDefaultTCPMSS));
RttStats* rtt_stats = const_cast<RttStats*>(manager_.GetRttStats());
rtt_stats->UpdateRtt(QuicTime::Delta::FromMilliseconds(100),
QuicTime::Delta::Zero(), QuicTime::Zero());
QuicTime::Delta srtt = rtt_stats->smoothed_rtt();
SendDataPacket(1, ENCRYPTION_FORWARD_SECURE);
QuicTime::Delta expected_pto_delay =
srtt + kPtoRttvarMultiplier * rtt_stats->mean_deviation() +
QuicTime::Delta::FromMilliseconds(GetDefaultDelayedAckTimeMs());
const QuicTime packet1_sent_time = clock_.Now();
EXPECT_EQ(packet1_sent_time + expected_pto_delay,
manager_.GetRetransmissionTime());
clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(
expected_pto_delay.ToMilliseconds() - 10));
SendDataPacket(2, ENCRYPTION_FORWARD_SECURE);
expected_pto_delay = kFirstPtoSrttMultiplier * rtt_stats->smoothed_rtt();
EXPECT_EQ(clock_.Now() + expected_pto_delay,
manager_.GetRetransmissionTime());
EXPECT_EQ(0u, stats_.pto_count);
clock_.AdvanceTime(expected_pto_delay);
manager_.OnRetransmissionTimeout();
EXPECT_EQ(QuicTime::Delta::Zero(), manager_.TimeUntilSend(clock_.Now()));
EXPECT_EQ(1u, stats_.pto_count);
EXPECT_CALL(notifier_, RetransmitFrames(_, _))
.WillOnce(WithArgs<1>(Invoke([this](TransmissionType type) {
return RetransmitDataPacket(3, type, ENCRYPTION_FORWARD_SECURE);
})));
manager_.MaybeSendProbePacket();
expected_pto_delay =
srtt + kPtoRttvarMultiplier * rtt_stats->mean_deviation() +
QuicTime::Delta::FromMilliseconds(GetDefaultDelayedAckTimeMs());
QuicTime packet3_sent_time = clock_.Now();
EXPECT_EQ(packet3_sent_time + expected_pto_delay * 2,
manager_.GetRetransmissionTime());
uint64_t acked[] = {1, 2};
ExpectAcksAndLosses(true, acked, ABSL_ARRAYSIZE(acked), nullptr, 0);
manager_.OnAckFrameStart(QuicPacketNumber(2), QuicTime::Delta::Infinite(),
clock_.Now());
manager_.OnAckRange(QuicPacketNumber(1), QuicPacketNumber(3));
EXPECT_EQ(PACKETS_NEWLY_ACKED,
manager_.OnAckFrameEnd(clock_.Now(), QuicPacketNumber(1),
ENCRYPTION_FORWARD_SECURE, kEmptyCounts));
expected_pto_delay =
rtt_stats->SmoothedOrInitialRtt() +
std::max(kPtoRttvarMultiplier * rtt_stats->mean_deviation(),
QuicTime::Delta::FromMilliseconds(1)) +
QuicTime::Delta::FromMilliseconds(GetDefaultDelayedAckTimeMs());
EXPECT_EQ(packet3_sent_time + expected_pto_delay,
manager_.GetRetransmissionTime());
}
TEST_F(QuicSentPacketManagerTest,
ComputingProbeTimeoutByLeftEdgeMultiplePacketNumberSpaces) {
manager_.EnableMultiplePacketNumberSpacesSupport();
EXPECT_CALL(*send_algorithm_, CanSend(_)).WillRepeatedly(Return(true));
EXPECT_CALL(*send_algorithm_, PacingRate(_))
.WillRepeatedly(Return(QuicBandwidth::Zero()));
EXPECT_CALL(*send_algorithm_, GetCongestionWindow())
.WillRepeatedly(Return(10 * kDefaultTCPMSS));
RttStats* rtt_stats = const_cast<RttStats*>(manager_.GetRttStats());
rtt_stats->UpdateRtt(QuicTime::Delta::FromMilliseconds(100),
QuicTime::Delta::Zero(), QuicTime::Zero());
QuicTime::Delta srtt = rtt_stats->smoothed_rtt();
SendDataPacket(1, ENCRYPTION_INITIAL);
const QuicTime packet1_sent_time = clock_.Now();
QuicTime::Delta expected_pto_delay =
srtt + kPtoRttvarMultiplier * rtt_stats->mean_deviation() +
QuicTime::Delta::Zero();
EXPECT_EQ(packet1_sent_time + expected_pto_delay,
manager_.GetRetransmissionTime());
clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(10));
SendDataPacket(2, ENCRYPTION_HANDSHAKE);
const QuicTime packet2_sent_time = clock_.Now();
EXPECT_EQ(packet1_sent_time + expected_pto_delay,
manager_.GetRetransmissionTime());
EXPECT_CALL(notifier_, IsFrameOutstanding(_)).WillRepeatedly(Return(false));
manager_.NeuterUnencryptedPackets();
clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(10));
SendDataPacket(3, ENCRYPTION_FORWARD_SECURE);
const QuicTime packet3_sent_time = clock_.Now();
EXPECT_EQ(packet2_sent_time + expected_pto_delay,
manager_.GetRetransmissionTime());
clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(10));
SendDataPacket(4, ENCRYPTION_HANDSHAKE);
EXPECT_EQ(clock_.Now() + expected_pto_delay,
manager_.GetRetransmissionTime());
manager_.SetHandshakeConfirmed();
expected_pto_delay =
srtt + kPtoRttvarMultiplier * rtt_stats->mean_deviation() +
QuicTime::Delta::FromMilliseconds(GetDefaultDelayedAckTimeMs());
EXPECT_EQ(packet3_sent_time + expected_pto_delay,
manager_.GetRetransmissionTime());
clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(10));
SendDataPacket(5, ENCRYPTION_FORWARD_SECURE);
EXPECT_EQ(packet3_sent_time + expected_pto_delay,
manager_.GetRetransmissionTime());
}
TEST_F(QuicSentPacketManagerTest,
ComputingProbeTimeoutByLeftEdge2MultiplePacketNumberSpaces) {
manager_.EnableMultiplePacketNumberSpacesSupport();
EXPECT_CALL(*send_algorithm_, CanSend(_)).WillRepeatedly(Return(true));
EXPECT_CALL(*send_algorithm_, PacingRate(_))
.WillRepeatedly(Return(QuicBandwidth::Zero()));
EXPECT_CALL(*send_algorithm_, GetCongestionWindow())
.WillRepeatedly(Return(10 * kDefaultTCPMSS));
RttStats* rtt_stats = const_cast<RttStats*>(manager_.GetRttStats());
rtt_stats->UpdateRtt(QuicTime::Delta::FromMilliseconds(100),
QuicTime::Delta::Zero(), QuicTime::Zero());
QuicTime::Delta srtt = rtt_stats->smoothed_rtt();
SendDataPacket(1, ENCRYPTION_INITIAL);
const QuicTime packet1_sent_time = clock_.Now();
QuicTime::Delta expected_pto_delay =
srtt + kPtoRttvarMultiplier * rtt_stats->mean_deviation() +
QuicTime::Delta::Zero();
EXPECT_EQ(packet1_sent_time + expected_pto_delay,
manager_.GetRetransmissionTime());
clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(10));
SendDataPacket(2, ENCRYPTION_HANDSHAKE);
const QuicTime packet2_sent_time = clock_.Now();
EXPECT_EQ(packet1_sent_time + expected_pto_delay,
manager_.GetRetransmissionTime());
EXPECT_CALL(notifier_, IsFrameOutstanding(_)).WillRepeatedly(Return(false));
manager_.NeuterUnencryptedPackets();
clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(10));
SendDataPacket(3, ENCRYPTION_FORWARD_SECURE);
const QuicTime packet3_sent_time = clock_.Now();
EXPECT_EQ(packet2_sent_time + expected_pto_delay,
manager_.GetRetransmissionTime());
clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(10));
SendDataPacket(4, ENCRYPTION_HANDSHAKE);
EXPECT_EQ(clock_.Now() + expected_pto_delay,
manager_.GetRetransmissionTime());
manager_.SetHandshakeConfirmed();
expected_pto_delay =
srtt + kPtoRttvarMultiplier * rtt_stats->mean_deviation() +
QuicTime::Delta::FromMilliseconds(GetDefaultDelayedAckTimeMs());
EXPECT_EQ(packet3_sent_time + expected_pto_delay,
manager_.GetRetransmissionTime());
clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(
expected_pto_delay.ToMilliseconds() - 10));
SendDataPacket(5, ENCRYPTION_FORWARD_SECURE);
EXPECT_EQ(clock_.Now() + kFirstPtoSrttMultiplier * rtt_stats->smoothed_rtt(),
manager_.GetRetransmissionTime());
}
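// Handshake confirmation acks and neuters outstanding handshake packets with
// a zero ack delay and no receive timestamp.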
TEST_F(QuicSentPacketManagerTest, SetHandshakeConfirmed) {
QuicSentPacketManagerPeer::SetPerspective(&manager_, Perspective::IS_CLIENT);
manager_.EnableMultiplePacketNumberSpacesSupport();
SendDataPacket(1, ENCRYPTION_INITIAL);
SendDataPacket(2, ENCRYPTION_HANDSHAKE);
EXPECT_CALL(notifier_, OnFrameAcked(_, _, _))
.WillOnce(
          Invoke([](const QuicFrame& /*frame*/, QuicTime::Delta ack_delay_time,
QuicTime receive_timestamp) {
EXPECT_TRUE(ack_delay_time.IsZero());
EXPECT_EQ(receive_timestamp, QuicTime::Zero());
return true;
}));
EXPECT_CALL(*send_algorithm_, OnPacketNeutered(QuicPacketNumber(2))).Times(1);
manager_.SetHandshakeConfirmed();
}
TEST_F(QuicSentPacketManagerTest, NeuterUnencryptedPackets) {
SendCryptoPacket(1);
SendPingPacket(2, ENCRYPTION_INITIAL);
EXPECT_CALL(notifier_, OnFrameAcked(_, _, _))
.Times(2)
.WillOnce(Return(false))
.WillOnce(Return(true));
EXPECT_CALL(notifier_, IsFrameOutstanding(_)).WillRepeatedly(Return(false));
EXPECT_CALL(*send_algorithm_, OnPacketNeutered(QuicPacketNumber(1))).Times(1);
manager_.NeuterUnencryptedPackets();
}
TEST_F(QuicSentPacketManagerTest, MarkInitialPacketsForRetransmission) {
SendCryptoPacket(1);
SendPingPacket(2, ENCRYPTION_HANDSHAKE);
EXPECT_CALL(notifier_, OnFrameLost(_)).Times(1);
manager_.MarkInitialPacketsForRetransmission();
}
TEST_F(QuicSentPacketManagerTest, NoPacketThresholdDetectionForRuntPackets) {
EXPECT_TRUE(
QuicSentPacketManagerPeer::UsePacketThresholdForRuntPackets(&manager_));
QuicConfig config;
QuicTagVector options;
options.push_back(kRUNT);
QuicConfigPeer::SetReceivedConnectionOptions(&config, options);
EXPECT_CALL(*send_algorithm_, SetFromConfig(_, _));
EXPECT_CALL(*network_change_visitor_, OnCongestionChange());
manager_.SetFromConfig(config);
EXPECT_FALSE(
QuicSentPacketManagerPeer::UsePacketThresholdForRuntPackets(&manager_));
}
TEST_F(QuicSentPacketManagerTest, GetPathDegradingDelayDefaultPTO) {
QuicSentPacketManagerPeer::SetPerspective(&manager_, Perspective::IS_CLIENT);
QuicTime::Delta expected_delay = 4 * manager_.GetPtoDelay();
EXPECT_EQ(expected_delay, manager_.GetPathDegradingDelay());
}
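// With the kIGNP connection option, an acked PING does not produce an RTT
// sample; only the later data packet's ack updates smoothed RTT.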
TEST_F(QuicSentPacketManagerTest, ClientsIgnorePings) {
QuicSentPacketManagerPeer::SetPerspective(&manager_, Perspective::IS_CLIENT);
QuicConfig client_config;
QuicTagVector options;
QuicTagVector client_options;
client_options.push_back(kIGNP);
client_config.SetConnectionOptionsToSend(options);
client_config.SetClientConnectionOptions(client_options);
EXPECT_CALL(*send_algorithm_, SetFromConfig(_, _));
EXPECT_CALL(*network_change_visitor_, OnCongestionChange());
manager_.SetFromConfig(client_config);
EXPECT_CALL(*send_algorithm_, PacingRate(_))
.WillRepeatedly(Return(QuicBandwidth::Zero()));
EXPECT_CALL(*send_algorithm_, GetCongestionWindow())
.WillRepeatedly(Return(10 * kDefaultTCPMSS));
EXPECT_CALL(*send_algorithm_, CanSend(_)).WillRepeatedly(Return(true));
SendPingPacket(1, ENCRYPTION_INITIAL);
EXPECT_EQ(QuicTime::Zero(), manager_.GetRetransmissionTime());
SendDataPacket(2, ENCRYPTION_INITIAL);
EXPECT_NE(QuicTime::Zero(), manager_.GetRetransmissionTime());
uint64_t acked[] = {1};
  ExpectAcksAndLosses(false, acked, ABSL_ARRAYSIZE(acked), nullptr, 0);
clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(90));
manager_.OnAckFrameStart(QuicPacketNumber(1), QuicTime::Delta::Infinite(),
clock_.Now());
manager_.OnAckRange(QuicPacketNumber(1), QuicPacketNumber(2));
EXPECT_EQ(PACKETS_NEWLY_ACKED,
manager_.OnAckFrameEnd(clock_.Now(), QuicPacketNumber(1),
ENCRYPTION_INITIAL, kEmptyCounts));
RttStats* rtt_stats = const_cast<RttStats*>(manager_.GetRttStats());
EXPECT_TRUE(rtt_stats->smoothed_rtt().IsZero());
ExpectAck(2);
clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(10));
manager_.OnAckFrameStart(QuicPacketNumber(2), QuicTime::Delta::Infinite(),
clock_.Now());
manager_.OnAckRange(QuicPacketNumber(1), QuicPacketNumber(3));
EXPECT_EQ(PACKETS_NEWLY_ACKED,
manager_.OnAckFrameEnd(clock_.Now(), QuicPacketNumber(2),
ENCRYPTION_INITIAL, kEmptyCounts));
EXPECT_EQ(QuicTime::Delta::FromMilliseconds(100), rtt_stats->smoothed_rtt());
}
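// With no RTT sample yet, the crypto PTO falls back to 3x the initial RTT
// and backs off exponentially on each timeout.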
TEST_F(QuicSentPacketManagerTest, ExponentialBackoffWithNoRttMeasurement) {
QuicSentPacketManagerPeer::SetPerspective(&manager_, Perspective::IS_CLIENT);
manager_.EnableMultiplePacketNumberSpacesSupport();
RttStats* rtt_stats = const_cast<RttStats*>(manager_.GetRttStats());
EXPECT_EQ(QuicTime::Delta::FromMilliseconds(kInitialRttMs),
rtt_stats->initial_rtt());
EXPECT_TRUE(rtt_stats->smoothed_rtt().IsZero());
SendCryptoPacket(1);
QuicTime::Delta expected_pto_delay =
QuicTime::Delta::FromMilliseconds(3 * kInitialRttMs);
EXPECT_EQ(clock_.Now() + expected_pto_delay,
manager_.GetRetransmissionTime());
clock_.AdvanceTime(expected_pto_delay);
manager_.OnRetransmissionTimeout();
EXPECT_CALL(notifier_, RetransmitFrames(_, _))
.WillOnce(
WithArgs<1>(Invoke([this]() { return RetransmitCryptoPacket(3); })));
manager_.MaybeSendProbePacket();
EXPECT_EQ(clock_.Now() + 2 * expected_pto_delay,
manager_.GetRetransmissionTime());
}
TEST_F(QuicSentPacketManagerTest, PtoDelayWithTinyInitialRtt) {
manager_.EnableMultiplePacketNumberSpacesSupport();
RttStats* rtt_stats = const_cast<RttStats*>(manager_.GetRttStats());
rtt_stats->set_initial_rtt(QuicTime::Delta::FromMicroseconds(1));
EXPECT_EQ(QuicTime::Delta::FromMicroseconds(1), rtt_stats->initial_rtt());
EXPECT_TRUE(rtt_stats->smoothed_rtt().IsZero());
SendCryptoPacket(1);
QuicTime::Delta expected_pto_delay = QuicTime::Delta::FromMilliseconds(10);
EXPECT_EQ(clock_.Now() + expected_pto_delay,
manager_.GetRetransmissionTime());
clock_.AdvanceTime(expected_pto_delay);
manager_.OnRetransmissionTimeout();
EXPECT_CALL(notifier_, RetransmitFrames(_, _))
.WillOnce(
WithArgs<1>(Invoke([this]() { return RetransmitCryptoPacket(3); })));
manager_.MaybeSendProbePacket();
EXPECT_EQ(clock_.Now() + 2 * expected_pto_delay,
manager_.GetRetransmissionTime());
}
TEST_F(QuicSentPacketManagerTest, HandshakeAckCausesInitialKeyDropping) {
manager_.EnableMultiplePacketNumberSpacesSupport();
QuicSentPacketManagerPeer::SetPerspective(&manager_, Perspective::IS_CLIENT);
SendDataPacket(1, ENCRYPTION_INITIAL);
QuicTime::Delta expected_pto_delay =
QuicTime::Delta::FromMilliseconds(3 * kInitialRttMs);
EXPECT_EQ(clock_.Now() + expected_pto_delay,
manager_.GetRetransmissionTime());
clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(10));
SendAckPacket(2, 1, ENCRYPTION_HANDSHAKE);
EXPECT_CALL(notifier_, HasUnackedCryptoData()).WillRepeatedly(Return(false));
EXPECT_CALL(notifier_, IsFrameOutstanding(_)).WillRepeatedly(Return(false));
manager_.NeuterUnencryptedPackets();
EXPECT_FALSE(manager_.HasInFlightPackets());
EXPECT_EQ(clock_.Now() + expected_pto_delay,
manager_.GetRetransmissionTime());
clock_.AdvanceTime(expected_pto_delay);
manager_.OnRetransmissionTimeout();
EXPECT_CALL(notifier_, RetransmitFrames(_, _)).Times(0);
manager_.MaybeSendProbePacket();
}
TEST_F(QuicSentPacketManagerTest, ClearLastInflightPacketsSentTime) {
manager_.EnableMultiplePacketNumberSpacesSupport();
EXPECT_CALL(*send_algorithm_, PacingRate(_))
.WillRepeatedly(Return(QuicBandwidth::Zero()));
EXPECT_CALL(*send_algorithm_, GetCongestionWindow())
.WillRepeatedly(Return(10 * kDefaultTCPMSS));
SendDataPacket(1, ENCRYPTION_INITIAL);
clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(10));
SendDataPacket(2, ENCRYPTION_HANDSHAKE);
SendDataPacket(3, ENCRYPTION_HANDSHAKE);
SendDataPacket(4, ENCRYPTION_HANDSHAKE);
const QuicTime packet2_sent_time = clock_.Now();
clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(10));
SendDataPacket(5, ENCRYPTION_FORWARD_SECURE);
ExpectAck(1);
clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(90));
manager_.OnAckFrameStart(QuicPacketNumber(1), QuicTime::Delta::Infinite(),
clock_.Now());
manager_.OnAckRange(QuicPacketNumber(1), QuicPacketNumber(2));
EXPECT_EQ(PACKETS_NEWLY_ACKED,
manager_.OnAckFrameEnd(clock_.Now(), QuicPacketNumber(1),
ENCRYPTION_INITIAL, kEmptyCounts));
RttStats* rtt_stats = const_cast<RttStats*>(manager_.GetRttStats());
const QuicTime::Delta pto_delay =
rtt_stats->smoothed_rtt() +
kPtoRttvarMultiplier * rtt_stats->mean_deviation() +
QuicTime::Delta::Zero();
EXPECT_EQ(packet2_sent_time + pto_delay, manager_.GetRetransmissionTime());
}
TEST_F(QuicSentPacketManagerTest, MaybeRetransmitInitialData) {
manager_.EnableMultiplePacketNumberSpacesSupport();
EXPECT_CALL(*send_algorithm_, PacingRate(_))
.WillRepeatedly(Return(QuicBandwidth::Zero()));
EXPECT_CALL(*send_algorithm_, GetCongestionWindow())
.WillRepeatedly(Return(10 * kDefaultTCPMSS));
RttStats* rtt_stats = const_cast<RttStats*>(manager_.GetRttStats());
rtt_stats->UpdateRtt(QuicTime::Delta::FromMilliseconds(100),
QuicTime::Delta::Zero(), QuicTime::Zero());
QuicTime::Delta srtt = rtt_stats->smoothed_rtt();
SendDataPacket(1, ENCRYPTION_INITIAL);
QuicTime packet1_sent_time = clock_.Now();
clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(10));
SendDataPacket(2, ENCRYPTION_HANDSHAKE);
QuicTime packet2_sent_time = clock_.Now();
SendDataPacket(3, ENCRYPTION_HANDSHAKE);
QuicTime::Delta expected_pto_delay =
srtt + kPtoRttvarMultiplier * rtt_stats->mean_deviation() +
QuicTime::Delta::Zero();
EXPECT_EQ(packet1_sent_time + expected_pto_delay,
manager_.GetRetransmissionTime());
clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(10));
EXPECT_CALL(notifier_, RetransmitFrames(_, _))
.WillOnce(WithArgs<1>(Invoke([this](TransmissionType type) {
return RetransmitDataPacket(4, type, ENCRYPTION_INITIAL);
})));
manager_.RetransmitDataOfSpaceIfAny(INITIAL_DATA);
EXPECT_EQ(packet2_sent_time + expected_pto_delay,
manager_.GetRetransmissionTime());
clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(10));
EXPECT_CALL(notifier_, RetransmitFrames(_, _))
.WillOnce(WithArgs<1>(Invoke([this](TransmissionType type) {
return RetransmitDataPacket(5, type, ENCRYPTION_INITIAL);
})));
manager_.RetransmitDataOfSpaceIfAny(INITIAL_DATA);
EXPECT_EQ(packet2_sent_time + expected_pto_delay,
manager_.GetRetransmissionTime());
}
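// A PATH_CHALLENGE carries no retransmittable data but is still reported to
// the congestion controller on send and on ack.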
TEST_F(QuicSentPacketManagerTest, SendPathChallengeAndGetAck) {
QuicPacketNumber packet_number(1);
EXPECT_CALL(*send_algorithm_,
OnPacketSent(_, BytesInFlight(), packet_number, _, _));
  SerializedPacket packet(packet_number, PACKET_4BYTE_PACKET_NUMBER, nullptr,
                          kDefaultLength, /*has_ack=*/false,
                          /*has_stop_waiting=*/false);
  QuicPathFrameBuffer path_frame_buffer{0, 1, 2, 3, 4, 5, 6, 7};
  packet.nonretransmittable_frames.push_back(
      QuicFrame(QuicPathChallengeFrame(0, path_frame_buffer)));
  packet.encryption_level = ENCRYPTION_FORWARD_SECURE;
  manager_.OnPacketSent(&packet, clock_.Now(), NOT_RETRANSMISSION,
                        NO_RETRANSMITTABLE_DATA, /*measure_rtt=*/false,
                        ECN_NOT_ECT);
clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(10));
EXPECT_CALL(
*send_algorithm_,
OnCongestionEvent(false, _, _,
Pointwise(PacketNumberEq(), {1}), IsEmpty(), _, _));
EXPECT_CALL(*network_change_visitor_, OnCongestionChange());
manager_.OnAckFrameStart(QuicPacketNumber(1), QuicTime::Delta::Infinite(),
clock_.Now());
manager_.OnAckRange(QuicPacketNumber(1), QuicPacketNumber(2));
EXPECT_EQ(PACKETS_NEWLY_ACKED,
manager_.OnAckFrameEnd(clock_.Now(), QuicPacketNumber(1),
ENCRYPTION_FORWARD_SECURE, kEmptyCounts));
}
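// Builds a retransmittable 1-RTT packet carrying one ACK_FREQUENCY frame
// with the given sequence number and max_ack_delay.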
SerializedPacket MakePacketWithAckFrequencyFrame(
int packet_number, int ack_frequency_sequence_number,
QuicTime::Delta max_ack_delay) {
auto* ack_frequency_frame = new QuicAckFrequencyFrame();
ack_frequency_frame->max_ack_delay = max_ack_delay;
ack_frequency_frame->sequence_number = ack_frequency_sequence_number;
  SerializedPacket packet(QuicPacketNumber(packet_number),
                          PACKET_4BYTE_PACKET_NUMBER, nullptr, kDefaultLength,
                          /*has_ack=*/false,
                          /*has_stop_waiting=*/false);
packet.retransmittable_frames.push_back(QuicFrame(ack_frequency_frame));
packet.has_ack_frequency = true;
packet.encryption_level = ENCRYPTION_FORWARD_SECURE;
return packet;
}
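// peer_max_ack_delay grows as soon as a larger value is sent, but a smaller
// value only takes effect once the frame advertising it is acked.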
TEST_F(QuicSentPacketManagerTest,
PeerMaxAckDelayUpdatedFromAckFrequencyFrameOneAtATime) {
EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _)).Times(AnyNumber());
EXPECT_CALL(*send_algorithm_, OnCongestionEvent(_, _, _, _, _, _, _))
.Times(AnyNumber());
EXPECT_CALL(*network_change_visitor_, OnCongestionChange())
.Times(AnyNumber());
auto initial_peer_max_ack_delay = manager_.peer_max_ack_delay();
auto one_ms = QuicTime::Delta::FromMilliseconds(1);
auto plus_1_ms_delay = initial_peer_max_ack_delay + one_ms;
auto minus_1_ms_delay = initial_peer_max_ack_delay - one_ms;
  SerializedPacket packet1 = MakePacketWithAckFrequencyFrame(
      /*packet_number=*/1, /*ack_frequency_sequence_number=*/1,
      plus_1_ms_delay);
  manager_.OnPacketSent(&packet1, clock_.Now(), NOT_RETRANSMISSION,
                        HAS_RETRANSMITTABLE_DATA, /*measure_rtt=*/true,
                        ECN_NOT_ECT);
EXPECT_EQ(manager_.peer_max_ack_delay(), plus_1_ms_delay);
manager_.OnAckFrameStart(QuicPacketNumber(1), QuicTime::Delta::Infinite(),
clock_.Now());
manager_.OnAckRange(QuicPacketNumber(1), QuicPacketNumber(2));
manager_.OnAckFrameEnd(clock_.Now(), QuicPacketNumber(1),
ENCRYPTION_FORWARD_SECURE, kEmptyCounts);
EXPECT_EQ(manager_.peer_max_ack_delay(), plus_1_ms_delay);
  SerializedPacket packet2 = MakePacketWithAckFrequencyFrame(
      /*packet_number=*/2, /*ack_frequency_sequence_number=*/2,
      minus_1_ms_delay);
  manager_.OnPacketSent(&packet2, clock_.Now(), NOT_RETRANSMISSION,
                        HAS_RETRANSMITTABLE_DATA, /*measure_rtt=*/true,
                        ECN_NOT_ECT);
EXPECT_EQ(manager_.peer_max_ack_delay(), plus_1_ms_delay);
manager_.OnAckFrameStart(QuicPacketNumber(2), QuicTime::Delta::Infinite(),
clock_.Now());
manager_.OnAckRange(QuicPacketNumber(2), QuicPacketNumber(3));
manager_.OnAckFrameEnd(clock_.Now(), QuicPacketNumber(2),
ENCRYPTION_FORWARD_SECURE, kEmptyCounts);
EXPECT_EQ(manager_.peer_max_ack_delay(), minus_1_ms_delay);
}
TEST_F(QuicSentPacketManagerTest,
PeerMaxAckDelayUpdatedFromInOrderAckFrequencyFrames) {
EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _)).Times(AnyNumber());
EXPECT_CALL(*send_algorithm_, OnCongestionEvent(_, _, _, _, _, _, _))
.Times(AnyNumber());
EXPECT_CALL(*network_change_visitor_, OnCongestionChange())
.Times(AnyNumber());
auto initial_peer_max_ack_delay = manager_.peer_max_ack_delay();
auto one_ms = QuicTime::Delta::FromMilliseconds(1);
auto extra_1_ms = initial_peer_max_ack_delay + one_ms;
auto extra_2_ms = initial_peer_max_ack_delay + 2 * one_ms;
auto extra_3_ms = initial_peer_max_ack_delay + 3 * one_ms;
  SerializedPacket packet1 = MakePacketWithAckFrequencyFrame(
      /*packet_number=*/1, /*ack_frequency_sequence_number=*/1, extra_1_ms);
  SerializedPacket packet2 = MakePacketWithAckFrequencyFrame(
      /*packet_number=*/2, /*ack_frequency_sequence_number=*/2, extra_3_ms);
  SerializedPacket packet3 = MakePacketWithAckFrequencyFrame(
      /*packet_number=*/3, /*ack_frequency_sequence_number=*/3, extra_2_ms);
  manager_.OnPacketSent(&packet1, clock_.Now(), NOT_RETRANSMISSION,
                        HAS_RETRANSMITTABLE_DATA, /*measure_rtt=*/true,
                        ECN_NOT_ECT);
  EXPECT_EQ(manager_.peer_max_ack_delay(), extra_1_ms);
  manager_.OnPacketSent(&packet2, clock_.Now(), NOT_RETRANSMISSION,
                        HAS_RETRANSMITTABLE_DATA, /*measure_rtt=*/true,
                        ECN_NOT_ECT);
  EXPECT_EQ(manager_.peer_max_ack_delay(), extra_3_ms);
  manager_.OnPacketSent(&packet3, clock_.Now(), NOT_RETRANSMISSION,
                        HAS_RETRANSMITTABLE_DATA, /*measure_rtt=*/true,
                        ECN_NOT_ECT);
EXPECT_EQ(manager_.peer_max_ack_delay(), extra_3_ms);
manager_.OnAckFrameStart(QuicPacketNumber(1), QuicTime::Delta::Infinite(),
clock_.Now());
manager_.OnAckRange(QuicPacketNumber(1), QuicPacketNumber(2));
manager_.OnAckFrameEnd(clock_.Now(), QuicPacketNumber(1),
ENCRYPTION_FORWARD_SECURE, kEmptyCounts);
EXPECT_EQ(manager_.peer_max_ack_delay(), extra_3_ms);
manager_.OnAckFrameStart(QuicPacketNumber(2), QuicTime::Delta::Infinite(),
clock_.Now());
manager_.OnAckRange(QuicPacketNumber(1), QuicPacketNumber(3));
manager_.OnAckFrameEnd(clock_.Now(), QuicPacketNumber(1),
ENCRYPTION_FORWARD_SECURE, kEmptyCounts);
EXPECT_EQ(manager_.peer_max_ack_delay(), extra_3_ms);
manager_.OnAckFrameStart(QuicPacketNumber(3), QuicTime::Delta::Infinite(),
clock_.Now());
manager_.OnAckRange(QuicPacketNumber(1), QuicPacketNumber(4));
manager_.OnAckFrameEnd(clock_.Now(), QuicPacketNumber(1),
ENCRYPTION_FORWARD_SECURE, kEmptyCounts);
EXPECT_EQ(manager_.peer_max_ack_delay(), extra_2_ms);
}
TEST_F(QuicSentPacketManagerTest,
PeerMaxAckDelayUpdatedFromOutOfOrderAckedAckFrequencyFrames) {
EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _)).Times(AnyNumber());
EXPECT_CALL(*send_algorithm_, OnCongestionEvent(_, _, _, _, _, _, _))
.Times(AnyNumber());
EXPECT_CALL(*network_change_visitor_, OnCongestionChange())
.Times(AnyNumber());
auto initial_peer_max_ack_delay = manager_.peer_max_ack_delay();
auto one_ms = QuicTime::Delta::FromMilliseconds(1);
auto extra_1_ms = initial_peer_max_ack_delay + one_ms;
auto extra_2_ms = initial_peer_max_ack_delay + 2 * one_ms;
auto extra_3_ms = initial_peer_max_ack_delay + 3 * one_ms;
auto extra_4_ms = initial_peer_max_ack_delay + 4 * one_ms;
  SerializedPacket packet1 = MakePacketWithAckFrequencyFrame(
      /*packet_number=*/1, /*ack_frequency_sequence_number=*/1, extra_4_ms);
  SerializedPacket packet2 = MakePacketWithAckFrequencyFrame(
      /*packet_number=*/2, /*ack_frequency_sequence_number=*/2, extra_3_ms);
  SerializedPacket packet3 = MakePacketWithAckFrequencyFrame(
      /*packet_number=*/3, /*ack_frequency_sequence_number=*/3, extra_2_ms);
  SerializedPacket packet4 = MakePacketWithAckFrequencyFrame(
      /*packet_number=*/4, /*ack_frequency_sequence_number=*/4, extra_1_ms);
  manager_.OnPacketSent(&packet1, clock_.Now(), NOT_RETRANSMISSION,
                        HAS_RETRANSMITTABLE_DATA, /*measure_rtt=*/true,
                        ECN_NOT_ECT);
  manager_.OnPacketSent(&packet2, clock_.Now(), NOT_RETRANSMISSION,
                        HAS_RETRANSMITTABLE_DATA, /*measure_rtt=*/true,
                        ECN_NOT_ECT);
  manager_.OnPacketSent(&packet3, clock_.Now(), NOT_RETRANSMISSION,
                        HAS_RETRANSMITTABLE_DATA, /*measure_rtt=*/true,
                        ECN_NOT_ECT);
  manager_.OnPacketSent(&packet4, clock_.Now(), NOT_RETRANSMISSION,
                        NO_RETRANSMITTABLE_DATA, /*measure_rtt=*/true,
                        ECN_NOT_ECT);
EXPECT_EQ(manager_.peer_max_ack_delay(), extra_4_ms);
manager_.OnAckFrameStart(QuicPacketNumber(3), QuicTime::Delta::Infinite(),
clock_.Now());
manager_.OnAckRange(QuicPacketNumber(3), QuicPacketNumber(4));
manager_.OnAckFrameEnd(clock_.Now(), QuicPacketNumber(1),
ENCRYPTION_FORWARD_SECURE, kEmptyCounts);
EXPECT_EQ(manager_.peer_max_ack_delay(), extra_2_ms);
manager_.OnAckFrameStart(QuicPacketNumber(3), QuicTime::Delta::Infinite(),
clock_.Now());
manager_.OnAckRange(QuicPacketNumber(3), QuicPacketNumber(4));
manager_.OnAckRange(QuicPacketNumber(1), QuicPacketNumber(2));
manager_.OnAckFrameEnd(clock_.Now(), QuicPacketNumber(1),
ENCRYPTION_FORWARD_SECURE, kEmptyCounts);
EXPECT_EQ(manager_.peer_max_ack_delay(), extra_2_ms);
manager_.OnAckFrameStart(QuicPacketNumber(3), QuicTime::Delta::Infinite(),
clock_.Now());
manager_.OnAckRange(QuicPacketNumber(1), QuicPacketNumber(4));
manager_.OnAckFrameEnd(clock_.Now(), QuicPacketNumber(1),
ENCRYPTION_FORWARD_SECURE, kEmptyCounts);
EXPECT_EQ(manager_.peer_max_ack_delay(), extra_2_ms);
manager_.OnAckFrameStart(QuicPacketNumber(4), QuicTime::Delta::Infinite(),
clock_.Now());
manager_.OnAckRange(QuicPacketNumber(1), QuicPacketNumber(5));
manager_.OnAckFrameEnd(clock_.Now(), QuicPacketNumber(1),
ENCRYPTION_FORWARD_SECURE, kEmptyCounts);
EXPECT_EQ(manager_.peer_max_ack_delay(), extra_1_ms);
}
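// Sending a packet with a MESSAGE frame releases the frame's backing data,
// even though the frame object itself outlives the packet.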
TEST_F(QuicSentPacketManagerTest, ClearDataInMessageFrameAfterPacketSent) {
EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _)).Times(1);
QuicMessageFrame* message_frame = nullptr;
{
quiche::QuicheMemSlice slice(quiche::QuicheBuffer(&allocator_, 1024));
message_frame = new QuicMessageFrame(1, std::move(slice));
EXPECT_FALSE(message_frame->message_data.empty());
EXPECT_EQ(message_frame->message_length, 1024);
    SerializedPacket packet(QuicPacketNumber(1), PACKET_4BYTE_PACKET_NUMBER,
                            nullptr, kDefaultLength, /*has_ack=*/false,
                            /*has_stop_waiting=*/false);
    packet.encryption_level = ENCRYPTION_FORWARD_SECURE;
    packet.retransmittable_frames.push_back(QuicFrame(message_frame));
    packet.has_message = true;
    manager_.OnPacketSent(&packet, clock_.Now(), NOT_RETRANSMISSION,
                          HAS_RETRANSMITTABLE_DATA, /*measure_rtt=*/true,
                          ECN_NOT_ECT);
}
EXPECT_TRUE(message_frame->message_data.empty());
EXPECT_EQ(message_frame->message_length, 0);
}
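// By default the advertised max_ack_delay is min_rtt / 4, floored at 1 ms,
// with a packet tolerance of 10.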
TEST_F(QuicSentPacketManagerTest, BuildAckFrequencyFrame) {
SetQuicReloadableFlag(quic_can_send_ack_frequency, true);
EXPECT_CALL(*send_algorithm_, SetFromConfig(_, _));
EXPECT_CALL(*network_change_visitor_, OnCongestionChange());
QuicConfig config;
QuicConfigPeer::SetReceivedMinAckDelayMs(&config, 1);
manager_.SetFromConfig(config);
manager_.SetHandshakeConfirmed();
auto* rtt_stats = const_cast<RttStats*>(manager_.GetRttStats());
  rtt_stats->UpdateRtt(QuicTime::Delta::FromMilliseconds(80),
                       /*ack_delay=*/QuicTime::Delta::Zero(),
                       /*now=*/QuicTime::Zero());
  rtt_stats->UpdateRtt(
      QuicTime::Delta::FromMilliseconds(160),
      /*ack_delay=*/QuicTime::Delta::Zero(),
      /*now=*/QuicTime::Zero() + QuicTime::Delta::FromMilliseconds(24));
auto frame = manager_.GetUpdatedAckFrequencyFrame();
EXPECT_EQ(frame.max_ack_delay,
std::max(rtt_stats->min_rtt() * 0.25,
QuicTime::Delta::FromMilliseconds(1u)));
EXPECT_EQ(frame.packet_tolerance, 10u);
}
TEST_F(QuicSentPacketManagerTest, SmoothedRttIgnoreAckDelay) {
QuicConfig config;
QuicTagVector options;
options.push_back(kMAD0);
QuicConfigPeer::SetReceivedConnectionOptions(&config, options);
EXPECT_CALL(*send_algorithm_, SetFromConfig(_, _));
EXPECT_CALL(*network_change_visitor_, OnCongestionChange());
EXPECT_CALL(*send_algorithm_, CanSend(_)).WillRepeatedly(Return(true));
EXPECT_CALL(*send_algorithm_, GetCongestionWindow())
.WillRepeatedly(Return(10 * kDefaultTCPMSS));
manager_.SetFromConfig(config);
SendDataPacket(1);
clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(300));
ExpectAck(1);
manager_.OnAckFrameStart(QuicPacketNumber(1),
QuicTime::Delta::FromMilliseconds(100),
clock_.Now());
manager_.OnAckRange(QuicPacketNumber(1), QuicPacketNumber(2));
EXPECT_EQ(PACKETS_NEWLY_ACKED,
manager_.OnAckFrameEnd(clock_.Now(), QuicPacketNumber(1),
ENCRYPTION_INITIAL, kEmptyCounts));
EXPECT_EQ(QuicTime::Delta::FromMilliseconds(300),
manager_.GetRttStats()->latest_rtt());
EXPECT_EQ(QuicTime::Delta::FromMilliseconds(300),
manager_.GetRttStats()->smoothed_rtt());
SendDataPacket(2);
clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(300));
ExpectAck(2);
manager_.OnAckFrameStart(QuicPacketNumber(2),
QuicTime::Delta::FromMilliseconds(100),
clock_.Now());
manager_.OnAckRange(QuicPacketNumber(2), QuicPacketNumber(3));
EXPECT_EQ(PACKETS_NEWLY_ACKED,
manager_.OnAckFrameEnd(clock_.Now(), QuicPacketNumber(2),
ENCRYPTION_INITIAL, kEmptyCounts));
EXPECT_EQ(QuicTime::Delta::FromMilliseconds(300),
manager_.GetRttStats()->latest_rtt());
EXPECT_EQ(QuicTime::Delta::FromMilliseconds(300),
manager_.GetRttStats()->smoothed_rtt());
SendDataPacket(3);
clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(300));
ExpectAck(3);
manager_.OnAckFrameStart(QuicPacketNumber(3),
QuicTime::Delta::FromMilliseconds(50), clock_.Now());
manager_.OnAckRange(QuicPacketNumber(3), QuicPacketNumber(4));
EXPECT_EQ(PACKETS_NEWLY_ACKED,
manager_.OnAckFrameEnd(clock_.Now(), QuicPacketNumber(3),
ENCRYPTION_INITIAL, kEmptyCounts));
EXPECT_EQ(QuicTime::Delta::FromMilliseconds(300),
manager_.GetRttStats()->latest_rtt());
EXPECT_EQ(QuicTime::Delta::FromMilliseconds(300),
manager_.GetRttStats()->smoothed_rtt());
SendDataPacket(4);
clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(200));
ExpectAck(4);
manager_.OnAckFrameStart(QuicPacketNumber(4),
QuicTime::Delta::FromMilliseconds(300),
clock_.Now());
manager_.OnAckRange(QuicPacketNumber(4), QuicPacketNumber(5));
EXPECT_EQ(PACKETS_NEWLY_ACKED,
manager_.OnAckFrameEnd(clock_.Now(), QuicPacketNumber(4),
ENCRYPTION_INITIAL, kEmptyCounts));
EXPECT_EQ(QuicTime::Delta::FromMilliseconds(200),
manager_.GetRttStats()->latest_rtt());
EXPECT_EQ(QuicTime::Delta::FromMicroseconds(287500),
manager_.GetRttStats()->smoothed_rtt());
}
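// The ack delay reported for handshake packets is still honored when
// computing RTT, so the 50 ms queuing delay does not inflate the 100 ms
// sample.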
TEST_F(QuicSentPacketManagerTest, IgnorePeerMaxAckDelayDuringHandshake) {
manager_.EnableMultiplePacketNumberSpacesSupport();
const QuicTime::Delta kTestRTT = QuicTime::Delta::FromMilliseconds(100);
SendDataPacket(1, ENCRYPTION_INITIAL);
SendDataPacket(2, ENCRYPTION_HANDSHAKE);
clock_.AdvanceTime(kTestRTT);
ExpectAck(1);
manager_.OnAckFrameStart(QuicPacketNumber(1), QuicTime::Delta::Infinite(),
clock_.Now());
manager_.OnAckRange(QuicPacketNumber(1), QuicPacketNumber(2));
EXPECT_EQ(PACKETS_NEWLY_ACKED,
manager_.OnAckFrameEnd(clock_.Now(), QuicPacketNumber(1),
ENCRYPTION_INITIAL, kEmptyCounts));
EXPECT_EQ(kTestRTT, manager_.GetRttStats()->latest_rtt());
const QuicTime::Delta queuing_delay = QuicTime::Delta::FromMilliseconds(50);
clock_.AdvanceTime(queuing_delay);
ExpectAck(2);
manager_.OnAckFrameStart(QuicPacketNumber(2), queuing_delay, clock_.Now());
manager_.OnAckRange(QuicPacketNumber(2), QuicPacketNumber(3));
EXPECT_EQ(PACKETS_NEWLY_ACKED,
manager_.OnAckFrameEnd(clock_.Now(), QuicPacketNumber(2),
ENCRYPTION_HANDSHAKE, kEmptyCounts));
EXPECT_EQ(kTestRTT, manager_.GetRttStats()->latest_rtt());
}
TEST_F(QuicSentPacketManagerTest, BuildAckFrequencyFrameWithSRTT) {
SetQuicReloadableFlag(quic_can_send_ack_frequency, true);
EXPECT_CALL(*send_algorithm_, SetFromConfig(_, _));
EXPECT_CALL(*network_change_visitor_, OnCongestionChange());
QuicConfig config;
QuicConfigPeer::SetReceivedMinAckDelayMs(&config, 1);
QuicTagVector quic_tag_vector;
quic_tag_vector.push_back(kAFF1);
QuicConfigPeer::SetReceivedConnectionOptions(&config, quic_tag_vector);
manager_.SetFromConfig(config);
manager_.SetHandshakeConfirmed();
auto* rtt_stats = const_cast<RttStats*>(manager_.GetRttStats());
  rtt_stats->UpdateRtt(QuicTime::Delta::FromMilliseconds(80),
                       /*ack_delay=*/QuicTime::Delta::Zero(),
                       /*now=*/QuicTime::Zero());
  rtt_stats->UpdateRtt(
      QuicTime::Delta::FromMilliseconds(160),
      /*ack_delay=*/QuicTime::Delta::Zero(),
      /*now=*/QuicTime::Zero() + QuicTime::Delta::FromMilliseconds(24));
auto frame = manager_.GetUpdatedAckFrequencyFrame();
EXPECT_EQ(frame.max_ack_delay,
std::max(rtt_stats->SmoothedOrInitialRtt() * 0.25,
QuicTime::Delta::FromMilliseconds(1u)));
}
TEST_F(QuicSentPacketManagerTest, SetInitialRtt) {
manager_.SetInitialRtt(
QuicTime::Delta::FromMicroseconds(kMaxInitialRoundTripTimeUs + 1), false);
EXPECT_EQ(manager_.GetRttStats()->initial_rtt().ToMicroseconds(),
kMaxInitialRoundTripTimeUs);
manager_.SetInitialRtt(
QuicTime::Delta::FromMicroseconds(kMaxInitialRoundTripTimeUs + 1), true);
EXPECT_EQ(manager_.GetRttStats()->initial_rtt().ToMicroseconds(),
kMaxInitialRoundTripTimeUs);
EXPECT_GT(kMinUntrustedInitialRoundTripTimeUs,
kMinTrustedInitialRoundTripTimeUs);
manager_.SetInitialRtt(QuicTime::Delta::FromMicroseconds(
kMinUntrustedInitialRoundTripTimeUs - 1),
false);
EXPECT_EQ(manager_.GetRttStats()->initial_rtt().ToMicroseconds(),
kMinUntrustedInitialRoundTripTimeUs);
manager_.SetInitialRtt(QuicTime::Delta::FromMicroseconds(
kMinUntrustedInitialRoundTripTimeUs - 1),
true);
EXPECT_EQ(manager_.GetRttStats()->initial_rtt().ToMicroseconds(),
kMinUntrustedInitialRoundTripTimeUs - 1);
manager_.SetInitialRtt(
QuicTime::Delta::FromMicroseconds(kMinTrustedInitialRoundTripTimeUs - 1),
true);
EXPECT_EQ(manager_.GetRttStats()->initial_rtt().ToMicroseconds(),
kMinTrustedInitialRoundTripTimeUs);
}
TEST_F(QuicSentPacketManagerTest, GetAvailableCongestionWindow) {
SendDataPacket(1);
EXPECT_EQ(kDefaultLength, manager_.GetBytesInFlight());
EXPECT_CALL(*send_algorithm_, GetCongestionWindow())
.WillOnce(Return(kDefaultLength + 10));
EXPECT_EQ(10u, manager_.GetAvailableCongestionWindowInBytes());
EXPECT_CALL(*send_algorithm_, GetCongestionWindow())
.WillOnce(Return(kDefaultLength));
EXPECT_EQ(0u, manager_.GetAvailableCongestionWindowInBytes());
EXPECT_CALL(*send_algorithm_, GetCongestionWindow())
.WillOnce(Return(kDefaultLength - 10));
EXPECT_EQ(0u, manager_.GetAvailableCongestionWindowInBytes());
}
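// ECN counts carried in acks are recorded separately per packet number
// space.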
TEST_F(QuicSentPacketManagerTest, EcnCountsAreStored) {
if (!GetQuicRestartFlag(quic_support_ect1)) {
return;
}
std::optional<QuicEcnCounts> ecn_counts1, ecn_counts2, ecn_counts3;
ecn_counts1 = {1, 0, 3};
ecn_counts2 = {0, 3, 1};
ecn_counts3 = {0, 2, 0};
SendDataPacket(1, ENCRYPTION_INITIAL, ECN_ECT0);
SendDataPacket(2, ENCRYPTION_INITIAL, ECN_ECT0);
SendDataPacket(3, ENCRYPTION_INITIAL, ECN_ECT0);
SendDataPacket(4, ENCRYPTION_INITIAL, ECN_ECT0);
SendDataPacket(5, ENCRYPTION_HANDSHAKE, ECN_ECT1);
SendDataPacket(6, ENCRYPTION_HANDSHAKE, ECN_ECT1);
SendDataPacket(7, ENCRYPTION_HANDSHAKE, ECN_ECT1);
SendDataPacket(8, ENCRYPTION_HANDSHAKE, ECN_ECT1);
SendDataPacket(9, ENCRYPTION_FORWARD_SECURE, ECN_ECT1);
SendDataPacket(10, ENCRYPTION_FORWARD_SECURE, ECN_ECT1);
manager_.OnAckFrameEnd(clock_.Now(), QuicPacketNumber(1), ENCRYPTION_INITIAL,
ecn_counts1);
manager_.OnAckFrameEnd(clock_.Now(), QuicPacketNumber(2),
ENCRYPTION_HANDSHAKE, ecn_counts2);
manager_.OnAckFrameEnd(clock_.Now(), QuicPacketNumber(3),
ENCRYPTION_FORWARD_SECURE, ecn_counts3);
EXPECT_EQ(
*QuicSentPacketManagerPeer::GetPeerEcnCounts(&manager_, INITIAL_DATA),
ecn_counts1);
EXPECT_EQ(
*QuicSentPacketManagerPeer::GetPeerEcnCounts(&manager_, HANDSHAKE_DATA),
ecn_counts2);
EXPECT_EQ(
*QuicSentPacketManagerPeer::GetPeerEcnCounts(&manager_, APPLICATION_DATA),
ecn_counts3);
}
TEST_F(QuicSentPacketManagerTest, EcnCountsReceived) {
if (!GetQuicRestartFlag(quic_support_ect1)) {
return;
}
for (uint64_t i = 1; i <= 3; ++i) {
SendDataPacket(i, ENCRYPTION_FORWARD_SECURE, ECN_ECT1);
}
EXPECT_CALL(*network_change_visitor_, OnInFlightEcnPacketAcked()).Times(2);
manager_.OnAckFrameStart(QuicPacketNumber(3), QuicTime::Delta::Infinite(),
clock_.Now());
manager_.OnAckRange(QuicPacketNumber(2), QuicPacketNumber(4));
EXPECT_CALL(*send_algorithm_,
OnCongestionEvent(_, _, _, Pointwise(PacketNumberEq(), {2, 3}),
IsEmpty(), 2, 1))
.Times(1);
EXPECT_CALL(*network_change_visitor_, OnCongestionChange()).Times(1);
std::optional<QuicEcnCounts> ecn_counts = QuicEcnCounts();
ecn_counts->ect1 = QuicPacketCount(2);
ecn_counts->ce = QuicPacketCount(1);
EXPECT_EQ(PACKETS_NEWLY_ACKED,
manager_.OnAckFrameEnd(clock_.Now(), QuicPacketNumber(1),
ENCRYPTION_FORWARD_SECURE, ecn_counts));
}
TEST_F(QuicSentPacketManagerTest, PeerDecrementsEcnCounts) {
if (!GetQuicRestartFlag(quic_support_ect1)) {
return;
}
for (uint64_t i = 1; i <= 5; ++i) {
SendDataPacket(i, ENCRYPTION_FORWARD_SECURE, ECN_ECT1);
}
EXPECT_CALL(*network_change_visitor_, OnInFlightEcnPacketAcked()).Times(3);
manager_.OnAckFrameStart(QuicPacketNumber(3), QuicTime::Delta::Infinite(),
clock_.Now());
manager_.OnAckRange(QuicPacketNumber(1), QuicPacketNumber(4));
EXPECT_CALL(*send_algorithm_,
OnCongestionEvent(_, _, _, Pointwise(PacketNumberEq(), {1, 2, 3}),
IsEmpty(), 2, 1))
.Times(1);
EXPECT_CALL(*network_change_visitor_, OnCongestionChange()).Times(1);
std::optional<QuicEcnCounts> ecn_counts = QuicEcnCounts();
ecn_counts->ect1 = QuicPacketCount(2);
ecn_counts->ce = QuicPacketCount(1);
EXPECT_EQ(PACKETS_NEWLY_ACKED,
manager_.OnAckFrameEnd(clock_.Now(), QuicPacketNumber(1),
ENCRYPTION_FORWARD_SECURE, ecn_counts));
EXPECT_CALL(*network_change_visitor_, OnInFlightEcnPacketAcked()).Times(1);
manager_.OnAckFrameStart(QuicPacketNumber(4), QuicTime::Delta::Infinite(),
clock_.Now());
manager_.OnAckRange(QuicPacketNumber(4), QuicPacketNumber(5));
EXPECT_CALL(*network_change_visitor_, OnInvalidEcnFeedback());
EXPECT_CALL(*send_algorithm_,
OnCongestionEvent(_, _, _, Pointwise(PacketNumberEq(), {4}),
IsEmpty(), 0, 0))
.Times(1);
EXPECT_CALL(*network_change_visitor_, OnCongestionChange()).Times(1);
ecn_counts = QuicEcnCounts();
ecn_counts->ect1 = QuicPacketCount(3);
ecn_counts->ce = QuicPacketCount(0);
EXPECT_EQ(PACKETS_NEWLY_ACKED,
manager_.OnAckFrameEnd(clock_.Now(), QuicPacketNumber(2),
ENCRYPTION_FORWARD_SECURE, ecn_counts));
}
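// Reporting more ECN marks than newly acked ECN-marked packets invalidates
// the feedback; the congestion event then carries zero ECN counts.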
TEST_F(QuicSentPacketManagerTest, TooManyEcnCountsReported) {
if (!GetQuicRestartFlag(quic_support_ect1)) {
return;
}
for (uint64_t i = 1; i <= 3; ++i) {
SendDataPacket(i, ENCRYPTION_FORWARD_SECURE, ECN_ECT1);
}
EXPECT_CALL(*network_change_visitor_, OnInFlightEcnPacketAcked()).Times(2);
manager_.OnAckFrameStart(QuicPacketNumber(3), QuicTime::Delta::Infinite(),
clock_.Now());
manager_.OnAckRange(QuicPacketNumber(2), QuicPacketNumber(4));
std::optional<QuicEcnCounts> ecn_counts = QuicEcnCounts();
ecn_counts->ect1 = QuicPacketCount(3);
ecn_counts->ce = QuicPacketCount(1);
EXPECT_CALL(*network_change_visitor_, OnInvalidEcnFeedback());
EXPECT_CALL(*send_algorithm_,
OnCongestionEvent(_, _, _, Pointwise(PacketNumberEq(), {2, 3}),
IsEmpty(), 0, 0))
.Times(1);
EXPECT_CALL(*network_change_visitor_, OnCongestionChange()).Times(1);
EXPECT_EQ(PACKETS_NEWLY_ACKED,
manager_.OnAckFrameEnd(clock_.Now(), QuicPacketNumber(1),
ENCRYPTION_FORWARD_SECURE, ecn_counts));
}
TEST_F(QuicSentPacketManagerTest, PeerReportsWrongCodepoint) {
if (!GetQuicRestartFlag(quic_support_ect1)) {
return;
}
for (uint64_t i = 1; i <= 3; ++i) {
SendDataPacket(i, ENCRYPTION_FORWARD_SECURE, ECN_ECT1);
}
EXPECT_CALL(*network_change_visitor_, OnInFlightEcnPacketAcked()).Times(2);
manager_.OnAckFrameStart(QuicPacketNumber(3), QuicTime::Delta::Infinite(),
clock_.Now());
manager_.OnAckRange(QuicPacketNumber(2), QuicPacketNumber(4));
std::optional<QuicEcnCounts> ecn_counts = QuicEcnCounts();
ecn_counts->ect0 = QuicPacketCount(2);
ecn_counts->ce = QuicPacketCount(1);
EXPECT_CALL(*network_change_visitor_, OnInvalidEcnFeedback());
EXPECT_CALL(*send_algorithm_,
OnCongestionEvent(_, _, _, Pointwise(PacketNumberEq(), {2, 3}),
IsEmpty(), 0, 0))
.Times(1);
EXPECT_CALL(*network_change_visitor_, OnCongestionChange()).Times(1);
EXPECT_EQ(PACKETS_NEWLY_ACKED,
manager_.OnAckFrameEnd(clock_.Now(), QuicPacketNumber(1),
ENCRYPTION_FORWARD_SECURE, ecn_counts));
}
TEST_F(QuicSentPacketManagerTest, TooFewEcnCountsReported) {
if (!GetQuicRestartFlag(quic_support_ect1)) {
return;
}
for (uint64_t i = 1; i <= 3; ++i) {
SendDataPacket(i, ENCRYPTION_FORWARD_SECURE, ECN_ECT1);
}
EXPECT_CALL(*network_change_visitor_, OnInFlightEcnPacketAcked()).Times(2);
manager_.OnAckFrameStart(QuicPacketNumber(3), QuicTime::Delta::Infinite(),
clock_.Now());
manager_.OnAckRange(QuicPacketNumber(2), QuicPacketNumber(4));
EXPECT_CALL(*network_change_visitor_, OnInvalidEcnFeedback());
EXPECT_CALL(*send_algorithm_,
OnCongestionEvent(_, _, _, Pointwise(PacketNumberEq(), {2, 3}),
IsEmpty(), 0, 0))
.Times(1);
EXPECT_CALL(*network_change_visitor_, OnCongestionChange()).Times(1);
std::optional<QuicEcnCounts> ecn_counts = QuicEcnCounts();
ecn_counts->ect1 = QuicPacketCount(1);
ecn_counts->ce = QuicPacketCount(0);
EXPECT_EQ(PACKETS_NEWLY_ACKED,
manager_.OnAckFrameEnd(clock_.Now(), QuicPacketNumber(1),
ENCRYPTION_FORWARD_SECURE, ecn_counts));
}
TEST_F(QuicSentPacketManagerTest,
EcnCountsNotValidatedIfLargestAckedUnchanged) {
if (!GetQuicRestartFlag(quic_support_ect1)) {
return;
}
for (uint64_t i = 1; i <= 3; ++i) {
SendDataPacket(i, ENCRYPTION_FORWARD_SECURE, ECN_ECT1);
}
EXPECT_CALL(*network_change_visitor_, OnInFlightEcnPacketAcked()).Times(2);
manager_.OnAckFrameStart(QuicPacketNumber(3), QuicTime::Delta::Infinite(),
clock_.Now());
manager_.OnAckRange(QuicPacketNumber(2), QuicPacketNumber(4));
EXPECT_CALL(*send_algorithm_,
OnCongestionEvent(_, _, _, Pointwise(PacketNumberEq(), {2, 3}),
IsEmpty(), 2, 1))
.Times(1);
EXPECT_CALL(*network_change_visitor_, OnCongestionChange()).Times(1);
std::optional<QuicEcnCounts> ecn_counts = QuicEcnCounts();
ecn_counts->ect1 = QuicPacketCount(2);
ecn_counts->ce = QuicPacketCount(1);
EXPECT_EQ(PACKETS_NEWLY_ACKED,
manager_.OnAckFrameEnd(clock_.Now(), QuicPacketNumber(1),
ENCRYPTION_FORWARD_SECURE, ecn_counts));
EXPECT_CALL(*network_change_visitor_, OnInFlightEcnPacketAcked()).Times(1);
manager_.OnAckFrameStart(QuicPacketNumber(3), QuicTime::Delta::Infinite(),
clock_.Now());
manager_.OnAckRange(QuicPacketNumber(1), QuicPacketNumber(4));
EXPECT_CALL(*send_algorithm_,
OnCongestionEvent(_, _, _, Pointwise(PacketNumberEq(), {1}),
IsEmpty(), 0, 0))
.Times(1);
EXPECT_CALL(*network_change_visitor_, OnCongestionChange()).Times(1);
ecn_counts = QuicEcnCounts();
ecn_counts->ect1 = QuicPacketCount(2);
ecn_counts->ce = QuicPacketCount(0);
EXPECT_EQ(PACKETS_NEWLY_ACKED,
manager_.OnAckFrameEnd(clock_.Now(), QuicPacketNumber(2),
ENCRYPTION_FORWARD_SECURE, ecn_counts));
}
TEST_F(QuicSentPacketManagerTest, EcnAckedButNoMarksReported) {
if (!GetQuicRestartFlag(quic_support_ect1)) {
return;
}
for (uint64_t i = 1; i <= 3; ++i) {
SendDataPacket(i, ENCRYPTION_FORWARD_SECURE, ECN_ECT1);
}
EXPECT_CALL(*network_change_visitor_, OnInFlightEcnPacketAcked()).Times(2);
manager_.OnAckFrameStart(QuicPacketNumber(3), QuicTime::Delta::Infinite(),
clock_.Now());
manager_.OnAckRange(QuicPacketNumber(2), QuicPacketNumber(4));
EXPECT_CALL(*network_change_visitor_, OnInvalidEcnFeedback());
EXPECT_CALL(*send_algorithm_,
OnCongestionEvent(_, _, _, Pointwise(PacketNumberEq(), {2, 3}),
IsEmpty(), 0, 0))
.Times(1);
EXPECT_CALL(*network_change_visitor_, OnCongestionChange()).Times(1);
std::optional<QuicEcnCounts> ecn_counts = std::nullopt;
EXPECT_EQ(PACKETS_NEWLY_ACKED,
manager_.OnAckFrameEnd(clock_.Now(), QuicPacketNumber(1),
ENCRYPTION_FORWARD_SECURE, ecn_counts));
}
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/quic_sent_packet_manager.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/quic_sent_packet_manager_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
10fac47b-79d0-4e11-8153-c2ff1b272ea0 | cpp | tensorflow/tensorflow | lock_free_queue | third_party/xla/xla/tsl/profiler/utils/lock_free_queue.h | third_party/xla/xla/tsl/profiler/utils/lock_free_queue_test.cc | #ifndef XLA_TSL_PROFILER_UTILS_LOCK_FREE_QUEUE_H_
#define XLA_TSL_PROFILER_UTILS_LOCK_FREE_QUEUE_H_
#include <stddef.h>
#include <algorithm>
#include <atomic>
#include <cstddef>
#include <optional>
#include <utility>
#include "xla/tsl/profiler/utils/no_init.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/macros.h"
namespace tsl {
namespace profiler {
namespace QueueBaseInternal {
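// A block holds kNumSlots lazily-initialized elements plus the logical index
// of its first slot and a pointer to the next block in the list.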
template <typename T, size_t kBlockSize>
struct InternalBlock {
static constexpr size_t kNumSlots =
(kBlockSize -
       (sizeof(size_t /*start*/) + sizeof(InternalBlock* /*next*/))) /
sizeof(NoInit<T>);
size_t start;
InternalBlock* next;
NoInit<T> slots[kNumSlots];
};
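// The end index is a plain size_t in the single-threaded queue and an atomic
// with release/acquire ordering when a producer and a consumer may race on
// it.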
template <bool kIsAtomic>
struct Index;
template <>
struct Index<false> {
size_t value;
explicit Index(size_t pos = 0) : value(pos) {}
size_t Get() const { return value; }
void Set(size_t pos) { value = pos; }
};
template <>
struct Index<true> {
std::atomic<size_t> value;
explicit Index(size_t pos = 0) : value(pos) {}
size_t Get() const { return value.load(std::memory_order_acquire); }
void Set(size_t pos) { value.store(pos, std::memory_order_release); }
};
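// Shared implementation: an unbounded FIFO over a singly linked list of
// blocks; kAtomicEnd selects how the end index is published.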
template <typename T, size_t kBlockSize, bool kAtomicEnd>
class BlockedQueueBase {
using Block = InternalBlock<T, kBlockSize>;
public:
static constexpr size_t kNumSlotsPerBlockForTesting = Block::kNumSlots;
BlockedQueueBase()
: start_block_(new Block{0, nullptr}),
start_(start_block_->start),
end_block_(start_block_),
end_(end_block_->start) {}
~BlockedQueueBase() {
Clear();
DCHECK(Empty());
delete end_block_;
}
void Push(T&& element) {
size_t end = End();
auto& slot = end_block_->slots[end++ - end_block_->start];
slot.Emplace(std::move(element));
if (TF_PREDICT_FALSE(end - end_block_->start == Block::kNumSlots)) {
auto* new_block = new Block{end, nullptr};
end_block_ = (end_block_->next = new_block);
}
SetEnd(end);
}
void Clear() {
size_t end = End();
while (start_ != end) {
PopImpl();
}
}
std::optional<T> Pop() {
std::optional<T> element;
size_t end = End();
if (start_ != end) {
element = PopImpl();
}
return element;
}
protected:
void SetEnd(size_t end) { end_.Set(end); }
size_t End() const { return end_.Get(); }
bool Empty() const { return (start_ == End()); }
T PopImpl() {
DCHECK(!Empty());
auto& slot = start_block_->slots[start_++ - start_block_->start];
T element = std::move(slot).Consume();
if (TF_PREDICT_FALSE(start_ - start_block_->start == Block::kNumSlots)) {
auto* old_block = std::exchange(start_block_, start_block_->next);
delete old_block;
DCHECK_EQ(start_, start_block_->start);
}
return element;
}
Block* start_block_;
size_t start_;
Block* end_block_;
Index<kAtomicEnd> end_;
};
}
template <typename T, size_t kBlockSize>
class LockFreeQueue;
template <typename T, size_t kBlockSize = 1 << 16 /*64KB*/>
class BlockedQueue final
: public QueueBaseInternal::BlockedQueueBase<T, kBlockSize, false> {
using Block = QueueBaseInternal::InternalBlock<T, kBlockSize>;
friend class LockFreeQueue<T, kBlockSize>;
public:
BlockedQueue() = default;
BlockedQueue(BlockedQueue&& src) { *this = std::move(src); }
BlockedQueue& operator=(BlockedQueue&& src) {
this->Clear();
std::swap(this->start_block_, src.start_block_);
std::swap(this->start_, src.start_);
std::swap(this->end_block_, src.end_block_);
auto origin_end = this->End();
this->SetEnd(src.End());
src.SetEnd(origin_end);
return *this;
}
class Iterator {
public:
bool operator==(const Iterator& another) const {
return (index_ == another.index_) && (queue_ == another.queue_);
}
bool operator!=(const Iterator& another) const {
return !(*this == another);
}
T& operator*() const {
DCHECK(block_ != nullptr);
DCHECK_GE(index_, block_->start);
DCHECK_LT(index_, block_->start + Block::kNumSlots);
DCHECK_LT(index_, queue_->End());
return block_->slots[index_ - block_->start].value;
}
T* operator->() const { return &(this->operator*()); }
Iterator& operator++() {
DCHECK(queue_ != nullptr);
DCHECK(block_ != nullptr);
if (index_ < queue_->End()) {
++index_;
auto next_block_start = block_->start + Block::kNumSlots;
DCHECK_LE(index_, next_block_start);
if (index_ == next_block_start) {
block_ = block_->next;
DCHECK_NE(block_, nullptr);
}
}
return (*this);
}
Iterator operator++(int) {
auto temp(*this);
this->operator++();
return temp;
}
private:
friend class BlockedQueue;
Iterator(BlockedQueue* queue, BlockedQueue::Block* block, size_t index)
        : queue_(queue), block_(block), index_(index) {}
BlockedQueue* queue_ = nullptr;
BlockedQueue::Block* block_ = nullptr;
size_t index_ = 0;
};
Iterator begin() { return Iterator(this, this->start_block_, this->start_); }
Iterator end() { return Iterator(this, this->end_block_, this->End()); }
};
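// Queue safe for one producer thread (Push) and one consumer thread
// (Pop/PopAll) running concurrently; the atomic end index is the only
// synchronization point between the two.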
template <typename T, size_t kBlockSize = 1 << 16>
class LockFreeQueue final
: public QueueBaseInternal::BlockedQueueBase<T, kBlockSize, true> {
using Block = QueueBaseInternal::InternalBlock<T, kBlockSize>;
public:
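  // Detaches everything currently visible to the consumer into a
  // BlockedQueue. Fully consumed blocks are transferred by relinking
  // pointers; elements in the partially filled tail block are moved one by
  // one into a spare block so the producer can keep pushing. For example:
  //   auto drained = queue.PopAll();
  //   while (auto v = drained.Pop()) { /* process *v */ }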
BlockedQueue<T, kBlockSize> PopAll() {
BlockedQueue<T, kBlockSize> result;
auto* empty_block = result.start_block_;
result.start_block_ = result.end_block_ = nullptr;
result.start_ = this->start_;
size_t end = this->End();
result.SetEnd(end);
while (this->start_block_->start + Block::kNumSlots <= end) {
auto* old_block =
std::exchange(this->start_block_, this->start_block_->next);
this->start_ = this->start_block_->start;
old_block->next = nullptr;
if (result.end_block_) {
result.end_block_->next = old_block;
} else {
result.start_block_ = old_block;
}
result.end_block_ = old_block;
}
empty_block->start = this->start_block_->start;
if (result.end_block_ == nullptr) {
result.end_block_ = result.start_block_ = empty_block;
} else {
result.end_block_->next = empty_block;
result.end_block_ = empty_block;
}
size_t bs = this->start_block_->start;
for (size_t i = std::max(this->start_, bs); i < end; i++) {
auto& src_slot = this->start_block_->slots[i - bs];
auto& dst_slot = result.end_block_->slots[i - bs];
dst_slot.Emplace(std::move(src_slot).Consume());
}
this->start_ = end;
return result;
}
};
}
}
#endif | #include "xla/tsl/profiler/utils/lock_free_queue.h"
#include <cstddef>
#include <cstdint>
#include <string>
#include <vector>
#include "absl/synchronization/notification.h"
#include "tsl/platform/env.h"
#include "tsl/platform/test.h"
namespace tsl {
namespace profiler {
namespace {
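// Drains `queue` into `result` until `stopped` is notified, then performs one
// final drain to pick up events pushed just before the notification.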
template <typename T, size_t block_size_in_bytes>
void RetrieveEvents(LockFreeQueue<T, block_size_in_bytes>& queue,
absl::Notification& stopped, std::vector<T>& result) {
result.clear();
do {
while (auto event = queue.Pop()) {
result.emplace_back(*event);
}
} while (!stopped.HasBeenNotified());
while (auto event = queue.Pop()) {
result.emplace_back(*event);
}
}
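// Producer side of the two-stage tests: pushes event_count1 events, waits
// until the consumer has grabbed them, then pushes event_count2 more. The
// pushed values are recorded in expected1/expected2 for later comparison.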
template <typename T, size_t block_size_in_bytes, typename Generator>
void FillEvents2Stage(LockFreeQueue<T, block_size_in_bytes>& queue,
Generator gen, size_t event_count1, size_t event_count2,
absl::Notification& stage1_filled,
absl::Notification& stage1_grabbed,
absl::Notification& stage2_filled,
std::vector<T>& expected1, std::vector<T>& expected2) {
expected1.clear();
expected2.clear();
for (size_t i = 0; i < event_count1; ++i) {
T event = gen(i);
expected1.emplace_back(event);
queue.Push(std::move(event));
}
stage1_filled.Notify();
stage1_grabbed.WaitForNotification();
for (size_t i = 0; i < event_count2; ++i) {
T event = gen(i + event_count1);
expected2.emplace_back(event);
queue.Push(std::move(event));
}
stage2_filled.Notify();
}
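// Runs one producer against two consumers in sequence and checks that each
// consumer receives exactly the events of its stage, in order.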
template <typename T, size_t block_size_in_bytes, typename Generator>
void TestProducerConsumer(size_t event_count1, size_t event_count2,
Generator gen) {
LockFreeQueue<T, block_size_in_bytes> queue;
std::vector<T> expected1;
std::vector<T> expected2;
absl::Notification stage1_filled;
absl::Notification stage1_grabbed;
absl::Notification stage2_filled;
auto producer = absl::WrapUnique(Env::Default()->StartThread(
ThreadOptions(), "producer", [&, gen, event_count1, event_count2]() {
FillEvents2Stage(queue, gen, event_count1, event_count2, stage1_filled,
stage1_grabbed, stage2_filled, expected1, expected2);
}));
std::vector<T> result1;
auto consumer1 = absl::WrapUnique(Env::Default()->StartThread(
ThreadOptions(), "consumer1", [&queue, &result1, &stage1_filled]() {
        RetrieveEvents(queue, stage1_filled, result1);
}));
consumer1.reset();
EXPECT_THAT(result1, ::testing::ContainerEq(expected1));
stage1_grabbed.Notify();
std::vector<T> result2;
auto consumer2 = absl::WrapUnique(Env::Default()->StartThread(
ThreadOptions(), "consumer2", [&queue, &result2, &stage2_filled]() {
        RetrieveEvents(queue, stage2_filled, result2);
}));
consumer2.reset();
EXPECT_THAT(result2, ::testing::ContainerEq(expected2));
producer.reset();
}
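// Exercises PopAll(): stage 1 is verified by popping from the returned
// BlockedQueue, stage 2 by iterating over it.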
template <typename T, size_t block_size_in_bytes, typename Generator>
void TestPopAll(size_t event_count1, size_t event_count2, Generator gen) {
using TLockFreeQueue = LockFreeQueue<T, block_size_in_bytes>;
using TBlockedQueue = BlockedQueue<T, block_size_in_bytes>;
TLockFreeQueue queue;
std::vector<T> expected1;
std::vector<T> expected2;
absl::Notification stage1_filled;
absl::Notification stage1_grabbed;
absl::Notification stage2_filled;
auto producer = absl::WrapUnique(Env::Default()->StartThread(
ThreadOptions(), "producer", [&, gen, event_count1, event_count2]() {
FillEvents2Stage(queue, gen, event_count1, event_count2, stage1_filled,
stage1_grabbed, stage2_filled, expected1, expected2);
}));
stage1_filled.WaitForNotification();
TBlockedQueue dumped_queue1 = queue.PopAll();
std::vector<T> result1;
while (auto event = dumped_queue1.Pop()) {
result1.emplace_back(*event);
}
EXPECT_THAT(result1, ::testing::ContainerEq(expected1));
stage1_grabbed.Notify();
producer.reset();
TBlockedQueue dumped_queue2 = queue.PopAll();
std::vector<T> result2;
for (auto it = dumped_queue2.begin(), ite = dumped_queue2.end(); it != ite;
++it) {
result2.emplace_back(*it);
}
EXPECT_THAT(result2, ::testing::ContainerEq(expected2));
}
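// Verifies FIFO iteration over a BlockedQueue, including after the first
// batch of elements has already been popped.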
template <typename T, size_t block_size_in_bytes, typename Generator>
void TestIterator(size_t event_count1, size_t event_count2, Generator gen) {
BlockedQueue<T, block_size_in_bytes> queue;
std::vector<T> expected1;
for (size_t i = 0; i < event_count1; i++) {
queue.Push(gen(i));
expected1.emplace_back(gen(i));
}
std::vector<T> result1;
while (auto event = queue.Pop()) {
result1.emplace_back(*event);
}
EXPECT_THAT(result1, ::testing::ContainerEq(expected1));
std::vector<T> expected2;
for (size_t i = 0; i < event_count2; i++) {
queue.Push(gen(i + event_count1));
expected2.emplace_back(gen(i + event_count1));
}
std::vector<T> result2;
for (auto it = queue.begin(), ite = queue.end(); it != ite; ++it) {
result2.emplace_back(*it);
}
EXPECT_THAT(result2, ::testing::ContainerEq(expected2));
}
TEST(LockFreeQueueTest, Int64Event_ProducerConsumer) {
auto gen = [](size_t i) -> int64_t { return static_cast<int64_t>(i); };
using T = decltype(gen(0));
constexpr size_t kBS = 512;
using G = decltype(gen);
constexpr size_t kNumSlots =
LockFreeQueue<T, kBS>::kNumSlotsPerBlockForTesting;
EXPECT_GE(kNumSlots, 10);
TestProducerConsumer<T, kBS, G>(kNumSlots - 3, 2, gen);
TestProducerConsumer<T, kBS, G>(kNumSlots - 3, 3, gen);
TestProducerConsumer<T, kBS, G>(kNumSlots - 3, 5, gen);
TestProducerConsumer<T, kBS, G>(kNumSlots - 3, kNumSlots + 3, gen);
TestProducerConsumer<T, kBS, G>(kNumSlots - 3, kNumSlots * 2 + 5, gen);
TestProducerConsumer<T, kBS, G>(kNumSlots, 2, gen);
TestProducerConsumer<T, kBS, G>(kNumSlots, kNumSlots - 1, gen);
TestProducerConsumer<T, kBS, G>(kNumSlots, kNumSlots, gen);
TestProducerConsumer<T, kBS, G>(kNumSlots, kNumSlots + 1, gen);
TestProducerConsumer<T, kBS, G>(kNumSlots * 2, kNumSlots + 1, gen);
TestProducerConsumer<T, kBS, G>(kNumSlots * 3, kNumSlots - 1, gen);
TestProducerConsumer<T, kBS, G>(kNumSlots + 3, 2, gen);
TestProducerConsumer<T, kBS, G>(kNumSlots + 3, kNumSlots - 4, gen);
TestProducerConsumer<T, kBS, G>(kNumSlots + 3, kNumSlots - 3, gen);
TestProducerConsumer<T, kBS, G>(kNumSlots + 3, kNumSlots - 2, gen);
TestProducerConsumer<T, kBS, G>(kNumSlots * 2 + 3, kNumSlots - 5, gen);
TestProducerConsumer<T, kBS, G>(kNumSlots * 2 + 3, kNumSlots, gen);
TestProducerConsumer<T, kBS, G>(kNumSlots * 2 + 3, kNumSlots + 3, gen);
}
TEST(LockFreeQueueTest, StringEvent_ProducerConsumer) {
auto gen = [](size_t i) { return std::to_string(i); };
using T = decltype(gen(0));
constexpr size_t kBS = 512;
using G = decltype(gen);
constexpr size_t kNumSlots =
LockFreeQueue<T, kBS>::kNumSlotsPerBlockForTesting;
EXPECT_GE(kNumSlots, 10);
TestProducerConsumer<T, kBS, G>(kNumSlots - 3, 2, gen);
TestProducerConsumer<T, kBS, G>(kNumSlots - 3, 3, gen);
TestProducerConsumer<T, kBS, G>(kNumSlots - 3, 5, gen);
TestProducerConsumer<T, kBS, G>(kNumSlots - 3, kNumSlots + 3, gen);
TestProducerConsumer<T, kBS, G>(kNumSlots - 3, kNumSlots * 2 + 5, gen);
TestProducerConsumer<T, kBS, G>(kNumSlots, 2, gen);
TestProducerConsumer<T, kBS, G>(kNumSlots, kNumSlots - 1, gen);
TestProducerConsumer<T, kBS, G>(kNumSlots, kNumSlots, gen);
TestProducerConsumer<T, kBS, G>(kNumSlots, kNumSlots + 1, gen);
TestProducerConsumer<T, kBS, G>(kNumSlots * 2, kNumSlots + 1, gen);
TestProducerConsumer<T, kBS, G>(kNumSlots * 3, kNumSlots - 1, gen);
TestProducerConsumer<T, kBS, G>(kNumSlots + 3, 2, gen);
TestProducerConsumer<T, kBS, G>(kNumSlots + 3, kNumSlots - 4, gen);
TestProducerConsumer<T, kBS, G>(kNumSlots + 3, kNumSlots - 3, gen);
TestProducerConsumer<T, kBS, G>(kNumSlots + 3, kNumSlots - 2, gen);
TestProducerConsumer<T, kBS, G>(kNumSlots * 2 + 3, kNumSlots - 5, gen);
TestProducerConsumer<T, kBS, G>(kNumSlots * 2 + 3, kNumSlots, gen);
TestProducerConsumer<T, kBS, G>(kNumSlots * 2 + 3, kNumSlots + 3, gen);
}
TEST(LockFreeQueueTest, Int64Event_PopAll) {
auto gen = [](size_t i) -> int64_t { return static_cast<int64_t>(i); };
using T = decltype(gen(0));
constexpr size_t kBS = 512;
using G = decltype(gen);
constexpr size_t kNumSlots =
LockFreeQueue<T, kBS>::kNumSlotsPerBlockForTesting;
EXPECT_GE(kNumSlots, 10);
TestPopAll<T, kBS, G>(kNumSlots - 3, 2, gen);
TestPopAll<T, kBS, G>(kNumSlots - 3, 3, gen);
TestPopAll<T, kBS, G>(kNumSlots - 3, 5, gen);
TestPopAll<T, kBS, G>(kNumSlots - 3, kNumSlots + 3, gen);
TestPopAll<T, kBS, G>(kNumSlots - 3, kNumSlots * 2 + 5, gen);
TestPopAll<T, kBS, G>(kNumSlots, 2, gen);
TestPopAll<T, kBS, G>(kNumSlots, kNumSlots - 1, gen);
TestPopAll<T, kBS, G>(kNumSlots, kNumSlots, gen);
TestPopAll<T, kBS, G>(kNumSlots, kNumSlots + 1, gen);
TestPopAll<T, kBS, G>(kNumSlots * 2, kNumSlots + 1, gen);
TestPopAll<T, kBS, G>(kNumSlots * 3, kNumSlots - 1, gen);
TestPopAll<T, kBS, G>(kNumSlots + 3, 2, gen);
TestPopAll<T, kBS, G>(kNumSlots + 3, kNumSlots - 4, gen);
TestPopAll<T, kBS, G>(kNumSlots + 3, kNumSlots - 3, gen);
TestPopAll<T, kBS, G>(kNumSlots + 3, kNumSlots - 2, gen);
TestPopAll<T, kBS, G>(kNumSlots * 2 + 3, kNumSlots - 5, gen);
TestPopAll<T, kBS, G>(kNumSlots * 2 + 3, kNumSlots, gen);
TestPopAll<T, kBS, G>(kNumSlots * 2 + 3, kNumSlots + 3, gen);
}
TEST(LockFreeQueueTest, StringEvent_PopAll) {
auto gen = [](size_t i) -> std::string { return std::to_string(i); };
using T = decltype(gen(0));
constexpr size_t kBS = 512;
using G = decltype(gen);
constexpr size_t kNumSlots =
LockFreeQueue<T, kBS>::kNumSlotsPerBlockForTesting;
EXPECT_GE(kNumSlots, 10);
TestPopAll<T, kBS, G>(kNumSlots - 3, 2, gen);
TestPopAll<T, kBS, G>(kNumSlots - 3, 3, gen);
TestPopAll<T, kBS, G>(kNumSlots - 3, 5, gen);
TestPopAll<T, kBS, G>(kNumSlots - 3, kNumSlots + 3, gen);
TestPopAll<T, kBS, G>(kNumSlots - 3, kNumSlots * 2 + 5, gen);
TestPopAll<T, kBS, G>(kNumSlots, 2, gen);
TestPopAll<T, kBS, G>(kNumSlots, kNumSlots - 1, gen);
TestPopAll<T, kBS, G>(kNumSlots, kNumSlots, gen);
TestPopAll<T, kBS, G>(kNumSlots, kNumSlots + 1, gen);
TestPopAll<T, kBS, G>(kNumSlots * 2, kNumSlots + 1, gen);
TestPopAll<T, kBS, G>(kNumSlots * 3, kNumSlots - 1, gen);
TestPopAll<T, kBS, G>(kNumSlots + 3, 2, gen);
TestPopAll<T, kBS, G>(kNumSlots + 3, kNumSlots - 4, gen);
TestPopAll<T, kBS, G>(kNumSlots + 3, kNumSlots - 3, gen);
TestPopAll<T, kBS, G>(kNumSlots + 3, kNumSlots - 2, gen);
TestPopAll<T, kBS, G>(kNumSlots * 2 + 3, kNumSlots - 5, gen);
TestPopAll<T, kBS, G>(kNumSlots * 2 + 3, kNumSlots, gen);
TestPopAll<T, kBS, G>(kNumSlots * 2 + 3, kNumSlots + 3, gen);
}
TEST(LockFreeQueueTest, Int64Event_Iterator) {
auto gen = [](size_t i) -> int64_t { return static_cast<int64_t>(i); };
using T = decltype(gen(0));
constexpr size_t kBS = 512;
using G = decltype(gen);
constexpr size_t kNumSlots =
LockFreeQueue<T, kBS>::kNumSlotsPerBlockForTesting;
EXPECT_GE(kNumSlots, 10);
TestIterator<T, kBS, G>(kNumSlots - 3, 2, gen);
TestIterator<T, kBS, G>(kNumSlots - 3, 3, gen);
TestIterator<T, kBS, G>(kNumSlots - 3, 5, gen);
TestIterator<T, kBS, G>(kNumSlots - 3, kNumSlots + 3, gen);
TestIterator<T, kBS, G>(kNumSlots - 3, kNumSlots * 2 + 5, gen);
TestIterator<T, kBS, G>(kNumSlots, 2, gen);
TestIterator<T, kBS, G>(kNumSlots, kNumSlots - 1, gen);
TestIterator<T, kBS, G>(kNumSlots, kNumSlots, gen);
TestIterator<T, kBS, G>(kNumSlots, kNumSlots + 1, gen);
TestIterator<T, kBS, G>(kNumSlots * 2, kNumSlots + 1, gen);
TestIterator<T, kBS, G>(kNumSlots * 3, kNumSlots - 1, gen);
TestIterator<T, kBS, G>(kNumSlots + 3, 2, gen);
TestIterator<T, kBS, G>(kNumSlots + 3, kNumSlots - 4, gen);
TestIterator<T, kBS, G>(kNumSlots + 3, kNumSlots - 3, gen);
TestIterator<T, kBS, G>(kNumSlots + 3, kNumSlots - 2, gen);
TestIterator<T, kBS, G>(kNumSlots * 2 + 3, kNumSlots - 5, gen);
TestIterator<T, kBS, G>(kNumSlots * 2 + 3, kNumSlots, gen);
TestIterator<T, kBS, G>(kNumSlots * 2 + 3, kNumSlots + 3, gen);
}
TEST(LockFreeQueueTest, StringEvent_Iterator) {
auto gen = [](size_t i) { return std::to_string(i); };
using T = decltype(gen(0));
constexpr size_t kBS = 512;
using G = decltype(gen);
constexpr size_t kNumSlots =
LockFreeQueue<T, kBS>::kNumSlotsPerBlockForTesting;
EXPECT_GE(kNumSlots, 10);
TestIterator<T, kBS, G>(kNumSlots - 3, 2, gen);
TestIterator<T, kBS, G>(kNumSlots - 3, 3, gen);
TestIterator<T, kBS, G>(kNumSlots - 3, 5, gen);
TestIterator<T, kBS, G>(kNumSlots - 3, kNumSlots + 3, gen);
TestIterator<T, kBS, G>(kNumSlots - 3, kNumSlots * 2 + 5, gen);
TestIterator<T, kBS, G>(kNumSlots, 2, gen);
TestIterator<T, kBS, G>(kNumSlots, kNumSlots - 1, gen);
TestIterator<T, kBS, G>(kNumSlots, kNumSlots, gen);
TestIterator<T, kBS, G>(kNumSlots, kNumSlots + 1, gen);
TestIterator<T, kBS, G>(kNumSlots * 2, kNumSlots + 1, gen);
TestIterator<T, kBS, G>(kNumSlots * 3, kNumSlots - 1, gen);
TestIterator<T, kBS, G>(kNumSlots + 3, 2, gen);
TestIterator<T, kBS, G>(kNumSlots + 3, kNumSlots - 4, gen);
TestIterator<T, kBS, G>(kNumSlots + 3, kNumSlots - 3, gen);
TestIterator<T, kBS, G>(kNumSlots + 3, kNumSlots - 2, gen);
TestIterator<T, kBS, G>(kNumSlots * 2 + 3, kNumSlots - 5, gen);
TestIterator<T, kBS, G>(kNumSlots * 2 + 3, kNumSlots, gen);
TestIterator<T, kBS, G>(kNumSlots * 2 + 3, kNumSlots + 3, gen);
}
TEST(LockFreeQueueTest, Iterator_Basics) {
BlockedQueue<int32_t, 512> queue;
auto it = queue.begin();
EXPECT_EQ(it, queue.end());
EXPECT_EQ(++it, queue.end());
queue.Push(1);
it = queue.begin();
EXPECT_NE(it, queue.end());
++it;
EXPECT_EQ(it, queue.end());
it = queue.begin();
auto it2 = it++;
EXPECT_NE(it2, queue.end());
EXPECT_EQ(it, queue.end());
it2 = it++;
EXPECT_EQ(it2, queue.end());
EXPECT_EQ(it, queue.end());
queue.Push(2);
queue.Pop();
it = queue.begin();
EXPECT_NE(it, queue.end());
++it;
EXPECT_EQ(it, queue.end());
it = queue.begin();
it2 = it++;
EXPECT_NE(it2, queue.end());
EXPECT_EQ(it, queue.end());
it2 = it++;
EXPECT_EQ(it2, queue.end());
EXPECT_EQ(it, queue.end());
BlockedQueue<std::string, 512> str_queue;
str_queue.Push("abcd");
auto str_it = str_queue.begin();
EXPECT_EQ(*str_it, std::string("abcd"));
EXPECT_EQ(str_it->size(), 4);
str_queue.Push("123456");
str_it++;
EXPECT_EQ(*str_it, std::string("123456"));
EXPECT_EQ(str_it->size(), 6);
str_it++;
EXPECT_EQ(str_it, str_queue.end());
const auto const_str_it = str_queue.begin();
EXPECT_EQ(*const_str_it, std::string("abcd"));
EXPECT_EQ(const_str_it->size(), 4);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/profiler/utils/lock_free_queue.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/profiler/utils/lock_free_queue_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
26622fdc-cf97-44a2-8a8a-26e4c1b2bad4 | cpp | tensorflow/tensorflow | numbers | third_party/xla/third_party/tsl/tsl/platform/numbers.cc | third_party/xla/third_party/tsl/tsl/platform/numbers_test.cc | #include "tsl/platform/numbers.h"
#include <ctype.h>
#include <float.h>
#include <stdio.h>
#include <stdlib.h>
#include <algorithm>
#include <cinttypes>
#include <cmath>
#include <cstdint>
#include <limits>
#include <locale>
#include <sstream>
#include <unordered_map>
#include "double-conversion/double-conversion.h"
#include "tsl/platform/str_util.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/macros.h"
#include "tsl/platform/stringprintf.h"
#include "tsl/platform/types.h"
namespace tsl {
namespace {
template <typename T>
const std::unordered_map<std::string, T>* GetSpecialNumsSingleton() {
static const std::unordered_map<std::string, T>* special_nums =
CHECK_NOTNULL((new const std::unordered_map<std::string, T>{
{"inf", std::numeric_limits<T>::infinity()},
{"+inf", std::numeric_limits<T>::infinity()},
{"-inf", -std::numeric_limits<T>::infinity()},
{"infinity", std::numeric_limits<T>::infinity()},
{"+infinity", std::numeric_limits<T>::infinity()},
{"-infinity", -std::numeric_limits<T>::infinity()},
{"nan", std::numeric_limits<T>::quiet_NaN()},
{"+nan", std::numeric_limits<T>::quiet_NaN()},
{"-nan", -std::numeric_limits<T>::quiet_NaN()},
}));
return special_nums;
}
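// Locale-independent strtod/strtof replacement: recognizes the special
// spellings inf/infinity/nan (any case, optional sign) and 0x-prefixed hex
// integers, then falls back to stream extraction under the classic "C"
// locale. Out-of-range values are clamped to +/-infinity, and *endptr is set
// to the first unconsumed character.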
template <typename T>
T locale_independent_strtonum(const char* str, const char** endptr) {
auto special_nums = GetSpecialNumsSingleton<T>();
std::stringstream s(str);
std::string special_num_str;
s >> special_num_str;
for (size_t i = 0; i < special_num_str.length(); ++i) {
special_num_str[i] =
std::tolower(special_num_str[i], std::locale::classic());
}
auto entry = special_nums->find(special_num_str);
if (entry != special_nums->end()) {
*endptr = str + (s.eof() ? static_cast<std::iostream::pos_type>(strlen(str))
: s.tellg());
return entry->second;
} else {
if (special_num_str.compare(0, 2, "0x") == 0 ||
special_num_str.compare(0, 3, "-0x") == 0) {
return strtol(str, const_cast<char**>(endptr), 16);
}
}
s.str(str);
s.clear();
s.imbue(std::locale::classic());
T result;
s >> result;
if (s.fail()) {
if (result == std::numeric_limits<T>::max() ||
result == std::numeric_limits<T>::infinity()) {
result = std::numeric_limits<T>::infinity();
s.clear(s.rdstate() & ~std::ios::failbit);
} else if (result == -std::numeric_limits<T>::max() ||
result == -std::numeric_limits<T>::infinity()) {
result = -std::numeric_limits<T>::infinity();
s.clear(s.rdstate() & ~std::ios::failbit);
}
}
if (endptr) {
*endptr =
str +
(s.fail() ? static_cast<std::iostream::pos_type>(0)
: (s.eof() ? static_cast<std::iostream::pos_type>(strlen(str))
: s.tellg()));
}
return result;
}
static inline const double_conversion::StringToDoubleConverter&
StringToFloatConverter() {
static const double_conversion::StringToDoubleConverter converter(
double_conversion::StringToDoubleConverter::ALLOW_LEADING_SPACES |
double_conversion::StringToDoubleConverter::ALLOW_HEX |
double_conversion::StringToDoubleConverter::ALLOW_TRAILING_SPACES |
double_conversion::StringToDoubleConverter::ALLOW_CASE_INSENSIBILITY,
0., 0., "inf", "nan");
return converter;
}
}
namespace strings {
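// Fast integer-to-decimal formatting: each writer emits the digits (plus a
// leading '-' for negative signed values) into `buffer`, null-terminates it,
// and returns the length written, excluding the terminator.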
size_t FastInt32ToBufferLeft(int32_t i, char* buffer) {
uint32_t u = i;
size_t length = 0;
if (i < 0) {
*buffer++ = '-';
++length;
u = 0 - u;
}
length += FastUInt32ToBufferLeft(u, buffer);
return length;
}
size_t FastUInt32ToBufferLeft(uint32_t i, char* buffer) {
char* start = buffer;
do {
*buffer++ = ((i % 10) + '0');
i /= 10;
} while (i > 0);
*buffer = 0;
std::reverse(start, buffer);
return buffer - start;
}
size_t FastInt64ToBufferLeft(int64_t i, char* buffer) {
uint64_t u = i;
size_t length = 0;
if (i < 0) {
*buffer++ = '-';
++length;
u = 0 - u;
}
length += FastUInt64ToBufferLeft(u, buffer);
return length;
}
size_t FastUInt64ToBufferLeft(uint64_t i, char* buffer) {
char* start = buffer;
do {
*buffer++ = ((i % 10) + '0');
i /= 10;
} while (i > 0);
*buffer = 0;
std::reverse(start, buffer);
return buffer - start;
}
static const double kDoublePrecisionCheckMax = DBL_MAX / 1.000000000000001;
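// Shortest round-trip formatting: try DBL_DIG significant digits first, and
// fall back to DBL_DIG + 2 digits when the short form does not parse back to
// exactly the same value.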
size_t DoubleToBuffer(double value, char* buffer) {
static_assert(DBL_DIG < 20, "DBL_DIG is too big");
if (std::isnan(value)) {
int snprintf_result = snprintf(buffer, kFastToBufferSize, "%snan",
std::signbit(value) ? "-" : "");
DCHECK(snprintf_result > 0 && snprintf_result < kFastToBufferSize);
return snprintf_result;
}
if (std::abs(value) <= kDoublePrecisionCheckMax) {
int snprintf_result =
snprintf(buffer, kFastToBufferSize, "%.*g", DBL_DIG, value);
DCHECK(snprintf_result > 0 && snprintf_result < kFastToBufferSize);
if (locale_independent_strtonum<double>(buffer, nullptr) == value) {
return snprintf_result;
}
}
int snprintf_result =
snprintf(buffer, kFastToBufferSize, "%.*g", DBL_DIG + 2, value);
DCHECK(snprintf_result > 0 && snprintf_result < kFastToBufferSize);
return snprintf_result;
}
namespace {
char SafeFirstChar(absl::string_view str) {
if (str.empty()) return '\0';
return str[0];
}
void SkipSpaces(absl::string_view* str) {
while (isspace(SafeFirstChar(*str))) str->remove_prefix(1);
}
}
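// Overflow-checked decimal parsers. Leading and trailing whitespace is
// accepted; any other stray character, an empty body, or overflow makes the
// conversion fail and leaves *value untouched.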
bool safe_strto64(absl::string_view str, int64_t* value) {
SkipSpaces(&str);
int64_t vlimit = kint64max;
int sign = 1;
if (absl::ConsumePrefix(&str, "-")) {
sign = -1;
vlimit = kint64min;
}
if (!isdigit(SafeFirstChar(str))) return false;
int64_t result = 0;
if (sign == 1) {
do {
int digit = SafeFirstChar(str) - '0';
if ((vlimit - digit) / 10 < result) {
return false;
}
result = result * 10 + digit;
str.remove_prefix(1);
} while (isdigit(SafeFirstChar(str)));
} else {
do {
int digit = SafeFirstChar(str) - '0';
if ((vlimit + digit) / 10 > result) {
return false;
}
result = result * 10 - digit;
str.remove_prefix(1);
} while (isdigit(SafeFirstChar(str)));
}
SkipSpaces(&str);
if (!str.empty()) return false;
*value = result;
return true;
}
bool safe_strtou64(absl::string_view str, uint64_t* value) {
SkipSpaces(&str);
if (!isdigit(SafeFirstChar(str))) return false;
uint64_t result = 0;
do {
int digit = SafeFirstChar(str) - '0';
if ((kuint64max - digit) / 10 < result) {
return false;
}
result = result * 10 + digit;
str.remove_prefix(1);
} while (isdigit(SafeFirstChar(str)));
SkipSpaces(&str);
if (!str.empty()) return false;
*value = result;
return true;
}
bool safe_strto32(absl::string_view str, int32_t* value) {
SkipSpaces(&str);
int64_t vmax = kint32max;
int sign = 1;
if (absl::ConsumePrefix(&str, "-")) {
sign = -1;
++vmax;
}
if (!isdigit(SafeFirstChar(str))) return false;
int64_t result = 0;
do {
result = result * 10 + SafeFirstChar(str) - '0';
if (result > vmax) {
return false;
}
str.remove_prefix(1);
} while (isdigit(SafeFirstChar(str)));
SkipSpaces(&str);
if (!str.empty()) return false;
*value = static_cast<int32_t>(result * sign);
return true;
}
bool safe_strtou32(absl::string_view str, uint32_t* value) {
SkipSpaces(&str);
if (!isdigit(SafeFirstChar(str))) return false;
int64_t result = 0;
do {
result = result * 10 + SafeFirstChar(str) - '0';
if (result > kuint32max) {
return false;
}
str.remove_prefix(1);
} while (isdigit(SafeFirstChar(str)));
SkipSpaces(&str);
if (!str.empty()) return false;
*value = static_cast<uint32_t>(result);
return true;
}
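// Floating-point parsers built on double-conversion: they accept surrounding
// whitespace, hex literals, and case-insensitive inf/nan spellings, and
// reject inputs of kFastToBufferSize characters or more.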
bool safe_strtof(absl::string_view str, float* value) {
int processed_characters_count = -1;
auto len = str.size();
if (len >= kFastToBufferSize) return false;
if (len > std::numeric_limits<int>::max()) return false;
*value = StringToFloatConverter().StringToFloat(
str.data(), static_cast<int>(len), &processed_characters_count);
return processed_characters_count > 0;
}
bool safe_strtod(absl::string_view str, double* value) {
int processed_characters_count = -1;
auto len = str.size();
if (len >= kFastToBufferSize) return false;
if (len > std::numeric_limits<int>::max()) return false;
*value = StringToFloatConverter().StringToDouble(
str.data(), static_cast<int>(len), &processed_characters_count);
return processed_characters_count > 0;
}
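// Float analog of DoubleToBuffer: FLT_DIG digits first, FLT_DIG + 3 on
// round-trip failure.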
size_t FloatToBuffer(float value, char* buffer) {
static_assert(FLT_DIG < 10, "FLT_DIG is too big");
if (std::isnan(value)) {
int snprintf_result = snprintf(buffer, kFastToBufferSize, "%snan",
std::signbit(value) ? "-" : "");
DCHECK(snprintf_result > 0 && snprintf_result < kFastToBufferSize);
return snprintf_result;
}
int snprintf_result =
snprintf(buffer, kFastToBufferSize, "%.*g", FLT_DIG, value);
DCHECK(snprintf_result > 0 && snprintf_result < kFastToBufferSize);
float parsed_value;
if (!safe_strtof(buffer, &parsed_value) || parsed_value != value) {
snprintf_result =
snprintf(buffer, kFastToBufferSize, "%.*g", FLT_DIG + 3, value);
DCHECK(snprintf_result > 0 && snprintf_result < kFastToBufferSize);
}
return snprintf_result;
}
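// Fingerprints are rendered as fixed-width, zero-padded 16-digit lowercase
// hex; StringToFp rejects trailing garbage via the extra %c conversion.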
std::string FpToString(Fprint fp) {
char buf[17];
snprintf(buf, sizeof(buf), "%016llx", static_cast<long long>(fp));
return std::string(buf);
}
bool StringToFp(const std::string& s, Fprint* fp) {
char junk;
uint64_t result;
if (sscanf(s.c_str(), "%" SCNx64 "%c", &result, &junk) == 1) {
*fp = result;
return true;
} else {
return false;
}
}
absl::string_view Uint64ToHexString(uint64_t v, char* buf) {
static const char* hexdigits = "0123456789abcdef";
const int num_byte = 16;
buf[num_byte] = '\0';
for (int i = num_byte - 1; i >= 0; i--) {
buf[i] = hexdigits[v & 0xf];
v >>= 4;
}
return absl::string_view(buf, num_byte);
}
bool HexStringToUint64(const absl::string_view& s, uint64_t* result) {
uint64_t v = 0;
if (s.empty()) {
return false;
}
for (size_t i = 0; i < s.size(); i++) {
char c = s[i];
if (c >= '0' && c <= '9') {
v = (v << 4) + (c - '0');
} else if (c >= 'a' && c <= 'f') {
v = (v << 4) + 10 + (c - 'a');
} else if (c >= 'A' && c <= 'F') {
v = (v << 4) + 10 + (c - 'A');
} else {
return false;
}
}
*result = v;
return true;
}
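// Renders a count with metric-style suffixes (k, M, B, T), switching to
// scientific notation at 1e15 and above.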
std::string HumanReadableNum(int64_t value) {
std::string s;
if (value < 0) {
s += "-";
value = -value;
}
if (value < 1000) {
Appendf(&s, "%lld", static_cast<long long>(value));
} else if (value >= static_cast<int64_t>(1e15)) {
Appendf(&s, "%0.3G", static_cast<double>(value));
} else {
static const char units[] = "kMBT";
const char* unit = units;
while (value >= static_cast<int64_t>(1000000)) {
value /= static_cast<int64_t>(1000);
++unit;
CHECK(unit < units + TF_ARRAYSIZE(units));
}
Appendf(&s, "%.2f%c", value / 1000.0, *unit);
}
return s;
}
std::string HumanReadableNumBytes(int64_t num_bytes) {
if (num_bytes == kint64min) {
return "-8E";
}
const char* neg_str = (num_bytes < 0) ? "-" : "";
if (num_bytes < 0) {
num_bytes = -num_bytes;
}
if (num_bytes < 1024) {
char buf[8];
snprintf(buf, sizeof(buf), "%s%lldB", neg_str,
static_cast<long long>(num_bytes));
return std::string(buf);
}
static const char units[] = "KMGTPE";
const char* unit = units;
while (num_bytes >= static_cast<int64_t>(1024) * 1024) {
num_bytes /= 1024;
++unit;
CHECK(unit < units + TF_ARRAYSIZE(units));
}
char buf[16];
snprintf(buf, sizeof(buf), ((*unit == 'K') ? "%s%.1f%ciB" : "%s%.2f%ciB"),
neg_str, num_bytes / 1024.0, *unit);
return std::string(buf);
}
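// Picks the largest convenient unit from microseconds up to years, using a
// mean month of 30.436875 days and a mean Gregorian year of 365.2425 days.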
std::string HumanReadableElapsedTime(double seconds) {
std::string human_readable;
if (seconds < 0) {
human_readable = "-";
seconds = -seconds;
}
const double microseconds = seconds * 1.0e6;
if (microseconds < 999.5) {
strings::Appendf(&human_readable, "%0.3g us", microseconds);
return human_readable;
}
double milliseconds = seconds * 1e3;
if (milliseconds >= .995 && milliseconds < 1) {
milliseconds = 1.0;
}
if (milliseconds < 999.5) {
strings::Appendf(&human_readable, "%0.3g ms", milliseconds);
return human_readable;
}
if (seconds < 60.0) {
strings::Appendf(&human_readable, "%0.3g s", seconds);
return human_readable;
}
seconds /= 60.0;
if (seconds < 60.0) {
strings::Appendf(&human_readable, "%0.3g min", seconds);
return human_readable;
}
seconds /= 60.0;
if (seconds < 24.0) {
strings::Appendf(&human_readable, "%0.3g h", seconds);
return human_readable;
}
seconds /= 24.0;
if (seconds < 30.0) {
strings::Appendf(&human_readable, "%0.3g days", seconds);
return human_readable;
}
if (seconds < 365.2425) {
strings::Appendf(&human_readable, "%0.3g months", seconds / 30.436875);
return human_readable;
}
seconds /= 365.2425;
strings::Appendf(&human_readable, "%0.3g years", seconds);
return human_readable;
}
}
} | #include "tsl/platform/numbers.h"
#include <cfloat>
#include <cmath>
#include <limits>
#include <string>
#include "tsl/platform/test.h"
namespace tsl {
namespace strings {
TEST(FpToString, Ints) {
for (int s = 0; s < 64; s++) {
for (int delta = -1; delta <= 1; delta++) {
uint64 fp = (1ull << s) + delta;
      string str = FpToString(fp);
      uint64 fp2;
      EXPECT_TRUE(StringToFp(str, &fp2));
EXPECT_EQ(fp, fp2);
}
}
Fprint dummy;
EXPECT_FALSE(StringToFp("", &dummy));
EXPECT_FALSE(StringToFp("xyz", &dummy));
EXPECT_FALSE(StringToFp("0000000000000000xyz", &dummy));
}
TEST(Uint64ToHexString, Ints) {
for (int s = 0; s < 64; s++) {
for (int delta = -1; delta <= 1; delta++) {
uint64 fp = (1ull << s) + delta;
char buf[kFastToBufferSize];
      absl::string_view sv = Uint64ToHexString(fp, buf);
      uint64 fp2;
      EXPECT_TRUE(HexStringToUint64(sv, &fp2));
      EXPECT_EQ(fp, fp2) << sv;
}
}
uint64 dummy;
EXPECT_FALSE(HexStringToUint64("", &dummy));
EXPECT_FALSE(HexStringToUint64("xyz", &dummy));
EXPECT_FALSE(HexStringToUint64("0000000000000000xyz", &dummy));
}
TEST(HumanReadableNum, Basic) {
EXPECT_EQ(HumanReadableNum(823), "823");
EXPECT_EQ(HumanReadableNum(1024), "1.02k");
EXPECT_EQ(HumanReadableNum(4000), "4.00k");
EXPECT_EQ(HumanReadableNum(999499), "999.50k");
EXPECT_EQ(HumanReadableNum(1000000), "1.00M");
EXPECT_EQ(HumanReadableNum(1048575), "1.05M");
EXPECT_EQ(HumanReadableNum(1048576), "1.05M");
EXPECT_EQ(HumanReadableNum(23956812342), "23.96B");
EXPECT_EQ(HumanReadableNum(123456789012345678), "1.23E+17");
}
TEST(HumanReadableNumBytes, Bytes) {
EXPECT_EQ("0B", HumanReadableNumBytes(0));
EXPECT_EQ("4B", HumanReadableNumBytes(4));
EXPECT_EQ("1023B", HumanReadableNumBytes(1023));
EXPECT_EQ("1.0KiB", HumanReadableNumBytes(1024));
EXPECT_EQ("1.0KiB", HumanReadableNumBytes(1025));
EXPECT_EQ("1.5KiB", HumanReadableNumBytes(1500));
EXPECT_EQ("1.9KiB", HumanReadableNumBytes(1927));
EXPECT_EQ("2.0KiB", HumanReadableNumBytes(2048));
EXPECT_EQ("1.00MiB", HumanReadableNumBytes(1 << 20));
EXPECT_EQ("11.77MiB", HumanReadableNumBytes(12345678));
EXPECT_EQ("1.00GiB", HumanReadableNumBytes(1 << 30));
EXPECT_EQ("1.00TiB", HumanReadableNumBytes(1LL << 40));
EXPECT_EQ("1.00PiB", HumanReadableNumBytes(1LL << 50));
EXPECT_EQ("1.00EiB", HumanReadableNumBytes(1LL << 60));
EXPECT_EQ("-1B", HumanReadableNumBytes(-1));
EXPECT_EQ("-4B", HumanReadableNumBytes(-4));
EXPECT_EQ("-1000B", HumanReadableNumBytes(-1000));
EXPECT_EQ("-11.77MiB", HumanReadableNumBytes(-12345678));
EXPECT_EQ("-8E", HumanReadableNumBytes(kint64min));
}
TEST(HumanReadableElapsedTime, Basic) {
EXPECT_EQ(HumanReadableElapsedTime(-10), "-10 s");
EXPECT_EQ(HumanReadableElapsedTime(-0.001), "-1 ms");
EXPECT_EQ(HumanReadableElapsedTime(-60.0), "-1 min");
EXPECT_EQ(HumanReadableElapsedTime(0.00000001), "0.01 us");
EXPECT_EQ(HumanReadableElapsedTime(0.0000012), "1.2 us");
EXPECT_EQ(HumanReadableElapsedTime(0.0012), "1.2 ms");
EXPECT_EQ(HumanReadableElapsedTime(0.12), "120 ms");
EXPECT_EQ(HumanReadableElapsedTime(1.12), "1.12 s");
EXPECT_EQ(HumanReadableElapsedTime(90.0), "1.5 min");
EXPECT_EQ(HumanReadableElapsedTime(600.0), "10 min");
EXPECT_EQ(HumanReadableElapsedTime(9000.0), "2.5 h");
EXPECT_EQ(HumanReadableElapsedTime(87480.0), "1.01 days");
EXPECT_EQ(HumanReadableElapsedTime(7776000.0), "2.96 months");
EXPECT_EQ(HumanReadableElapsedTime(78840000.0), "2.5 years");
EXPECT_EQ(HumanReadableElapsedTime(382386614.40), "12.1 years");
EXPECT_EQ(HumanReadableElapsedTime(DBL_MAX), "5.7e+300 years");
}
TEST(safe_strto32, Int32s) {
int32 result;
EXPECT_EQ(true, safe_strto32("1", &result));
EXPECT_EQ(1, result);
EXPECT_EQ(true, safe_strto32("123", &result));
EXPECT_EQ(123, result);
EXPECT_EQ(true, safe_strto32(" -123 ", &result));
EXPECT_EQ(-123, result);
EXPECT_EQ(true, safe_strto32("2147483647", &result));
EXPECT_EQ(2147483647, result);
EXPECT_EQ(true, safe_strto32("-2147483648", &result));
EXPECT_EQ(-2147483648, result);
EXPECT_EQ(false, safe_strto32(" 132as ", &result));
EXPECT_EQ(false, safe_strto32(" 132.2 ", &result));
EXPECT_EQ(false, safe_strto32(" -", &result));
EXPECT_EQ(false, safe_strto32("", &result));
EXPECT_EQ(false, safe_strto32(" ", &result));
EXPECT_EQ(false, safe_strto32("123 a", &result));
EXPECT_EQ(false, safe_strto32("2147483648", &result));
EXPECT_EQ(false, safe_strto32("-2147483649", &result));
EXPECT_EQ(true, safe_strto32(absl::string_view("123", 1), &result));
EXPECT_EQ(1, result);
EXPECT_EQ(true, safe_strto32(absl::string_view(" -123", 4), &result));
EXPECT_EQ(-12, result);
EXPECT_EQ(false, safe_strto32(absl::string_view(nullptr, 0), &result));
}
TEST(safe_strtou32, UInt32s) {
uint32 result;
EXPECT_TRUE(safe_strtou32("0", &result));
EXPECT_EQ(0, result);
EXPECT_TRUE(safe_strtou32("1", &result));
EXPECT_EQ(1, result);
EXPECT_TRUE(safe_strtou32("123", &result));
EXPECT_EQ(123, result);
EXPECT_TRUE(safe_strtou32("4294967295", &result));
EXPECT_EQ(4294967295, result);
EXPECT_FALSE(safe_strtou32(" 132as ", &result));
EXPECT_FALSE(safe_strtou32(" 132.2 ", &result));
EXPECT_FALSE(safe_strtou32(" -", &result));
EXPECT_FALSE(safe_strtou32("", &result));
EXPECT_FALSE(safe_strtou32(" ", &result));
EXPECT_FALSE(safe_strtou32("123 a", &result));
EXPECT_FALSE(safe_strtou32("123 456", &result));
EXPECT_FALSE(safe_strtou32("4294967296", &result));
EXPECT_FALSE(safe_strtou32("-1", &result));
EXPECT_TRUE(safe_strtou32(absl::string_view("123", 1), &result));
EXPECT_EQ(1, result);
EXPECT_TRUE(safe_strtou32(absl::string_view(" 123", 3), &result));
EXPECT_EQ(12, result);
EXPECT_FALSE(safe_strtou32(absl::string_view(nullptr, 0), &result));
}
TEST(safe_strto64, Int64s) {
int64 result;
EXPECT_EQ(true, safe_strto64("1", &result));
EXPECT_EQ(1, result);
EXPECT_EQ(true, safe_strto64("123", &result));
EXPECT_EQ(123, result);
EXPECT_EQ(true, safe_strto64(" -123 ", &result));
EXPECT_EQ(-123, result);
EXPECT_EQ(true, safe_strto64("9223372036854775807", &result));
EXPECT_EQ(9223372036854775807, result);
EXPECT_EQ(true, safe_strto64("-9223372036854775808", &result));
EXPECT_EQ(kint64min, result);
EXPECT_EQ(false, safe_strto64(" 132as ", &result));
EXPECT_EQ(false, safe_strto64(" 132.2 ", &result));
EXPECT_EQ(false, safe_strto64(" -", &result));
EXPECT_EQ(false, safe_strto64("", &result));
EXPECT_EQ(false, safe_strto64(" ", &result));
EXPECT_EQ(false, safe_strto64("123 a", &result));
EXPECT_EQ(false, safe_strto64("9223372036854775808", &result));
EXPECT_EQ(false, safe_strto64("-9223372036854775809", &result));
EXPECT_EQ(true, safe_strto64(absl::string_view("123", 1), &result));
EXPECT_EQ(1, result);
EXPECT_EQ(true, safe_strto64(absl::string_view(" -123", 4), &result));
EXPECT_EQ(-12, result);
EXPECT_EQ(false, safe_strto64(absl::string_view(nullptr, 0), &result));
}
TEST(safe_strtou64, UInt64s) {
uint64 result;
EXPECT_TRUE(safe_strtou64("0", &result));
EXPECT_EQ(0, result);
EXPECT_TRUE(safe_strtou64("1", &result));
EXPECT_EQ(1, result);
EXPECT_TRUE(safe_strtou64("123", &result));
EXPECT_EQ(123, result);
EXPECT_TRUE(safe_strtou64(" 345 ", &result));
EXPECT_EQ(345, result);
EXPECT_TRUE(safe_strtou64("18446744073709551615", &result));
EXPECT_EQ(18446744073709551615UL, result);
EXPECT_FALSE(safe_strtou64(" 132.2 ", &result));
EXPECT_FALSE(safe_strtou64(" 132.2 ", &result));
EXPECT_FALSE(safe_strtou64(" -", &result));
EXPECT_FALSE(safe_strtou64("", &result));
EXPECT_FALSE(safe_strtou64(" ", &result));
EXPECT_FALSE(safe_strtou64("123 a", &result));
EXPECT_FALSE(safe_strtou64("123 456", &result));
EXPECT_FALSE(safe_strtou64("18446744073709551616", &result));
EXPECT_FALSE(safe_strtou64("-1", &result));
EXPECT_TRUE(safe_strtou64(absl::string_view("123", 1), &result));
EXPECT_EQ(1, result);
EXPECT_TRUE(safe_strtou64(absl::string_view(" 123", 3), &result));
EXPECT_EQ(12, result);
EXPECT_FALSE(safe_strtou64(absl::string_view(nullptr, 0), &result));
}
TEST(safe_strtof, Float) {
float result = 0;
EXPECT_TRUE(safe_strtof("0.123456", &result));
EXPECT_EQ(0.123456f, result);
EXPECT_FALSE(safe_strtof("0.12345abc", &result));
EXPECT_TRUE(safe_strtof("1e39", &result));
EXPECT_EQ(std::numeric_limits<float>::infinity(), result);
EXPECT_TRUE(safe_strtof("-1e39", &result));
EXPECT_EQ(-std::numeric_limits<float>::infinity(), result);
EXPECT_TRUE(safe_strtof("1e-50", &result));
EXPECT_EQ(0, result);
EXPECT_TRUE(safe_strtof("0xF", &result));
EXPECT_EQ(0xF, result);
EXPECT_TRUE(safe_strtof("-0x2A", &result));
EXPECT_EQ(-42.0f, result);
EXPECT_TRUE(safe_strtof(" -0x2", &result));
EXPECT_EQ(-2.0f, result);
EXPECT_TRUE(safe_strtof("8 \t", &result));
EXPECT_EQ(8.0f, result);
EXPECT_TRUE(safe_strtof("\t20.0\t ", &result));
EXPECT_EQ(20.0f, result);
EXPECT_FALSE(safe_strtof("-infinity is awesome", &result));
char test_str[2 * kFastToBufferSize];
for (int i = 0; i < 2 * kFastToBufferSize; ++i) test_str[i] = 'a';
test_str[kFastToBufferSize + 1] = '\0';
EXPECT_FALSE(safe_strtof(test_str, &result));
EXPECT_TRUE(safe_strtof("-inf", &result));
EXPECT_EQ(-std::numeric_limits<float>::infinity(), result);
EXPECT_TRUE(safe_strtof("+inf", &result));
EXPECT_EQ(std::numeric_limits<float>::infinity(), result);
EXPECT_TRUE(safe_strtof("InF", &result));
EXPECT_EQ(std::numeric_limits<float>::infinity(), result);
EXPECT_TRUE(safe_strtof("-INF", &result));
EXPECT_EQ(-std::numeric_limits<float>::infinity(), result);
EXPECT_TRUE(safe_strtof("nan", &result));
EXPECT_TRUE(std::isnan(result));
EXPECT_TRUE(safe_strtof("-nan", &result));
EXPECT_TRUE(std::isnan(result));
EXPECT_TRUE(safe_strtof("-NaN", &result));
EXPECT_TRUE(std::isnan(result));
EXPECT_TRUE(safe_strtof("+NAN", &result));
EXPECT_TRUE(std::isnan(result));
}
TEST(safe_strtod, Double) {
double result = 0;
EXPECT_TRUE(safe_strtod("0.1234567890123", &result));
EXPECT_EQ(0.1234567890123, result);
EXPECT_FALSE(safe_strtod("0.1234567890123abc", &result));
char test_str[2 * kFastToBufferSize];
for (int i = 0; i < 2 * kFastToBufferSize; ++i) test_str[i] = 'a';
test_str[kFastToBufferSize + 1] = '\0';
EXPECT_FALSE(safe_strtod(test_str, &result));
EXPECT_TRUE(safe_strtod("1e310", &result));
EXPECT_EQ(std::numeric_limits<double>::infinity(), result);
EXPECT_TRUE(safe_strtod("-1e310", &result));
EXPECT_EQ(-std::numeric_limits<double>::infinity(), result);
EXPECT_TRUE(safe_strtod("1e-325", &result));
EXPECT_EQ(0, result);
EXPECT_TRUE(safe_strtod(" -0x1c", &result));
EXPECT_EQ(-28.0, result);
EXPECT_TRUE(safe_strtod("50 \t", &result));
EXPECT_EQ(50.0, result);
EXPECT_TRUE(safe_strtod("\t82.0\t ", &result));
EXPECT_EQ(82.0, result);
EXPECT_FALSE(safe_strtod("infinity", &result));
EXPECT_TRUE(safe_strtod("-inf", &result));
EXPECT_EQ(-std::numeric_limits<double>::infinity(), result);
EXPECT_TRUE(safe_strtod("+inf", &result));
EXPECT_EQ(std::numeric_limits<double>::infinity(), result);
EXPECT_TRUE(safe_strtod("InF", &result));
EXPECT_EQ(std::numeric_limits<double>::infinity(), result);
EXPECT_TRUE(safe_strtod("-INF", &result));
EXPECT_EQ(-std::numeric_limits<double>::infinity(), result);
EXPECT_TRUE(safe_strtod("nan", &result));
EXPECT_TRUE(std::isnan(result));
EXPECT_TRUE(safe_strtod("-nan", &result));
EXPECT_TRUE(std::isnan(result));
EXPECT_TRUE(safe_strtod("-NaN", &result));
EXPECT_TRUE(std::isnan(result));
EXPECT_TRUE(safe_strtod("+NAN", &result));
EXPECT_TRUE(std::isnan(result));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/third_party/tsl/tsl/platform/numbers.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/third_party/tsl/tsl/platform/numbers_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |